int size;
int i, j, num_ports;
- del_timer_sync(&xhci->cmd_timer);
+ cancel_delayed_work_sync(&xhci->cmd_timer);
/* Free the Event Ring Segment Table and the actual Event Ring */
size = sizeof(struct xhci_erst_entry)*(xhci->erst.num_entries);
INIT_LIST_HEAD(&xhci->cmd_list);
- /* init command timeout timer */
- setup_timer(&xhci->cmd_timer, xhci_handle_command_timeout,
- (unsigned long)xhci);
+ /* init command timeout work */
+ INIT_DELAYED_WORK(&xhci->cmd_timer, xhci_handle_command_timeout);
page_size = readl(&xhci->op_regs->page_size);
xhci_dbg_trace(xhci, trace_xhci_dbg_init,
readl(&xhci->dba->doorbell[0]);
}
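
For orientation, the patch applies the standard timer_list -> delayed_work
conversion throughout. A minimal sketch of that mapping on a hypothetical
"foo" driver (illustrative names only, not xhci code):

#include <linux/kernel.h>
#include <linux/workqueue.h>

struct foo {
	struct delayed_work timeout_work;	/* was: struct timer_list */
};

static void foo_timeout(struct work_struct *work)
{
	/* was: foo_timeout(unsigned long data) plus a cast of data */
	struct foo *foo = container_of(to_delayed_work(work),
				       struct foo, timeout_work);

	/* runs in process context and may sleep, unlike a timer callback */
	pr_warn("foo %p timed out\n", foo);
}

static void foo_setup(struct foo *foo)
{
	INIT_DELAYED_WORK(&foo->timeout_work, foo_timeout);	/* setup_timer() */
	mod_delayed_work(system_wq, &foo->timeout_work, HZ);	/* mod_timer() */
	cancel_delayed_work_sync(&foo->timeout_work);		/* del_timer_sync() */
}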
+static bool xhci_mod_cmd_timer(struct xhci_hcd *xhci, unsigned long delay)
+{
+ return mod_delayed_work(system_wq, &xhci->cmd_timer, delay);
+}
+
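A note on the helper: mod_delayed_work() mirrors mod_timer(). If the work is
already pending its timer is simply reprogrammed, otherwise the work is
queued; the delay is relative to now, which is why the call sites below drop
the "jiffies +" offset. It returns true when a pending work item was
modified, so callers can tell whether they re-armed an existing timeout.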
static int xhci_abort_cmd_ring(struct xhci_hcd *xhci)
{
u64 temp_64;
* but the completion event is never sent. Use the cmd timeout work to
* handle those cases. Use twice the time to cover the bit polling retry.
*/
- mod_timer(&xhci->cmd_timer, jiffies + (2 * XHCI_CMD_DEFAULT_TIMEOUT));
+ xhci_mod_cmd_timer(xhci, 2 * XHCI_CMD_DEFAULT_TIMEOUT);
xhci_write_64(xhci, temp_64 | CMD_RING_ABORT,
&xhci->op_regs->cmd_ring);
xhci_err(xhci, "Stopped the command ring failed, "
"maybe the host is dead\n");
- del_timer(&xhci->cmd_timer);
+ cancel_delayed_work(&xhci->cmd_timer);
xhci->xhc_state |= XHCI_STATE_DYING;
xhci_halt(xhci);
return -ESHUTDOWN;
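The plain cancel_delayed_work() here (and in the completion handler below)
is deliberate: these paths run under xhci->lock, and the _sync variant would
deadlock waiting for xhci_handle_command_timeout(), which takes the same
lock. The sync cancel is reserved for the mem-cleanup path above, which runs
with the host already stopped and without the lock held.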
if ((xhci->cmd_ring->dequeue != xhci->cmd_ring->enqueue) &&
!(xhci->xhc_state & XHCI_STATE_DYING)) {
xhci->current_cmd = cur_cmd;
- mod_timer(&xhci->cmd_timer, jiffies + XHCI_CMD_DEFAULT_TIMEOUT);
+ xhci_mod_cmd_timer(xhci, XHCI_CMD_DEFAULT_TIMEOUT);
xhci_ring_cmd_db(xhci);
}
return;
}
-void xhci_handle_command_timeout(unsigned long data)
+void xhci_handle_command_timeout(struct work_struct *work)
{
struct xhci_hcd *xhci;
int ret;
unsigned long flags;
u64 hw_ring_state;
bool second_timeout = false;
- xhci = (struct xhci_hcd *) data;
+
+ xhci = container_of(to_delayed_work(work), struct xhci_hcd, cmd_timer);
spin_lock_irqsave(&xhci->lock, flags);
* If timeout work is pending, or current_cmd is NULL, it means we
* raced with command completion. Command is handled so just return.
*/
- if (!xhci->current_cmd || timer_pending(&xhci->cmd_timer)) {
+ if (!xhci->current_cmd || delayed_work_pending(&xhci->cmd_timer)) {
spin_unlock_irqrestore(&xhci->lock, flags);
return;
}
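The delayed_work_pending() test is reliable here because the PENDING bit is
cleared before the work function starts executing; if it reads true inside
the handler itself, the completion path must have re-armed the work after
this timeout was dequeued, so the timeout being handled is stale.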
cmd = list_entry(xhci->cmd_list.next, struct xhci_command, cmd_list);
- del_timer(&xhci->cmd_timer);
+ cancel_delayed_work(&xhci->cmd_timer);
trace_xhci_cmd_completion(cmd_trb, (struct xhci_generic_trb *) event);
if (cmd->cmd_list.next != &xhci->cmd_list) {
xhci->current_cmd = list_entry(cmd->cmd_list.next,
struct xhci_command, cmd_list);
- mod_timer(&xhci->cmd_timer, jiffies + XHCI_CMD_DEFAULT_TIMEOUT);
+ xhci_mod_cmd_timer(xhci, XHCI_CMD_DEFAULT_TIMEOUT);
} else if (xhci->current_cmd == cmd) {
xhci->current_cmd = NULL;
}
/* if there are no other commands queued we start the timeout work */
if (xhci->cmd_list.next == &cmd->cmd_list &&
- !timer_pending(&xhci->cmd_timer)) {
+ !delayed_work_pending(&xhci->cmd_timer)) {
xhci->current_cmd = cmd;
- mod_timer(&xhci->cmd_timer, jiffies + XHCI_CMD_DEFAULT_TIMEOUT);
+ xhci_mod_cmd_timer(xhci, XHCI_CMD_DEFAULT_TIMEOUT);
}
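Between the two arming sites, exactly one timeout tracks the hardware: the
completion handler re-arms it for the next command still on cmd_list, and
the queueing path arms it only when the submitted command is the sole entry,
keeping xhci->current_cmd in step with the command the pending work will
time out.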
queue_trb(xhci, xhci->cmd_ring, false, field1, field2, field3,
#define CMD_RING_STATE_STOPPED (1 << 2)
struct list_head cmd_list;
unsigned int cmd_ring_reserved_trbs;
- struct timer_list cmd_timer;
+ struct delayed_work cmd_timer;
struct xhci_command *current_cmd;
struct xhci_ring *event_ring;
struct xhci_erst erst;
unsigned int slot_id, unsigned int ep_index,
struct xhci_dequeue_state *deq_state);
void xhci_stop_endpoint_command_watchdog(unsigned long arg);
-void xhci_handle_command_timeout(unsigned long data);
+void xhci_handle_command_timeout(struct work_struct *work);
void xhci_ring_ep_doorbell(struct xhci_hcd *xhci, unsigned int slot_id,
unsigned int ep_index, unsigned int stream_id);