git.proxmox.com Git - mirror_ubuntu-hirsute-kernel.git/blobdiff - block/blk-timeout.c
netfilter: remove unneeded switch fall-through
[mirror_ubuntu-hirsute-kernel.git] / block / blk-timeout.c
index f2cfd56e1606ed9d8e1da979a1e1e6cdcb506a38..124c26128bf6d8f626164d1553c4338d18efa245 100644 (file)
@@ -68,80 +68,6 @@ ssize_t part_timeout_store(struct device *dev, struct device_attribute *attr,
 
 #endif /* CONFIG_FAIL_IO_TIMEOUT */
 
-/*
- * blk_delete_timer - Delete/cancel timer for a given function.
- * @req:       request that we are canceling timer for
- *
- */
-void blk_delete_timer(struct request *req)
-{
-       list_del_init(&req->timeout_list);
-}
-
-static void blk_rq_timed_out(struct request *req)
-{
-       struct request_queue *q = req->q;
-       enum blk_eh_timer_return ret = BLK_EH_RESET_TIMER;
-
-       if (q->rq_timed_out_fn)
-               ret = q->rq_timed_out_fn(req);
-       switch (ret) {
-       case BLK_EH_RESET_TIMER:
-               blk_add_timer(req);
-               blk_clear_rq_complete(req);
-               break;
-       case BLK_EH_DONE:
-               /*
-                * LLD handles this for now but in the future
-                * we can send a request msg to abort the command
-                * and we can move more of the generic scsi eh code to
-                * the blk layer.
-                */
-               break;
-       default:
-               printk(KERN_ERR "block: bad eh return: %d\n", ret);
-               break;
-       }
-}
-
-static void blk_rq_check_expired(struct request *rq, unsigned long *next_timeout,
-                         unsigned int *next_set)
-{
-       const unsigned long deadline = blk_rq_deadline(rq);
-
-       if (time_after_eq(jiffies, deadline)) {
-               list_del_init(&rq->timeout_list);
-
-               /*
-                * Check if we raced with end io completion
-                */
-               if (!blk_mark_rq_complete(rq))
-                       blk_rq_timed_out(rq);
-       } else if (!*next_set || time_after(*next_timeout, deadline)) {
-               *next_timeout = deadline;
-               *next_set = 1;
-       }
-}
-
-void blk_timeout_work(struct work_struct *work)
-{
-       struct request_queue *q =
-               container_of(work, struct request_queue, timeout_work);
-       unsigned long flags, next = 0;
-       struct request *rq, *tmp;
-       int next_set = 0;
-
-       spin_lock_irqsave(q->queue_lock, flags);
-
-       list_for_each_entry_safe(rq, tmp, &q->timeout_list, timeout_list)
-               blk_rq_check_expired(rq, &next, &next_set);
-
-       if (next_set)
-               mod_timer(&q->timeout, round_jiffies_up(next));
-
-       spin_unlock_irqrestore(q->queue_lock, flags);
-}
-
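
The block removed above is the legacy (non-mq) timeout path: requests sat on a per-queue timeout_list walked under queue_lock, blk_rq_check_expired() expired anything past its deadline (unless it lost the race with normal completion), and the earliest surviving deadline re-armed the queue timer. For illustration only, the following self-contained userspace C sketch reproduces that scan pattern; fake_request, now_ms() and scan_timeouts() are invented stand-ins, and the plain ">=" comparison ignores the counter wraparound that the kernel's time_after_eq() handles.

/*
 * Userspace sketch of the removed legacy scan. "jiffies" becomes a
 * monotonic millisecond counter; the kernel timer becomes a returned
 * "next wakeup" value. All names here are illustrative, not kernel API.
 */
#define _POSIX_C_SOURCE 199309L
#include <stdbool.h>
#include <stdio.h>
#include <time.h>

struct fake_request {
	unsigned long deadline;     /* absolute expiry, in ms */
	bool complete;              /* set when normal completion won the race */
	struct fake_request *next;
};

static unsigned long now_ms(void)
{
	struct timespec ts;

	clock_gettime(CLOCK_MONOTONIC, &ts);
	return ts.tv_sec * 1000UL + ts.tv_nsec / 1000000UL;
}

/*
 * Walk the pending list the way blk_rq_check_expired() was called for
 * each entry: expired requests go to the timeout handler, and the
 * earliest remaining deadline is reported so the caller can re-arm a
 * single timer. Returns 0 if nothing is pending.
 */
static unsigned long scan_timeouts(struct fake_request *list)
{
	unsigned long next = 0;

	for (struct fake_request *rq = list; rq; rq = rq->next) {
		if (rq->complete)
			continue;	/* raced with normal completion */
		if (now_ms() >= rq->deadline) {
			rq->complete = true;
			printf("request %p timed out\n", (void *)rq);
		} else if (!next || rq->deadline < next) {
			next = rq->deadline;
		}
	}
	return next;
}

int main(void)
{
	struct fake_request b = { .deadline = now_ms() + 5000 };
	struct fake_request a = { .deadline = now_ms() - 1, .next = &b };

	unsigned long next = scan_timeouts(&a);

	printf("re-arm timer for t=%lu ms\n", next);
	return 0;
}

Keeping one timer armed for the earliest deadline, rather than one timer per request, is what made this periodic scan necessary in the first place; blk-mq gets the same effect by scanning the tag map instead of a dedicated list.
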
 /**
 * blk_abort_request -- Request recovery for the specified command
  * @req:       pointer to the request of interest
@@ -149,24 +75,17 @@ void blk_timeout_work(struct work_struct *work)
  * This function requests that the block layer start recovery for the
  * request by deleting the timer and calling the q's timeout function.
  * LLDDs who implement their own error recovery MAY ignore the timeout
- * event if they generated blk_abort_req. Must hold queue lock.
+ * event if they generated blk_abort_request.
  */
 void blk_abort_request(struct request *req)
 {
-       if (req->q->mq_ops) {
-               /*
-                * All we need to ensure is that timeout scan takes place
-                * immediately and that scan sees the new timeout value.
-                * No need for fancy synchronizations.
-                */
-               blk_rq_set_deadline(req, jiffies);
-               kblockd_schedule_work(&req->q->timeout_work);
-       } else {
-               if (blk_mark_rq_complete(req))
-                       return;
-               blk_delete_timer(req);
-               blk_rq_timed_out(req);
-       }
+       /*
+        * All we need to ensure is that timeout scan takes place
+        * immediately and that scan sees the new timeout value.
+        * No need for fancy synchronizations.
+        */
+       WRITE_ONCE(req->deadline, jiffies);
+       kblockd_schedule_work(&req->q->timeout_work);
 }
 EXPORT_SYMBOL_GPL(blk_abort_request);
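
The rewritten blk_abort_request() above needs no locking: it pulls req->deadline back to the current jiffies with a plain store and schedules the timeout worker, whose scan then observes the request as already expired. Below is a minimal userspace sketch of that pattern, modeling WRITE_ONCE() with a relaxed C11 atomic store and kblockd_schedule_work() with a direct call; all names are illustrative stand-ins, not kernel API.

/*
 * Userspace sketch of the lockless abort pattern, assuming C11 atomics.
 * WRITE_ONCE()/READ_ONCE() are modeled with relaxed atomic accesses;
 * kblockd_schedule_work() is modeled by calling the scanner directly.
 */
#include <stdatomic.h>
#include <stdio.h>

struct fake_request {
	_Atomic unsigned long deadline;	/* absolute expiry */
};

static unsigned long fake_now = 1000;	/* stand-in for jiffies */

/* The scan side: a request whose deadline has passed is expired. */
static void scan_one(struct fake_request *rq)
{
	unsigned long dl = atomic_load_explicit(&rq->deadline,
						memory_order_relaxed);

	if (fake_now >= dl)
		printf("request %p expired (deadline %lu)\n", (void *)rq, dl);
}

/*
 * The abort side, mirroring the new blk_abort_request(): pull the
 * deadline back to "now" and make sure a scan runs promptly. No lock
 * is needed; as the comment in the diff says, the scan only has to
 * happen soon and see the new value.
 */
static void abort_request(struct fake_request *rq)
{
	atomic_store_explicit(&rq->deadline, fake_now,
			      memory_order_relaxed);
	scan_one(rq);		/* stand-in for kblockd_schedule_work() */
}

int main(void)
{
	struct fake_request rq;

	atomic_init(&rq.deadline, fake_now + 30000);
	abort_request(&rq);	/* expires immediately despite long timeout */
	return 0;
}
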
 
@@ -194,15 +113,6 @@ void blk_add_timer(struct request *req)
        struct request_queue *q = req->q;
        unsigned long expiry;
 
-       if (!q->mq_ops)
-               lockdep_assert_held(q->queue_lock);
-
-       /* blk-mq has its own handler, so we don't need ->rq_timed_out_fn */
-       if (!q->mq_ops && !q->rq_timed_out_fn)
-               return;
-
-       BUG_ON(!list_empty(&req->timeout_list));
-
        /*
         * Some LLDs, like scsi, peek at the timeout to prevent a
         * command from being retried forever.
@@ -211,21 +121,16 @@ void blk_add_timer(struct request *req)
                req->timeout = q->rq_timeout;
 
        req->rq_flags &= ~RQF_TIMED_OUT;
-       blk_rq_set_deadline(req, jiffies + req->timeout);
 
-       /*
-        * Only the non-mq case needs to add the request to a protected list.
-        * For the mq case we simply scan the tag map.
-        */
-       if (!q->mq_ops)
-               list_add_tail(&req->timeout_list, &req->q->timeout_list);
+       expiry = jiffies + req->timeout;
+       WRITE_ONCE(req->deadline, expiry);
 
        /*
         * If the timer isn't already pending or this timeout is earlier
         * than an existing one, modify the timer. Round up to next nearest
         * second.
         */
-       expiry = blk_rq_timeout(round_jiffies_up(blk_rq_deadline(req)));
+       expiry = blk_rq_timeout(round_jiffies_up(expiry));
 
        if (!timer_pending(&q->timeout) ||
            time_before(expiry, q->timeout.expires)) {
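
The tail of blk_add_timer() stores the exact per-request deadline but rounds the queue-wide timer up to the next whole second, and (in the conditional this hunk is truncated inside) only touches the timer when it is idle or set to fire later than the new expiry. A small userspace sketch of that re-arm policy follows; round_up_sec() and fake_timer are invented stand-ins for round_jiffies_up() and struct timer_list, and the plain "<" comparison ignores the wraparound that the kernel's time_before() handles.

/*
 * Userspace sketch of the re-arm policy at the end of blk_add_timer(),
 * assuming a 1-second rounding granularity so that many requests share
 * one timer firing. Names are illustrative, not kernel API.
 */
#include <stdbool.h>
#include <stdio.h>

#define MS_PER_SEC 1000UL

/* Round an absolute ms value up to the next whole second. */
static unsigned long round_up_sec(unsigned long t_ms)
{
	return ((t_ms + MS_PER_SEC - 1) / MS_PER_SEC) * MS_PER_SEC;
}

struct fake_timer {
	bool pending;
	unsigned long expires;
};

/*
 * Only modify the queue-wide timer when it is idle or currently set to
 * fire *after* the new expiry; an already-earlier timer is left alone,
 * since its own expiry path will rescan and re-arm for later deadlines.
 */
static void arm_timeout(struct fake_timer *t, unsigned long deadline_ms)
{
	unsigned long expiry = round_up_sec(deadline_ms);

	if (!t->pending || expiry < t->expires) {
		t->pending = true;
		t->expires = expiry;
		printf("timer armed for t=%lu ms\n", t->expires);
	}
}

int main(void)
{
	struct fake_timer t = { 0 };

	arm_timeout(&t, 30250);	/* arms at 31000 */
	arm_timeout(&t, 45100);	/* later deadline: timer left alone */
	arm_timeout(&t, 12800);	/* earlier: timer pulled in to 13000 */
	return 0;
}
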