Merge tag 'v4.20-rc3' into for-4.21/block
author Jens Axboe <axboe@kernel.dk>
Sun, 18 Nov 2018 22:46:03 +0000 (15:46 -0700)
committer Jens Axboe <axboe@kernel.dk>
Sun, 18 Nov 2018 22:46:03 +0000 (15:46 -0700)
Merge in -rc3 to resolve a few conflicts, but also to get a few
important fixes that have gone into mainline since the block
4.21 branch was forked off (most notably the SCSI queue issue,
which is both a conflict AND a needed fix).

Signed-off-by: Jens Axboe <axboe@kernel.dk>
block/blk-core.c
block/blk-merge.c
block/blk.h
drivers/block/floppy.c
drivers/nvme/host/core.c
drivers/nvme/host/multipath.c
drivers/scsi/Kconfig
drivers/scsi/qla2xxx/qla_os.c
drivers/scsi/scsi_lib.c

diff --cc block/blk-core.c
index 0b684a520a11e8ea241a4541833bed08d19996d0,deb56932f8c46e9cb0fe0950000b8da1922addfc..d6e8ab9ca99d1fca0c96f3810f40c3813bc16b44
@@@ -352,11 -798,10 +352,10 @@@ void blk_cleanup_queue(struct request_q
         * dispatch may still be in-progress since we dispatch requests
         * from more than one context.
         *
-        * No need to quiesce queue if it isn't initialized yet since
-        * blk_freeze_queue() should be enough for cases of passthrough
-        * request.
+        * We rely on the driver to deal with the race in case queue
+        * initialization isn't done.
         */
 -      if (q->mq_ops && blk_queue_init_done(q))
 +      if (queue_is_mq(q) && blk_queue_init_done(q))
                blk_mq_quiesce_queue(q);
  
        /* for synchronous bio-based driver finish in-flight integrity i/o */
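The guard above only quiesces a queue that is both blk-mq and fully
initialized. A minimal userspace sketch of that predicate, with simplified
stand-in types (the real struct request_queue, blk_mq_ops and the
QUEUE_FLAG_INIT_DONE bit live in the kernel headers; nothing here is the
kernel definition):

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

/* Illustrative stand-ins, not the kernel structures. */
struct blk_mq_ops { int dummy; };

struct request_queue {
	const struct blk_mq_ops *mq_ops;
	bool init_done;	/* stands in for QUEUE_FLAG_INIT_DONE */
};

/* queue_is_mq(): with the legacy request path gone in the 4.21 tree,
 * a queue is an mq queue exactly when mq_ops is set. */
static bool queue_is_mq(const struct request_queue *q)
{
	return q->mq_ops != NULL;
}

int main(void)
{
	struct request_queue q = { .mq_ops = NULL, .init_done = false };

	/* mirrors the guard in blk_cleanup_queue() above */
	if (queue_is_mq(&q) && q.init_done)
		printf("would quiesce queue\n");
	else
		printf("skip quiesce; driver handles the race\n");
	return 0;
}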
block/blk-merge.c: Simple merge
diff --cc block/blk.h
index 027a0ccc175e261fc995e1f29e51025fbaa6fe35,0089fefdf771d7082ee05ca97504005090a26025..816a9abb87cdde80ac4144b79a455aa8fc73dcf2
@@@ -233,6 -380,31 +233,16 @@@ static inline void req_set_nomerge(stru
                q->last_merge = NULL;
  }
  
 -/*
 - * Steal a bit from this field for legacy IO path atomic IO marking. Note that
 - * setting the deadline clears the bottom bit, potentially clearing the
 - * completed bit. The user has to be OK with this (current ones are fine).
 - */
 -static inline void blk_rq_set_deadline(struct request *rq, unsigned long time)
 -{
 -      rq->__deadline = time & ~0x1UL;
 -}
 -
 -static inline unsigned long blk_rq_deadline(struct request *rq)
 -{
 -      return rq->__deadline & ~0x1UL;
 -}
 -
+ /*
+  * The max size one bio can handle is UINT_MAX because bvec_iter.bi_size
+  * is defined as 'unsigned int', and it has to be aligned to the logical
+  * block size, which is the minimum unit accepted by the hardware.
+  */
+ static inline unsigned int bio_allowed_max_sectors(struct request_queue *q)
+ {
+       return round_down(UINT_MAX, queue_logical_block_size(q)) >> 9;
+ }
  /*
   * Internal io_context interface
   */
drivers/block/floppy.c: Simple merge
drivers/nvme/host/core.c: Simple merge
drivers/nvme/host/multipath.c: Simple merge
drivers/scsi/Kconfig: Simple merge
drivers/scsi/qla2xxx/qla_os.c: Simple merge
diff --cc drivers/scsi/scsi_lib.c
index 5d83a162d03b8ed3dff87d51aaf0c22565f9db6a,fa6e0c3b3aa678cd1e62f91021dc89036211aa8a..0df15cb738d2bff71f7685df53cb36eb31ed38bb
@@@ -601,23 -687,46 +601,30 @@@ static bool scsi_end_request(struct req
                destroy_rcu_head(&cmd->rcu);
        }
  
 -      if (req->mq_ctx) {
 -              /*
 -               * In the MQ case the command gets freed by __blk_mq_end_request,
 -               * so we have to do all cleanup that depends on it earlier.
 -               *
 -               * We also can't kick the queues from irq context, so we
 -               * will have to defer it to a workqueue.
 -               */
 -              scsi_mq_uninit_cmd(cmd);
 -
 -              /*
 -               * queue is still alive, so grab the ref for preventing it
 -               * from being cleaned up during running queue.
 -               */
 -              percpu_ref_get(&q->q_usage_counter);
 -
 -              __blk_mq_end_request(req, error);
 -
 -              if (scsi_target(sdev)->single_lun ||
 -                  !list_empty(&sdev->host->starved_list))
 -                      kblockd_schedule_work(&sdev->requeue_work);
 -              else
 -                      blk_mq_run_hw_queues(q, true);
 -
 -              percpu_ref_put(&q->q_usage_counter);
 -      } else {
 -              unsigned long flags;
 +      /*
 +       * In the MQ case the command gets freed by __blk_mq_end_request,
 +       * so we have to do all cleanup that depends on it earlier.
 +       *
 +       * We also can't kick the queues from irq context, so we
 +       * will have to defer it to a workqueue.
 +       */
 +      scsi_mq_uninit_cmd(cmd);
  
 -              if (bidi_bytes)
 -                      scsi_release_bidi_buffers(cmd);
 -              scsi_release_buffers(cmd);
 -              scsi_put_command(cmd);
++      /*
++       * queue is still alive, so grab a reference to prevent it
++       * from being cleaned up while the queue is running.
++       */
++      percpu_ref_get(&q->q_usage_counter);
 -              spin_lock_irqsave(q->queue_lock, flags);
 -              blk_finish_request(req, error);
 -              spin_unlock_irqrestore(q->queue_lock, flags);
 +      __blk_mq_end_request(req, error);
  
 -              scsi_run_queue(q);
 -      }
 +      if (scsi_target(sdev)->single_lun ||
 +          !list_empty(&sdev->host->starved_list))
 +              kblockd_schedule_work(&sdev->requeue_work);
 +      else
 +              blk_mq_run_hw_queues(q, true);
  
++      percpu_ref_put(&q->q_usage_counter);
        put_device(&sdev->sdev_gendev);
        return false;
  }
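The percpu_ref get/put pair above pins the queue across completion so that
a concurrent blk_cleanup_queue() cannot tear it down while the hardware
queues are still being kicked. A minimal userspace analogue of that
pattern, using a plain atomic counter in place of percpu_ref; all names
here are illustrative stand-ins, not kernel APIs:

#include <stdatomic.h>
#include <stdio.h>

/* Illustrative stand-in for a queue with a usage counter. */
struct queue {
	atomic_int usage;	/* stands in for q->q_usage_counter */
};

static void queue_get(struct queue *q) { atomic_fetch_add(&q->usage, 1); }
static void queue_put(struct queue *q) { atomic_fetch_sub(&q->usage, 1); }

static void run_hw_queues(struct queue *q)
{
	printf("running hw queues (usage=%d)\n", atomic_load(&q->usage));
}

static void end_request(struct queue *q)
{
	queue_get(q);		/* pin the queue before completing */
	/* ... request completion would happen here ... */
	run_hw_queues(q);	/* safe: teardown waits for usage to drop */
	queue_put(q);
}

int main(void)
{
	struct queue q = { .usage = 1 };
	end_request(&q);
	return 0;
}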