Merge tag 'scsi-misc' of git://git.kernel.org/pub/scm/linux/kernel/git/jejb/scsi
author     Linus Torvalds <torvalds@linux-foundation.org>
           Sat, 14 Nov 2015 04:35:54 +0000 (20:35 -0800)
committer  Linus Torvalds <torvalds@linux-foundation.org>
           Sat, 14 Nov 2015 04:35:54 +0000 (20:35 -0800)
Pull final round of SCSI updates from James Bottomley:
 "Sorry for the delay in this patch which was mostly caused by getting
  the merger of the mpt2/mpt3sas driver, which was seen as an essential
  item of maintenance work to do before the drivers diverge too much.
  Unfortunately, this caused a compile failure (detected by linux-next),
  which then had to be fixed up and incubated.

  In addition to the mpt2/3sas rework, there are updates from pm80xx,
  lpfc, bnx2fc, hpsa, ipr, aacraid, megaraid_sas, storvsc and ufs plus
  an assortment of changes including some year 2038 issues, a fix for a
  remove before detach issue in some drivers and a couple of other minor
  issues"
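
On the "year 2038 issues" mentioned above (the pmcraid and mvumi commits in the shortlog below), the conversion pattern is to drop 32-bit struct timeval timestamps in favour of ktime_get_real_seconds(), which returns a 64-bit time64_t. A minimal illustrative sketch, not taken from either driver; the struct and function names here are hypothetical:

    #include <linux/types.h>
    #include <linux/timekeeping.h>

    struct example_hw_config {
            u64 seconds_since_epoch;        /* formerly filled from struct timeval */
    };

    static void example_set_timestamp(struct example_hw_config *cfg)
    {
            /*
             * Old, y2038-unsafe pattern:
             *      struct timeval tv;
             *      do_gettimeofday(&tv);
             *      cfg->seconds_since_epoch = tv.tv_sec;
             */
            cfg->seconds_since_epoch = ktime_get_real_seconds();
    }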

* tag 'scsi-misc' of git://git.kernel.org/pub/scm/linux/kernel/git/jejb/scsi: (141 commits)
  mpt3sas: fix inline markers on non inline function declarations
  sd: Clear PS bit before Mode Select.
  ibmvscsi: set max_lun to 32
  ibmvscsi: display default value for max_id, max_lun and max_channel.
  mptfusion: don't allow negative bytes in kbuf_alloc_2_sgl()
  scsi: pmcraid: replace struct timeval with ktime_get_real_seconds()
  mvumi: 64bit value for seconds_since1970
  be2iscsi: Fix bogus WARN_ON length check
  scsi_scan: don't dump trace when scsi_prep_async_scan() is called twice
  mpt3sas: Bump mpt3sas driver version to 09.102.00.00
  mpt3sas: Single driver module which supports both SAS 2.0 & SAS 3.0 HBAs
  mpt2sas, mpt3sas: Update the driver versions
  mpt3sas: setpci reset kernel oops fix
  mpt3sas: Added OEM Gen2 PnP ID branding names
  mpt3sas: Refcount fw_events and fix unsafe list usage
  mpt3sas: Refcount sas_device objects and fix unsafe list usage
  mpt3sas: sysfs attribute to report Backup Rail Monitor Status
  mpt3sas: Ported WarpDrive product SSS6200 support
  mpt3sas: fix for driver fails EEH, recovery from injected pci bus error
  mpt3sas: Manage MSI-X vectors according to HBA device type
  ...

drivers/ata/libata-scsi.c
drivers/infiniband/ulp/srp/ib_srp.c
drivers/scsi/Kconfig
drivers/scsi/scsi_sysfs.c
drivers/scsi/sd.c
drivers/target/loopback/tcm_loop.c
drivers/usb/storage/uas.c

diff --combined drivers/ata/libata-scsi.c
index 8b3a7861fa44c205490788d4538e5da300270ffb,ade388648fe7015ead6fc4c96b99c6fb74c0bff6..7e959f90c0203f9b94124ab73867e4a72bd11188
@@@ -1757,15 -1757,6 +1757,15 @@@ nothing_to_do
        return 1;
  }
  
 +static void ata_qc_done(struct ata_queued_cmd *qc)
 +{
 +      struct scsi_cmnd *cmd = qc->scsicmd;
 +      void (*done)(struct scsi_cmnd *) = qc->scsidone;
 +
 +      ata_qc_free(qc);
 +      done(cmd);
 +}
 +
  static void ata_scsi_qc_complete(struct ata_queued_cmd *qc)
  {
        struct ata_port *ap = qc->ap;
         * asc,ascq = ATA PASS-THROUGH INFORMATION AVAILABLE
         */
        if (((cdb[0] == ATA_16) || (cdb[0] == ATA_12)) &&
 -          ((cdb[2] & 0x20) || need_sense)) {
 +          ((cdb[2] & 0x20) || need_sense))
                ata_gen_passthru_sense(qc);
 -      } else {
 -              if (!need_sense) {
 -                      cmd->result = SAM_STAT_GOOD;
 -              } else {
 -                      /* TODO: decide which descriptor format to use
 -                       * for 48b LBA devices and call that here
 -                       * instead of the fixed desc, which is only
 -                       * good for smaller LBA (and maybe CHS?)
 -                       * devices.
 -                       */
 -                      ata_gen_ata_sense(qc);
 -              }
 -      }
 +      else if (need_sense)
 +              ata_gen_ata_sense(qc);
 +      else
 +              cmd->result = SAM_STAT_GOOD;
  
        if (need_sense && !ap->ops->error_handler)
                ata_dump_status(ap->print_id, &qc->result_tf);
  
 -      qc->scsidone(cmd);
 -
 -      ata_qc_free(qc);
 +      ata_qc_done(qc);
  }
  
  /**
@@@ -2013,11 -2015,8 +2013,11 @@@ static unsigned int ata_scsiop_inq_std(
  
        VPRINTK("ENTER\n");
  
 -      /* set scsi removable (RMB) bit per ata bit */
 -      if (ata_id_removable(args->id))
 +      /* set scsi removable (RMB) bit per ata bit, or if the
 +       * AHCI port says it's external (Hotplug-capable, eSATA).
 +       */
 +      if (ata_id_removable(args->id) ||
 +          (args->dev->link->ap->pflags & ATA_PFLAG_EXTERNAL))
                hdr[1] |= (1 << 7);
  
        if (args->dev->class == ATA_DEV_ZAC) {
@@@ -2595,7 -2594,8 +2595,7 @@@ static void atapi_sense_complete(struc
                ata_gen_passthru_sense(qc);
        }
  
 -      qc->scsidone(qc->scsicmd);
 -      ata_qc_free(qc);
 +      ata_qc_done(qc);
  }
  
  /* is it pointless to prefer PIO for "safety reasons"? */
@@@ -2690,7 -2690,8 +2690,7 @@@ static void atapi_qc_complete(struct at
                        qc->dev->sdev->locked = 0;
  
                qc->scsicmd->result = SAM_STAT_CHECK_CONDITION;
 -              qc->scsidone(cmd);
 -              ata_qc_free(qc);
 +              ata_qc_done(qc);
                return;
        }
  
                cmd->result = SAM_STAT_GOOD;
        }
  
 -      qc->scsidone(cmd);
 -      ata_qc_free(qc);
 +      ata_qc_done(qc);
  }
  /**
   *    atapi_xlat - Initialize PACKET taskfile
@@@ -2912,14 -2914,12 +2912,14 @@@ ata_scsi_map_proto(u8 byte1
        case 5:         /* PIO Data-out */
                return ATA_PROT_PIO;
  
 +      case 12:        /* FPDMA */
 +              return ATA_PROT_NCQ;
 +
        case 0:         /* Hard Reset */
        case 1:         /* SRST */
        case 8:         /* Device Diagnostic */
        case 9:         /* Device Reset */
        case 7:         /* DMA Queued */
 -      case 12:        /* FPDMA */
        case 15:        /* Return Response Info */
        default:        /* Reserved */
                break;
@@@ -2947,9 -2947,6 +2947,9 @@@ static unsigned int ata_scsi_pass_thru(
        if ((tf->protocol = ata_scsi_map_proto(cdb[1])) == ATA_PROT_UNKNOWN)
                goto invalid_fld;
  
 +      /* enable LBA */
 +      tf->flags |= ATA_TFLAG_LBA;
 +
        /*
         * 12 and 16 byte CDBs use different offsets to
         * provide the various register values.
                tf->command = cdb[9];
        }
  
 +      /* For NCQ commands with FPDMA protocol, copy the tag value */
 +      if (tf->protocol == ATA_PROT_NCQ)
 +              tf->nsect = qc->tag << 3;
 +
        /* enforce correct master/slave bit */
        tf->device = dev->devno ?
                tf->device | ATA_DEV1 : tf->device & ~ATA_DEV1;
@@@ -3696,9 -3689,6 +3696,6 @@@ int ata_scsi_add_hosts(struct ata_host 
                 */
                shost->max_host_blocked = 1;
  
-               if (scsi_init_shared_tag_map(shost, host->n_tags))
-                       goto err_add;
                rc = scsi_add_host_with_dma(ap->scsi_host,
                                                &ap->tdev, ap->host->dev);
                if (rc)
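
A note on the ATA pass-through hunks above: protocol value 12 in the PASS-THROUGH CDB now maps to ATA_PROT_NCQ, and for NCQ commands the queued command tag is placed in bits 7:3 of the Sector Count register, which is what the new "tf->nsect = qc->tag << 3" line encodes. A tiny sketch of that encoding (the helper name is mine, not part of the patch):

    /* NCQ tags are 5 bits wide (0-31); READ/WRITE FPDMA QUEUED carry the
     * tag in bits 7:3 of the Sector Count register. */
    static inline u8 example_ncq_tag_to_nsect(unsigned int tag)
    {
            return (tag & 0x1f) << 3;
    }
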
diff --combined drivers/infiniband/ulp/srp/ib_srp.c
index 32f79624dd28565d3846384f24049e435aef83e7,96014dc4b6dd2b7d3ddd81e4fce18cf250dea699..9909022dc6c3eebd4c52ef3e7dca5221dc8b6038
@@@ -340,6 -340,8 +340,6 @@@ static void srp_destroy_fr_pool(struct 
                return;
  
        for (i = 0, d = &pool->desc[0]; i < pool->size; i++, d++) {
 -              if (d->frpl)
 -                      ib_free_fast_reg_page_list(d->frpl);
                if (d->mr)
                        ib_dereg_mr(d->mr);
        }
@@@ -360,6 -362,7 +360,6 @@@ static struct srp_fr_pool *srp_create_f
        struct srp_fr_pool *pool;
        struct srp_fr_desc *d;
        struct ib_mr *mr;
 -      struct ib_fast_reg_page_list *frpl;
        int i, ret = -EINVAL;
  
        if (pool_size <= 0)
                        goto destroy_pool;
                }
                d->mr = mr;
 -              frpl = ib_alloc_fast_reg_page_list(device, max_page_list_len);
 -              if (IS_ERR(frpl)) {
 -                      ret = PTR_ERR(frpl);
 -                      goto destroy_pool;
 -              }
 -              d->frpl = frpl;
                list_add_tail(&d->entry, &pool->free_list);
        }
  
@@@ -840,12 -849,11 +840,12 @@@ static void srp_free_req_data(struct sr
  
        for (i = 0; i < target->req_ring_size; ++i) {
                req = &ch->req_ring[i];
 -              if (dev->use_fast_reg)
 +              if (dev->use_fast_reg) {
                        kfree(req->fr_list);
 -              else
 +              } else {
                        kfree(req->fmr_list);
 -              kfree(req->map_page);
 +                      kfree(req->map_page);
 +              }
                if (req->indirect_dma_addr) {
                        ib_dma_unmap_single(ibdev, req->indirect_dma_addr,
                                            target->indirect_size,
@@@ -879,15 -887,14 +879,15 @@@ static int srp_alloc_req_data(struct sr
                                  GFP_KERNEL);
                if (!mr_list)
                        goto out;
 -              if (srp_dev->use_fast_reg)
 +              if (srp_dev->use_fast_reg) {
                        req->fr_list = mr_list;
 -              else
 +              } else {
                        req->fmr_list = mr_list;
 -              req->map_page = kmalloc(srp_dev->max_pages_per_mr *
 -                                      sizeof(void *), GFP_KERNEL);
 -              if (!req->map_page)
 -                      goto out;
 +                      req->map_page = kmalloc(srp_dev->max_pages_per_mr *
 +                                              sizeof(void *), GFP_KERNEL);
 +                      if (!req->map_page)
 +                              goto out;
 +              }
                req->indirect_desc = kmalloc(target->indirect_size, GFP_KERNEL);
                if (!req->indirect_desc)
                        goto out;
@@@ -1279,17 -1286,6 +1279,17 @@@ static int srp_map_finish_fmr(struct sr
        if (state->fmr.next >= state->fmr.end)
                return -ENOMEM;
  
 +      WARN_ON_ONCE(!dev->use_fmr);
 +
 +      if (state->npages == 0)
 +              return 0;
 +
 +      if (state->npages == 1 && target->global_mr) {
 +              srp_map_desc(state, state->base_dma_addr, state->dma_len,
 +                           target->global_mr->rkey);
 +              goto reset_state;
 +      }
 +
        fmr = ib_fmr_pool_map_phys(ch->fmr_pool, state->pages,
                                   state->npages, io_addr);
        if (IS_ERR(fmr))
        srp_map_desc(state, state->base_dma_addr & ~dev->mr_page_mask,
                     state->dma_len, fmr->fmr->rkey);
  
 +reset_state:
 +      state->npages = 0;
 +      state->dma_len = 0;
 +
        return 0;
  }
  
@@@ -1314,26 -1306,13 +1314,26 @@@ static int srp_map_finish_fr(struct srp
        struct srp_target_port *target = ch->target;
        struct srp_device *dev = target->srp_host->srp_dev;
        struct ib_send_wr *bad_wr;
 -      struct ib_send_wr wr;
 +      struct ib_reg_wr wr;
        struct srp_fr_desc *desc;
        u32 rkey;
 +      int n, err;
  
        if (state->fr.next >= state->fr.end)
                return -ENOMEM;
  
 +      WARN_ON_ONCE(!dev->use_fast_reg);
 +
 +      if (state->sg_nents == 0)
 +              return 0;
 +
 +      if (state->sg_nents == 1 && target->global_mr) {
 +              srp_map_desc(state, sg_dma_address(state->sg),
 +                           sg_dma_len(state->sg),
 +                           target->global_mr->rkey);
 +              return 1;
 +      }
 +
        desc = srp_fr_pool_get(ch->fr_pool);
        if (!desc)
                return -ENOMEM;
        rkey = ib_inc_rkey(desc->mr->rkey);
        ib_update_fast_reg_key(desc->mr, rkey);
  
 -      memcpy(desc->frpl->page_list, state->pages,
 -             sizeof(state->pages[0]) * state->npages);
 -
 -      memset(&wr, 0, sizeof(wr));
 -      wr.opcode = IB_WR_FAST_REG_MR;
 -      wr.wr_id = FAST_REG_WR_ID_MASK;
 -      wr.wr.fast_reg.iova_start = state->base_dma_addr;
 -      wr.wr.fast_reg.page_list = desc->frpl;
 -      wr.wr.fast_reg.page_list_len = state->npages;
 -      wr.wr.fast_reg.page_shift = ilog2(dev->mr_page_size);
 -      wr.wr.fast_reg.length = state->dma_len;
 -      wr.wr.fast_reg.access_flags = (IB_ACCESS_LOCAL_WRITE |
 -                                     IB_ACCESS_REMOTE_READ |
 -                                     IB_ACCESS_REMOTE_WRITE);
 -      wr.wr.fast_reg.rkey = desc->mr->lkey;
 +      n = ib_map_mr_sg(desc->mr, state->sg, state->sg_nents,
 +                       dev->mr_page_size);
 +      if (unlikely(n < 0))
 +              return n;
 +
 +      wr.wr.next = NULL;
 +      wr.wr.opcode = IB_WR_REG_MR;
 +      wr.wr.wr_id = FAST_REG_WR_ID_MASK;
 +      wr.wr.num_sge = 0;
 +      wr.wr.send_flags = 0;
 +      wr.mr = desc->mr;
 +      wr.key = desc->mr->rkey;
 +      wr.access = (IB_ACCESS_LOCAL_WRITE |
 +                   IB_ACCESS_REMOTE_READ |
 +                   IB_ACCESS_REMOTE_WRITE);
  
        *state->fr.next++ = desc;
        state->nmdesc++;
  
 -      srp_map_desc(state, state->base_dma_addr, state->dma_len,
 -                   desc->mr->rkey);
 +      srp_map_desc(state, desc->mr->iova,
 +                   desc->mr->length, desc->mr->rkey);
  
 -      return ib_post_send(ch->qp, &wr, &bad_wr);
 -}
 +      err = ib_post_send(ch->qp, &wr.wr, &bad_wr);
 +      if (unlikely(err))
 +              return err;
  
 -static int srp_finish_mapping(struct srp_map_state *state,
 -                            struct srp_rdma_ch *ch)
 -{
 -      struct srp_target_port *target = ch->target;
 -      struct srp_device *dev = target->srp_host->srp_dev;
 -      int ret = 0;
 -
 -      WARN_ON_ONCE(!dev->use_fast_reg && !dev->use_fmr);
 -
 -      if (state->npages == 0)
 -              return 0;
 -
 -      if (state->npages == 1 && target->global_mr)
 -              srp_map_desc(state, state->base_dma_addr, state->dma_len,
 -                           target->global_mr->rkey);
 -      else
 -              ret = dev->use_fast_reg ? srp_map_finish_fr(state, ch) :
 -                      srp_map_finish_fmr(state, ch);
 -
 -      if (ret == 0) {
 -              state->npages = 0;
 -              state->dma_len = 0;
 -      }
 -
 -      return ret;
 +      return n;
  }
  
  static int srp_map_sg_entry(struct srp_map_state *state,
        while (dma_len) {
                unsigned offset = dma_addr & ~dev->mr_page_mask;
                if (state->npages == dev->max_pages_per_mr || offset != 0) {
 -                      ret = srp_finish_mapping(state, ch);
 +                      ret = srp_map_finish_fmr(state, ch);
                        if (ret)
                                return ret;
                }
         */
        ret = 0;
        if (len != dev->mr_page_size)
 -              ret = srp_finish_mapping(state, ch);
 +              ret = srp_map_finish_fmr(state, ch);
        return ret;
  }
  
 -static int srp_map_sg(struct srp_map_state *state, struct srp_rdma_ch *ch,
 -                    struct srp_request *req, struct scatterlist *scat,
 -                    int count)
 +static int srp_map_sg_fmr(struct srp_map_state *state, struct srp_rdma_ch *ch,
 +                        struct srp_request *req, struct scatterlist *scat,
 +                        int count)
  {
 -      struct srp_target_port *target = ch->target;
 -      struct srp_device *dev = target->srp_host->srp_dev;
        struct scatterlist *sg;
        int i, ret;
  
 -      state->desc     = req->indirect_desc;
 -      state->pages    = req->map_page;
 -      if (dev->use_fast_reg) {
 -              state->fr.next = req->fr_list;
 -              state->fr.end = req->fr_list + target->cmd_sg_cnt;
 -      } else if (dev->use_fmr) {
 -              state->fmr.next = req->fmr_list;
 -              state->fmr.end = req->fmr_list + target->cmd_sg_cnt;
 -      }
 +      state->desc = req->indirect_desc;
 +      state->pages = req->map_page;
 +      state->fmr.next = req->fmr_list;
 +      state->fmr.end = req->fmr_list + ch->target->cmd_sg_cnt;
  
 -      if (dev->use_fast_reg || dev->use_fmr) {
 -              for_each_sg(scat, sg, count, i) {
 -                      ret = srp_map_sg_entry(state, ch, sg, i);
 -                      if (ret)
 -                              goto out;
 -              }
 -              ret = srp_finish_mapping(state, ch);
 +      for_each_sg(scat, sg, count, i) {
 +              ret = srp_map_sg_entry(state, ch, sg, i);
                if (ret)
 -                      goto out;
 -      } else {
 -              for_each_sg(scat, sg, count, i) {
 -                      srp_map_desc(state, ib_sg_dma_address(dev->dev, sg),
 -                                   ib_sg_dma_len(dev->dev, sg),
 -                                   target->global_mr->rkey);
 -              }
 +                      return ret;
        }
  
 +      ret = srp_map_finish_fmr(state, ch);
 +      if (ret)
 +              return ret;
 +
        req->nmdesc = state->nmdesc;
 -      ret = 0;
  
 -out:
 -      return ret;
 +      return 0;
 +}
 +
 +static int srp_map_sg_fr(struct srp_map_state *state, struct srp_rdma_ch *ch,
 +                       struct srp_request *req, struct scatterlist *scat,
 +                       int count)
 +{
 +      state->desc = req->indirect_desc;
 +      state->fr.next = req->fr_list;
 +      state->fr.end = req->fr_list + ch->target->cmd_sg_cnt;
 +      state->sg = scat;
 +      state->sg_nents = scsi_sg_count(req->scmnd);
 +
 +      while (state->sg_nents) {
 +              int i, n;
 +
 +              n = srp_map_finish_fr(state, ch);
 +              if (unlikely(n < 0))
 +                      return n;
 +
 +              state->sg_nents -= n;
 +              for (i = 0; i < n; i++)
 +                      state->sg = sg_next(state->sg);
 +      }
 +
 +      req->nmdesc = state->nmdesc;
 +
 +      return 0;
 +}
 +
 +static int srp_map_sg_dma(struct srp_map_state *state, struct srp_rdma_ch *ch,
 +                        struct srp_request *req, struct scatterlist *scat,
 +                        int count)
 +{
 +      struct srp_target_port *target = ch->target;
 +      struct srp_device *dev = target->srp_host->srp_dev;
 +      struct scatterlist *sg;
 +      int i;
 +
 +      state->desc = req->indirect_desc;
 +      for_each_sg(scat, sg, count, i) {
 +              srp_map_desc(state, ib_sg_dma_address(dev->dev, sg),
 +                           ib_sg_dma_len(dev->dev, sg),
 +                           target->global_mr->rkey);
 +      }
 +
 +      req->nmdesc = state->nmdesc;
 +
 +      return 0;
  }
  
  /*
@@@ -1504,7 -1474,6 +1504,7 @@@ static int srp_map_idb(struct srp_rdma_
        struct srp_map_state state;
        struct srp_direct_buf idb_desc;
        u64 idb_pages[1];
 +      struct scatterlist idb_sg[1];
        int ret;
  
        memset(&state, 0, sizeof(state));
        state.gen.next = next_mr;
        state.gen.end = end_mr;
        state.desc = &idb_desc;
 -      state.pages = idb_pages;
 -      state.pages[0] = (req->indirect_dma_addr &
 -                        dev->mr_page_mask);
 -      state.npages = 1;
        state.base_dma_addr = req->indirect_dma_addr;
        state.dma_len = idb_len;
 -      ret = srp_finish_mapping(&state, ch);
 -      if (ret < 0)
 -              goto out;
 +
 +      if (dev->use_fast_reg) {
 +              state.sg = idb_sg;
 +              state.sg_nents = 1;
 +              sg_set_buf(idb_sg, req->indirect_desc, idb_len);
 +              idb_sg->dma_address = req->indirect_dma_addr; /* hack! */
 +              ret = srp_map_finish_fr(&state, ch);
 +              if (ret < 0)
 +                      return ret;
 +      } else if (dev->use_fmr) {
 +              state.pages = idb_pages;
 +              state.pages[0] = (req->indirect_dma_addr &
 +                                dev->mr_page_mask);
 +              state.npages = 1;
 +              ret = srp_map_finish_fmr(&state, ch);
 +              if (ret < 0)
 +                      return ret;
 +      } else {
 +              return -EINVAL;
 +      }
  
        *idb_rkey = idb_desc.key;
  
 -out:
 -      return ret;
 +      return 0;
  }
  
  static int srp_map_data(struct scsi_cmnd *scmnd, struct srp_rdma_ch *ch,
                                   target->indirect_size, DMA_TO_DEVICE);
  
        memset(&state, 0, sizeof(state));
 -      srp_map_sg(&state, ch, req, scat, count);
 +      if (dev->use_fast_reg)
 +              srp_map_sg_fr(&state, ch, req, scat, count);
 +      else if (dev->use_fmr)
 +              srp_map_sg_fmr(&state, ch, req, scat, count);
 +      else
 +              srp_map_sg_dma(&state, ch, req, scat, count);
  
        /* We've mapped the request, now pull as much of the indirect
         * descriptor table as we can into the command buffer. If this
@@@ -2798,7 -2750,6 +2798,6 @@@ static struct scsi_host_template srp_te
        .cmd_per_lun                    = SRP_DEFAULT_CMD_SQ_SIZE,
        .use_clustering                 = ENABLE_CLUSTERING,
        .shost_attrs                    = srp_host_attrs,
-       .use_blk_tags                   = 1,
        .track_queue_depth              = 1,
  };
  
@@@ -3229,10 -3180,6 +3228,6 @@@ static ssize_t srp_create_target(struc
        if (ret)
                goto out;
  
-       ret = scsi_init_shared_tag_map(target_host, target_host->can_queue);
-       if (ret)
-               goto out;
        target->req_ring_size = target->queue_size - SRP_TSK_MGMT_SQ_SIZE;
  
        if (!srp_conn_unique(target->srp_host, target)) {
        INIT_WORK(&target->tl_err_work, srp_tl_err_work);
        INIT_WORK(&target->remove_work, srp_remove_work);
        spin_lock_init(&target->lock);
 -      ret = ib_query_gid(ibdev, host->port, 0, &target->sgid);
 +      ret = ib_query_gid(ibdev, host->port, 0, &target->sgid, NULL);
        if (ret)
                goto out;
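
For readers not tracking the RDMA API churn: the ib_srp hunks above replace the old IB_WR_FAST_REG_MR interface (with hand-built page lists) with the new ib_map_mr_sg()/IB_WR_REG_MR pair. A condensed sketch of the new flow, mirroring the calls used in srp_map_finish_fr(); the function name is hypothetical, the qp/mr/scatterlist are assumed to be set up elsewhere, and error handling is trimmed:

    #include <rdma/ib_verbs.h>

    /* Returns the number of mapped SG entries on success or a negative
     * errno, as srp_map_finish_fr() now does. */
    static int example_register_sg(struct ib_qp *qp, struct ib_mr *mr,
                                   struct scatterlist *sg, int sg_nents,
                                   unsigned int page_size)
    {
            struct ib_reg_wr wr = { };
            struct ib_send_wr *bad_wr;
            int n, err;

            n = ib_map_mr_sg(mr, sg, sg_nents, page_size);
            if (n < 0)
                    return n;

            wr.wr.opcode = IB_WR_REG_MR;
            wr.mr = mr;
            wr.key = mr->rkey;
            wr.access = IB_ACCESS_LOCAL_WRITE |
                        IB_ACCESS_REMOTE_READ |
                        IB_ACCESS_REMOTE_WRITE;

            err = ib_post_send(qp, &wr.wr, &bad_wr);
            return err ? err : n;
    }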
  
diff --combined drivers/scsi/Kconfig
index d2f480b04a52ed4f882fe3ca9b0b79113ef804a4,8aed855dd391de862eae8750f132ccbc8c893560..5f692ae4074959e019a74eb0c81405cb2869195c
@@@ -242,6 -242,13 +242,6 @@@ config SCSI_SCAN_ASYN
          system continues booting, and even probe devices on different
          busses in parallel, leading to a significant speed-up.
  
 -        If you have built SCSI as modules, enabling this option can
 -        be a problem as the devices may not have been found by the
 -        time your system expects them to have been.  You can load the
 -        scsi_wait_scan module to ensure that all scans have completed.
 -        If you build your SCSI drivers into the kernel, then everything
 -        will work fine if you say Y here.
 -
          You can override this choice by specifying "scsi_mod.scan=sync"
          or async on the kernel's command line.
  
@@@ -534,7 -541,6 +534,6 @@@ config SCSI_ARCMS
  
  source "drivers/scsi/esas2r/Kconfig"
  source "drivers/scsi/megaraid/Kconfig.megaraid"
- source "drivers/scsi/mpt2sas/Kconfig"
  source "drivers/scsi/mpt3sas/Kconfig"
  source "drivers/scsi/ufs/Kconfig"
  
diff --combined drivers/scsi/scsi_sysfs.c
index dff8fafb741c1bff625131e73e7fe4425375ced2,658843bb162ad5357192f05229cf2ad73cb846e9..8d2312239ae0cda01238ae549f0ae8007de233ea
@@@ -399,8 -399,6 +399,8 @@@ static void scsi_device_dev_release_use
  
        sdev = container_of(work, struct scsi_device, ew.work);
  
 +      scsi_dh_release_device(sdev);
 +
        parent = sdev->sdev_gendev.parent;
  
        spin_lock_irqsave(sdev->host->host_lock, flags);
@@@ -775,6 -773,29 +775,29 @@@ static struct bin_attribute dev_attr_vp
  sdev_vpd_pg_attr(pg83);
  sdev_vpd_pg_attr(pg80);
  
+ static ssize_t show_inquiry(struct file *filep, struct kobject *kobj,
+                           struct bin_attribute *bin_attr,
+                           char *buf, loff_t off, size_t count)
+ {
+       struct device *dev = container_of(kobj, struct device, kobj);
+       struct scsi_device *sdev = to_scsi_device(dev);
+       if (!sdev->inquiry)
+               return -EINVAL;
+       return memory_read_from_buffer(buf, count, &off, sdev->inquiry,
+                                      sdev->inquiry_len);
+ }
+ static struct bin_attribute dev_attr_inquiry = {
+       .attr = {
+               .name = "inquiry",
+               .mode = S_IRUGO,
+       },
+       .size = 0,
+       .read = show_inquiry,
+ };
  static ssize_t
  show_iostat_counterbits(struct device *dev, struct device_attribute *attr,
                        char *buf)
@@@ -900,7 -921,7 +923,7 @@@ sdev_store_queue_ramp_up_period(struct 
                return -EINVAL;
  
        sdev->queue_ramp_up_period = msecs_to_jiffies(period);
-       return period;
+       return count;
  }
  
  static DEVICE_ATTR(queue_ramp_up_period, S_IRUGO | S_IWUSR,
@@@ -959,6 -980,7 +982,7 @@@ static struct attribute *scsi_sdev_attr
  static struct bin_attribute *scsi_sdev_bin_attrs[] = {
        &dev_attr_vpd_pg83,
        &dev_attr_vpd_pg80,
+       &dev_attr_inquiry,
        NULL
  };
  static struct attribute_group scsi_sdev_attr_group = {
@@@ -1086,9 -1108,7 +1110,7 @@@ void __scsi_remove_device(struct scsi_d
                device_unregister(&sdev->sdev_dev);
                transport_remove_device(dev);
                scsi_dh_remove_device(sdev);
-               device_del(dev);
-       } else
-               put_device(&sdev->sdev_dev);
+       }
  
        /*
         * Stop accepting new requests and wait until all queuecommand() and
        blk_cleanup_queue(sdev->request_queue);
        cancel_work_sync(&sdev->requeue_work);
  
+       /*
+        * Remove the device after blk_cleanup_queue() has been called such
+        * that a possible bdi_register() call with the same name occurs after
+        * blk_cleanup_queue() has called bdi_destroy().
+        */
+       if (sdev->is_visible)
+               device_del(dev);
+       else
+               put_device(&sdev->sdev_dev);
        if (sdev->host->hostt->slave_destroy)
                sdev->host->hostt->slave_destroy(sdev);
        transport_destroy_device(dev);
@@@ -1160,31 -1190,23 +1192,23 @@@ static void __scsi_remove_target(struc
  void scsi_remove_target(struct device *dev)
  {
        struct Scsi_Host *shost = dev_to_shost(dev->parent);
-       struct scsi_target *starget, *last = NULL;
+       struct scsi_target *starget;
        unsigned long flags;
  
-       /* remove targets being careful to lookup next entry before
-        * deleting the last
-        */
+ restart:
        spin_lock_irqsave(shost->host_lock, flags);
        list_for_each_entry(starget, &shost->__targets, siblings) {
                if (starget->state == STARGET_DEL)
                        continue;
                if (starget->dev.parent == dev || &starget->dev == dev) {
-                       /* assuming new targets arrive at the end */
                        kref_get(&starget->reap_ref);
                        spin_unlock_irqrestore(shost->host_lock, flags);
-                       if (last)
-                               scsi_target_reap(last);
-                       last = starget;
                        __scsi_remove_target(starget);
-                       spin_lock_irqsave(shost->host_lock, flags);
+                       scsi_target_reap(starget);
+                       goto restart;
                }
        }
        spin_unlock_irqrestore(shost->host_lock, flags);
-       if (last)
-               scsi_target_reap(last);
  }
  EXPORT_SYMBOL(scsi_remove_target);
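
Two notes on the scsi_sysfs hunks above: the new "inquiry" bin attribute exposes a device's cached INQUIRY data (readable via /sys/class/scsi_device/<h:c:t:l>/device/inquiry), and the queue_ramp_up_period change fixes the sysfs store contract, i.e. a store callback must return the number of bytes consumed, not the parsed value. A minimal sketch of that contract with a hypothetical attribute:

    static ssize_t example_period_store(struct device *dev,
                                        struct device_attribute *attr,
                                        const char *buf, size_t count)
    {
            unsigned int period;

            if (kstrtouint(buf, 10, &period))
                    return -EINVAL;

            /* ... apply "period" to the device here ... */

            return count;   /* not "return period" */
    }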
  
diff --combined drivers/scsi/sd.c
index 5e170a6809fde2fe3c8c6ff51d382c2a570485ea,f7247778c225f30447999ea44fcb24951298780b..54519804c46a57b99ce680ee665b4fe28dd91148
@@@ -51,7 -51,6 +51,7 @@@
  #include <linux/async.h>
  #include <linux/slab.h>
  #include <linux/pm_runtime.h>
 +#include <linux/pr.h>
  #include <asm/uaccess.h>
  #include <asm/unaligned.h>
  
@@@ -205,6 -204,7 +205,7 @@@ cache_type_store(struct device *dev, st
        buffer_data[2] &= ~0x05;
        buffer_data[2] |= wce << 2 | rcd;
        sp = buffer_data[0] & 0x80 ? 1 : 0;
+       buffer_data[0] &= ~0x80;
  
        if (scsi_mode_select(sdp, 1, sp, 8, buffer_data, len, SD_TIMEOUT,
                             SD_MAX_RETRIES, &data, &sshdr)) {
@@@ -1536,100 -1536,6 +1537,100 @@@ static int sd_compat_ioctl(struct block
  }
  #endif
  
 +static char sd_pr_type(enum pr_type type)
 +{
 +      switch (type) {
 +      case PR_WRITE_EXCLUSIVE:
 +              return 0x01;
 +      case PR_EXCLUSIVE_ACCESS:
 +              return 0x03;
 +      case PR_WRITE_EXCLUSIVE_REG_ONLY:
 +              return 0x05;
 +      case PR_EXCLUSIVE_ACCESS_REG_ONLY:
 +              return 0x06;
 +      case PR_WRITE_EXCLUSIVE_ALL_REGS:
 +              return 0x07;
 +      case PR_EXCLUSIVE_ACCESS_ALL_REGS:
 +              return 0x08;
 +      default:
 +              return 0;
 +      }
 +};
 +
 +static int sd_pr_command(struct block_device *bdev, u8 sa,
 +              u64 key, u64 sa_key, u8 type, u8 flags)
 +{
 +      struct scsi_device *sdev = scsi_disk(bdev->bd_disk)->device;
 +      struct scsi_sense_hdr sshdr;
 +      int result;
 +      u8 cmd[16] = { 0, };
 +      u8 data[24] = { 0, };
 +
 +      cmd[0] = PERSISTENT_RESERVE_OUT;
 +      cmd[1] = sa;
 +      cmd[2] = type;
 +      put_unaligned_be32(sizeof(data), &cmd[5]);
 +
 +      put_unaligned_be64(key, &data[0]);
 +      put_unaligned_be64(sa_key, &data[8]);
 +      data[20] = flags;
 +
 +      result = scsi_execute_req(sdev, cmd, DMA_TO_DEVICE, &data, sizeof(data),
 +                      &sshdr, SD_TIMEOUT, SD_MAX_RETRIES, NULL);
 +
 +      if ((driver_byte(result) & DRIVER_SENSE) &&
 +          (scsi_sense_valid(&sshdr))) {
 +              sdev_printk(KERN_INFO, sdev, "PR command failed: %d\n", result);
 +              scsi_print_sense_hdr(sdev, NULL, &sshdr);
 +      }
 +
 +      return result;
 +}
 +
 +static int sd_pr_register(struct block_device *bdev, u64 old_key, u64 new_key,
 +              u32 flags)
 +{
 +      if (flags & ~PR_FL_IGNORE_KEY)
 +              return -EOPNOTSUPP;
 +      return sd_pr_command(bdev, (flags & PR_FL_IGNORE_KEY) ? 0x06 : 0x00,
 +                      old_key, new_key, 0,
 +                      (1 << 0) /* APTPL */ |
 +                      (1 << 2) /* ALL_TG_PT */);
 +}
 +
 +static int sd_pr_reserve(struct block_device *bdev, u64 key, enum pr_type type,
 +              u32 flags)
 +{
 +      if (flags)
 +              return -EOPNOTSUPP;
 +      return sd_pr_command(bdev, 0x01, key, 0, sd_pr_type(type), 0);
 +}
 +
 +static int sd_pr_release(struct block_device *bdev, u64 key, enum pr_type type)
 +{
 +      return sd_pr_command(bdev, 0x02, key, 0, sd_pr_type(type), 0);
 +}
 +
 +static int sd_pr_preempt(struct block_device *bdev, u64 old_key, u64 new_key,
 +              enum pr_type type, bool abort)
 +{
 +      return sd_pr_command(bdev, abort ? 0x05 : 0x04, old_key, new_key,
 +                           sd_pr_type(type), 0);
 +}
 +
 +static int sd_pr_clear(struct block_device *bdev, u64 key)
 +{
 +      return sd_pr_command(bdev, 0x03, key, 0, 0, 0);
 +}
 +
 +static const struct pr_ops sd_pr_ops = {
 +      .pr_register    = sd_pr_register,
 +      .pr_reserve     = sd_pr_reserve,
 +      .pr_release     = sd_pr_release,
 +      .pr_preempt     = sd_pr_preempt,
 +      .pr_clear       = sd_pr_clear,
 +};
 +
  static const struct block_device_operations sd_fops = {
        .owner                  = THIS_MODULE,
        .open                   = sd_open,
        .check_events           = sd_check_events,
        .revalidate_disk        = sd_revalidate_disk,
        .unlock_native_capacity = sd_unlock_native_capacity,
 +      .pr_ops                 = &sd_pr_ops,
  };
  
  /**
@@@ -3164,6 -3069,7 +3165,6 @@@ static void scsi_disk_release(struct de
        ida_remove(&sd_index_ida, sdkp->index);
        spin_unlock(&sd_index_lock);
  
 -      blk_integrity_unregister(disk);
        disk->private_data = NULL;
        put_disk(disk);
        put_device(&sdkp->device->sdev_gendev);
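
The sd.c additions above back the block layer's new pr_ops interface with SCSI PERSISTENT RESERVE OUT service actions. A hedged userspace sketch of driving them through the generic ioctls declared in <linux/pr.h> (assuming the 4.4-era UAPI; /dev/sdX and the key values are placeholders):

    #include <fcntl.h>
    #include <stdio.h>
    #include <sys/ioctl.h>
    #include <linux/pr.h>

    int main(void)
    {
            struct pr_registration reg = { .old_key = 0, .new_key = 0xabcd1234 };
            struct pr_reservation  rsv = { .key = 0xabcd1234,
                                           .type = PR_WRITE_EXCLUSIVE };
            int fd = open("/dev/sdX", O_RDWR);

            if (fd < 0)
                    return 1;
            if (ioctl(fd, IOC_PR_REGISTER, &reg))   /* ends up in sd_pr_register() */
                    perror("IOC_PR_REGISTER");
            if (ioctl(fd, IOC_PR_RESERVE, &rsv))    /* ends up in sd_pr_reserve() */
                    perror("IOC_PR_RESERVE");
            return 0;
    }
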
diff --combined drivers/target/loopback/tcm_loop.c
index 999b6eba52e8b4b86ae2d0a61192502392988705,081f1cfefeb33871129b005ac397a73949a95a9c..4fb0eca86857e2f3229603397138d4c793b57eff
@@@ -34,6 -34,7 +34,6 @@@
  
  #include <target/target_core_base.h>
  #include <target/target_core_fabric.h>
 -#include <target/target_core_fabric_configfs.h>
  
  #include "tcm_loop.h"
  
@@@ -376,7 -377,6 +376,6 @@@ static struct scsi_host_template tcm_lo
        .use_clustering         = DISABLE_CLUSTERING,
        .slave_alloc            = tcm_loop_slave_alloc,
        .module                 = THIS_MODULE,
-       .use_blk_tags           = 1,
        .track_queue_depth      = 1,
  };
  
@@@ -762,20 -762,21 +761,20 @@@ static void tcm_loop_port_unlink
  
  /* End items for tcm_loop_port_cit */
  
 -static ssize_t tcm_loop_tpg_attrib_show_fabric_prot_type(
 -      struct se_portal_group *se_tpg,
 -      char *page)
 +static ssize_t tcm_loop_tpg_attrib_fabric_prot_type_show(
 +              struct config_item *item, char *page)
  {
 +      struct se_portal_group *se_tpg = attrib_to_tpg(item);
        struct tcm_loop_tpg *tl_tpg = container_of(se_tpg, struct tcm_loop_tpg,
                                                   tl_se_tpg);
  
        return sprintf(page, "%d\n", tl_tpg->tl_fabric_prot_type);
  }
  
 -static ssize_t tcm_loop_tpg_attrib_store_fabric_prot_type(
 -      struct se_portal_group *se_tpg,
 -      const char *page,
 -      size_t count)
 +static ssize_t tcm_loop_tpg_attrib_fabric_prot_type_store(
 +              struct config_item *item, const char *page, size_t count)
  {
 +      struct se_portal_group *se_tpg = attrib_to_tpg(item);
        struct tcm_loop_tpg *tl_tpg = container_of(se_tpg, struct tcm_loop_tpg,
                                                   tl_se_tpg);
        unsigned long val;
        return count;
  }
  
 -TF_TPG_ATTRIB_ATTR(tcm_loop, fabric_prot_type, S_IRUGO | S_IWUSR);
 +CONFIGFS_ATTR(tcm_loop_tpg_attrib_, fabric_prot_type);
  
  static struct configfs_attribute *tcm_loop_tpg_attrib_attrs[] = {
 -      &tcm_loop_tpg_attrib_fabric_prot_type.attr,
 +      &tcm_loop_tpg_attrib_attr_fabric_prot_type,
        NULL,
  };
  
@@@ -892,9 -893,10 +891,9 @@@ static int tcm_loop_drop_nexus
  
  /* End items for tcm_loop_nexus_cit */
  
 -static ssize_t tcm_loop_tpg_show_nexus(
 -      struct se_portal_group *se_tpg,
 -      char *page)
 +static ssize_t tcm_loop_tpg_nexus_show(struct config_item *item, char *page)
  {
 +      struct se_portal_group *se_tpg = to_tpg(item);
        struct tcm_loop_tpg *tl_tpg = container_of(se_tpg,
                        struct tcm_loop_tpg, tl_se_tpg);
        struct tcm_loop_nexus *tl_nexus;
        return ret;
  }
  
 -static ssize_t tcm_loop_tpg_store_nexus(
 -      struct se_portal_group *se_tpg,
 -      const char *page,
 -      size_t count)
 +static ssize_t tcm_loop_tpg_nexus_store(struct config_item *item,
 +              const char *page, size_t count)
  {
 +      struct se_portal_group *se_tpg = to_tpg(item);
        struct tcm_loop_tpg *tl_tpg = container_of(se_tpg,
                        struct tcm_loop_tpg, tl_se_tpg);
        struct tcm_loop_hba *tl_hba = tl_tpg->tl_hba;
@@@ -988,10 -991,12 +987,10 @@@ check_newline
        return count;
  }
  
 -TF_TPG_BASE_ATTR(tcm_loop, nexus, S_IRUGO | S_IWUSR);
 -
 -static ssize_t tcm_loop_tpg_show_transport_status(
 -      struct se_portal_group *se_tpg,
 -      char *page)
 +static ssize_t tcm_loop_tpg_transport_status_show(struct config_item *item,
 +              char *page)
  {
 +      struct se_portal_group *se_tpg = to_tpg(item);
        struct tcm_loop_tpg *tl_tpg = container_of(se_tpg,
                        struct tcm_loop_tpg, tl_se_tpg);
        const char *status = NULL;
        return ret;
  }
  
 -static ssize_t tcm_loop_tpg_store_transport_status(
 -      struct se_portal_group *se_tpg,
 -      const char *page,
 -      size_t count)
 +static ssize_t tcm_loop_tpg_transport_status_store(struct config_item *item,
 +              const char *page, size_t count)
  {
 +      struct se_portal_group *se_tpg = to_tpg(item);
        struct tcm_loop_tpg *tl_tpg = container_of(se_tpg,
                        struct tcm_loop_tpg, tl_se_tpg);
  
        return -EINVAL;
  }
  
 -TF_TPG_BASE_ATTR(tcm_loop, transport_status, S_IRUGO | S_IWUSR);
 +CONFIGFS_ATTR(tcm_loop_tpg_, nexus);
 +CONFIGFS_ATTR(tcm_loop_tpg_, transport_status);
  
  static struct configfs_attribute *tcm_loop_tpg_attrs[] = {
 -      &tcm_loop_tpg_nexus.attr,
 -      &tcm_loop_tpg_transport_status.attr,
 +      &tcm_loop_tpg_attr_nexus,
 +      &tcm_loop_tpg_attr_transport_status,
        NULL,
  };
  
@@@ -1210,15 -1215,17 +1209,15 @@@ static void tcm_loop_drop_scsi_hba
  }
  
  /* Start items for tcm_loop_cit */
 -static ssize_t tcm_loop_wwn_show_attr_version(
 -      struct target_fabric_configfs *tf,
 -      char *page)
 +static ssize_t tcm_loop_wwn_version_show(struct config_item *item, char *page)
  {
        return sprintf(page, "TCM Loopback Fabric module %s\n", TCM_LOOP_VERSION);
  }
  
 -TF_WWN_ATTR_RO(tcm_loop, version);
 +CONFIGFS_ATTR_RO(tcm_loop_wwn_, version);
  
  static struct configfs_attribute *tcm_loop_wwn_attrs[] = {
 -      &tcm_loop_wwn_version.attr,
 +      &tcm_loop_wwn_attr_version,
        NULL,
  };
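
The tcm_loop conversion above moves from the target-specific TF_* attribute macros to the generic configfs helpers: CONFIGFS_ATTR(prefix_, name) emits a struct configfs_attribute named prefix_attr_name and wires it to prefix_name_show()/prefix_name_store(), which now take a struct config_item rather than a fabric-specific type. A small self-contained sketch with a hypothetical attribute group (not part of the patch):

    #include <linux/kernel.h>
    #include <linux/configfs.h>

    static ssize_t demo_tpg_enabled_show(struct config_item *item, char *page)
    {
            return sprintf(page, "%d\n", 1);        /* hypothetical state */
    }

    static ssize_t demo_tpg_enabled_store(struct config_item *item,
                                          const char *page, size_t count)
    {
            /* parse "page" and update state here */
            return count;
    }

    CONFIGFS_ATTR(demo_tpg_, enabled);

    static struct configfs_attribute *demo_tpg_attrs[] = {
            &demo_tpg_attr_enabled,
            NULL,
    };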
  
diff --combined drivers/usb/storage/uas.c
index 48ca9c204354168f079a1e9e5ceaf2ac4b74a0f5,2850663f51263712c78f1f52fec7c91b2e374679..e6915166443607283f8229eaca83fac38ed68377
@@@ -257,16 -257,17 +257,16 @@@ static void uas_stat_cmplt(struct urb *
        struct uas_cmd_info *cmdinfo;
        unsigned long flags;
        unsigned int idx;
 +      int status = urb->status;
  
        spin_lock_irqsave(&devinfo->lock, flags);
  
        if (devinfo->resetting)
                goto out;
  
 -      if (urb->status) {
 -              if (urb->status != -ENOENT && urb->status != -ECONNRESET) {
 -                      dev_err(&urb->dev->dev, "stat urb: status %d\n",
 -                              urb->status);
 -              }
 +      if (status) {
 +              if (status != -ENOENT && status != -ECONNRESET && status != -ESHUTDOWN)
 +                      dev_err(&urb->dev->dev, "stat urb: status %d\n", status);
                goto out;
        }
  
@@@ -347,7 -348,6 +347,7 @@@ static void uas_data_cmplt(struct urb *
        struct uas_dev_info *devinfo = (void *)cmnd->device->hostdata;
        struct scsi_data_buffer *sdb = NULL;
        unsigned long flags;
 +      int status = urb->status;
  
        spin_lock_irqsave(&devinfo->lock, flags);
  
                goto out;
        }
  
 -      if (urb->status) {
 -              if (urb->status != -ENOENT && urb->status != -ECONNRESET)
 -                      uas_log_cmd_state(cmnd, "data cmplt err", urb->status);
 +      if (status) {
 +              if (status != -ENOENT && status != -ECONNRESET && status != -ESHUTDOWN)
 +                      uas_log_cmd_state(cmnd, "data cmplt err", status);
                /* error: no data transfered */
                sdb->resid = sdb->length;
        } else {
@@@ -812,7 -812,6 +812,6 @@@ static struct scsi_host_template uas_ho
        .this_id = -1,
        .sg_tablesize = SG_NONE,
        .skip_settle_delay = 1,
-       .use_blk_tags = 1,
  };
  
  #define UNUSUAL_DEV(id_vendor, id_product, bcdDeviceMin, bcdDeviceMax, \
@@@ -929,10 -928,6 +928,6 @@@ static int uas_probe(struct usb_interfa
        if (result)
                goto set_alt0;
  
-       result = scsi_init_shared_tag_map(shost, devinfo->qdepth - 2);
-       if (result)
-               goto free_streams;
        usb_set_intfdata(intf, shost);
        result = scsi_add_host(shost, &intf->dev);
        if (result)