return 1;
}
+static void ata_qc_done(struct ata_queued_cmd *qc)
+{
+ struct scsi_cmnd *cmd = qc->scsicmd;
+ void (*done)(struct scsi_cmnd *) = qc->scsidone;
+
+ ata_qc_free(qc);
+ done(cmd);
+}
+
static void ata_scsi_qc_complete(struct ata_queued_cmd *qc)
{
struct ata_port *ap = qc->ap;
* asc,ascq = ATA PASS-THROUGH INFORMATION AVAILABLE
*/
if (((cdb[0] == ATA_16) || (cdb[0] == ATA_12)) &&
- ((cdb[2] & 0x20) || need_sense)) {
+ ((cdb[2] & 0x20) || need_sense))
ata_gen_passthru_sense(qc);
- } else {
- if (!need_sense) {
- cmd->result = SAM_STAT_GOOD;
- } else {
- /* TODO: decide which descriptor format to use
- * for 48b LBA devices and call that here
- * instead of the fixed desc, which is only
- * good for smaller LBA (and maybe CHS?)
- * devices.
- */
- ata_gen_ata_sense(qc);
- }
- }
+ else if (need_sense)
+ ata_gen_ata_sense(qc);
+ else
+ cmd->result = SAM_STAT_GOOD;
if (need_sense && !ap->ops->error_handler)
ata_dump_status(ap->print_id, &qc->result_tf);
- qc->scsidone(cmd);
-
- ata_qc_free(qc);
+ ata_qc_done(qc);
}
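
For context: ata_qc_done() copies qc->scsicmd and qc->scsidone into locals before calling ata_qc_free(), so the completion callback never touches a freed qc and the qc slot is already available for reuse by the time the callback runs. A minimal user-space sketch of that capture-before-free pattern (types and names here are illustrative, not the libata ones):

#include <stdio.h>
#include <stdlib.h>

struct cmd {
	int id;
};

struct qc {
	struct cmd *cmd;
	void (*done)(struct cmd *);
};

static void scsi_done_stub(struct cmd *c)
{
	/* in the driver, this path may immediately issue a new command,
	 * which can reuse the qc slot freed below */
	printf("completed cmd %d\n", c->id);
}

static void qc_done(struct qc *q)
{
	struct cmd *c = q->cmd;			/* capture before free */
	void (*done)(struct cmd *) = q->done;

	free(q);				/* slot released first */
	done(c);				/* then complete */
}

int main(void)
{
	static struct cmd c = { .id = 1 };
	struct qc *q = malloc(sizeof(*q));

	if (!q)
		return 1;
	q->cmd = &c;
	q->done = scsi_done_stub;
	qc_done(q);
	return 0;
}
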
/**
VPRINTK("ENTER\n");
- /* set scsi removable (RMB) bit per ata bit */
- if (ata_id_removable(args->id))
+ /* set scsi removable (RMB) bit per ata bit, or if the
+ * AHCI port says it's external (Hotplug-capable, eSATA).
+ */
+ if (ata_id_removable(args->id) ||
+ (args->dev->link->ap->pflags & ATA_PFLAG_EXTERNAL))
hdr[1] |= (1 << 7);
if (args->dev->class == ATA_DEV_ZAC) {
ata_gen_passthru_sense(qc);
}
- qc->scsidone(qc->scsicmd);
- ata_qc_free(qc);
+ ata_qc_done(qc);
}
/* is it pointless to prefer PIO for "safety reasons"? */
qc->dev->sdev->locked = 0;
qc->scsicmd->result = SAM_STAT_CHECK_CONDITION;
- qc->scsidone(cmd);
- ata_qc_free(qc);
+ ata_qc_done(qc);
return;
}
cmd->result = SAM_STAT_GOOD;
}
- qc->scsidone(cmd);
- ata_qc_free(qc);
+ ata_qc_done(qc);
}
/**
* atapi_xlat - Initialize PACKET taskfile
case 5: /* PIO Data-out */
return ATA_PROT_PIO;
+ case 12: /* FPDMA */
+ return ATA_PROT_NCQ;
+
case 0: /* Hard Reset */
case 1: /* SRST */
case 8: /* Device Diagnostic */
case 9: /* Device Reset */
case 7: /* DMA Queued */
- case 12: /* FPDMA */
case 15: /* Return Response Info */
default: /* Reserved */
break;
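
The case labels above are values of the 4-bit PROTOCOL field in byte 1 of an ATA PASS-THROUGH CDB; ata_scsi_map_proto() switches on (byte1 & 0x1e) >> 1. A standalone sketch of that field extraction (the helper is hypothetical; 0x85 is the ATA_16 opcode):

#include <assert.h>
#include <stdint.h>

/* extract the PROTOCOL field, bits 4:1 of CDB byte 1 */
static unsigned int passthru_protocol(const uint8_t *cdb)
{
	return (cdb[1] & 0x1e) >> 1;
}

int main(void)
{
	uint8_t cdb[16] = { 0x85 /* ATA_16 */, 12 << 1 /* FPDMA */, 0 };

	assert(passthru_protocol(cdb) == 12);
	return 0;
}
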
if ((tf->protocol = ata_scsi_map_proto(cdb[1])) == ATA_PROT_UNKNOWN)
goto invalid_fld;
+ /* enable LBA */
+ tf->flags |= ATA_TFLAG_LBA;
+
/*
* 12 and 16 byte CDBs use different offsets to
* provide the various register values.
tf->command = cdb[9];
}
+ /* For NCQ commands with FPDMA protocol, copy the tag value */
+ if (tf->protocol == ATA_PROT_NCQ)
+ tf->nsect = qc->tag << 3;
+
/* enforce correct master/slave bit */
tf->device = dev->devno ?
tf->device | ATA_DEV1 : tf->device & ~ATA_DEV1;
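
The tag placement follows the SATA NCQ FIS layout: the 5-bit tag occupies bits 7:3 of the sector count register, which is why the value is shifted left by three. A small self-checking sketch (helper names are made up):

#include <assert.h>
#include <stdint.h>

/* mirrors tf->nsect = qc->tag << 3: tag in bits 7:3, bits 2:0 zero */
static uint8_t ncq_nsect_from_tag(unsigned int tag)
{
	return (uint8_t)(tag << 3);
}

static unsigned int ncq_tag_from_nsect(uint8_t nsect)
{
	return nsect >> 3;
}

int main(void)
{
	unsigned int tag;

	for (tag = 0; tag < 32; tag++)	/* NCQ allows up to 32 tags */
		assert(ncq_tag_from_nsect(ncq_nsect_from_tag(tag)) == tag);
	return 0;
}
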
*/
shost->max_host_blocked = 1;
- if (scsi_init_shared_tag_map(shost, host->n_tags))
- goto err_add;
-
rc = scsi_add_host_with_dma(ap->scsi_host,
&ap->tdev, ap->host->dev);
if (rc)
return;
for (i = 0, d = &pool->desc[0]; i < pool->size; i++, d++) {
- if (d->frpl)
- ib_free_fast_reg_page_list(d->frpl);
if (d->mr)
ib_dereg_mr(d->mr);
}
struct srp_fr_pool *pool;
struct srp_fr_desc *d;
struct ib_mr *mr;
- struct ib_fast_reg_page_list *frpl;
int i, ret = -EINVAL;
if (pool_size <= 0)
goto destroy_pool;
}
d->mr = mr;
- frpl = ib_alloc_fast_reg_page_list(device, max_page_list_len);
- if (IS_ERR(frpl)) {
- ret = PTR_ERR(frpl);
- goto destroy_pool;
- }
- d->frpl = frpl;
list_add_tail(&d->entry, &pool->free_list);
}
for (i = 0; i < target->req_ring_size; ++i) {
req = &ch->req_ring[i];
- if (dev->use_fast_reg)
+ if (dev->use_fast_reg) {
kfree(req->fr_list);
- else
+ } else {
kfree(req->fmr_list);
- kfree(req->map_page);
+ kfree(req->map_page);
+ }
if (req->indirect_dma_addr) {
ib_dma_unmap_single(ibdev, req->indirect_dma_addr,
target->indirect_size,
GFP_KERNEL);
if (!mr_list)
goto out;
- if (srp_dev->use_fast_reg)
+ if (srp_dev->use_fast_reg) {
req->fr_list = mr_list;
- else
+ } else {
req->fmr_list = mr_list;
- req->map_page = kmalloc(srp_dev->max_pages_per_mr *
- sizeof(void *), GFP_KERNEL);
- if (!req->map_page)
- goto out;
+ req->map_page = kmalloc(srp_dev->max_pages_per_mr *
+ sizeof(void *), GFP_KERNEL);
+ if (!req->map_page)
+ goto out;
+ }
req->indirect_desc = kmalloc(target->indirect_size, GFP_KERNEL);
if (!req->indirect_desc)
goto out;
if (state->fmr.next >= state->fmr.end)
return -ENOMEM;
+ WARN_ON_ONCE(!dev->use_fmr);
+
+ if (state->npages == 0)
+ return 0;
+
+ if (state->npages == 1 && target->global_mr) {
+ srp_map_desc(state, state->base_dma_addr, state->dma_len,
+ target->global_mr->rkey);
+ goto reset_state;
+ }
+
fmr = ib_fmr_pool_map_phys(ch->fmr_pool, state->pages,
state->npages, io_addr);
if (IS_ERR(fmr))
srp_map_desc(state, state->base_dma_addr & ~dev->mr_page_mask,
state->dma_len, fmr->fmr->rkey);
+reset_state:
+ state->npages = 0;
+ state->dma_len = 0;
+
return 0;
}
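
The `state->base_dma_addr & ~dev->mr_page_mask` line above relies on srp's mask convention, mr_page_mask = ~((u64)mr_page_size - 1): ANDing with the complement yields the byte offset into the first page of the FMR mapping, which starts at io_addr 0. A standalone sketch of that arithmetic, assuming a 4 KiB MR page size:

#include <assert.h>
#include <stdint.h>

int main(void)
{
	uint64_t mr_page_size = 4096;			/* 1 << mr_page_shift */
	uint64_t mr_page_mask = ~(mr_page_size - 1);	/* srp's convention */
	uint64_t base_dma_addr = 0x12345678;

	/* page-aligned start of the region */
	assert((base_dma_addr & mr_page_mask) == 0x12345000);
	/* in-page offset -- the address handed to srp_map_desc() when the
	 * FMR virtual space begins at io_addr 0 */
	assert((base_dma_addr & ~mr_page_mask) == 0x678);
	return 0;
}
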
struct srp_target_port *target = ch->target;
struct srp_device *dev = target->srp_host->srp_dev;
struct ib_send_wr *bad_wr;
- struct ib_send_wr wr;
+ struct ib_reg_wr wr;
struct srp_fr_desc *desc;
u32 rkey;
+ int n, err;
if (state->fr.next >= state->fr.end)
return -ENOMEM;
+ WARN_ON_ONCE(!dev->use_fast_reg);
+
+ if (state->sg_nents == 0)
+ return 0;
+
+ if (state->sg_nents == 1 && target->global_mr) {
+ srp_map_desc(state, sg_dma_address(state->sg),
+ sg_dma_len(state->sg),
+ target->global_mr->rkey);
+ return 1;
+ }
+
desc = srp_fr_pool_get(ch->fr_pool);
if (!desc)
return -ENOMEM;
rkey = ib_inc_rkey(desc->mr->rkey);
ib_update_fast_reg_key(desc->mr, rkey);
- memcpy(desc->frpl->page_list, state->pages,
- sizeof(state->pages[0]) * state->npages);
-
- memset(&wr, 0, sizeof(wr));
- wr.opcode = IB_WR_FAST_REG_MR;
- wr.wr_id = FAST_REG_WR_ID_MASK;
- wr.wr.fast_reg.iova_start = state->base_dma_addr;
- wr.wr.fast_reg.page_list = desc->frpl;
- wr.wr.fast_reg.page_list_len = state->npages;
- wr.wr.fast_reg.page_shift = ilog2(dev->mr_page_size);
- wr.wr.fast_reg.length = state->dma_len;
- wr.wr.fast_reg.access_flags = (IB_ACCESS_LOCAL_WRITE |
- IB_ACCESS_REMOTE_READ |
- IB_ACCESS_REMOTE_WRITE);
- wr.wr.fast_reg.rkey = desc->mr->lkey;
+ n = ib_map_mr_sg(desc->mr, state->sg, state->sg_nents,
+ dev->mr_page_size);
+ if (unlikely(n < 0))
+ return n;
+
+ wr.wr.next = NULL;
+ wr.wr.opcode = IB_WR_REG_MR;
+ wr.wr.wr_id = FAST_REG_WR_ID_MASK;
+ wr.wr.num_sge = 0;
+ wr.wr.send_flags = 0;
+ wr.mr = desc->mr;
+ wr.key = desc->mr->rkey;
+ wr.access = (IB_ACCESS_LOCAL_WRITE |
+ IB_ACCESS_REMOTE_READ |
+ IB_ACCESS_REMOTE_WRITE);
*state->fr.next++ = desc;
state->nmdesc++;
- srp_map_desc(state, state->base_dma_addr, state->dma_len,
- desc->mr->rkey);
+ srp_map_desc(state, desc->mr->iova,
+ desc->mr->length, desc->mr->rkey);
- return ib_post_send(ch->qp, &wr, &bad_wr);
-}
+ err = ib_post_send(ch->qp, &wr.wr, &bad_wr);
+ if (unlikely(err))
+ return err;
-static int srp_finish_mapping(struct srp_map_state *state,
- struct srp_rdma_ch *ch)
-{
- struct srp_target_port *target = ch->target;
- struct srp_device *dev = target->srp_host->srp_dev;
- int ret = 0;
-
- WARN_ON_ONCE(!dev->use_fast_reg && !dev->use_fmr);
-
- if (state->npages == 0)
- return 0;
-
- if (state->npages == 1 && target->global_mr)
- srp_map_desc(state, state->base_dma_addr, state->dma_len,
- target->global_mr->rkey);
- else
- ret = dev->use_fast_reg ? srp_map_finish_fr(state, ch) :
- srp_map_finish_fmr(state, ch);
-
- if (ret == 0) {
- state->npages = 0;
- state->dma_len = 0;
- }
-
- return ret;
+ return n;
}
static int srp_map_sg_entry(struct srp_map_state *state,
while (dma_len) {
unsigned offset = dma_addr & ~dev->mr_page_mask;
if (state->npages == dev->max_pages_per_mr || offset != 0) {
- ret = srp_finish_mapping(state, ch);
+ ret = srp_map_finish_fmr(state, ch);
if (ret)
return ret;
}
*/
ret = 0;
if (len != dev->mr_page_size)
- ret = srp_finish_mapping(state, ch);
+ ret = srp_map_finish_fmr(state, ch);
return ret;
}
-static int srp_map_sg(struct srp_map_state *state, struct srp_rdma_ch *ch,
- struct srp_request *req, struct scatterlist *scat,
- int count)
+static int srp_map_sg_fmr(struct srp_map_state *state, struct srp_rdma_ch *ch,
+ struct srp_request *req, struct scatterlist *scat,
+ int count)
{
- struct srp_target_port *target = ch->target;
- struct srp_device *dev = target->srp_host->srp_dev;
struct scatterlist *sg;
int i, ret;
- state->desc = req->indirect_desc;
- state->pages = req->map_page;
- if (dev->use_fast_reg) {
- state->fr.next = req->fr_list;
- state->fr.end = req->fr_list + target->cmd_sg_cnt;
- } else if (dev->use_fmr) {
- state->fmr.next = req->fmr_list;
- state->fmr.end = req->fmr_list + target->cmd_sg_cnt;
- }
+ state->desc = req->indirect_desc;
+ state->pages = req->map_page;
+ state->fmr.next = req->fmr_list;
+ state->fmr.end = req->fmr_list + ch->target->cmd_sg_cnt;
- if (dev->use_fast_reg || dev->use_fmr) {
- for_each_sg(scat, sg, count, i) {
- ret = srp_map_sg_entry(state, ch, sg, i);
- if (ret)
- goto out;
- }
- ret = srp_finish_mapping(state, ch);
+ for_each_sg(scat, sg, count, i) {
+ ret = srp_map_sg_entry(state, ch, sg, i);
if (ret)
- goto out;
- } else {
- for_each_sg(scat, sg, count, i) {
- srp_map_desc(state, ib_sg_dma_address(dev->dev, sg),
- ib_sg_dma_len(dev->dev, sg),
- target->global_mr->rkey);
- }
+ return ret;
}
+ ret = srp_map_finish_fmr(state, ch);
+ if (ret)
+ return ret;
+
req->nmdesc = state->nmdesc;
- ret = 0;
-out:
- return ret;
+ return 0;
+}
+
+static int srp_map_sg_fr(struct srp_map_state *state, struct srp_rdma_ch *ch,
+ struct srp_request *req, struct scatterlist *scat,
+ int count)
+{
+ state->desc = req->indirect_desc;
+ state->fr.next = req->fr_list;
+ state->fr.end = req->fr_list + ch->target->cmd_sg_cnt;
+ state->sg = scat;
+ state->sg_nents = scsi_sg_count(req->scmnd);
+
+ while (state->sg_nents) {
+ int i, n;
+
+ n = srp_map_finish_fr(state, ch);
+ if (unlikely(n < 0))
+ return n;
+
+ state->sg_nents -= n;
+ for (i = 0; i < n; i++)
+ state->sg = sg_next(state->sg);
+ }
+
+ req->nmdesc = state->nmdesc;
+
+ return 0;
+}
+
+static int srp_map_sg_dma(struct srp_map_state *state, struct srp_rdma_ch *ch,
+ struct srp_request *req, struct scatterlist *scat,
+ int count)
+{
+ struct srp_target_port *target = ch->target;
+ struct srp_device *dev = target->srp_host->srp_dev;
+ struct scatterlist *sg;
+ int i;
+
+ state->desc = req->indirect_desc;
+ for_each_sg(scat, sg, count, i) {
+ srp_map_desc(state, ib_sg_dma_address(dev->dev, sg),
+ ib_sg_dma_len(dev->dev, sg),
+ target->global_mr->rkey);
+ }
+
+ req->nmdesc = state->nmdesc;
+
+ return 0;
}
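
srp_map_sg_fr() above consumes the scatterlist in chunks: srp_map_finish_fr() reports how many entries it folded into one memory region, and the caller advances sg/sg_nents by that count. A user-space sketch of the same consume-N-entries loop (the 3-entry merge limit is arbitrary):

#include <assert.h>
#include <stdio.h>

/* pretend the hardware merges at most 3 scatterlist entries per region */
static int map_chunk(const int *sg, int nents)
{
	(void)sg;
	return nents < 3 ? nents : 3;
}

int main(void)
{
	int sg[7] = { 0, 1, 2, 3, 4, 5, 6 };
	int nents = 7, pos = 0, nmdesc = 0;

	while (nents) {
		int n = map_chunk(&sg[pos], nents);

		assert(n > 0);		/* a 0 return would loop forever */
		nents -= n;
		pos += n;
		nmdesc++;
	}
	printf("mapped 7 entries with %d descriptors\n", nmdesc);
	return 0;
}
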
/*
struct srp_map_state state;
struct srp_direct_buf idb_desc;
u64 idb_pages[1];
+ struct scatterlist idb_sg[1];
int ret;
memset(&state, 0, sizeof(state));
state.gen.next = next_mr;
state.gen.end = end_mr;
state.desc = &idb_desc;
- state.pages = idb_pages;
- state.pages[0] = (req->indirect_dma_addr &
- dev->mr_page_mask);
- state.npages = 1;
state.base_dma_addr = req->indirect_dma_addr;
state.dma_len = idb_len;
- ret = srp_finish_mapping(&state, ch);
- if (ret < 0)
- goto out;
+
+ if (dev->use_fast_reg) {
+ state.sg = idb_sg;
+ state.sg_nents = 1;
+ sg_set_buf(idb_sg, req->indirect_desc, idb_len);
+ idb_sg->dma_address = req->indirect_dma_addr; /* hack! */
+ ret = srp_map_finish_fr(&state, ch);
+ if (ret < 0)
+ return ret;
+ } else if (dev->use_fmr) {
+ state.pages = idb_pages;
+ state.pages[0] = (req->indirect_dma_addr &
+ dev->mr_page_mask);
+ state.npages = 1;
+ ret = srp_map_finish_fmr(&state, ch);
+ if (ret < 0)
+ return ret;
+ } else {
+ return -EINVAL;
+ }
*idb_rkey = idb_desc.key;
-out:
- return ret;
+ return 0;
}
static int srp_map_data(struct scsi_cmnd *scmnd, struct srp_rdma_ch *ch,
target->indirect_size, DMA_TO_DEVICE);
memset(&state, 0, sizeof(state));
- srp_map_sg(&state, ch, req, scat, count);
+ if (dev->use_fast_reg)
+ srp_map_sg_fr(&state, ch, req, scat, count);
+ else if (dev->use_fmr)
+ srp_map_sg_fmr(&state, ch, req, scat, count);
+ else
+ srp_map_sg_dma(&state, ch, req, scat, count);
/* We've mapped the request, now pull as much of the indirect
* descriptor table as we can into the command buffer. If this
.cmd_per_lun = SRP_DEFAULT_CMD_SQ_SIZE,
.use_clustering = ENABLE_CLUSTERING,
.shost_attrs = srp_host_attrs,
- .use_blk_tags = 1,
.track_queue_depth = 1,
};
if (ret)
goto out;
- ret = scsi_init_shared_tag_map(target_host, target_host->can_queue);
- if (ret)
- goto out;
-
target->req_ring_size = target->queue_size - SRP_TSK_MGMT_SQ_SIZE;
if (!srp_conn_unique(target->srp_host, target)) {
INIT_WORK(&target->tl_err_work, srp_tl_err_work);
INIT_WORK(&target->remove_work, srp_remove_work);
spin_lock_init(&target->lock);
- ret = ib_query_gid(ibdev, host->port, 0, &target->sgid);
+ ret = ib_query_gid(ibdev, host->port, 0, &target->sgid, NULL);
if (ret)
goto out;
system continues booting, and even probe devices on different
busses in parallel, leading to a significant speed-up.
- If you have built SCSI as modules, enabling this option can
- be a problem as the devices may not have been found by the
- time your system expects them to have been. You can load the
- scsi_wait_scan module to ensure that all scans have completed.
- If you build your SCSI drivers into the kernel, then everything
- will work fine if you say Y here.
-
You can override this choice by specifying "scsi_mod.scan=sync"
or async on the kernel's command line.
source "drivers/scsi/esas2r/Kconfig"
source "drivers/scsi/megaraid/Kconfig.megaraid"
- source "drivers/scsi/mpt2sas/Kconfig"
source "drivers/scsi/mpt3sas/Kconfig"
source "drivers/scsi/ufs/Kconfig"
sdev = container_of(work, struct scsi_device, ew.work);
+ scsi_dh_release_device(sdev);
+
parent = sdev->sdev_gendev.parent;
spin_lock_irqsave(sdev->host->host_lock, flags);
sdev_vpd_pg_attr(pg83);
sdev_vpd_pg_attr(pg80);
+static ssize_t show_inquiry(struct file *filep, struct kobject *kobj,
+ struct bin_attribute *bin_attr,
+ char *buf, loff_t off, size_t count)
+{
+ struct device *dev = container_of(kobj, struct device, kobj);
+ struct scsi_device *sdev = to_scsi_device(dev);
+
+ if (!sdev->inquiry)
+ return -EINVAL;
+
+ return memory_read_from_buffer(buf, count, &off, sdev->inquiry,
+ sdev->inquiry_len);
+}
+
+static struct bin_attribute dev_attr_inquiry = {
+ .attr = {
+ .name = "inquiry",
+ .mode = S_IRUGO,
+ },
+ .size = 0,
+ .read = show_inquiry,
+};
+
static ssize_t
show_iostat_counterbits(struct device *dev, struct device_attribute *attr,
char *buf)
return -EINVAL;
sdev->queue_ramp_up_period = msecs_to_jiffies(period);
- return period;
+ return count;
}
static DEVICE_ATTR(queue_ramp_up_period, S_IRUGO | S_IWUSR,
static struct bin_attribute *scsi_sdev_bin_attrs[] = {
&dev_attr_vpd_pg83,
&dev_attr_vpd_pg80,
+ &dev_attr_inquiry,
NULL
};
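
With the new bin attribute registered, the raw INQUIRY response becomes readable from sysfs. A usage sketch, assuming a device at h:c:t:l 0:0:0:0 (adjust the path for your system); bytes 8-15 of a standard INQUIRY response hold the vendor identification:

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	unsigned char buf[256];
	ssize_t n;
	int fd = open("/sys/bus/scsi/devices/0:0:0:0/inquiry", O_RDONLY);

	if (fd < 0) {
		perror("open");
		return 1;
	}
	n = read(fd, buf, sizeof(buf));
	if (n >= 16)
		printf("vendor: %.8s\n", (const char *)buf + 8);
	close(fd);
	return 0;
}
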
static struct attribute_group scsi_sdev_attr_group = {
device_unregister(&sdev->sdev_dev);
transport_remove_device(dev);
scsi_dh_remove_device(sdev);
- device_del(dev);
- } else
- put_device(&sdev->sdev_dev);
+ }
/*
* Stop accepting new requests and wait until all queuecommand() and
blk_cleanup_queue(sdev->request_queue);
cancel_work_sync(&sdev->requeue_work);
+ /*
+ * Remove the device after blk_cleanup_queue() has been called such
+ * that a possible bdi_register() call with the same name occurs after
+ * blk_cleanup_queue() has called bdi_destroy().
+ */
+ if (sdev->is_visible)
+ device_del(dev);
+ else
+ put_device(&sdev->sdev_dev);
+
if (sdev->host->hostt->slave_destroy)
sdev->host->hostt->slave_destroy(sdev);
transport_destroy_device(dev);
void scsi_remove_target(struct device *dev)
{
struct Scsi_Host *shost = dev_to_shost(dev->parent);
- struct scsi_target *starget, *last = NULL;
+ struct scsi_target *starget;
unsigned long flags;
- /* remove targets being careful to lookup next entry before
- * deleting the last
- */
+ restart:
spin_lock_irqsave(shost->host_lock, flags);
list_for_each_entry(starget, &shost->__targets, siblings) {
if (starget->state == STARGET_DEL)
continue;
if (starget->dev.parent == dev || &starget->dev == dev) {
- /* assuming new targets arrive at the end */
kref_get(&starget->reap_ref);
spin_unlock_irqrestore(shost->host_lock, flags);
- if (last)
- scsi_target_reap(last);
- last = starget;
__scsi_remove_target(starget);
- spin_lock_irqsave(shost->host_lock, flags);
+ scsi_target_reap(starget);
+ goto restart;
}
}
spin_unlock_irqrestore(shost->host_lock, flags);
-
- if (last)
- scsi_target_reap(last);
}
EXPORT_SYMBOL(scsi_remove_target);
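
Since __scsi_remove_target() can sleep, the rewritten loop claims a matching target, drops the host lock, removes it, and then restarts the scan from the head because the list may have changed in the meantime. A user-space sketch of that restart idiom (the list, ids, and deleted flag are illustrative):

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct node {
	int id;
	int deleted;			/* stands in for STARGET_DEL */
	struct node *next;
};

static struct node *head;
static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

static void remove_all(int id)
{
restart:
	pthread_mutex_lock(&lock);
	for (struct node *n = head; n; n = n->next) {
		if (n->deleted || n->id != id)
			continue;
		n->deleted = 1;		/* claim it, like kref_get() */
		pthread_mutex_unlock(&lock);
		/* blocking teardown runs here; the list may mutate */
		printf("tearing down node %d\n", n->id);
		goto restart;
	}
	pthread_mutex_unlock(&lock);
}

int main(void)
{
	for (int i = 0; i < 3; i++) {
		struct node *n = calloc(1, sizeof(*n));

		n->id = 7;
		n->next = head;
		head = n;
	}
	remove_all(7);
	return 0;
}
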
#include <linux/async.h>
#include <linux/slab.h>
#include <linux/pm_runtime.h>
+#include <linux/pr.h>
#include <asm/uaccess.h>
#include <asm/unaligned.h>
buffer_data[2] &= ~0x05;
buffer_data[2] |= wce << 2 | rcd;
sp = buffer_data[0] & 0x80 ? 1 : 0;
+ buffer_data[0] &= ~0x80;
if (scsi_mode_select(sdp, 1, sp, 8, buffer_data, len, SD_TIMEOUT,
SD_MAX_RETRIES, &data, &sshdr)) {
}
#endif
+static char sd_pr_type(enum pr_type type)
+{
+ switch (type) {
+ case PR_WRITE_EXCLUSIVE:
+ return 0x01;
+ case PR_EXCLUSIVE_ACCESS:
+ return 0x03;
+ case PR_WRITE_EXCLUSIVE_REG_ONLY:
+ return 0x05;
+ case PR_EXCLUSIVE_ACCESS_REG_ONLY:
+ return 0x06;
+ case PR_WRITE_EXCLUSIVE_ALL_REGS:
+ return 0x07;
+ case PR_EXCLUSIVE_ACCESS_ALL_REGS:
+ return 0x08;
+ default:
+ return 0;
+ }
+}
+
+static int sd_pr_command(struct block_device *bdev, u8 sa,
+ u64 key, u64 sa_key, u8 type, u8 flags)
+{
+ struct scsi_device *sdev = scsi_disk(bdev->bd_disk)->device;
+ struct scsi_sense_hdr sshdr;
+ int result;
+ u8 cmd[16] = { 0, };
+ u8 data[24] = { 0, };
+
+ cmd[0] = PERSISTENT_RESERVE_OUT;
+ cmd[1] = sa;
+ cmd[2] = type;
+ put_unaligned_be32(sizeof(data), &cmd[5]);
+
+ put_unaligned_be64(key, &data[0]);
+ put_unaligned_be64(sa_key, &data[8]);
+ data[20] = flags;
+
+ result = scsi_execute_req(sdev, cmd, DMA_TO_DEVICE, &data, sizeof(data),
+ &sshdr, SD_TIMEOUT, SD_MAX_RETRIES, NULL);
+
+ if ((driver_byte(result) & DRIVER_SENSE) &&
+ (scsi_sense_valid(&sshdr))) {
+ sdev_printk(KERN_INFO, sdev, "PR command failed: %d\n", result);
+ scsi_print_sense_hdr(sdev, NULL, &sshdr);
+ }
+
+ return result;
+}
+
+static int sd_pr_register(struct block_device *bdev, u64 old_key, u64 new_key,
+ u32 flags)
+{
+ if (flags & ~PR_FL_IGNORE_KEY)
+ return -EOPNOTSUPP;
+ return sd_pr_command(bdev, (flags & PR_FL_IGNORE_KEY) ? 0x06 : 0x00,
+ old_key, new_key, 0,
+ (1 << 0) /* APTPL */ |
+ (1 << 2) /* ALL_TG_PT */);
+}
+
+static int sd_pr_reserve(struct block_device *bdev, u64 key, enum pr_type type,
+ u32 flags)
+{
+ if (flags)
+ return -EOPNOTSUPP;
+ return sd_pr_command(bdev, 0x01, key, 0, sd_pr_type(type), 0);
+}
+
+static int sd_pr_release(struct block_device *bdev, u64 key, enum pr_type type)
+{
+ return sd_pr_command(bdev, 0x02, key, 0, sd_pr_type(type), 0);
+}
+
+static int sd_pr_preempt(struct block_device *bdev, u64 old_key, u64 new_key,
+ enum pr_type type, bool abort)
+{
+ return sd_pr_command(bdev, abort ? 0x05 : 0x04, old_key, new_key,
+ sd_pr_type(type), 0);
+}
+
+static int sd_pr_clear(struct block_device *bdev, u64 key)
+{
+ return sd_pr_command(bdev, 0x03, key, 0, 0, 0);
+}
+
+static const struct pr_ops sd_pr_ops = {
+ .pr_register = sd_pr_register,
+ .pr_reserve = sd_pr_reserve,
+ .pr_release = sd_pr_release,
+ .pr_preempt = sd_pr_preempt,
+ .pr_clear = sd_pr_clear,
+};
+
static const struct block_device_operations sd_fops = {
.owner = THIS_MODULE,
.open = sd_open,
.check_events = sd_check_events,
.revalidate_disk = sd_revalidate_disk,
.unlock_native_capacity = sd_unlock_native_capacity,
+ .pr_ops = &sd_pr_ops,
};
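
These sd_pr_ops are driven by the block layer's persistent-reservation ioctls. A usage sketch, assuming the <linux/pr.h> uapi introduced alongside this series (IOC_PR_REGISTER, IOC_PR_RESERVE, struct pr_registration, struct pr_reservation) and an example device path:

#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/pr.h>

int main(void)
{
	struct pr_registration reg = { .old_key = 0, .new_key = 0xabcd };
	struct pr_reservation rsv = { .key = 0xabcd,
				      .type = PR_WRITE_EXCLUSIVE };
	int fd = open("/dev/sdb", O_RDWR);	/* example device */

	if (fd < 0) {
		perror("open");
		return 1;
	}
	if (ioctl(fd, IOC_PR_REGISTER, &reg))
		perror("IOC_PR_REGISTER");
	else if (ioctl(fd, IOC_PR_RESERVE, &rsv))
		perror("IOC_PR_RESERVE");
	close(fd);
	return 0;
}
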
/**
ida_remove(&sd_index_ida, sdkp->index);
spin_unlock(&sd_index_lock);
- blk_integrity_unregister(disk);
disk->private_data = NULL;
put_disk(disk);
put_device(&sdkp->device->sdev_gendev);
#include <target/target_core_base.h>
#include <target/target_core_fabric.h>
-#include <target/target_core_fabric_configfs.h>
#include "tcm_loop.h"
.use_clustering = DISABLE_CLUSTERING,
.slave_alloc = tcm_loop_slave_alloc,
.module = THIS_MODULE,
- .use_blk_tags = 1,
.track_queue_depth = 1,
};
/* End items for tcm_loop_port_cit */
-static ssize_t tcm_loop_tpg_attrib_show_fabric_prot_type(
- struct se_portal_group *se_tpg,
- char *page)
+static ssize_t tcm_loop_tpg_attrib_fabric_prot_type_show(
+ struct config_item *item, char *page)
{
+ struct se_portal_group *se_tpg = attrib_to_tpg(item);
struct tcm_loop_tpg *tl_tpg = container_of(se_tpg, struct tcm_loop_tpg,
tl_se_tpg);
return sprintf(page, "%d\n", tl_tpg->tl_fabric_prot_type);
}
-static ssize_t tcm_loop_tpg_attrib_store_fabric_prot_type(
- struct se_portal_group *se_tpg,
- const char *page,
- size_t count)
+static ssize_t tcm_loop_tpg_attrib_fabric_prot_type_store(
+ struct config_item *item, const char *page, size_t count)
{
+ struct se_portal_group *se_tpg = attrib_to_tpg(item);
struct tcm_loop_tpg *tl_tpg = container_of(se_tpg, struct tcm_loop_tpg,
tl_se_tpg);
unsigned long val;
return count;
}
-TF_TPG_ATTRIB_ATTR(tcm_loop, fabric_prot_type, S_IRUGO | S_IWUSR);
+CONFIGFS_ATTR(tcm_loop_tpg_attrib_, fabric_prot_type);
static struct configfs_attribute *tcm_loop_tpg_attrib_attrs[] = {
- &tcm_loop_tpg_attrib_fabric_prot_type.attr,
+ &tcm_loop_tpg_attrib_attr_fabric_prot_type,
NULL,
};
/* End items for tcm_loop_nexus_cit */
-static ssize_t tcm_loop_tpg_show_nexus(
- struct se_portal_group *se_tpg,
- char *page)
+static ssize_t tcm_loop_tpg_nexus_show(struct config_item *item, char *page)
{
+ struct se_portal_group *se_tpg = to_tpg(item);
struct tcm_loop_tpg *tl_tpg = container_of(se_tpg,
struct tcm_loop_tpg, tl_se_tpg);
struct tcm_loop_nexus *tl_nexus;
return ret;
}
-static ssize_t tcm_loop_tpg_store_nexus(
- struct se_portal_group *se_tpg,
- const char *page,
- size_t count)
+static ssize_t tcm_loop_tpg_nexus_store(struct config_item *item,
+ const char *page, size_t count)
{
+ struct se_portal_group *se_tpg = to_tpg(item);
struct tcm_loop_tpg *tl_tpg = container_of(se_tpg,
struct tcm_loop_tpg, tl_se_tpg);
struct tcm_loop_hba *tl_hba = tl_tpg->tl_hba;
return count;
}
-TF_TPG_BASE_ATTR(tcm_loop, nexus, S_IRUGO | S_IWUSR);
-
-static ssize_t tcm_loop_tpg_show_transport_status(
- struct se_portal_group *se_tpg,
- char *page)
+static ssize_t tcm_loop_tpg_transport_status_show(struct config_item *item,
+ char *page)
{
+ struct se_portal_group *se_tpg = to_tpg(item);
struct tcm_loop_tpg *tl_tpg = container_of(se_tpg,
struct tcm_loop_tpg, tl_se_tpg);
const char *status = NULL;
return ret;
}
-static ssize_t tcm_loop_tpg_store_transport_status(
- struct se_portal_group *se_tpg,
- const char *page,
- size_t count)
+static ssize_t tcm_loop_tpg_transport_status_store(struct config_item *item,
+ const char *page, size_t count)
{
+ struct se_portal_group *se_tpg = to_tpg(item);
struct tcm_loop_tpg *tl_tpg = container_of(se_tpg,
struct tcm_loop_tpg, tl_se_tpg);
return -EINVAL;
}
-TF_TPG_BASE_ATTR(tcm_loop, transport_status, S_IRUGO | S_IWUSR);
+CONFIGFS_ATTR(tcm_loop_tpg_, nexus);
+CONFIGFS_ATTR(tcm_loop_tpg_, transport_status);
static struct configfs_attribute *tcm_loop_tpg_attrs[] = {
- &tcm_loop_tpg_nexus.attr,
- &tcm_loop_tpg_transport_status.attr,
+ &tcm_loop_tpg_attr_nexus,
+ &tcm_loop_tpg_attr_transport_status,
NULL,
};
}
/* Start items for tcm_loop_cit */
-static ssize_t tcm_loop_wwn_show_attr_version(
- struct target_fabric_configfs *tf,
- char *page)
+static ssize_t tcm_loop_wwn_version_show(struct config_item *item, char *page)
{
return sprintf(page, "TCM Loopback Fabric module %s\n", TCM_LOOP_VERSION);
}
-TF_WWN_ATTR_RO(tcm_loop, version);
+CONFIGFS_ATTR_RO(tcm_loop_wwn_, version);
static struct configfs_attribute *tcm_loop_wwn_attrs[] = {
- &tcm_loop_wwn_version.attr,
+ &tcm_loop_wwn_attr_version,
NULL,
};
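
All of the conversions above lean on CONFIGFS_ATTR()/CONFIGFS_ATTR_RO() pasting the prefix and attribute name together, which is why the attrs[] entries become tcm_loop_tpg_attr_nexus and friends, wired to the matching _show/_store pair. A standalone illustration of that token-pasting pattern (DEFINE_ATTR is a made-up stand-in, not the kernel macro):

#include <stdio.h>

struct attribute {
	const char *name;
	int (*show)(char *buf);
};

/* paste prefix + name into an attribute bound to the _show function */
#define DEFINE_ATTR(_pfx, _name)			\
static struct attribute _pfx##attr_##_name = {		\
	.name = #_name,					\
	.show = _pfx##_name##_show,			\
}

static int demo_version_show(char *buf)
{
	return sprintf(buf, "demo v1\n");
}
DEFINE_ATTR(demo_, version);	/* emits demo_attr_version */

int main(void)
{
	char buf[32];

	demo_attr_version.show(buf);
	printf("%s: %s", demo_attr_version.name, buf);
	return 0;
}
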
struct uas_cmd_info *cmdinfo;
unsigned long flags;
unsigned int idx;
+ int status = urb->status;
spin_lock_irqsave(&devinfo->lock, flags);
if (devinfo->resetting)
goto out;
- if (urb->status) {
- if (urb->status != -ENOENT && urb->status != -ECONNRESET) {
- dev_err(&urb->dev->dev, "stat urb: status %d\n",
- urb->status);
- }
+ if (status) {
+ if (status != -ENOENT && status != -ECONNRESET && status != -ESHUTDOWN)
+ dev_err(&urb->dev->dev, "stat urb: status %d\n", status);
goto out;
}
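
The added -ESHUTDOWN check treats one more teardown-time completion status as benign; only statuses outside the expected-cancellation set get logged. A tiny sketch of that filter (sample values are arbitrary):

#include <errno.h>
#include <stdbool.h>
#include <stdio.h>

/* statuses that merely mean the URB was cancelled or the device is
 * going away: -ENOENT, -ECONNRESET, and (newly) -ESHUTDOWN */
static bool urb_status_is_teardown(int status)
{
	return status == -ENOENT || status == -ECONNRESET ||
	       status == -ESHUTDOWN;
}

int main(void)
{
	int samples[] = { 0, -ENOENT, -EPROTO, -ESHUTDOWN };
	size_t i;

	for (i = 0; i < sizeof(samples) / sizeof(samples[0]); i++)
		if (samples[i] && !urb_status_is_teardown(samples[i]))
			printf("would log: status %d\n", samples[i]);
	return 0;
}
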
struct uas_dev_info *devinfo = (void *)cmnd->device->hostdata;
struct scsi_data_buffer *sdb = NULL;
unsigned long flags;
+ int status = urb->status;
spin_lock_irqsave(&devinfo->lock, flags);
goto out;
}
- if (urb->status) {
- if (urb->status != -ENOENT && urb->status != -ECONNRESET)
- uas_log_cmd_state(cmnd, "data cmplt err", urb->status);
+ if (status) {
+ if (status != -ENOENT && status != -ECONNRESET && status != -ESHUTDOWN)
+ uas_log_cmd_state(cmnd, "data cmplt err", status);
/* error: no data transferred */
sdb->resid = sdb->length;
} else {
.this_id = -1,
.sg_tablesize = SG_NONE,
.skip_settle_delay = 1,
- .use_blk_tags = 1,
};
#define UNUSUAL_DEV(id_vendor, id_product, bcdDeviceMin, bcdDeviceMax, \
if (result)
goto set_alt0;
- result = scsi_init_shared_tag_map(shost, devinfo->qdepth - 2);
- if (result)
- goto free_streams;
-
usb_set_intfdata(intf, shost);
result = scsi_add_host(shost, &intf->dev);
if (result)