/*******************************************************************
 * This file is part of the Emulex Linux Device Driver for         *
 * Fibre Channel Host Bus Adapters.                                *
 * Copyright (C) 2017-2021 Broadcom. All Rights Reserved. The term *
 * "Broadcom" refers to Broadcom Inc. and/or its subsidiaries.     *
 * Copyright (C) 2004-2016 Emulex.  All rights reserved.           *
 * EMULEX and SLI are trademarks of Emulex.                        *
 * www.broadcom.com                                                *
 * Portions Copyright (C) 2004-2005 Christoph Hellwig              *
 *                                                                 *
 * This program is free software; you can redistribute it and/or   *
 * modify it under the terms of version 2 of the GNU General       *
 * Public License as published by the Free Software Foundation.    *
 * This program is distributed in the hope that it will be useful. *
 * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND          *
 * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY,  *
 * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE      *
 * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
 * TO BE LEGALLY INVALID.  See the GNU General Public License for  *
 * more details, a copy of which can be found in the file COPYING  *
 * included with this package.                                     *
 *******************************************************************/
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/export.h>
#include <linux/delay.h>
#include <asm/unaligned.h>
#include <linux/t10-pi.h>
#include <linux/crc-t10dif.h>
#include <linux/blk-cgroup.h>
#include <net/checksum.h>

#include <scsi/scsi.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_eh.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_tcq.h>
#include <scsi/scsi_transport_fc.h>

#include "lpfc_version.h"
#include "lpfc_hw4.h"
#include "lpfc_hw.h"
#include "lpfc_sli.h"
#include "lpfc_sli4.h"
#include "lpfc_nl.h"
#include "lpfc_disc.h"
#include "lpfc.h"
#include "lpfc_scsi.h"
#include "lpfc_logmsg.h"
#include "lpfc_crtn.h"
#include "lpfc_vport.h"

#define LPFC_RESET_WAIT  2
#define LPFC_ABORT_WAIT  2

static char *dif_op_str[] = {
	"PROT_NORMAL",
	"PROT_READ_INSERT",
	"PROT_WRITE_STRIP",
	"PROT_READ_STRIP",
	"PROT_WRITE_INSERT",
	"PROT_READ_PASS",
	"PROT_WRITE_PASS",
};

struct scsi_dif_tuple {
	__be16 guard_tag;	/* Checksum */
	__be16 app_tag;		/* Opaque storage */
	__be32 ref_tag;		/* Target LBA or indirect LBA */
};

static struct lpfc_rport_data *
lpfc_rport_data_from_scsi_device(struct scsi_device *sdev)
{
	struct lpfc_vport *vport = (struct lpfc_vport *)sdev->host->hostdata;

	if (vport->phba->cfg_fof)
		return ((struct lpfc_device_data *)sdev->hostdata)->rport_data;
	else
		return (struct lpfc_rport_data *)sdev->hostdata;
}

static void
lpfc_release_scsi_buf_s4(struct lpfc_hba *phba, struct lpfc_io_buf *psb);
static void
lpfc_release_scsi_buf_s3(struct lpfc_hba *phba, struct lpfc_io_buf *psb);
static int
lpfc_prot_group_type(struct lpfc_hba *phba, struct scsi_cmnd *sc);
static void
lpfc_put_vmid_in_hashtable(struct lpfc_vport *vport, u32 hash,
			   struct lpfc_vmid *vmp);
static void lpfc_vmid_update_entry(struct lpfc_vport *vport, struct scsi_cmnd
				   *cmd, struct lpfc_vmid *vmp,
				   union lpfc_vmid_io_tag *tag);
static void lpfc_vmid_assign_cs_ctl(struct lpfc_vport *vport,
				    struct lpfc_vmid *vmid);

/**
 * lpfc_sli4_set_rsp_sgl_last - Set the last bit in the response sge.
 * @phba: Pointer to HBA object.
 * @lpfc_cmd: lpfc scsi command object pointer.
 *
 * This function is called from the lpfc_prep_task_mgmt_cmd function to
 * set the last bit in the response sge entry.
 **/
static void
lpfc_sli4_set_rsp_sgl_last(struct lpfc_hba *phba,
			   struct lpfc_io_buf *lpfc_cmd)
{
	struct sli4_sge *sgl = (struct sli4_sge *)lpfc_cmd->dma_sgl;
	if (sgl) {
		sgl += 1;
		sgl->word2 = le32_to_cpu(sgl->word2);
		bf_set(lpfc_sli4_sge_last, sgl, 1);
		sgl->word2 = cpu_to_le32(sgl->word2);
	}
}

#define LPFC_INVALID_REFTAG ((u32)-1)

/**
 * lpfc_update_stats - Update statistical data for the command completion
 * @vport: The virtual port on which this call is executing.
 * @lpfc_cmd: lpfc scsi command object pointer.
 *
 * This function is called when there is a command completion and this
 * function updates the statistical data for the command completion.
 **/
static void
lpfc_update_stats(struct lpfc_vport *vport, struct lpfc_io_buf *lpfc_cmd)
{
	struct lpfc_hba *phba = vport->phba;
	struct lpfc_rport_data *rdata;
	struct lpfc_nodelist *pnode;
	struct scsi_cmnd *cmd = lpfc_cmd->pCmd;
	unsigned long flags;
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
	unsigned long latency;
	int i;

	if (!vport->stat_data_enabled ||
	    vport->stat_data_blocked ||
	    (cmd->result))
		return;

	latency = jiffies_to_msecs((long)jiffies - (long)lpfc_cmd->start_time);
	rdata = lpfc_cmd->rdata;
	pnode = rdata->pnode;

	spin_lock_irqsave(shost->host_lock, flags);
	if (!pnode ||
	    !pnode->lat_data ||
	    (phba->bucket_type == LPFC_NO_BUCKET)) {
		spin_unlock_irqrestore(shost->host_lock, flags);
		return;
	}

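	/*
	 * Select the latency histogram bucket: linear buckets are evenly
	 * spaced by bucket_step, otherwise bucket widths grow as powers
	 * of two.
	 */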
	if (phba->bucket_type == LPFC_LINEAR_BUCKET) {
		i = (latency + phba->bucket_step - 1 - phba->bucket_base)/
			phba->bucket_step;
		/* check array subscript bounds */
		if (i < 0)
			i = 0;
		else if (i >= LPFC_MAX_BUCKET_COUNT)
			i = LPFC_MAX_BUCKET_COUNT - 1;
	} else {
		for (i = 0; i < LPFC_MAX_BUCKET_COUNT-1; i++)
			if (latency <= (phba->bucket_base +
				((1<<i)*phba->bucket_step)))
				break;
	}

	pnode->lat_data[i].cmd_count++;
	spin_unlock_irqrestore(shost->host_lock, flags);
}

/**
 * lpfc_rampdown_queue_depth - Post RAMP_DOWN_QUEUE event to worker thread
 * @phba: The Hba for which this call is being executed.
 *
 * This routine is called when there is a resource error in driver or firmware.
 * This routine posts a WORKER_RAMP_DOWN_QUEUE event for @phba. This routine
 * posts at most 1 event each second. This routine wakes up the worker thread
 * of @phba to process the WORKER_RAMP_DOWN_QUEUE event.
 *
 * This routine should be called with no lock held.
 **/
void
lpfc_rampdown_queue_depth(struct lpfc_hba *phba)
{
	unsigned long flags;
	uint32_t evt_posted;
	unsigned long expires;

	spin_lock_irqsave(&phba->hbalock, flags);
	atomic_inc(&phba->num_rsrc_err);
	phba->last_rsrc_error_time = jiffies;

	expires = phba->last_ramp_down_time + QUEUE_RAMP_DOWN_INTERVAL;
	if (time_after(expires, jiffies)) {
		spin_unlock_irqrestore(&phba->hbalock, flags);
		return;
	}

	phba->last_ramp_down_time = jiffies;

	spin_unlock_irqrestore(&phba->hbalock, flags);

	spin_lock_irqsave(&phba->pport->work_port_lock, flags);
	evt_posted = phba->pport->work_port_events & WORKER_RAMP_DOWN_QUEUE;
	if (!evt_posted)
		phba->pport->work_port_events |= WORKER_RAMP_DOWN_QUEUE;
	spin_unlock_irqrestore(&phba->pport->work_port_lock, flags);

	if (!evt_posted)
		lpfc_worker_wake_up(phba);
	return;
}

/**
 * lpfc_ramp_down_queue_handler - WORKER_RAMP_DOWN_QUEUE event handler
 * @phba: The Hba for which this call is being executed.
 *
 * This routine is called to process the WORKER_RAMP_DOWN_QUEUE event for the
 * worker thread. This routine reduces the queue depth for all scsi devices on
 * each vport associated with @phba.
 **/
void
lpfc_ramp_down_queue_handler(struct lpfc_hba *phba)
{
	struct lpfc_vport **vports;
	struct Scsi_Host *shost;
	struct scsi_device *sdev;
	unsigned long new_queue_depth;
	unsigned long num_rsrc_err, num_cmd_success;
	int i;

	num_rsrc_err = atomic_read(&phba->num_rsrc_err);
	num_cmd_success = atomic_read(&phba->num_cmd_success);

	/*
	 * The error and success command counters are global per
	 * driver instance.  If another handler has already
	 * operated on this error event, just exit.
	 */
	if (num_rsrc_err == 0)
		return;

	vports = lpfc_create_vport_work_array(phba);
	if (vports != NULL)
		for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
			shost = lpfc_shost_from_vport(vports[i]);
			shost_for_each_device(sdev, shost) {
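				/*
				 * Cut the queue depth by the fraction of
				 * recent commands that saw a resource error;
				 * always reduce by at least one.
				 */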
				new_queue_depth =
					sdev->queue_depth * num_rsrc_err /
					(num_rsrc_err + num_cmd_success);
				if (!new_queue_depth)
					new_queue_depth = sdev->queue_depth - 1;
				else
					new_queue_depth = sdev->queue_depth -
						new_queue_depth;
				scsi_change_queue_depth(sdev, new_queue_depth);
			}
		}
	lpfc_destroy_vport_work_array(phba, vports);
	atomic_set(&phba->num_rsrc_err, 0);
	atomic_set(&phba->num_cmd_success, 0);
}

/**
 * lpfc_scsi_dev_block - set all scsi hosts to block state
 * @phba: Pointer to HBA context object.
 *
 * This function walks the vport list and sets each SCSI host to block state
 * by invoking the fc_remote_port_delete() routine. This function is invoked
 * with EEH when the device's PCI slot has been permanently disabled.
 **/
void
lpfc_scsi_dev_block(struct lpfc_hba *phba)
{
	struct lpfc_vport **vports;
	struct Scsi_Host *shost;
	struct scsi_device *sdev;
	struct fc_rport *rport;
	int i;

	vports = lpfc_create_vport_work_array(phba);
	if (vports != NULL)
		for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
			shost = lpfc_shost_from_vport(vports[i]);
			shost_for_each_device(sdev, shost) {
				rport = starget_to_rport(scsi_target(sdev));
				fc_remote_port_delete(rport);
			}
		}
	lpfc_destroy_vport_work_array(phba, vports);
}

/**
 * lpfc_new_scsi_buf_s3 - Scsi buffer allocator for HBA with SLI3 IF spec
 * @vport: The virtual port for which this call is being executed.
 * @num_to_alloc: The requested number of buffers to allocate.
 *
 * This routine allocates a scsi buffer for a device with SLI-3 interface spec.
 * The scsi buffer contains all the necessary information needed to initiate
 * a SCSI I/O. The non-DMAable buffer region contains information to build
 * the IOCB. The DMAable region contains memory for the FCP CMND, FCP RSP,
 * and the initial BPL. In addition to allocating memory, the FCP CMND and
 * FCP RSP BDEs are setup in the BPL and the BPL BDE is setup in the IOCB.
 *
 * Return codes:
 *   int - number of scsi buffers that were allocated.
 *   0 = failure, less than num_to_alloc is a partial failure.
 **/
static int
lpfc_new_scsi_buf_s3(struct lpfc_vport *vport, int num_to_alloc)
{
	struct lpfc_hba *phba = vport->phba;
	struct lpfc_io_buf *psb;
	struct ulp_bde64 *bpl;
	IOCB_t *iocb;
	dma_addr_t pdma_phys_fcp_cmd;
	dma_addr_t pdma_phys_fcp_rsp;
	dma_addr_t pdma_phys_sgl;
	uint16_t iotag;
	int bcnt, bpl_size;

	bpl_size = phba->cfg_sg_dma_buf_size -
		(sizeof(struct fcp_cmnd) + sizeof(struct fcp_rsp));

	lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
			 "9067 ALLOC %d scsi_bufs: %d (%d + %d + %d)\n",
			 num_to_alloc, phba->cfg_sg_dma_buf_size,
			 (int)sizeof(struct fcp_cmnd),
			 (int)sizeof(struct fcp_rsp), bpl_size);

	for (bcnt = 0; bcnt < num_to_alloc; bcnt++) {
		psb = kzalloc(sizeof(struct lpfc_io_buf), GFP_KERNEL);
		if (!psb)
			break;

		/*
		 * Get memory from the pci pool to map the virt space to pci
		 * bus space for an I/O.  The DMA buffer includes space for the
		 * struct fcp_cmnd, struct fcp_rsp and the number of bde's
		 * necessary to support the sg_tablesize.
		 */
		psb->data = dma_pool_zalloc(phba->lpfc_sg_dma_buf_pool,
					    GFP_KERNEL, &psb->dma_handle);
		if (!psb->data) {
			kfree(psb);
			break;
		}


		/* Allocate iotag for psb->cur_iocbq. */
		iotag = lpfc_sli_next_iotag(phba, &psb->cur_iocbq);
		if (iotag == 0) {
			dma_pool_free(phba->lpfc_sg_dma_buf_pool,
				      psb->data, psb->dma_handle);
			kfree(psb);
			break;
		}
		psb->cur_iocbq.iocb_flag |= LPFC_IO_FCP;

		psb->fcp_cmnd = psb->data;
		psb->fcp_rsp = psb->data + sizeof(struct fcp_cmnd);
		psb->dma_sgl = psb->data + sizeof(struct fcp_cmnd) +
			sizeof(struct fcp_rsp);

		/* Initialize local short-hand pointers. */
		bpl = (struct ulp_bde64 *)psb->dma_sgl;
		pdma_phys_fcp_cmd = psb->dma_handle;
		pdma_phys_fcp_rsp = psb->dma_handle + sizeof(struct fcp_cmnd);
		pdma_phys_sgl = psb->dma_handle + sizeof(struct fcp_cmnd) +
			sizeof(struct fcp_rsp);

		/*
		 * The first two bdes are the FCP_CMD and FCP_RSP. The balance
		 * are sg list bdes.  Initialize the first two and leave the
		 * rest for queuecommand.
		 */
		bpl[0].addrHigh = le32_to_cpu(putPaddrHigh(pdma_phys_fcp_cmd));
		bpl[0].addrLow = le32_to_cpu(putPaddrLow(pdma_phys_fcp_cmd));
		bpl[0].tus.f.bdeSize = sizeof(struct fcp_cmnd);
		bpl[0].tus.f.bdeFlags = BUFF_TYPE_BDE_64;
		bpl[0].tus.w = le32_to_cpu(bpl[0].tus.w);

		/* Setup the physical region for the FCP RSP */
		bpl[1].addrHigh = le32_to_cpu(putPaddrHigh(pdma_phys_fcp_rsp));
		bpl[1].addrLow = le32_to_cpu(putPaddrLow(pdma_phys_fcp_rsp));
		bpl[1].tus.f.bdeSize = sizeof(struct fcp_rsp);
		bpl[1].tus.f.bdeFlags = BUFF_TYPE_BDE_64;
		bpl[1].tus.w = le32_to_cpu(bpl[1].tus.w);

		/*
		 * Since the IOCB for the FCP I/O is built into this
		 * lpfc_scsi_buf, initialize it with all known data now.
		 */
		iocb = &psb->cur_iocbq.iocb;
		iocb->un.fcpi64.bdl.ulpIoTag32 = 0;
		if ((phba->sli_rev == 3) &&
		    !(phba->sli3_options & LPFC_SLI3_BG_ENABLED)) {
			/* fill in immediate fcp command BDE */
			iocb->un.fcpi64.bdl.bdeFlags = BUFF_TYPE_BDE_IMMED;
			iocb->un.fcpi64.bdl.bdeSize = sizeof(struct fcp_cmnd);
			iocb->un.fcpi64.bdl.addrLow = offsetof(IOCB_t,
							       unsli3.fcp_ext.icd);
			iocb->un.fcpi64.bdl.addrHigh = 0;
			iocb->ulpBdeCount = 0;
			iocb->ulpLe = 0;
			/* fill in response BDE */
			iocb->unsli3.fcp_ext.rbde.tus.f.bdeFlags =
							BUFF_TYPE_BDE_64;
			iocb->unsli3.fcp_ext.rbde.tus.f.bdeSize =
				sizeof(struct fcp_rsp);
			iocb->unsli3.fcp_ext.rbde.addrLow =
				putPaddrLow(pdma_phys_fcp_rsp);
			iocb->unsli3.fcp_ext.rbde.addrHigh =
				putPaddrHigh(pdma_phys_fcp_rsp);
		} else {
			iocb->un.fcpi64.bdl.bdeFlags = BUFF_TYPE_BLP_64;
			iocb->un.fcpi64.bdl.bdeSize =
					(2 * sizeof(struct ulp_bde64));
			iocb->un.fcpi64.bdl.addrLow =
					putPaddrLow(pdma_phys_sgl);
			iocb->un.fcpi64.bdl.addrHigh =
					putPaddrHigh(pdma_phys_sgl);
			iocb->ulpBdeCount = 1;
			iocb->ulpLe = 1;
		}
		iocb->ulpClass = CLASS3;
		psb->status = IOSTAT_SUCCESS;
		/* Put it back into the SCSI buffer list */
		psb->cur_iocbq.context1 = psb;
		spin_lock_init(&psb->buf_lock);
		lpfc_release_scsi_buf_s3(phba, psb);

	}

	return bcnt;
}

/**
 * lpfc_sli4_vport_delete_fcp_xri_aborted - Remove all ndlp references for vport
 * @vport: pointer to lpfc vport data structure.
 *
 * This routine is invoked by the vport cleanup for deletions and the cleanup
 * for an ndlp on removal.
 **/
void
lpfc_sli4_vport_delete_fcp_xri_aborted(struct lpfc_vport *vport)
{
	struct lpfc_hba *phba = vport->phba;
	struct lpfc_io_buf *psb, *next_psb;
	struct lpfc_sli4_hdw_queue *qp;
	unsigned long iflag = 0;
	int idx;

	if (!(vport->cfg_enable_fc4_type & LPFC_ENABLE_FCP))
		return;

	spin_lock_irqsave(&phba->hbalock, iflag);
	for (idx = 0; idx < phba->cfg_hdw_queue; idx++) {
		qp = &phba->sli4_hba.hdwq[idx];

		spin_lock(&qp->abts_io_buf_list_lock);
		list_for_each_entry_safe(psb, next_psb,
					 &qp->lpfc_abts_io_buf_list, list) {
			if (psb->cur_iocbq.iocb_flag & LPFC_IO_NVME)
				continue;

			if (psb->rdata && psb->rdata->pnode &&
			    psb->rdata->pnode->vport == vport)
				psb->rdata = NULL;
		}
		spin_unlock(&qp->abts_io_buf_list_lock);
	}
	spin_unlock_irqrestore(&phba->hbalock, iflag);
}

/**
 * lpfc_sli4_io_xri_aborted - Fast-path process of fcp xri abort
 * @phba: pointer to lpfc hba data structure.
 * @axri: pointer to the fcp xri abort wcqe structure.
 * @idx: index into hdwq
 *
 * This routine is invoked by the worker thread to process a SLI4 fast-path
 * FCP or NVME aborted xri.
 **/
void
lpfc_sli4_io_xri_aborted(struct lpfc_hba *phba,
			 struct sli4_wcqe_xri_aborted *axri, int idx)
{
	u16 xri = 0;
	u16 rxid = 0;
	struct lpfc_io_buf *psb, *next_psb;
	struct lpfc_sli4_hdw_queue *qp;
	unsigned long iflag = 0;
	struct lpfc_iocbq *iocbq;
	int i;
	struct lpfc_nodelist *ndlp;
	int rrq_empty = 0;
	struct lpfc_sli_ring *pring = phba->sli4_hba.els_wq->pring;
	struct scsi_cmnd *cmd;
	int offline = 0;

	if (!(phba->cfg_enable_fc4_type & LPFC_ENABLE_FCP))
		return;
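	/* When the PCI channel is offline the WCQE fields are not valid;
	 * in that case each buffer on the abort list is handled using its
	 * own XRI instead of matching the XRI from the WCQE.
	 */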
	offline = pci_channel_offline(phba->pcidev);
	if (!offline) {
		xri = bf_get(lpfc_wcqe_xa_xri, axri);
		rxid = bf_get(lpfc_wcqe_xa_remote_xid, axri);
	}
	qp = &phba->sli4_hba.hdwq[idx];
	spin_lock_irqsave(&phba->hbalock, iflag);
	spin_lock(&qp->abts_io_buf_list_lock);
	list_for_each_entry_safe(psb, next_psb,
				 &qp->lpfc_abts_io_buf_list, list) {
		if (offline)
			xri = psb->cur_iocbq.sli4_xritag;
		if (psb->cur_iocbq.sli4_xritag == xri) {
			list_del_init(&psb->list);
			psb->flags &= ~LPFC_SBUF_XBUSY;
			psb->status = IOSTAT_SUCCESS;
			if (psb->cur_iocbq.iocb_flag & LPFC_IO_NVME) {
				qp->abts_nvme_io_bufs--;
				spin_unlock(&qp->abts_io_buf_list_lock);
				spin_unlock_irqrestore(&phba->hbalock, iflag);
				if (!offline) {
					lpfc_sli4_nvme_xri_aborted(phba, axri,
								   psb);
					return;
				}
				lpfc_sli4_nvme_pci_offline_aborted(phba, psb);
				spin_lock_irqsave(&phba->hbalock, iflag);
				spin_lock(&qp->abts_io_buf_list_lock);
				continue;
			}
			qp->abts_scsi_io_bufs--;
			spin_unlock(&qp->abts_io_buf_list_lock);

			if (psb->rdata && psb->rdata->pnode)
				ndlp = psb->rdata->pnode;
			else
				ndlp = NULL;

			rrq_empty = list_empty(&phba->active_rrq_list);
			spin_unlock_irqrestore(&phba->hbalock, iflag);
			if (ndlp && !offline) {
				lpfc_set_rrq_active(phba, ndlp,
					psb->cur_iocbq.sli4_lxritag, rxid, 1);
				lpfc_sli4_abts_err_handler(phba, ndlp, axri);
			}

			if (phba->cfg_fcp_wait_abts_rsp || offline) {
				spin_lock_irqsave(&psb->buf_lock, iflag);
				cmd = psb->pCmd;
				psb->pCmd = NULL;
				spin_unlock_irqrestore(&psb->buf_lock, iflag);

				/* The sdev is not guaranteed to be valid post
				 * scsi_done upcall.
				 */
				if (cmd)
					cmd->scsi_done(cmd);

				/*
				 * We expect there is an abort thread waiting
				 * for command completion wake up the thread.
				 */
				spin_lock_irqsave(&psb->buf_lock, iflag);
				psb->cur_iocbq.iocb_flag &=
					~LPFC_DRIVER_ABORTED;
				if (psb->waitq)
					wake_up(psb->waitq);
				spin_unlock_irqrestore(&psb->buf_lock, iflag);
			}

			lpfc_release_scsi_buf_s4(phba, psb);
			if (rrq_empty)
				lpfc_worker_wake_up(phba);
			if (!offline)
				return;
			spin_lock_irqsave(&phba->hbalock, iflag);
			spin_lock(&qp->abts_io_buf_list_lock);
			continue;
		}
	}
	spin_unlock(&qp->abts_io_buf_list_lock);
	if (!offline) {
		for (i = 1; i <= phba->sli.last_iotag; i++) {
			iocbq = phba->sli.iocbq_lookup[i];

			if (!(iocbq->iocb_flag & LPFC_IO_FCP) ||
			    (iocbq->iocb_flag & LPFC_IO_LIBDFC))
				continue;
			if (iocbq->sli4_xritag != xri)
				continue;
			psb = container_of(iocbq, struct lpfc_io_buf, cur_iocbq);
			psb->flags &= ~LPFC_SBUF_XBUSY;
			spin_unlock_irqrestore(&phba->hbalock, iflag);
			if (!list_empty(&pring->txq))
				lpfc_worker_wake_up(phba);
			return;
		}
	}
	spin_unlock_irqrestore(&phba->hbalock, iflag);
}

/**
 * lpfc_get_scsi_buf_s3 - Get a scsi buffer from lpfc_scsi_buf_list of the HBA
 * @phba: The HBA for which this call is being executed.
 * @ndlp: pointer to a node-list data structure.
 * @cmnd: Pointer to scsi_cmnd data structure.
 *
 * This routine removes a scsi buffer from head of @phba lpfc_scsi_buf_list list
 * and returns to caller.
 *
 * Return codes:
 *   NULL - Error
 *   Pointer to lpfc_scsi_buf - Success
 **/
static struct lpfc_io_buf *
lpfc_get_scsi_buf_s3(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp,
		     struct scsi_cmnd *cmnd)
{
	struct lpfc_io_buf *lpfc_cmd = NULL;
	struct list_head *scsi_buf_list_get = &phba->lpfc_scsi_buf_list_get;
	unsigned long iflag = 0;

	spin_lock_irqsave(&phba->scsi_buf_list_get_lock, iflag);
	list_remove_head(scsi_buf_list_get, lpfc_cmd, struct lpfc_io_buf,
			 list);
	if (!lpfc_cmd) {
		spin_lock(&phba->scsi_buf_list_put_lock);
		list_splice(&phba->lpfc_scsi_buf_list_put,
			    &phba->lpfc_scsi_buf_list_get);
		INIT_LIST_HEAD(&phba->lpfc_scsi_buf_list_put);
		list_remove_head(scsi_buf_list_get, lpfc_cmd,
				 struct lpfc_io_buf, list);
		spin_unlock(&phba->scsi_buf_list_put_lock);
	}
	spin_unlock_irqrestore(&phba->scsi_buf_list_get_lock, iflag);

	if (lpfc_ndlp_check_qdepth(phba, ndlp) && lpfc_cmd) {
		atomic_inc(&ndlp->cmd_pending);
		lpfc_cmd->flags |= LPFC_SBUF_BUMP_QDEPTH;
	}
	return lpfc_cmd;
}
/**
 * lpfc_get_scsi_buf_s4 - Get a scsi buffer from io_buf_list of the HBA
 * @phba: The HBA for which this call is being executed.
 * @ndlp: pointer to a node-list data structure.
 * @cmnd: Pointer to scsi_cmnd data structure.
 *
 * This routine removes a scsi buffer from head of @hdwq io_buf_list
 * and returns to caller.
 *
 * Return codes:
 *   NULL - Error
 *   Pointer to lpfc_scsi_buf - Success
 **/
static struct lpfc_io_buf *
lpfc_get_scsi_buf_s4(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp,
		     struct scsi_cmnd *cmnd)
{
	struct lpfc_io_buf *lpfc_cmd;
	struct lpfc_sli4_hdw_queue *qp;
	struct sli4_sge *sgl;
	dma_addr_t pdma_phys_fcp_rsp;
	dma_addr_t pdma_phys_fcp_cmd;
	uint32_t cpu, idx;
	int tag;
	struct fcp_cmd_rsp_buf *tmp = NULL;

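	/* Pick the hardware queue: from the block layer's hw queue tag when
	 * scheduling by hdwq, otherwise from the per-CPU hdwq map.
	 */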
	cpu = raw_smp_processor_id();
	if (cmnd && phba->cfg_fcp_io_sched == LPFC_FCP_SCHED_BY_HDWQ) {
		tag = blk_mq_unique_tag(scsi_cmd_to_rq(cmnd));
		idx = blk_mq_unique_tag_to_hwq(tag);
	} else {
		idx = phba->sli4_hba.cpu_map[cpu].hdwq;
	}

	lpfc_cmd = lpfc_get_io_buf(phba, ndlp, idx,
				   !phba->cfg_xri_rebalancing);
	if (!lpfc_cmd) {
		qp = &phba->sli4_hba.hdwq[idx];
		qp->empty_io_bufs++;
		return NULL;
	}

	/* Setup key fields in buffer that may have been changed
	 * if other protocols used this buffer.
	 */
	lpfc_cmd->cur_iocbq.iocb_flag = LPFC_IO_FCP;
	lpfc_cmd->prot_seg_cnt = 0;
	lpfc_cmd->seg_cnt = 0;
	lpfc_cmd->timeout = 0;
	lpfc_cmd->flags = 0;
	lpfc_cmd->start_time = jiffies;
	lpfc_cmd->waitq = NULL;
	lpfc_cmd->cpu = cpu;
#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
	lpfc_cmd->prot_data_type = 0;
#endif
	tmp = lpfc_get_cmd_rsp_buf_per_hdwq(phba, lpfc_cmd);
	if (!tmp) {
		lpfc_release_io_buf(phba, lpfc_cmd, lpfc_cmd->hdwq);
		return NULL;
	}

	lpfc_cmd->fcp_cmnd = tmp->fcp_cmnd;
	lpfc_cmd->fcp_rsp = tmp->fcp_rsp;

	/*
	 * The first two SGEs are the FCP_CMD and FCP_RSP.
	 * The balance are sg list bdes. Initialize the
	 * first two and leave the rest for queuecommand.
	 */
	sgl = (struct sli4_sge *)lpfc_cmd->dma_sgl;
	pdma_phys_fcp_cmd = tmp->fcp_cmd_rsp_dma_handle;
	sgl->addr_hi = cpu_to_le32(putPaddrHigh(pdma_phys_fcp_cmd));
	sgl->addr_lo = cpu_to_le32(putPaddrLow(pdma_phys_fcp_cmd));
	sgl->word2 = le32_to_cpu(sgl->word2);
	bf_set(lpfc_sli4_sge_last, sgl, 0);
	sgl->word2 = cpu_to_le32(sgl->word2);
	sgl->sge_len = cpu_to_le32(sizeof(struct fcp_cmnd));
	sgl++;

	/* Setup the physical region for the FCP RSP */
	pdma_phys_fcp_rsp = pdma_phys_fcp_cmd + sizeof(struct fcp_cmnd);
	sgl->addr_hi = cpu_to_le32(putPaddrHigh(pdma_phys_fcp_rsp));
	sgl->addr_lo = cpu_to_le32(putPaddrLow(pdma_phys_fcp_rsp));
	sgl->word2 = le32_to_cpu(sgl->word2);
	bf_set(lpfc_sli4_sge_last, sgl, 1);
	sgl->word2 = cpu_to_le32(sgl->word2);
	sgl->sge_len = cpu_to_le32(sizeof(struct fcp_rsp));

	if (lpfc_ndlp_check_qdepth(phba, ndlp)) {
		atomic_inc(&ndlp->cmd_pending);
		lpfc_cmd->flags |= LPFC_SBUF_BUMP_QDEPTH;
	}
	return lpfc_cmd;
}
/**
 * lpfc_get_scsi_buf - Get a scsi buffer from lpfc_scsi_buf_list of the HBA
 * @phba: The HBA for which this call is being executed.
 * @ndlp: pointer to a node-list data structure.
 * @cmnd: Pointer to scsi_cmnd data structure.
 *
 * This routine removes a scsi buffer from head of @phba lpfc_scsi_buf_list list
 * and returns to caller.
 *
 * Return codes:
 *   NULL - Error
 *   Pointer to lpfc_scsi_buf - Success
 **/
static struct lpfc_io_buf*
lpfc_get_scsi_buf(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp,
		  struct scsi_cmnd *cmnd)
{
	return phba->lpfc_get_scsi_buf(phba, ndlp, cmnd);
}

/**
 * lpfc_release_scsi_buf_s3 - Return a scsi buffer back to hba scsi buf list
 * @phba: The Hba for which this call is being executed.
 * @psb: The scsi buffer which is being released.
 *
 * This routine releases @psb scsi buffer by adding it to tail of @phba
 * lpfc_scsi_buf_list list.
 **/
static void
lpfc_release_scsi_buf_s3(struct lpfc_hba *phba, struct lpfc_io_buf *psb)
{
	unsigned long iflag = 0;

	psb->seg_cnt = 0;
	psb->prot_seg_cnt = 0;

	spin_lock_irqsave(&phba->scsi_buf_list_put_lock, iflag);
	psb->pCmd = NULL;
	psb->cur_iocbq.iocb_flag = LPFC_IO_FCP;
	list_add_tail(&psb->list, &phba->lpfc_scsi_buf_list_put);
	spin_unlock_irqrestore(&phba->scsi_buf_list_put_lock, iflag);
}

/**
 * lpfc_release_scsi_buf_s4: Return a scsi buffer back to hba scsi buf list.
 * @phba: The Hba for which this call is being executed.
 * @psb: The scsi buffer which is being released.
 *
 * This routine releases @psb scsi buffer by adding it to tail of @hdwq
 * io_buf_list list. For SLI4 XRI's are tied to the scsi buffer
 * and cannot be reused for at least RA_TOV amount of time if it was
 * aborted.
 **/
static void
lpfc_release_scsi_buf_s4(struct lpfc_hba *phba, struct lpfc_io_buf *psb)
{
	struct lpfc_sli4_hdw_queue *qp;
	unsigned long iflag = 0;

	psb->seg_cnt = 0;
	psb->prot_seg_cnt = 0;

	qp = psb->hdwq;
	if (psb->flags & LPFC_SBUF_XBUSY) {
		spin_lock_irqsave(&qp->abts_io_buf_list_lock, iflag);
		if (!phba->cfg_fcp_wait_abts_rsp)
			psb->pCmd = NULL;
		list_add_tail(&psb->list, &qp->lpfc_abts_io_buf_list);
		qp->abts_scsi_io_bufs++;
		spin_unlock_irqrestore(&qp->abts_io_buf_list_lock, iflag);
	} else {
		lpfc_release_io_buf(phba, (struct lpfc_io_buf *)psb, qp);
	}
}

/**
 * lpfc_release_scsi_buf: Return a scsi buffer back to hba scsi buf list.
 * @phba: The Hba for which this call is being executed.
 * @psb: The scsi buffer which is being released.
 *
 * This routine releases @psb scsi buffer by adding it to tail of @phba
 * lpfc_scsi_buf_list list.
 **/
static void
lpfc_release_scsi_buf(struct lpfc_hba *phba, struct lpfc_io_buf *psb)
{
	if ((psb->flags & LPFC_SBUF_BUMP_QDEPTH) && psb->ndlp)
		atomic_dec(&psb->ndlp->cmd_pending);

	psb->flags &= ~LPFC_SBUF_BUMP_QDEPTH;
	phba->lpfc_release_scsi_buf(phba, psb);
}

/**
 * lpfc_fcpcmd_to_iocb - copy the fcp_cmd data into the IOCB
 * @data: A pointer to the immediate command data portion of the IOCB.
 * @fcp_cmnd: The FCP Command that is provided by the SCSI layer.
 *
 * The routine copies the entire FCP command from @fcp_cmnd to @data while
 * byte swapping the data to big endian format for transmission on the wire.
 **/
static void
lpfc_fcpcmd_to_iocb(u8 *data, struct fcp_cmnd *fcp_cmnd)
{
	int i, j;

	for (i = 0, j = 0; i < sizeof(struct fcp_cmnd);
	     i += sizeof(uint32_t), j++) {
		((uint32_t *)data)[j] = cpu_to_be32(((uint32_t *)fcp_cmnd)[j]);
	}
}

/**
 * lpfc_scsi_prep_dma_buf_s3 - DMA mapping for scsi buffer to SLI3 IF spec
 * @phba: The Hba for which this call is being executed.
 * @lpfc_cmd: The scsi buffer which is going to be mapped.
 *
 * This routine does the pci dma mapping for scatter-gather list of scsi cmnd
 * field of @lpfc_cmd for device with SLI-3 interface spec. This routine scans
 * through sg elements and formats the bde. This routine also initializes all
 * IOCB fields which are dependent on scsi command request buffer.
 *
 * Return codes:
 *   1 - Error
 *   0 - Success
 **/
static int
lpfc_scsi_prep_dma_buf_s3(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_cmd)
{
	struct scsi_cmnd *scsi_cmnd = lpfc_cmd->pCmd;
	struct scatterlist *sgel = NULL;
	struct fcp_cmnd *fcp_cmnd = lpfc_cmd->fcp_cmnd;
	struct ulp_bde64 *bpl = (struct ulp_bde64 *)lpfc_cmd->dma_sgl;
	struct lpfc_iocbq *iocbq = &lpfc_cmd->cur_iocbq;
	IOCB_t *iocb_cmd = &lpfc_cmd->cur_iocbq.iocb;
	struct ulp_bde64 *data_bde = iocb_cmd->unsli3.fcp_ext.dbde;
	dma_addr_t physaddr;
	uint32_t num_bde = 0;
	int nseg, datadir = scsi_cmnd->sc_data_direction;

	/*
	 * There are three possibilities here - use scatter-gather segment, use
	 * the single mapping, or neither.  Start the lpfc command prep by
	 * bumping the bpl beyond the fcp_cmnd and fcp_rsp regions to the first
	 * data bde entry.
	 */
	bpl += 2;
	if (scsi_sg_count(scsi_cmnd)) {
		/*
		 * The driver stores the segment count returned from dma_map_sg
		 * because this a count of dma-mappings used to map the use_sg
		 * pages.  They are not guaranteed to be the same for those
		 * architectures that implement an IOMMU.
		 */

		nseg = dma_map_sg(&phba->pcidev->dev, scsi_sglist(scsi_cmnd),
				  scsi_sg_count(scsi_cmnd), datadir);
		if (unlikely(!nseg))
			return 1;

		lpfc_cmd->seg_cnt = nseg;
		if (lpfc_cmd->seg_cnt > phba->cfg_sg_seg_cnt) {
			lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
					"9064 BLKGRD: %s: Too many sg segments"
					" from dma_map_sg.  Config %d, seg_cnt"
					" %d\n", __func__, phba->cfg_sg_seg_cnt,
					lpfc_cmd->seg_cnt);
			WARN_ON_ONCE(lpfc_cmd->seg_cnt > phba->cfg_sg_seg_cnt);
			lpfc_cmd->seg_cnt = 0;
			scsi_dma_unmap(scsi_cmnd);
			return 2;
		}

		/*
		 * The driver established a maximum scatter-gather segment count
		 * during probe that limits the number of sg elements in any
		 * single scsi command.  Just run through the seg_cnt and format
		 * the bde's.
		 * When using SLI-3 the driver will try to fit all the BDEs into
		 * the IOCB. If it can't then the BDEs get added to a BPL as it
		 * does for SLI-2 mode.
		 */
		scsi_for_each_sg(scsi_cmnd, sgel, nseg, num_bde) {
			physaddr = sg_dma_address(sgel);
			if (phba->sli_rev == 3 &&
			    !(phba->sli3_options & LPFC_SLI3_BG_ENABLED) &&
			    !(iocbq->iocb_flag & DSS_SECURITY_OP) &&
			    nseg <= LPFC_EXT_DATA_BDE_COUNT) {
				data_bde->tus.f.bdeFlags = BUFF_TYPE_BDE_64;
				data_bde->tus.f.bdeSize = sg_dma_len(sgel);
				data_bde->addrLow = putPaddrLow(physaddr);
				data_bde->addrHigh = putPaddrHigh(physaddr);
				data_bde++;
			} else {
				bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64;
				bpl->tus.f.bdeSize = sg_dma_len(sgel);
				bpl->tus.w = le32_to_cpu(bpl->tus.w);
				bpl->addrLow =
					le32_to_cpu(putPaddrLow(physaddr));
				bpl->addrHigh =
					le32_to_cpu(putPaddrHigh(physaddr));
				bpl++;
			}
		}
	}

	/*
	 * Finish initializing those IOCB fields that are dependent on the
	 * scsi_cmnd request_buffer.  Note that for SLI-2 the bdeSize is
	 * explicitly reinitialized and for SLI-3 the extended bde count is
	 * explicitly reinitialized since all iocb memory resources are reused.
	 */
	if (phba->sli_rev == 3 &&
	    !(phba->sli3_options & LPFC_SLI3_BG_ENABLED) &&
	    !(iocbq->iocb_flag & DSS_SECURITY_OP)) {
		if (num_bde > LPFC_EXT_DATA_BDE_COUNT) {
			/*
			 * The extended IOCB format can only fit 3 BDE or a BPL.
			 * This I/O has more than 3 BDE so the 1st data bde will
			 * be a BPL that is filled in here.
			 */
			physaddr = lpfc_cmd->dma_handle;
			data_bde->tus.f.bdeFlags = BUFF_TYPE_BLP_64;
			data_bde->tus.f.bdeSize = (num_bde *
						   sizeof(struct ulp_bde64));
			physaddr += (sizeof(struct fcp_cmnd) +
				     sizeof(struct fcp_rsp) +
				     (2 * sizeof(struct ulp_bde64)));
			data_bde->addrHigh = putPaddrHigh(physaddr);
			data_bde->addrLow = putPaddrLow(physaddr);
			/* ebde count includes the response bde and data bpl */
			iocb_cmd->unsli3.fcp_ext.ebde_count = 2;
		} else {
			/* ebde count includes the response bde and data bdes */
			iocb_cmd->unsli3.fcp_ext.ebde_count = (num_bde + 1);
		}
	} else {
		iocb_cmd->un.fcpi64.bdl.bdeSize =
			((num_bde + 2) * sizeof(struct ulp_bde64));
		iocb_cmd->unsli3.fcp_ext.ebde_count = (num_bde + 1);
	}
	fcp_cmnd->fcpDl = cpu_to_be32(scsi_bufflen(scsi_cmnd));

	/*
	 * Due to difference in data length between DIF/non-DIF paths,
	 * we need to set word 4 of IOCB here
	 */
	iocb_cmd->un.fcpi.fcpi_parm = scsi_bufflen(scsi_cmnd);
	lpfc_fcpcmd_to_iocb(iocb_cmd->unsli3.fcp_ext.icd, fcp_cmnd);
	return 0;
}

#ifdef CONFIG_SCSI_LPFC_DEBUG_FS

/* Return BG_ERR_INIT if error injection is detected by Initiator */
#define BG_ERR_INIT	0x1
/* Return BG_ERR_TGT if error injection is detected by Target */
#define BG_ERR_TGT	0x2
/* Return BG_ERR_SWAP if swapping CSUM<-->CRC is required for error injection */
#define BG_ERR_SWAP	0x10
/*
 * Return BG_ERR_CHECK if disabling Guard/Ref/App checking is required for
 * error injection
 */
#define BG_ERR_CHECK	0x20

/**
 * lpfc_bg_err_inject - Determine if we should inject an error
 * @phba: The Hba for which this call is being executed.
 * @sc: The SCSI command to examine
 * @reftag: (out) BlockGuard reference tag for transmitted data
 * @apptag: (out) BlockGuard application tag for transmitted data
 * @new_guard: (in) Value to replace CRC with if needed
 *
 * Returns BG_ERR_* bit mask or 0 if request ignored
 **/
static int
lpfc_bg_err_inject(struct lpfc_hba *phba, struct scsi_cmnd *sc,
		uint32_t *reftag, uint16_t *apptag, uint32_t new_guard)
{
	struct scatterlist *sgpe; /* s/g prot entry */
	struct lpfc_io_buf *lpfc_cmd = NULL;
	struct scsi_dif_tuple *src = NULL;
	struct lpfc_nodelist *ndlp;
	struct lpfc_rport_data *rdata;
	uint32_t op = scsi_get_prot_op(sc);
	uint32_t blksize;
	uint32_t numblks;
	u32 lba;
	int rc = 0;
	int blockoff = 0;

	if (op == SCSI_PROT_NORMAL)
		return 0;

	sgpe = scsi_prot_sglist(sc);
	lba = scsi_prot_ref_tag(sc);
	if (lba == LPFC_INVALID_REFTAG)
		return 0;

	/* First check if we need to match the LBA */
	if (phba->lpfc_injerr_lba != LPFC_INJERR_LBA_OFF) {
		blksize = scsi_prot_interval(sc);
		numblks = (scsi_bufflen(sc) + blksize - 1) / blksize;

		/* Make sure we have the right LBA if one is specified */
		if (phba->lpfc_injerr_lba < (u64)lba ||
		    (phba->lpfc_injerr_lba >= (u64)(lba + numblks)))
			return 0;
		if (sgpe) {
			blockoff = phba->lpfc_injerr_lba - (u64)lba;
			numblks = sg_dma_len(sgpe) /
				sizeof(struct scsi_dif_tuple);
			if (numblks < blockoff)
				blockoff = numblks;
		}
	}

	/* Next check if we need to match the remote NPortID or WWPN */
	rdata = lpfc_rport_data_from_scsi_device(sc->device);
	if (rdata && rdata->pnode) {
		ndlp = rdata->pnode;

		/* Make sure we have the right NPortID if one is specified */
		if (phba->lpfc_injerr_nportid &&
		    (phba->lpfc_injerr_nportid != ndlp->nlp_DID))
			return 0;

		/*
		 * Make sure we have the right WWPN if one is specified.
		 * wwn[0] should be a non-zero NAA in a good WWPN.
		 */
		if (phba->lpfc_injerr_wwpn.u.wwn[0] &&
		    (memcmp(&ndlp->nlp_portname, &phba->lpfc_injerr_wwpn,
			sizeof(struct lpfc_name)) != 0))
			return 0;
	}

	/* Setup a ptr to the protection data if the SCSI host provides it */
	if (sgpe) {
		src = (struct scsi_dif_tuple *)sg_virt(sgpe);
		src += blockoff;
		lpfc_cmd = (struct lpfc_io_buf *)sc->host_scribble;
	}

	/* Should we change the Reference Tag */
	if (reftag) {
		if (phba->lpfc_injerr_wref_cnt) {
			switch (op) {
			case SCSI_PROT_WRITE_PASS:
				if (src) {
					/*
					 * For WRITE_PASS, force the error
					 * to be sent on the wire. It should
					 * be detected by the Target.
					 * If blockoff != 0 error will be
					 * inserted in middle of the IO.
					 */

					lpfc_printf_log(phba, KERN_ERR,
							LOG_TRACE_EVENT,
					"9076 BLKGRD: Injecting reftag error: "
					"write lba x%lx + x%x oldrefTag x%x\n",
					(unsigned long)lba, blockoff,
					be32_to_cpu(src->ref_tag));

					/*
					 * Save the old ref_tag so we can
					 * restore it on completion.
					 */
					if (lpfc_cmd) {
						lpfc_cmd->prot_data_type =
							LPFC_INJERR_REFTAG;
						lpfc_cmd->prot_data_segment =
							src;
						lpfc_cmd->prot_data =
							src->ref_tag;
					}
					src->ref_tag = cpu_to_be32(0xDEADBEEF);
					phba->lpfc_injerr_wref_cnt--;
					if (phba->lpfc_injerr_wref_cnt == 0) {
						phba->lpfc_injerr_nportid = 0;
						phba->lpfc_injerr_lba =
							LPFC_INJERR_LBA_OFF;
						memset(&phba->lpfc_injerr_wwpn,
						       0, sizeof(struct lpfc_name));
					}
					rc = BG_ERR_TGT | BG_ERR_CHECK;

					break;
				}
				fallthrough;
			case SCSI_PROT_WRITE_INSERT:
				/*
				 * For WRITE_INSERT, force the error
				 * to be sent on the wire. It should be
				 * detected by the Target.
				 */
				/* DEADBEEF will be the reftag on the wire */
				*reftag = 0xDEADBEEF;
				phba->lpfc_injerr_wref_cnt--;
				if (phba->lpfc_injerr_wref_cnt == 0) {
					phba->lpfc_injerr_nportid = 0;
					phba->lpfc_injerr_lba =
						LPFC_INJERR_LBA_OFF;
					memset(&phba->lpfc_injerr_wwpn,
					       0, sizeof(struct lpfc_name));
				}
				rc = BG_ERR_TGT | BG_ERR_CHECK;

				lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
					"9078 BLKGRD: Injecting reftag error: "
					"write lba x%lx\n", (unsigned long)lba);
				break;
			case SCSI_PROT_WRITE_STRIP:
				/*
				 * For WRITE_STRIP and WRITE_PASS,
				 * force the error on data
				 * being copied from SLI-Host to SLI-Port.
				 */
				*reftag = 0xDEADBEEF;
				phba->lpfc_injerr_wref_cnt--;
				if (phba->lpfc_injerr_wref_cnt == 0) {
					phba->lpfc_injerr_nportid = 0;
					phba->lpfc_injerr_lba =
						LPFC_INJERR_LBA_OFF;
					memset(&phba->lpfc_injerr_wwpn,
					       0, sizeof(struct lpfc_name));
				}
				rc = BG_ERR_INIT;

				lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
					"9077 BLKGRD: Injecting reftag error: "
					"write lba x%lx\n", (unsigned long)lba);
				break;
			}
		}
		if (phba->lpfc_injerr_rref_cnt) {
			switch (op) {
			case SCSI_PROT_READ_INSERT:
			case SCSI_PROT_READ_STRIP:
			case SCSI_PROT_READ_PASS:
				/*
				 * For READ_STRIP and READ_PASS, force the
				 * error on data being read off the wire. It
				 * should force an IO error to the driver.
				 */
				*reftag = 0xDEADBEEF;
				phba->lpfc_injerr_rref_cnt--;
				if (phba->lpfc_injerr_rref_cnt == 0) {
					phba->lpfc_injerr_nportid = 0;
					phba->lpfc_injerr_lba =
						LPFC_INJERR_LBA_OFF;
					memset(&phba->lpfc_injerr_wwpn,
					       0, sizeof(struct lpfc_name));
				}
				rc = BG_ERR_INIT;

				lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
					"9079 BLKGRD: Injecting reftag error: "
					"read lba x%lx\n", (unsigned long)lba);
				break;
			}
		}
	}

	/* Should we change the Application Tag */
	if (apptag) {
		if (phba->lpfc_injerr_wapp_cnt) {
			switch (op) {
			case SCSI_PROT_WRITE_PASS:
				if (src) {
					/*
					 * For WRITE_PASS, force the error
					 * to be sent on the wire. It should
					 * be detected by the Target.
					 * If blockoff != 0 error will be
					 * inserted in middle of the IO.
					 */

					lpfc_printf_log(phba, KERN_ERR,
							LOG_TRACE_EVENT,
					"9080 BLKGRD: Injecting apptag error: "
					"write lba x%lx + x%x oldappTag x%x\n",
					(unsigned long)lba, blockoff,
					be16_to_cpu(src->app_tag));

					/*
					 * Save the old app_tag so we can
					 * restore it on completion.
					 */
					if (lpfc_cmd) {
						lpfc_cmd->prot_data_type =
							LPFC_INJERR_APPTAG;
						lpfc_cmd->prot_data_segment =
							src;
						lpfc_cmd->prot_data =
							src->app_tag;
					}
					src->app_tag = cpu_to_be16(0xDEAD);
					phba->lpfc_injerr_wapp_cnt--;
					if (phba->lpfc_injerr_wapp_cnt == 0) {
						phba->lpfc_injerr_nportid = 0;
						phba->lpfc_injerr_lba =
							LPFC_INJERR_LBA_OFF;
						memset(&phba->lpfc_injerr_wwpn,
						       0, sizeof(struct lpfc_name));
					}
					rc = BG_ERR_TGT | BG_ERR_CHECK;
					break;
				}
				fallthrough;
			case SCSI_PROT_WRITE_INSERT:
				/*
				 * For WRITE_INSERT, force the
				 * error to be sent on the wire. It should be
				 * detected by the Target.
				 */
				/* DEAD will be the apptag on the wire */
				*apptag = 0xDEAD;
				phba->lpfc_injerr_wapp_cnt--;
				if (phba->lpfc_injerr_wapp_cnt == 0) {
					phba->lpfc_injerr_nportid = 0;
					phba->lpfc_injerr_lba =
						LPFC_INJERR_LBA_OFF;
					memset(&phba->lpfc_injerr_wwpn,
					       0, sizeof(struct lpfc_name));
				}
				rc = BG_ERR_TGT | BG_ERR_CHECK;

				lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
					"0813 BLKGRD: Injecting apptag error: "
					"write lba x%lx\n", (unsigned long)lba);
				break;
			case SCSI_PROT_WRITE_STRIP:
				/*
				 * For WRITE_STRIP and WRITE_PASS,
				 * force the error on data
				 * being copied from SLI-Host to SLI-Port.
				 */
				*apptag = 0xDEAD;
				phba->lpfc_injerr_wapp_cnt--;
				if (phba->lpfc_injerr_wapp_cnt == 0) {
					phba->lpfc_injerr_nportid = 0;
					phba->lpfc_injerr_lba =
						LPFC_INJERR_LBA_OFF;
					memset(&phba->lpfc_injerr_wwpn,
					       0, sizeof(struct lpfc_name));
				}
				rc = BG_ERR_INIT;

				lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
					"0812 BLKGRD: Injecting apptag error: "
					"write lba x%lx\n", (unsigned long)lba);
				break;
			}
		}
		if (phba->lpfc_injerr_rapp_cnt) {
			switch (op) {
			case SCSI_PROT_READ_INSERT:
			case SCSI_PROT_READ_STRIP:
			case SCSI_PROT_READ_PASS:
				/*
				 * For READ_STRIP and READ_PASS, force the
				 * error on data being read off the wire. It
				 * should force an IO error to the driver.
				 */
				*apptag = 0xDEAD;
				phba->lpfc_injerr_rapp_cnt--;
				if (phba->lpfc_injerr_rapp_cnt == 0) {
					phba->lpfc_injerr_nportid = 0;
					phba->lpfc_injerr_lba =
						LPFC_INJERR_LBA_OFF;
					memset(&phba->lpfc_injerr_wwpn,
					       0, sizeof(struct lpfc_name));
				}
				rc = BG_ERR_INIT;

				lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
					"0814 BLKGRD: Injecting apptag error: "
					"read lba x%lx\n", (unsigned long)lba);
				break;
			}
		}
	}


	/* Should we change the Guard Tag */
	if (new_guard) {
		if (phba->lpfc_injerr_wgrd_cnt) {
			switch (op) {
			case SCSI_PROT_WRITE_PASS:
				rc = BG_ERR_CHECK;
				fallthrough;

			case SCSI_PROT_WRITE_INSERT:
				/*
				 * For WRITE_INSERT, force the
				 * error to be sent on the wire. It should be
				 * detected by the Target.
				 */
				phba->lpfc_injerr_wgrd_cnt--;
				if (phba->lpfc_injerr_wgrd_cnt == 0) {
					phba->lpfc_injerr_nportid = 0;
					phba->lpfc_injerr_lba =
						LPFC_INJERR_LBA_OFF;
					memset(&phba->lpfc_injerr_wwpn,
					       0, sizeof(struct lpfc_name));
				}

				rc |= BG_ERR_TGT | BG_ERR_SWAP;
				/* Signals the caller to swap CRC->CSUM */

				lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
					"0817 BLKGRD: Injecting guard error: "
					"write lba x%lx\n", (unsigned long)lba);
				break;
			case SCSI_PROT_WRITE_STRIP:
				/*
				 * For WRITE_STRIP and WRITE_PASS,
				 * force the error on data
				 * being copied from SLI-Host to SLI-Port.
				 */
				phba->lpfc_injerr_wgrd_cnt--;
				if (phba->lpfc_injerr_wgrd_cnt == 0) {
					phba->lpfc_injerr_nportid = 0;
					phba->lpfc_injerr_lba =
						LPFC_INJERR_LBA_OFF;
					memset(&phba->lpfc_injerr_wwpn,
					       0, sizeof(struct lpfc_name));
				}

				rc = BG_ERR_INIT | BG_ERR_SWAP;
				/* Signals the caller to swap CRC->CSUM */

				lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
					"0816 BLKGRD: Injecting guard error: "
					"write lba x%lx\n", (unsigned long)lba);
				break;
			}
		}
		if (phba->lpfc_injerr_rgrd_cnt) {
			switch (op) {
			case SCSI_PROT_READ_INSERT:
			case SCSI_PROT_READ_STRIP:
			case SCSI_PROT_READ_PASS:
				/*
				 * For READ_STRIP and READ_PASS, force the
				 * error on data being read off the wire. It
				 * should force an IO error to the driver.
				 */
				phba->lpfc_injerr_rgrd_cnt--;
				if (phba->lpfc_injerr_rgrd_cnt == 0) {
					phba->lpfc_injerr_nportid = 0;
					phba->lpfc_injerr_lba =
						LPFC_INJERR_LBA_OFF;
					memset(&phba->lpfc_injerr_wwpn,
					       0, sizeof(struct lpfc_name));
				}

				rc = BG_ERR_INIT | BG_ERR_SWAP;
				/* Signals the caller to swap CRC->CSUM */

				lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
					"0818 BLKGRD: Injecting guard error: "
					"read lba x%lx\n", (unsigned long)lba);
			}
		}
	}

	return rc;
}
#endif

/**
 * lpfc_sc_to_bg_opcodes - Determine the BlockGuard opcodes to be used with
 * the specified SCSI command.
 * @phba: The Hba for which this call is being executed.
 * @sc: The SCSI command to examine
 * @txop: (out) BlockGuard operation for transmitted data
 * @rxop: (out) BlockGuard operation for received data
 *
 * Returns: zero on success; non-zero if tx and/or rx op cannot be determined
 *
 **/
static int
lpfc_sc_to_bg_opcodes(struct lpfc_hba *phba, struct scsi_cmnd *sc,
		uint8_t *txop, uint8_t *rxop)
{
	uint8_t ret = 0;

	if (sc->prot_flags & SCSI_PROT_IP_CHECKSUM) {
		switch (scsi_get_prot_op(sc)) {
		case SCSI_PROT_READ_INSERT:
		case SCSI_PROT_WRITE_STRIP:
			*rxop = BG_OP_IN_NODIF_OUT_CSUM;
			*txop = BG_OP_IN_CSUM_OUT_NODIF;
			break;

		case SCSI_PROT_READ_STRIP:
		case SCSI_PROT_WRITE_INSERT:
			*rxop = BG_OP_IN_CRC_OUT_NODIF;
			*txop = BG_OP_IN_NODIF_OUT_CRC;
			break;

		case SCSI_PROT_READ_PASS:
		case SCSI_PROT_WRITE_PASS:
			*rxop = BG_OP_IN_CRC_OUT_CSUM;
			*txop = BG_OP_IN_CSUM_OUT_CRC;
			break;

		case SCSI_PROT_NORMAL:
		default:
			lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"9063 BLKGRD: Bad op/guard:%d/IP combination\n",
					scsi_get_prot_op(sc));
			ret = 1;
			break;

		}
	} else {
		switch (scsi_get_prot_op(sc)) {
		case SCSI_PROT_READ_STRIP:
		case SCSI_PROT_WRITE_INSERT:
			*rxop = BG_OP_IN_CRC_OUT_NODIF;
			*txop = BG_OP_IN_NODIF_OUT_CRC;
			break;

		case SCSI_PROT_READ_PASS:
		case SCSI_PROT_WRITE_PASS:
			*rxop = BG_OP_IN_CRC_OUT_CRC;
			*txop = BG_OP_IN_CRC_OUT_CRC;
			break;

		case SCSI_PROT_READ_INSERT:
		case SCSI_PROT_WRITE_STRIP:
			*rxop = BG_OP_IN_NODIF_OUT_CRC;
			*txop = BG_OP_IN_CRC_OUT_NODIF;
			break;

		case SCSI_PROT_NORMAL:
		default:
			lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"9075 BLKGRD: Bad op/guard:%d/CRC combination\n",
					scsi_get_prot_op(sc));
			ret = 1;
			break;
		}
	}

	return ret;
}

1500
acd6859b
JS
1501#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
1502/**
1503 * lpfc_bg_err_opcodes - reDetermine the BlockGuard opcodes to be used with
1504 * the specified SCSI command in order to force a guard tag error.
1505 * @phba: The Hba for which this call is being executed.
1506 * @sc: The SCSI command to examine
eceee00e
LJ
1507 * @txop: (out) BlockGuard operation for transmitted data
1508 * @rxop: (out) BlockGuard operation for received data
acd6859b
JS
1509 *
1510 * Returns: zero on success; non-zero if tx and/or rx op cannot be determined
1511 *
1512 **/
1513static int
1514lpfc_bg_err_opcodes(struct lpfc_hba *phba, struct scsi_cmnd *sc,
1515 uint8_t *txop, uint8_t *rxop)
1516{
acd6859b 1517
125c12f7 1518 if (sc->prot_flags & SCSI_PROT_IP_CHECKSUM) {
acd6859b
JS
1519 switch (scsi_get_prot_op(sc)) {
1520 case SCSI_PROT_READ_INSERT:
1521 case SCSI_PROT_WRITE_STRIP:
acd6859b 1522 *rxop = BG_OP_IN_NODIF_OUT_CRC;
4ac9b226 1523 *txop = BG_OP_IN_CRC_OUT_NODIF;
acd6859b
JS
1524 break;
1525
1526 case SCSI_PROT_READ_STRIP:
1527 case SCSI_PROT_WRITE_INSERT:
acd6859b 1528 *rxop = BG_OP_IN_CSUM_OUT_NODIF;
4ac9b226 1529 *txop = BG_OP_IN_NODIF_OUT_CSUM;
acd6859b
JS
1530 break;
1531
1532 case SCSI_PROT_READ_PASS:
1533 case SCSI_PROT_WRITE_PASS:
4ac9b226 1534 *rxop = BG_OP_IN_CSUM_OUT_CRC;
9a6b09c0 1535 *txop = BG_OP_IN_CRC_OUT_CSUM;
acd6859b
JS
1536 break;
1537
1538 case SCSI_PROT_NORMAL:
1539 default:
1540 break;
1541
1542 }
1543 } else {
1544 switch (scsi_get_prot_op(sc)) {
1545 case SCSI_PROT_READ_STRIP:
1546 case SCSI_PROT_WRITE_INSERT:
acd6859b 1547 *rxop = BG_OP_IN_CSUM_OUT_NODIF;
4ac9b226 1548 *txop = BG_OP_IN_NODIF_OUT_CSUM;
acd6859b
JS
1549 break;
1550
1551 case SCSI_PROT_READ_PASS:
1552 case SCSI_PROT_WRITE_PASS:
4ac9b226 1553 *rxop = BG_OP_IN_CSUM_OUT_CSUM;
9a6b09c0 1554 *txop = BG_OP_IN_CSUM_OUT_CSUM;
acd6859b
JS
1555 break;
1556
1557 case SCSI_PROT_READ_INSERT:
1558 case SCSI_PROT_WRITE_STRIP:
acd6859b 1559 *rxop = BG_OP_IN_NODIF_OUT_CSUM;
4ac9b226 1560 *txop = BG_OP_IN_CSUM_OUT_NODIF;
acd6859b
JS
1561 break;
1562
1563 case SCSI_PROT_NORMAL:
1564 default:
1565 break;
1566 }
1567 }
1568
5d1e1510 1569 return 0;
acd6859b
JS
1570}
1571#endif
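
To make the error-injection path above concrete: the redetermined opcodes swap CRC and IP checksum relative to what lpfc_sc_to_bg_opcodes would normally pick, so the guard tag the HBA computes or verifies cannot match the one on the wire, which forces the guard error the debugfs injection asks for. A minimal standalone sketch of that swap idea, using illustrative names rather than the driver's BG_OP_* values:

	#include <stdio.h>

	/* Illustrative sketch only: swap the guard type used on the host
	 * side so the HBA computes a mismatching guard tag. The enum and
	 * function names are hypothetical, not lpfc API.
	 */
	enum guard_type { GUARD_CRC, GUARD_IP_CSUM };

	static enum guard_type swap_guard(enum guard_type g)
	{
		return (g == GUARD_CRC) ? GUARD_IP_CSUM : GUARD_CRC;
	}

	int main(void)
	{
		enum guard_type negotiated = GUARD_IP_CSUM;

		/* the injected opcode pair is built from the swapped type */
		printf("negotiated=%d injected=%d\n",
		       negotiated, swap_guard(negotiated));
		return 0;
	}
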
1572
1573/**
1574 * lpfc_bg_setup_bpl - Setup BlockGuard BPL with no protection data
1575 * @phba: The Hba for which this call is being executed.
1576 * @sc: pointer to scsi command we're working on
1577 * @bpl: pointer to buffer list for protection groups
eceee00e 1578 * @datasegcnt: number of segments of data that have been dma mapped
acd6859b
JS
1579 *
1580 * This function sets up BPL buffer list for protection groups of
e2a0a9d6
JS
1581 * type LPFC_PG_TYPE_NO_DIF
1582 *
1583 * This is usually used when the HBA is instructed to generate
1584 * DIFs and insert them into the data stream (or strip DIFs from
1585 * the incoming data stream)
1586 *
1587 * The buffer list consists of just one protection group described
1588 * below:
1589 * +-------------------------+
6c8eea54
JS
1590 * start of prot group --> | PDE_5 |
1591 * +-------------------------+
1592 * | PDE_6 |
e2a0a9d6
JS
1593 * +-------------------------+
1594 * | Data BDE |
1595 * +-------------------------+
1596 * |more Data BDE's ... (opt)|
1597 * +-------------------------+
1598 *
e2a0a9d6
JS
1599 *
1600 * Note: Data s/g buffers have been dma mapped
acd6859b
JS
1601 *
1602 * Returns the number of BDEs added to the BPL.
1603 **/
e2a0a9d6
JS
1604static int
1605lpfc_bg_setup_bpl(struct lpfc_hba *phba, struct scsi_cmnd *sc,
1606 struct ulp_bde64 *bpl, int datasegcnt)
1607{
1608 struct scatterlist *sgde = NULL; /* s/g data entry */
6c8eea54
JS
1609 struct lpfc_pde5 *pde5 = NULL;
1610 struct lpfc_pde6 *pde6 = NULL;
e2a0a9d6 1611 dma_addr_t physaddr;
6c8eea54 1612 int i = 0, num_bde = 0, status;
e2a0a9d6 1613 int datadir = sc->sc_data_direction;
0829a19a 1614#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
acd6859b 1615 uint32_t rc;
0829a19a 1616#endif
acd6859b 1617 uint32_t checking = 1;
e2a0a9d6 1618 uint32_t reftag;
6c8eea54 1619 uint8_t txop, rxop;
e2a0a9d6 1620
6c8eea54
JS
1621 status = lpfc_sc_to_bg_opcodes(phba, sc, &txop, &rxop);
1622 if (status)
e2a0a9d6
JS
1623 goto out;
1624
6c8eea54 1625 /* extract some info from the scsi command for the PDEs */
125c12f7 1626 reftag = scsi_prot_ref_tag(sc);
68a6a66c
JS
1627 if (reftag == LPFC_INVALID_REFTAG)
1628 goto out;
e2a0a9d6 1629
f9bb2da1 1630#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
4ac9b226 1631 rc = lpfc_bg_err_inject(phba, sc, &reftag, NULL, 1);
acd6859b 1632 if (rc) {
9a6b09c0 1633 if (rc & BG_ERR_SWAP)
acd6859b 1634 lpfc_bg_err_opcodes(phba, sc, &txop, &rxop);
9a6b09c0 1635 if (rc & BG_ERR_CHECK)
acd6859b
JS
1636 checking = 0;
1637 }
f9bb2da1
JS
1638#endif
1639
6c8eea54
JS
1640 /* setup PDE5 with what we have */
1641 pde5 = (struct lpfc_pde5 *) bpl;
1642 memset(pde5, 0, sizeof(struct lpfc_pde5));
1643 bf_set(pde5_type, pde5, LPFC_PDE5_DESCRIPTOR);
6c8eea54 1644
bc73905a 1645 /* Endianness conversion if necessary for PDE5 */
589a52d6 1646 pde5->word0 = cpu_to_le32(pde5->word0);
7c56b9fd 1647 pde5->reftag = cpu_to_le32(reftag);
589a52d6 1648
6c8eea54
JS
1649 /* advance bpl and increment bde count */
1650 num_bde++;
1651 bpl++;
1652 pde6 = (struct lpfc_pde6 *) bpl;
1653
1654 /* setup PDE6 with the rest of the info */
1655 memset(pde6, 0, sizeof(struct lpfc_pde6));
1656 bf_set(pde6_type, pde6, LPFC_PDE6_DESCRIPTOR);
1657 bf_set(pde6_optx, pde6, txop);
1658 bf_set(pde6_oprx, pde6, rxop);
a6887e28
JS
1659
1660 /*
1661 * We only need to check the data on READs; for WRITEs,
1662 * protection data is automatically generated, not checked.
1663 */
6c8eea54 1664 if (datadir == DMA_FROM_DEVICE) {
125c12f7 1665 if (sc->prot_flags & SCSI_PROT_GUARD_CHECK)
a6887e28
JS
1666 bf_set(pde6_ce, pde6, checking);
1667 else
1668 bf_set(pde6_ce, pde6, 0);
1669
125c12f7 1670 if (sc->prot_flags & SCSI_PROT_REF_CHECK)
a6887e28
JS
1671 bf_set(pde6_re, pde6, checking);
1672 else
1673 bf_set(pde6_re, pde6, 0);
6c8eea54
JS
1674 }
1675 bf_set(pde6_ai, pde6, 1);
7c56b9fd
JS
1676 bf_set(pde6_ae, pde6, 0);
1677 bf_set(pde6_apptagval, pde6, 0);
e2a0a9d6 1678
bc73905a 1679 /* Endianness conversion if necessary for PDE6 */
589a52d6
JS
1680 pde6->word0 = cpu_to_le32(pde6->word0);
1681 pde6->word1 = cpu_to_le32(pde6->word1);
1682 pde6->word2 = cpu_to_le32(pde6->word2);
1683
6c8eea54 1684 /* advance bpl and increment bde count */
e2a0a9d6
JS
1685 num_bde++;
1686 bpl++;
1687
1688 /* assumption: caller has already run dma_map_sg on command data */
1689 scsi_for_each_sg(sc, sgde, datasegcnt, i) {
1690 physaddr = sg_dma_address(sgde);
1691 bpl->addrLow = le32_to_cpu(putPaddrLow(physaddr));
1692 bpl->addrHigh = le32_to_cpu(putPaddrHigh(physaddr));
1693 bpl->tus.f.bdeSize = sg_dma_len(sgde);
1694 if (datadir == DMA_TO_DEVICE)
1695 bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64;
1696 else
1697 bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64I;
1698 bpl->tus.w = le32_to_cpu(bpl->tus.w);
1699 bpl++;
1700 num_bde++;
1701 }
1702
1703out:
1704 return num_bde;
1705}
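
A quick check of the layout above: a no-DIF protection group always contributes exactly two descriptor entries (PDE5 and PDE6) plus one BDE per dma-mapped data segment, so on success num_bde = 2 + datasegcnt. A trivial sketch (hypothetical helper name, not driver code):

	/* Illustrative only: BDE count for the LPFC_PG_TYPE_NO_DIF layout,
	 * i.e. PDE5 + PDE6 + one data BDE per mapped segment. A 3-segment
	 * command therefore yields 5 BDEs.
	 */
	static int expected_no_dif_bde_count(int datasegcnt)
	{
		return 2 + datasegcnt;
	}
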
1706
acd6859b
JS
1707/**
1708 * lpfc_bg_setup_bpl_prot - Setup BlockGuard BPL with protection data
1709 * @phba: The Hba for which this call is being executed.
1710 * @sc: pointer to scsi command we're working on
1711 * @bpl: pointer to buffer list for protection groups
1712 * @datacnt: number of segments of data that have been dma mapped
1713 * @protcnt: number of segment of protection data that have been dma mapped
1714 *
1715 * This function sets up BPL buffer list for protection groups of
1716 * type LPFC_PG_TYPE_DIF
e2a0a9d6
JS
1717 *
1718 * This is usually used when DIFs are in their own buffers,
1719 * separate from the data. The HBA can then be instructed
1720 * to place the DIFs in the outgoing stream. For read operations,
1721 * the HBA can extract the DIFs and place them in DIF buffers.
1722 *
1723 * The buffer list for this type consists of one or more of the
1724 * protection groups described below:
1725 * +-------------------------+
6c8eea54 1726 * start of first prot group --> | PDE_5 |
e2a0a9d6 1727 * +-------------------------+
6c8eea54
JS
1728 * | PDE_6 |
1729 * +-------------------------+
1730 * | PDE_7 (Prot BDE) |
e2a0a9d6
JS
1731 * +-------------------------+
1732 * | Data BDE |
1733 * +-------------------------+
1734 * |more Data BDE's ... (opt)|
1735 * +-------------------------+
6c8eea54 1736 * start of new prot group --> | PDE_5 |
e2a0a9d6
JS
1737 * +-------------------------+
1738 * | ... |
1739 * +-------------------------+
1740 *
e2a0a9d6
JS
1741 * Note: It is assumed that both data and protection s/g buffers have been
1742 * mapped for DMA
acd6859b
JS
1743 *
1744 * Returns the number of BDEs added to the BPL.
1745 **/
e2a0a9d6
JS
1746static int
1747lpfc_bg_setup_bpl_prot(struct lpfc_hba *phba, struct scsi_cmnd *sc,
1748 struct ulp_bde64 *bpl, int datacnt, int protcnt)
1749{
1750 struct scatterlist *sgde = NULL; /* s/g data entry */
1751 struct scatterlist *sgpe = NULL; /* s/g prot entry */
6c8eea54
JS
1752 struct lpfc_pde5 *pde5 = NULL;
1753 struct lpfc_pde6 *pde6 = NULL;
7f86059a 1754 struct lpfc_pde7 *pde7 = NULL;
e2a0a9d6
JS
1755 dma_addr_t dataphysaddr, protphysaddr;
1756 unsigned short curr_data = 0, curr_prot = 0;
7f86059a
JS
1757 unsigned int split_offset;
1758 unsigned int protgroup_len, protgroup_offset = 0, protgroup_remainder;
e2a0a9d6
JS
1759 unsigned int protgrp_blks, protgrp_bytes;
1760 unsigned int remainder, subtotal;
6c8eea54 1761 int status;
e2a0a9d6
JS
1762 int datadir = sc->sc_data_direction;
1763 unsigned char pgdone = 0, alldone = 0;
1764 unsigned blksize;
0829a19a 1765#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
acd6859b 1766 uint32_t rc;
0829a19a 1767#endif
acd6859b 1768 uint32_t checking = 1;
e2a0a9d6 1769 uint32_t reftag;
6c8eea54 1770 uint8_t txop, rxop;
e2a0a9d6
JS
1771 int num_bde = 0;
1772
1773 sgpe = scsi_prot_sglist(sc);
1774 sgde = scsi_sglist(sc);
1775
1776 if (!sgpe || !sgde) {
372c187b 1777 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
32350664 1778 "9020 Invalid s/g entry: data=x%px prot=x%px\n",
acd6859b
JS
1779 sgpe, sgde);
1780 return 0;
1781 }
1782
1783 status = lpfc_sc_to_bg_opcodes(phba, sc, &txop, &rxop);
1784 if (status)
1785 goto out;
1786
1787 /* extract some info from the scsi command */
125c12f7
MP
1788 blksize = scsi_prot_interval(sc);
1789 reftag = scsi_prot_ref_tag(sc);
68a6a66c
JS
1790 if (reftag == LPFC_INVALID_REFTAG)
1791 goto out;
acd6859b
JS
1792
1793#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
4ac9b226 1794 rc = lpfc_bg_err_inject(phba, sc, &reftag, NULL, 1);
acd6859b 1795 if (rc) {
9a6b09c0 1796 if (rc & BG_ERR_SWAP)
acd6859b 1797 lpfc_bg_err_opcodes(phba, sc, &txop, &rxop);
9a6b09c0 1798 if (rc & BG_ERR_CHECK)
acd6859b
JS
1799 checking = 0;
1800 }
1801#endif
1802
1803 split_offset = 0;
1804 do {
96f7077f
JS
1805 /* Check to see if we ran out of space */
1806 if (num_bde >= (phba->cfg_total_seg_cnt - 2))
1807 return num_bde + 3;
1808
acd6859b
JS
1809 /* setup PDE5 with what we have */
1810 pde5 = (struct lpfc_pde5 *) bpl;
1811 memset(pde5, 0, sizeof(struct lpfc_pde5));
1812 bf_set(pde5_type, pde5, LPFC_PDE5_DESCRIPTOR);
1813
1814 /* Endianness conversion if necessary for PDE5 */
1815 pde5->word0 = cpu_to_le32(pde5->word0);
1816 pde5->reftag = cpu_to_le32(reftag);
1817
1818 /* advance bpl and increment bde count */
1819 num_bde++;
1820 bpl++;
1821 pde6 = (struct lpfc_pde6 *) bpl;
1822
1823 /* setup PDE6 with the rest of the info */
1824 memset(pde6, 0, sizeof(struct lpfc_pde6));
1825 bf_set(pde6_type, pde6, LPFC_PDE6_DESCRIPTOR);
1826 bf_set(pde6_optx, pde6, txop);
1827 bf_set(pde6_oprx, pde6, rxop);
a6887e28 1828
125c12f7 1829 if (sc->prot_flags & SCSI_PROT_GUARD_CHECK)
a6887e28
JS
1830 bf_set(pde6_ce, pde6, checking);
1831 else
1832 bf_set(pde6_ce, pde6, 0);
1833
125c12f7 1834 if (sc->prot_flags & SCSI_PROT_REF_CHECK)
a6887e28
JS
1835 bf_set(pde6_re, pde6, checking);
1836 else
1837 bf_set(pde6_re, pde6, 0);
1838
acd6859b
JS
1839 bf_set(pde6_ai, pde6, 1);
1840 bf_set(pde6_ae, pde6, 0);
1841 bf_set(pde6_apptagval, pde6, 0);
1842
1843 /* Endianness conversion if necessary for PDE6 */
1844 pde6->word0 = cpu_to_le32(pde6->word0);
1845 pde6->word1 = cpu_to_le32(pde6->word1);
1846 pde6->word2 = cpu_to_le32(pde6->word2);
1847
1848 /* advance bpl and increment bde count */
1849 num_bde++;
1850 bpl++;
1851
1852 /* setup the first BDE that points to protection buffer */
1853 protphysaddr = sg_dma_address(sgpe) + protgroup_offset;
1854 protgroup_len = sg_dma_len(sgpe) - protgroup_offset;
1855
1856 /* must be integer multiple of the DIF block length */
1857 BUG_ON(protgroup_len % 8);
1858
1859 pde7 = (struct lpfc_pde7 *) bpl;
1860 memset(pde7, 0, sizeof(struct lpfc_pde7));
1861 bf_set(pde7_type, pde7, LPFC_PDE7_DESCRIPTOR);
1862
1863 pde7->addrHigh = le32_to_cpu(putPaddrHigh(protphysaddr));
1864 pde7->addrLow = le32_to_cpu(putPaddrLow(protphysaddr));
1865
1866 protgrp_blks = protgroup_len / 8;
1867 protgrp_bytes = protgrp_blks * blksize;
1868
1869 /* check if this pde is crossing the 4K boundary; if so split */
1870 if ((pde7->addrLow & 0xfff) + protgroup_len > 0x1000) {
1871 protgroup_remainder = 0x1000 - (pde7->addrLow & 0xfff);
1872 protgroup_offset += protgroup_remainder;
1873 protgrp_blks = protgroup_remainder / 8;
1874 protgrp_bytes = protgrp_blks * blksize;
1875 } else {
1876 protgroup_offset = 0;
1877 curr_prot++;
1878 }
1879
1880 num_bde++;
1881
1882 /* setup BDE's for data blocks associated with DIF data */
1883 pgdone = 0;
1884 subtotal = 0; /* total bytes processed for current prot grp */
1885 while (!pgdone) {
96f7077f
JS
1886 /* Check to see if we ran out of space */
1887 if (num_bde >= phba->cfg_total_seg_cnt)
1888 return num_bde + 1;
1889
acd6859b 1890 if (!sgde) {
372c187b 1891 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
acd6859b
JS
1892 "9065 BLKGRD:%s Invalid data segment\n",
1893 __func__);
1894 return 0;
1895 }
1896 bpl++;
1897 dataphysaddr = sg_dma_address(sgde) + split_offset;
1898 bpl->addrLow = le32_to_cpu(putPaddrLow(dataphysaddr));
1899 bpl->addrHigh = le32_to_cpu(putPaddrHigh(dataphysaddr));
1900
1901 remainder = sg_dma_len(sgde) - split_offset;
1902
1903 if ((subtotal + remainder) <= protgrp_bytes) {
1904 /* we can use this whole buffer */
1905 bpl->tus.f.bdeSize = remainder;
1906 split_offset = 0;
1907
1908 if ((subtotal + remainder) == protgrp_bytes)
1909 pgdone = 1;
1910 } else {
1911 /* must split this buffer with next prot grp */
1912 bpl->tus.f.bdeSize = protgrp_bytes - subtotal;
1913 split_offset += bpl->tus.f.bdeSize;
1914 }
1915
1916 subtotal += bpl->tus.f.bdeSize;
1917
1918 if (datadir == DMA_TO_DEVICE)
1919 bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64;
1920 else
1921 bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64I;
1922 bpl->tus.w = le32_to_cpu(bpl->tus.w);
1923
1924 num_bde++;
1925 curr_data++;
1926
1927 if (split_offset)
1928 break;
1929
1930 /* Move to the next s/g segment if possible */
1931 sgde = sg_next(sgde);
1932
1933 }
1934
1935 if (protgroup_offset) {
1936 /* update the reference tag */
1937 reftag += protgrp_blks;
1938 bpl++;
1939 continue;
1940 }
1941
1942 /* are we done ? */
1943 if (curr_prot == protcnt) {
1944 alldone = 1;
1945 } else if (curr_prot < protcnt) {
1946 /* advance to next prot buffer */
1947 sgpe = sg_next(sgpe);
1948 bpl++;
1949
1950 /* update the reference tag */
1951 reftag += protgrp_blks;
1952 } else {
1953 /* if we're here, we have a bug */
372c187b
DK
1954 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
1955 "9054 BLKGRD: bug in %s\n", __func__);
acd6859b
JS
1956 }
1957
1958 } while (!alldone);
1959out:
1960
1961 return num_bde;
1962}
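
The 4K-crossing split above is easiest to see with numbers. A protection buffer that starts 0xF80 bytes into a 4K region and runs for 0x100 bytes crosses the boundary: only 0x1000 - 0xF80 = 0x80 bytes (16 eight-byte DIF tuples) can be described by the current PDE7, and the loop re-enters with protgroup_offset advanced by that remainder. A self-contained sketch of the same computation:

	#include <stdio.h>
	#include <stdint.h>

	/* Sketch of the 4K-crossing split applied to the PDE7/DIF entries
	 * above: 8 bytes per DIF tuple, 0x1000-byte boundary.
	 */
	static uint32_t prot_split(uint32_t addr_low, uint32_t len,
				   uint32_t *blks_this_pde)
	{
		uint32_t in_page = addr_low & 0xfff;

		if (in_page + len > 0x1000) {		/* crosses the boundary */
			uint32_t remainder = 0x1000 - in_page;

			*blks_this_pde = remainder / 8;	/* tuples that fit */
			return remainder;
		}
		*blks_this_pde = len / 8;
		return len;
	}

	int main(void)
	{
		uint32_t blks;
		uint32_t used = prot_split(0xF80, 0x100, &blks);

		printf("used=0x%x tuples=%u\n", used, blks);	/* 0x80, 16 */
		return 0;
	}
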
1963
1964/**
1965 * lpfc_bg_setup_sgl - Setup BlockGuard SGL with no protection data
1966 * @phba: The Hba for which this call is being executed.
1967 * @sc: pointer to scsi command we're working on
1968 * @sgl: pointer to buffer list for protection groups
eceee00e
LJ
1969 * @datasegcnt: number of segments of data that have been dma mapped
1970 * @lpfc_cmd: lpfc scsi command object pointer.
acd6859b
JS
1971 *
1972 * This function sets up SGL buffer list for protection groups of
1973 * type LPFC_PG_TYPE_NO_DIF
1974 *
1975 * This is usually used when the HBA is instructed to generate
1976 * DIFs and insert them into the data stream (or strip DIFs from
1977 * the incoming data stream)
1978 *
1979 * The buffer list consists of just one protection group described
1980 * below:
1981 * +-------------------------+
1982 * start of prot group --> | DI_SEED |
1983 * +-------------------------+
1984 * | Data SGE |
1985 * +-------------------------+
1986 * |more Data SGE's ... (opt)|
1987 * +-------------------------+
1988 *
1989 *
1990 * Note: Data s/g buffers have been dma mapped
1991 *
1992 * Returns the number of SGEs added to the SGL.
1993 **/
1994static int
1995lpfc_bg_setup_sgl(struct lpfc_hba *phba, struct scsi_cmnd *sc,
d79c9e9d
JS
1996 struct sli4_sge *sgl, int datasegcnt,
1997 struct lpfc_io_buf *lpfc_cmd)
acd6859b
JS
1998{
1999 struct scatterlist *sgde = NULL; /* s/g data entry */
2000 struct sli4_sge_diseed *diseed = NULL;
2001 dma_addr_t physaddr;
2002 int i = 0, num_sge = 0, status;
acd6859b 2003 uint32_t reftag;
acd6859b 2004 uint8_t txop, rxop;
0829a19a 2005#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
acd6859b 2006 uint32_t rc;
0829a19a 2007#endif
acd6859b
JS
2008 uint32_t checking = 1;
2009 uint32_t dma_len;
2010 uint32_t dma_offset = 0;
d79c9e9d
JS
2011 struct sli4_hybrid_sgl *sgl_xtra = NULL;
2012 int j;
2013 bool lsp_just_set = false;
acd6859b
JS
2014
2015 status = lpfc_sc_to_bg_opcodes(phba, sc, &txop, &rxop);
2016 if (status)
2017 goto out;
2018
2019 /* extract some info from the scsi command */
125c12f7 2020 reftag = scsi_prot_ref_tag(sc);
68a6a66c
JS
2021 if (reftag == LPFC_INVALID_REFTAG)
2022 goto out;
acd6859b
JS
2023
2024#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
4ac9b226 2025 rc = lpfc_bg_err_inject(phba, sc, &reftag, NULL, 1);
acd6859b 2026 if (rc) {
9a6b09c0 2027 if (rc & BG_ERR_SWAP)
acd6859b 2028 lpfc_bg_err_opcodes(phba, sc, &txop, &rxop);
9a6b09c0 2029 if (rc & BG_ERR_CHECK)
acd6859b
JS
2030 checking = 0;
2031 }
2032#endif
2033
2034 /* setup DISEED with what we have */
2035 diseed = (struct sli4_sge_diseed *) sgl;
2036 memset(diseed, 0, sizeof(struct sli4_sge_diseed));
2037 bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_DISEED);
2038
2039 /* Endianness conversion if necessary */
2040 diseed->ref_tag = cpu_to_le32(reftag);
2041 diseed->ref_tag_tran = diseed->ref_tag;
2042
a6887e28
JS
2043 /*
2044 * We only need to check the data on READs; for WRITEs,
2045 * protection data is automatically generated, not checked.
2046 */
2047 if (sc->sc_data_direction == DMA_FROM_DEVICE) {
125c12f7 2048 if (sc->prot_flags & SCSI_PROT_GUARD_CHECK)
a6887e28
JS
2049 bf_set(lpfc_sli4_sge_dif_ce, diseed, checking);
2050 else
2051 bf_set(lpfc_sli4_sge_dif_ce, diseed, 0);
2052
125c12f7 2053 if (sc->prot_flags & SCSI_PROT_REF_CHECK)
a6887e28
JS
2054 bf_set(lpfc_sli4_sge_dif_re, diseed, checking);
2055 else
2056 bf_set(lpfc_sli4_sge_dif_re, diseed, 0);
2057 }
2058
acd6859b
JS
2059 /* setup DISEED with the rest of the info */
2060 bf_set(lpfc_sli4_sge_dif_optx, diseed, txop);
2061 bf_set(lpfc_sli4_sge_dif_oprx, diseed, rxop);
a6887e28 2062
acd6859b
JS
2063 bf_set(lpfc_sli4_sge_dif_ai, diseed, 1);
2064 bf_set(lpfc_sli4_sge_dif_me, diseed, 0);
2065
2066 /* Endianness conversion if necessary for DISEED */
2067 diseed->word2 = cpu_to_le32(diseed->word2);
2068 diseed->word3 = cpu_to_le32(diseed->word3);
2069
2070 /* advance bpl and increment sge count */
2071 num_sge++;
2072 sgl++;
2073
2074 /* assumption: caller has already run dma_map_sg on command data */
d79c9e9d
JS
2075 sgde = scsi_sglist(sc);
2076 j = 3;
2077 for (i = 0; i < datasegcnt; i++) {
2078 /* clear it */
2079 sgl->word2 = 0;
acd6859b 2080
d79c9e9d
JS
2081 /* do we need to expand the segment */
2082 if (!lsp_just_set && !((j + 1) % phba->border_sge_num) &&
2083 ((datasegcnt - 1) != i)) {
2084 /* set LSP type */
2085 bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_LSP);
2086
2087 sgl_xtra = lpfc_get_sgl_per_hdwq(phba, lpfc_cmd);
2088
2089 if (unlikely(!sgl_xtra)) {
2090 lpfc_cmd->seg_cnt = 0;
2091 return 0;
2092 }
2093 sgl->addr_lo = cpu_to_le32(putPaddrLow(
2094 sgl_xtra->dma_phys_sgl));
2095 sgl->addr_hi = cpu_to_le32(putPaddrHigh(
2096 sgl_xtra->dma_phys_sgl));
2097
2098 } else {
2099 bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_DATA);
2100 }
2101
2102 if (!(bf_get(lpfc_sli4_sge_type, sgl) & LPFC_SGE_TYPE_LSP)) {
2103 if ((datasegcnt - 1) == i)
2104 bf_set(lpfc_sli4_sge_last, sgl, 1);
2105 physaddr = sg_dma_address(sgde);
2106 dma_len = sg_dma_len(sgde);
2107 sgl->addr_lo = cpu_to_le32(putPaddrLow(physaddr));
2108 sgl->addr_hi = cpu_to_le32(putPaddrHigh(physaddr));
2109
2110 bf_set(lpfc_sli4_sge_offset, sgl, dma_offset);
2111 sgl->word2 = cpu_to_le32(sgl->word2);
2112 sgl->sge_len = cpu_to_le32(dma_len);
2113
2114 dma_offset += dma_len;
2115 sgde = sg_next(sgde);
2116
2117 sgl++;
2118 num_sge++;
2119 lsp_just_set = false;
2120
2121 } else {
2122 sgl->word2 = cpu_to_le32(sgl->word2);
2123 sgl->sge_len = cpu_to_le32(phba->cfg_sg_dma_buf_size);
2124
2125 sgl = (struct sli4_sge *)sgl_xtra->dma_sgl;
2126 i = i - 1;
2127
2128 lsp_just_set = true;
2129 }
2130
2131 j++;
acd6859b 2132
acd6859b
JS
2133 }
2134
2135out:
2136 return num_sge;
2137}
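
The LSP handling above deserves a note: whenever the next entry would land on an SGL chunk boundary (border_sge_num), the driver writes an LSP (Link Segment Pointer) SGE whose address is a freshly allocated SGL chunk from the per-hardware-queue pool, then keeps filling entries in that chunk. A rough, self-contained sketch of the chaining pattern; the types and allocator below are hypothetical stand-ins for struct sli4_sge and lpfc_get_sgl_per_hdwq, not lpfc API:

	#include <stddef.h>

	/* Hypothetical illustration of SGL chaining via an LSP entry. */
	struct sge {
		unsigned long addr;	/* data address, or next-chunk address */
		unsigned int len;
		int is_lsp;		/* link entry rather than data */
	};

	static struct sge extra_chunk[32];

	static struct sge *alloc_extra_sgl(void)
	{
		static int used;

		return used++ ? NULL : extra_chunk;	/* one spare chunk */
	}

	static struct sge *emit_data_sge(struct sge *slot, int at_border,
					 unsigned long addr, unsigned int len)
	{
		if (at_border) {
			struct sge *next = alloc_extra_sgl();

			if (!next)
				return NULL;	/* caller unwinds, as above */
			slot->is_lsp = 1;	/* LSP points at the new chunk */
			slot->addr = (unsigned long)next;
			slot = next;		/* keep filling the new chunk */
		}
		slot->is_lsp = 0;
		slot->addr = addr;
		slot->len = len;
		return slot + 1;
	}
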
2138
2139/**
2140 * lpfc_bg_setup_sgl_prot - Setup BlockGuard SGL with protection data
2141 * @phba: The Hba for which this call is being executed.
2142 * @sc: pointer to scsi command we're working on
2143 * @sgl: pointer to buffer list for protection groups
2144 * @datacnt: number of segments of data that have been dma mapped
2145 * @protcnt: number of segment of protection data that have been dma mapped
eceee00e 2146 * @lpfc_cmd: lpfc scsi command object pointer.
acd6859b
JS
2147 *
2148 * This function sets up SGL buffer list for protection groups of
2149 * type LPFC_PG_TYPE_DIF
2150 *
2151 * This is usually used when DIFs are in their own buffers,
2152 * separate from the data. The HBA can then be instructed
2153 * to place the DIFs in the outgoing stream. For read operations,
2154 * the HBA can extract the DIFs and place them in DIF buffers.
2155 *
2156 * The buffer list for this type consists of one or more of the
2157 * protection groups described below:
2158 * +-------------------------+
2159 * start of first prot group --> | DISEED |
2160 * +-------------------------+
2161 * | DIF (Prot SGE) |
2162 * +-------------------------+
2163 * | Data SGE |
2164 * +-------------------------+
2165 * |more Data SGE's ... (opt)|
2166 * +-------------------------+
2167 * start of new prot group --> | DISEED |
2168 * +-------------------------+
2169 * | ... |
2170 * +-------------------------+
2171 *
2172 * Note: It is assumed that both data and protection s/g buffers have been
2173 * mapped for DMA
2174 *
2175 * Returns the number of SGEs added to the SGL.
2176 **/
2177static int
2178lpfc_bg_setup_sgl_prot(struct lpfc_hba *phba, struct scsi_cmnd *sc,
d79c9e9d
JS
2179 struct sli4_sge *sgl, int datacnt, int protcnt,
2180 struct lpfc_io_buf *lpfc_cmd)
acd6859b
JS
2181{
2182 struct scatterlist *sgde = NULL; /* s/g data entry */
2183 struct scatterlist *sgpe = NULL; /* s/g prot entry */
2184 struct sli4_sge_diseed *diseed = NULL;
2185 dma_addr_t dataphysaddr, protphysaddr;
2186 unsigned short curr_data = 0, curr_prot = 0;
2187 unsigned int split_offset;
2188 unsigned int protgroup_len, protgroup_offset = 0, protgroup_remainder;
2189 unsigned int protgrp_blks, protgrp_bytes;
2190 unsigned int remainder, subtotal;
2191 int status;
2192 unsigned char pgdone = 0, alldone = 0;
2193 unsigned blksize;
2194 uint32_t reftag;
2195 uint8_t txop, rxop;
2196 uint32_t dma_len;
0829a19a 2197#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
acd6859b 2198 uint32_t rc;
0829a19a 2199#endif
acd6859b
JS
2200 uint32_t checking = 1;
2201 uint32_t dma_offset = 0;
d79c9e9d
JS
2202 int num_sge = 0, j = 2;
2203 struct sli4_hybrid_sgl *sgl_xtra = NULL;
acd6859b
JS
2204
2205 sgpe = scsi_prot_sglist(sc);
2206 sgde = scsi_sglist(sc);
2207
2208 if (!sgpe || !sgde) {
372c187b 2209 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
32350664 2210 "9082 Invalid s/g entry: data=x%px prot=x%px\n",
e2a0a9d6
JS
2211 sgpe, sgde);
2212 return 0;
2213 }
2214
6c8eea54
JS
2215 status = lpfc_sc_to_bg_opcodes(phba, sc, &txop, &rxop);
2216 if (status)
e2a0a9d6
JS
2217 goto out;
2218
6c8eea54 2219 /* extract some info from the scsi command */
125c12f7
MP
2220 blksize = scsi_prot_interval(sc);
2221 reftag = scsi_prot_ref_tag(sc);
68a6a66c
JS
2222 if (reftag == LPFC_INVALID_REFTAG)
2223 goto out;
e2a0a9d6 2224
f9bb2da1 2225#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
4ac9b226 2226 rc = lpfc_bg_err_inject(phba, sc, &reftag, NULL, 1);
acd6859b 2227 if (rc) {
9a6b09c0 2228 if (rc & BG_ERR_SWAP)
acd6859b 2229 lpfc_bg_err_opcodes(phba, sc, &txop, &rxop);
9a6b09c0 2230 if (rc & BG_ERR_CHECK)
acd6859b
JS
2231 checking = 0;
2232 }
f9bb2da1
JS
2233#endif
2234
e2a0a9d6
JS
2235 split_offset = 0;
2236 do {
96f7077f 2237 /* Check to see if we ran out of space */
d79c9e9d
JS
2238 if ((num_sge >= (phba->cfg_total_seg_cnt - 2)) &&
2239 !(phba->cfg_xpsgl))
96f7077f
JS
2240 return num_sge + 3;
2241
d79c9e9d
JS
2242 /* DISEED and DIF have to be together */
2243 if (!((j + 1) % phba->border_sge_num) ||
2244 !((j + 2) % phba->border_sge_num) ||
2245 !((j + 3) % phba->border_sge_num)) {
2246 sgl->word2 = 0;
2247
2248 /* set LSP type */
2249 bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_LSP);
2250
2251 sgl_xtra = lpfc_get_sgl_per_hdwq(phba, lpfc_cmd);
2252
2253 if (unlikely(!sgl_xtra)) {
2254 goto out;
2255 } else {
2256 sgl->addr_lo = cpu_to_le32(putPaddrLow(
2257 sgl_xtra->dma_phys_sgl));
2258 sgl->addr_hi = cpu_to_le32(putPaddrHigh(
2259 sgl_xtra->dma_phys_sgl));
2260 }
2261
2262 sgl->word2 = cpu_to_le32(sgl->word2);
2263 sgl->sge_len = cpu_to_le32(phba->cfg_sg_dma_buf_size);
2264
2265 sgl = (struct sli4_sge *)sgl_xtra->dma_sgl;
2266 j = 0;
2267 }
2268
acd6859b
JS
2269 /* setup DISEED with what we have */
2270 diseed = (struct sli4_sge_diseed *) sgl;
2271 memset(diseed, 0, sizeof(struct sli4_sge_diseed));
2272 bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_DISEED);
2273
2274 /* Endianness conversion if necessary */
2275 diseed->ref_tag = cpu_to_le32(reftag);
2276 diseed->ref_tag_tran = diseed->ref_tag;
2277
125c12f7 2278 if (sc->prot_flags & SCSI_PROT_GUARD_CHECK) {
a6887e28 2279 bf_set(lpfc_sli4_sge_dif_ce, diseed, checking);
a6887e28
JS
2280 } else {
2281 bf_set(lpfc_sli4_sge_dif_ce, diseed, 0);
2282 /*
2283 * When in this mode, the hardware will replace
2284 * the guard tag from the host with a
2285 * newly generated good CRC for the wire.
2286 * Switch to raw mode here to avoid this
2287 * behavior. What the host sends gets put on the wire.
2288 */
2289 if (txop == BG_OP_IN_CRC_OUT_CRC) {
2290 txop = BG_OP_RAW_MODE;
2291 rxop = BG_OP_RAW_MODE;
2292 }
2293 }
2294
2295
125c12f7 2296 if (sc->prot_flags & SCSI_PROT_REF_CHECK)
a6887e28
JS
2297 bf_set(lpfc_sli4_sge_dif_re, diseed, checking);
2298 else
2299 bf_set(lpfc_sli4_sge_dif_re, diseed, 0);
2300
acd6859b
JS
2301 /* setup DISEED with the rest of the info */
2302 bf_set(lpfc_sli4_sge_dif_optx, diseed, txop);
2303 bf_set(lpfc_sli4_sge_dif_oprx, diseed, rxop);
a6887e28 2304
acd6859b
JS
2305 bf_set(lpfc_sli4_sge_dif_ai, diseed, 1);
2306 bf_set(lpfc_sli4_sge_dif_me, diseed, 0);
2307
2308 /* Endianness conversion if necessary for DISEED */
2309 diseed->word2 = cpu_to_le32(diseed->word2);
2310 diseed->word3 = cpu_to_le32(diseed->word3);
2311
2312 /* advance sgl and increment bde count */
2313 num_sge++;
d79c9e9d 2314
acd6859b 2315 sgl++;
d79c9e9d 2316 j++;
e2a0a9d6
JS
2317
2318 /* setup the first BDE that points to protection buffer */
7f86059a
JS
2319 protphysaddr = sg_dma_address(sgpe) + protgroup_offset;
2320 protgroup_len = sg_dma_len(sgpe) - protgroup_offset;
e2a0a9d6 2321
e2a0a9d6
JS
2322 /* must be integer multiple of the DIF block length */
2323 BUG_ON(protgroup_len % 8);
2324
acd6859b
JS
2325 /* Now setup DIF SGE */
2326 sgl->word2 = 0;
2327 bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_DIF);
2328 sgl->addr_hi = le32_to_cpu(putPaddrHigh(protphysaddr));
2329 sgl->addr_lo = le32_to_cpu(putPaddrLow(protphysaddr));
2330 sgl->word2 = cpu_to_le32(sgl->word2);
d79c9e9d 2331 sgl->sge_len = 0;
7f86059a 2332
e2a0a9d6
JS
2333 protgrp_blks = protgroup_len / 8;
2334 protgrp_bytes = protgrp_blks * blksize;
2335
acd6859b
JS
2336 /* check if DIF SGE is crossing the 4K boundary; if so split */
2337 if ((sgl->addr_lo & 0xfff) + protgroup_len > 0x1000) {
2338 protgroup_remainder = 0x1000 - (sgl->addr_lo & 0xfff);
7f86059a
JS
2339 protgroup_offset += protgroup_remainder;
2340 protgrp_blks = protgroup_remainder / 8;
7c56b9fd 2341 protgrp_bytes = protgrp_blks * blksize;
7f86059a
JS
2342 } else {
2343 protgroup_offset = 0;
2344 curr_prot++;
2345 }
e2a0a9d6 2346
acd6859b 2347 num_sge++;
e2a0a9d6 2348
acd6859b 2349 /* setup SGE's for data blocks associated with DIF data */
e2a0a9d6
JS
2350 pgdone = 0;
2351 subtotal = 0; /* total bytes processed for current prot grp */
d79c9e9d
JS
2352
2353 sgl++;
2354 j++;
2355
e2a0a9d6 2356 while (!pgdone) {
96f7077f 2357 /* Check to see if we ran out of space */
d79c9e9d
JS
2358 if ((num_sge >= phba->cfg_total_seg_cnt) &&
2359 !phba->cfg_xpsgl)
96f7077f
JS
2360 return num_sge + 1;
2361
e2a0a9d6 2362 if (!sgde) {
372c187b 2363 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
acd6859b 2364 "9086 BLKGRD:%s Invalid data segment\n",
e2a0a9d6
JS
2365 __func__);
2366 return 0;
2367 }
e2a0a9d6 2368
d79c9e9d
JS
2369 if (!((j + 1) % phba->border_sge_num)) {
2370 sgl->word2 = 0;
e2a0a9d6 2371
d79c9e9d
JS
2372 /* set LSP type */
2373 bf_set(lpfc_sli4_sge_type, sgl,
2374 LPFC_SGE_TYPE_LSP);
e2a0a9d6 2375
d79c9e9d
JS
2376 sgl_xtra = lpfc_get_sgl_per_hdwq(phba,
2377 lpfc_cmd);
2378
2379 if (unlikely(!sgl_xtra)) {
2380 goto out;
2381 } else {
2382 sgl->addr_lo = cpu_to_le32(
2383 putPaddrLow(sgl_xtra->dma_phys_sgl));
2384 sgl->addr_hi = cpu_to_le32(
2385 putPaddrHigh(sgl_xtra->dma_phys_sgl));
2386 }
2387
2388 sgl->word2 = cpu_to_le32(sgl->word2);
2389 sgl->sge_len = cpu_to_le32(
2390 phba->cfg_sg_dma_buf_size);
2391
2392 sgl = (struct sli4_sge *)sgl_xtra->dma_sgl;
e2a0a9d6 2393 } else {
d79c9e9d
JS
2394 dataphysaddr = sg_dma_address(sgde) +
2395 split_offset;
e2a0a9d6 2396
d79c9e9d 2397 remainder = sg_dma_len(sgde) - split_offset;
e2a0a9d6 2398
d79c9e9d
JS
2399 if ((subtotal + remainder) <= protgrp_bytes) {
2400 /* we can use this whole buffer */
2401 dma_len = remainder;
2402 split_offset = 0;
e2a0a9d6 2403
d79c9e9d
JS
2404 if ((subtotal + remainder) ==
2405 protgrp_bytes)
2406 pgdone = 1;
2407 } else {
2408 /* must split this buffer with next
2409 * prot grp
2410 */
2411 dma_len = protgrp_bytes - subtotal;
2412 split_offset += dma_len;
2413 }
acd6859b 2414
d79c9e9d 2415 subtotal += dma_len;
e2a0a9d6 2416
d79c9e9d
JS
2417 sgl->word2 = 0;
2418 sgl->addr_lo = cpu_to_le32(putPaddrLow(
2419 dataphysaddr));
2420 sgl->addr_hi = cpu_to_le32(putPaddrHigh(
2421 dataphysaddr));
2422 bf_set(lpfc_sli4_sge_last, sgl, 0);
2423 bf_set(lpfc_sli4_sge_offset, sgl, dma_offset);
2424 bf_set(lpfc_sli4_sge_type, sgl,
2425 LPFC_SGE_TYPE_DATA);
e2a0a9d6 2426
d79c9e9d
JS
2427 sgl->sge_len = cpu_to_le32(dma_len);
2428 dma_offset += dma_len;
2429
2430 num_sge++;
2431 curr_data++;
2432
2433 if (split_offset) {
2434 sgl++;
2435 j++;
2436 break;
2437 }
2438
2439 /* Move to the next s/g segment if possible */
2440 sgde = sg_next(sgde);
2441
2442 sgl++;
2443 }
2444
2445 j++;
e2a0a9d6
JS
2446 }
2447
7f86059a
JS
2448 if (protgroup_offset) {
2449 /* update the reference tag */
2450 reftag += protgrp_blks;
7f86059a
JS
2451 continue;
2452 }
2453
e2a0a9d6
JS
2454 /* are we done ? */
2455 if (curr_prot == protcnt) {
d79c9e9d
JS
2456 /* mark the last SGL */
2457 sgl--;
acd6859b 2458 bf_set(lpfc_sli4_sge_last, sgl, 1);
e2a0a9d6
JS
2459 alldone = 1;
2460 } else if (curr_prot < protcnt) {
2461 /* advance to next prot buffer */
2462 sgpe = sg_next(sgpe);
e2a0a9d6
JS
2463
2464 /* update the reference tag */
2465 reftag += protgrp_blks;
2466 } else {
2467 /* if we're here, we have a bug */
372c187b
DK
2468 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
2469 "9085 BLKGRD: bug in %s\n", __func__);
e2a0a9d6
JS
2470 }
2471
2472 } while (!alldone);
acd6859b 2473
e2a0a9d6
JS
2474out:
2475
acd6859b 2476 return num_sge;
e2a0a9d6 2477}
7f86059a 2478
acd6859b
JS
2479/**
2480 * lpfc_prot_group_type - Get protection group type of SCSI command
2481 * @phba: The Hba for which this call is being executed.
2482 * @sc: pointer to scsi command we're working on
2483 *
e2a0a9d6
JS
2484 * Given a SCSI command that supports DIF, determine composition of protection
2485 * groups involved in setting up buffer lists
2486 *
acd6859b
JS
2487 * Returns: Protection group type (with or without DIF)
2488 *
2489 **/
e2a0a9d6
JS
2490static int
2491lpfc_prot_group_type(struct lpfc_hba *phba, struct scsi_cmnd *sc)
2492{
2493 int ret = LPFC_PG_TYPE_INVALID;
2494 unsigned char op = scsi_get_prot_op(sc);
2495
2496 switch (op) {
2497 case SCSI_PROT_READ_STRIP:
2498 case SCSI_PROT_WRITE_INSERT:
2499 ret = LPFC_PG_TYPE_NO_DIF;
2500 break;
2501 case SCSI_PROT_READ_INSERT:
2502 case SCSI_PROT_WRITE_STRIP:
2503 case SCSI_PROT_READ_PASS:
2504 case SCSI_PROT_WRITE_PASS:
e2a0a9d6
JS
2505 ret = LPFC_PG_TYPE_DIF_BUF;
2506 break;
2507 default:
9c6aa9d7 2508 if (phba)
372c187b 2509 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
9c6aa9d7
JS
2510 "9021 Unsupported protection op:%d\n",
2511 op);
e2a0a9d6
JS
2512 break;
2513 }
e2a0a9d6
JS
2514 return ret;
2515}
2516
a6887e28
JS
2517/**
2518 * lpfc_bg_scsi_adjust_dl - Adjust SCSI data length for BlockGuard
2519 * @phba: The Hba for which this call is being executed.
2520 * @lpfc_cmd: The scsi buffer which is going to be adjusted.
2521 *
2522 * Adjust the data length to account for how much data
2523 * is actually on the wire.
2524 *
2525 * returns the adjusted data length
2526 **/
2527static int
2528lpfc_bg_scsi_adjust_dl(struct lpfc_hba *phba,
c490850a 2529 struct lpfc_io_buf *lpfc_cmd)
a6887e28
JS
2530{
2531 struct scsi_cmnd *sc = lpfc_cmd->pCmd;
2532 int fcpdl;
2533
2534 fcpdl = scsi_bufflen(sc);
2535
2536 /* Check if there is protection data on the wire */
2537 if (sc->sc_data_direction == DMA_FROM_DEVICE) {
9c6aa9d7 2538 /* Read check for protection data */
a6887e28
JS
2539 if (scsi_get_prot_op(sc) == SCSI_PROT_READ_INSERT)
2540 return fcpdl;
2541
2542 } else {
9c6aa9d7 2543 /* Write check for protection data */
a6887e28
JS
2544 if (scsi_get_prot_op(sc) == SCSI_PROT_WRITE_STRIP)
2545 return fcpdl;
2546 }
2547
2548 /*
2549 * If we are in DIF Type 1 mode, every data block has an 8 byte
9c6aa9d7
JS
2550 * DIF (trailer) attached to it. Must adjust the FCP data length
2551 * to account for the protection data.
a6887e28 2552 */
125c12f7 2553 fcpdl += (fcpdl / scsi_prot_interval(sc)) * 8;
a6887e28
JS
2554
2555 return fcpdl;
2556}
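
Concretely: a 32768-byte transfer with a 512-byte protection interval carries 64 DIF tuples, so the wire length becomes 32768 + (32768 / 512) * 8 = 33280 bytes. A one-line sketch of the same arithmetic (hypothetical helper, mirroring the fcpdl adjustment above):

	/* Wire length under DIF Type 1: one 8-byte tuple per interval.
	 * Assumes len is a whole number of protection intervals.
	 */
	static unsigned int dif_wire_len(unsigned int len, unsigned int interval)
	{
		return len + (len / interval) * 8;	/* 32768 -> 33280 */
	}
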
2557
acd6859b
JS
2558/**
2559 * lpfc_bg_scsi_prep_dma_buf_s3 - DMA mapping for scsi buffer to SLI3 IF spec
2560 * @phba: The Hba for which this call is being executed.
2561 * @lpfc_cmd: The scsi buffer which is going to be prep'ed.
2562 *
e2a0a9d6
JS
2563 * This is the protection/DIF aware version of
2564 * lpfc_scsi_prep_dma_buf(). It may be a good idea to combine the
5e0e2318
JS
2565 * two functions eventually, but for now, it's here.
2566 * RETURNS 0 - SUCCESS,
2567 * 1 - Failed DMA map, retry.
2568 * 2 - Invalid scsi cmd or prot-type. Do not retry.
acd6859b 2569 **/
e2a0a9d6 2570static int
acd6859b 2571lpfc_bg_scsi_prep_dma_buf_s3(struct lpfc_hba *phba,
c490850a 2572 struct lpfc_io_buf *lpfc_cmd)
e2a0a9d6
JS
2573{
2574 struct scsi_cmnd *scsi_cmnd = lpfc_cmd->pCmd;
2575 struct fcp_cmnd *fcp_cmnd = lpfc_cmd->fcp_cmnd;
c490850a 2576 struct ulp_bde64 *bpl = (struct ulp_bde64 *)lpfc_cmd->dma_sgl;
e2a0a9d6
JS
2577 IOCB_t *iocb_cmd = &lpfc_cmd->cur_iocbq.iocb;
2578 uint32_t num_bde = 0;
2579 int datasegcnt, protsegcnt, datadir = scsi_cmnd->sc_data_direction;
2580 int prot_group_type = 0;
a6887e28 2581 int fcpdl;
5e0e2318 2582 int ret = 1;
7c4042a4 2583 struct lpfc_vport *vport = phba->pport;
e2a0a9d6
JS
2584
2585 /*
2586 * Start the lpfc command prep by bumping the bpl beyond fcp_cmnd
2587 * fcp_rsp regions to the first data bde entry
2588 */
2589 bpl += 2;
2590 if (scsi_sg_count(scsi_cmnd)) {
2591 /*
2592 * The driver stores the segment count returned from dma_map_sg
2593 * because this is a count of dma-mappings used to map the use_sg
2594 * pages. They are not guaranteed to be the same for those
2595 * architectures that implement an IOMMU.
2596 */
2597 datasegcnt = dma_map_sg(&phba->pcidev->dev,
2598 scsi_sglist(scsi_cmnd),
2599 scsi_sg_count(scsi_cmnd), datadir);
2600 if (unlikely(!datasegcnt))
2601 return 1;
2602
2603 lpfc_cmd->seg_cnt = datasegcnt;
96f7077f
JS
2604
2605 /* First check if data segment count from SCSI Layer is good */
5e0e2318
JS
2606 if (lpfc_cmd->seg_cnt > phba->cfg_sg_seg_cnt) {
2607 WARN_ON_ONCE(lpfc_cmd->seg_cnt > phba->cfg_sg_seg_cnt);
2608 ret = 2;
96f7077f 2609 goto err;
5e0e2318 2610 }
e2a0a9d6
JS
2611
2612 prot_group_type = lpfc_prot_group_type(phba, scsi_cmnd);
2613
2614 switch (prot_group_type) {
2615 case LPFC_PG_TYPE_NO_DIF:
96f7077f
JS
2616
2617 /* Here we need to add a PDE5 and PDE6 to the count */
5e0e2318
JS
2618 if ((lpfc_cmd->seg_cnt + 2) > phba->cfg_total_seg_cnt) {
2619 ret = 2;
96f7077f 2620 goto err;
5e0e2318 2621 }
96f7077f 2622
e2a0a9d6
JS
2623 num_bde = lpfc_bg_setup_bpl(phba, scsi_cmnd, bpl,
2624 datasegcnt);
c9404c9c 2625 /* we should have 2 or more entries in buffer list */
5e0e2318
JS
2626 if (num_bde < 2) {
2627 ret = 2;
e2a0a9d6 2628 goto err;
5e0e2318 2629 }
e2a0a9d6 2630 break;
96f7077f
JS
2631
2632 case LPFC_PG_TYPE_DIF_BUF:
e2a0a9d6
JS
2633 /*
2634 * This type indicates that protection buffers are
2635 * passed to the driver, so that needs to be prepared
2636 * for DMA
2637 */
2638 protsegcnt = dma_map_sg(&phba->pcidev->dev,
2639 scsi_prot_sglist(scsi_cmnd),
2640 scsi_prot_sg_count(scsi_cmnd), datadir);
2641 if (unlikely(!protsegcnt)) {
2642 scsi_dma_unmap(scsi_cmnd);
2643 return 1;
2644 }
2645
2646 lpfc_cmd->prot_seg_cnt = protsegcnt;
96f7077f
JS
2647
2648 /*
2649 * There is a minimum of 4 BPLs used for every
2650 * protection data segment.
2651 */
2652 if ((lpfc_cmd->prot_seg_cnt * 4) >
5e0e2318
JS
2653 (phba->cfg_total_seg_cnt - 2)) {
2654 ret = 2;
96f7077f 2655 goto err;
5e0e2318 2656 }
e2a0a9d6
JS
2657
2658 num_bde = lpfc_bg_setup_bpl_prot(phba, scsi_cmnd, bpl,
2659 datasegcnt, protsegcnt);
c9404c9c 2660 /* we should have 3 or more entries in buffer list */
96f7077f 2661 if ((num_bde < 3) ||
5e0e2318
JS
2662 (num_bde > phba->cfg_total_seg_cnt)) {
2663 ret = 2;
e2a0a9d6 2664 goto err;
5e0e2318 2665 }
e2a0a9d6 2666 break;
96f7077f 2667
e2a0a9d6
JS
2668 case LPFC_PG_TYPE_INVALID:
2669 default:
96f7077f
JS
2670 scsi_dma_unmap(scsi_cmnd);
2671 lpfc_cmd->seg_cnt = 0;
2672
372c187b 2673 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
e2a0a9d6
JS
2674 "9022 Unexpected protection group %i\n",
2675 prot_group_type);
5e0e2318 2676 return 2;
e2a0a9d6
JS
2677 }
2678 }
2679
2680 /*
2681 * Finish initializing those IOCB fields that are dependent on the
2682 * scsi_cmnd request_buffer. Note that the bdeSize is explicitly
2683 * reinitialized since all iocb memory resources are used many times
2684 * for transmit, receive, and continuation bpl's.
2685 */
2686 iocb_cmd->un.fcpi64.bdl.bdeSize = (2 * sizeof(struct ulp_bde64));
2687 iocb_cmd->un.fcpi64.bdl.bdeSize += (num_bde * sizeof(struct ulp_bde64));
2688 iocb_cmd->ulpBdeCount = 1;
2689 iocb_cmd->ulpLe = 1;
2690
a6887e28 2691 fcpdl = lpfc_bg_scsi_adjust_dl(phba, lpfc_cmd);
e2a0a9d6
JS
2692 fcp_cmnd->fcpDl = be32_to_cpu(fcpdl);
2693
2694 /*
2695 * Due to difference in data length between DIF/non-DIF paths,
2696 * we need to set word 4 of IOCB here
2697 */
2698 iocb_cmd->un.fcpi.fcpi_parm = fcpdl;
2699
7c4042a4
JS
2700 /*
2701 * For First burst, we may need to adjust the initial transfer
2702 * length for DIF
2703 */
2704 if (iocb_cmd->un.fcpi.fcpi_XRdy &&
2705 (fcpdl < vport->cfg_first_burst_size))
2706 iocb_cmd->un.fcpi.fcpi_XRdy = fcpdl;
2707
dea3101e 2708 return 0;
e2a0a9d6 2709err:
96f7077f
JS
2710 if (lpfc_cmd->seg_cnt)
2711 scsi_dma_unmap(scsi_cmnd);
2712 if (lpfc_cmd->prot_seg_cnt)
2713 dma_unmap_sg(&phba->pcidev->dev, scsi_prot_sglist(scsi_cmnd),
2714 scsi_prot_sg_count(scsi_cmnd),
2715 scsi_cmnd->sc_data_direction);
2716
372c187b 2717 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
96f7077f
JS
2718 "9023 Cannot setup S/G List for HBA"
2719 "IO segs %d/%d BPL %d SCSI %d: %d %d\n",
2720 lpfc_cmd->seg_cnt, lpfc_cmd->prot_seg_cnt,
2721 phba->cfg_total_seg_cnt, phba->cfg_sg_seg_cnt,
e2a0a9d6 2722 prot_group_type, num_bde);
96f7077f
JS
2723
2724 lpfc_cmd->seg_cnt = 0;
2725 lpfc_cmd->prot_seg_cnt = 0;
5e0e2318 2726 return ret;
e2a0a9d6
JS
2727}
2728
737d4248
JS
2729/*
2730 * This function calculates the T10 DIF guard tag
2731 * on the specified data using the CRC algorithm
2732 * implemented by crc_t10dif.
2733 */
7bfe781e 2734static uint16_t
737d4248
JS
2735lpfc_bg_crc(uint8_t *data, int count)
2736{
2737 uint16_t crc = 0;
2738 uint16_t x;
2739
2740 crc = crc_t10dif(data, count);
2741 x = cpu_to_be16(crc);
2742 return x;
2743}
2744
2745/*
2746 * This function calculates the T10 DIF guard tag
2747 * on the specified data using the IP checksum algorithm
2748 * implemented by ip_compute_csum.
2749 */
7bfe781e 2750static uint16_t
737d4248
JS
2751lpfc_bg_csum(uint8_t *data, int count)
2752{
2753 uint16_t ret;
2754
2755 ret = ip_compute_csum(data, count);
2756 return ret;
2757}
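
crc_t10dif() and ip_compute_csum() are kernel library routines. Outside the kernel the guard CRC can be reproduced with the T10-DIF generator polynomial 0x8BB7 (zero initial value, no reflection, no final XOR); a slow but self-contained bitwise reference:

	#include <stdint.h>
	#include <stddef.h>

	/* Reference T10-DIF CRC16: poly 0x8BB7, init 0, not reflected,
	 * no final XOR. Intended to match crc_t10dif() for the same bytes.
	 */
	static uint16_t t10dif_crc(const uint8_t *data, size_t len)
	{
		uint16_t crc = 0;
		size_t i;
		int bit;

		for (i = 0; i < len; i++) {
			crc ^= (uint16_t)data[i] << 8;
			for (bit = 0; bit < 8; bit++)
				crc = (crc & 0x8000) ? (crc << 1) ^ 0x8BB7
						     : crc << 1;
		}
		return crc;
	}
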
2758
2759/*
2760 * This function examines the protection data to try to determine
2761 * what type of T10-DIF error occurred.
2762 */
7bfe781e 2763static void
c490850a 2764lpfc_calc_bg_err(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_cmd)
737d4248
JS
2765{
2766 struct scatterlist *sgpe; /* s/g prot entry */
2767 struct scatterlist *sgde; /* s/g data entry */
2768 struct scsi_cmnd *cmd = lpfc_cmd->pCmd;
2769 struct scsi_dif_tuple *src = NULL;
2770 uint8_t *data_src = NULL;
db6f1c2f 2771 uint16_t guard_tag;
737d4248
JS
2772 uint16_t start_app_tag, app_tag;
2773 uint32_t start_ref_tag, ref_tag;
2774 int prot, protsegcnt;
2775 int err_type, len, data_len;
2776 int chk_ref, chk_app, chk_guard;
2777 uint16_t sum;
2778 unsigned blksize;
2779
2780 err_type = BGS_GUARD_ERR_MASK;
2781 sum = 0;
2782 guard_tag = 0;
2783
2784 /* First check to see if there is protection data to examine */
2785 prot = scsi_get_prot_op(cmd);
2786 if ((prot == SCSI_PROT_READ_STRIP) ||
2787 (prot == SCSI_PROT_WRITE_INSERT) ||
2788 (prot == SCSI_PROT_NORMAL))
2789 goto out;
2790
2791 /* Currently the driver just supports ref_tag and guard_tag checking */
2792 chk_ref = 1;
2793 chk_app = 0;
2794 chk_guard = 0;
2795
2796 /* Setup a ptr to the protection data provided by the SCSI host */
2797 sgpe = scsi_prot_sglist(cmd);
2798 protsegcnt = lpfc_cmd->prot_seg_cnt;
2799
2800 if (sgpe && protsegcnt) {
2801
2802 /*
2803 * We will only try to verify guard tag if the segment
2804 * data length is a multiple of the blksize.
2805 */
2806 sgde = scsi_sglist(cmd);
125c12f7 2807 blksize = scsi_prot_interval(cmd);
737d4248
JS
2808 data_src = (uint8_t *)sg_virt(sgde);
2809 data_len = sgde->length;
2810 if ((data_len & (blksize - 1)) == 0)
2811 chk_guard = 1;
737d4248 2812
e85d8f9f 2813 src = (struct scsi_dif_tuple *)sg_virt(sgpe);
125c12f7 2814 start_ref_tag = scsi_prot_ref_tag(cmd);
68a6a66c
JS
2815 if (start_ref_tag == LPFC_INVALID_REFTAG)
2816 goto out;
737d4248 2817 start_app_tag = src->app_tag;
737d4248
JS
2818 len = sgpe->length;
2819 while (src && protsegcnt) {
2820 while (len) {
2821
2822 /*
2823 * First check to see if a protection data
2824 * check is valid
2825 */
128b6f9f
DM
2826 if ((src->ref_tag == T10_PI_REF_ESCAPE) ||
2827 (src->app_tag == T10_PI_APP_ESCAPE)) {
737d4248
JS
2828 start_ref_tag++;
2829 goto skipit;
2830 }
2831
9c6aa9d7 2832 /* First Guard Tag checking */
737d4248
JS
2833 if (chk_guard) {
2834 guard_tag = src->guard_tag;
125c12f7
MP
2835 if (cmd->prot_flags
2836 & SCSI_PROT_IP_CHECKSUM)
737d4248
JS
2837 sum = lpfc_bg_csum(data_src,
2838 blksize);
2839 else
2840 sum = lpfc_bg_crc(data_src,
2841 blksize);
2842 if ((guard_tag != sum)) {
2843 err_type = BGS_GUARD_ERR_MASK;
2844 goto out;
2845 }
2846 }
9c6aa9d7
JS
2847
2848 /* Reference Tag checking */
2849 ref_tag = be32_to_cpu(src->ref_tag);
2850 if (chk_ref && (ref_tag != start_ref_tag)) {
2851 err_type = BGS_REFTAG_ERR_MASK;
2852 goto out;
2853 }
2854 start_ref_tag++;
2855
2856 /* App Tag checking */
2857 app_tag = src->app_tag;
2858 if (chk_app && (app_tag != start_app_tag)) {
2859 err_type = BGS_APPTAG_ERR_MASK;
2860 goto out;
2861 }
737d4248
JS
2862skipit:
2863 len -= sizeof(struct scsi_dif_tuple);
2864 if (len < 0)
2865 len = 0;
2866 src++;
2867
2868 data_src += blksize;
2869 data_len -= blksize;
2870
2871 /*
2872 * Are we at the end of the Data segment?
2873 * The data segment is only used for Guard
2874 * tag checking.
2875 */
2876 if (chk_guard && (data_len == 0)) {
2877 chk_guard = 0;
2878 sgde = sg_next(sgde);
2879 if (!sgde)
2880 goto out;
2881
2882 data_src = (uint8_t *)sg_virt(sgde);
2883 data_len = sgde->length;
2884 if ((data_len & (blksize - 1)) == 0)
2885 chk_guard = 1;
2886 }
2887 }
2888
2889 /* Goto the next Protection data segment */
2890 sgpe = sg_next(sgpe);
2891 if (sgpe) {
2892 src = (struct scsi_dif_tuple *)sg_virt(sgpe);
2893 len = sgpe->length;
2894 } else {
2895 src = NULL;
2896 }
2897 protsegcnt--;
2898 }
2899 }
2900out:
2901 if (err_type == BGS_GUARD_ERR_MASK) {
f2b1e9c6
HR
2902 scsi_build_sense(cmd, 1, ILLEGAL_REQUEST, 0x10, 0x1);
2903 set_host_byte(cmd, DID_ABORT);
737d4248
JS
2904 phba->bg_guard_err_cnt++;
2905 lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_BG,
68a6a66c 2906 "9069 BLKGRD: reftag %x grd_tag err %x != %x\n",
125c12f7 2907 scsi_prot_ref_tag(cmd),
737d4248
JS
2908 sum, guard_tag);
2909
2910 } else if (err_type == BGS_REFTAG_ERR_MASK) {
f2b1e9c6
HR
2911 scsi_build_sense(cmd, 1, ILLEGAL_REQUEST, 0x10, 0x3);
2912 set_host_byte(cmd, DID_ABORT);
737d4248
JS
2913
2914 phba->bg_reftag_err_cnt++;
2915 lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_BG,
68a6a66c 2916 "9066 BLKGRD: reftag %x ref_tag err %x != %x\n",
125c12f7 2917 scsi_prot_ref_tag(cmd),
737d4248
JS
2918 ref_tag, start_ref_tag);
2919
2920 } else if (err_type == BGS_APPTAG_ERR_MASK) {
f2b1e9c6
HR
2921 scsi_build_sense(cmd, 1, ILLEGAL_REQUEST, 0x10, 0x2);
2922 set_host_byte(cmd, DID_ABORT);
737d4248
JS
2923
2924 phba->bg_apptag_err_cnt++;
2925 lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_BG,
68a6a66c 2926 "9041 BLKGRD: reftag %x app_tag err %x != %x\n",
125c12f7 2927 scsi_prot_ref_tag(cmd),
737d4248
JS
2928 app_tag, start_app_tag);
2929 }
2930}
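
One easy-to-miss detail above: a tuple whose ref tag is T10_PI_REF_ESCAPE (all ones) or whose app tag is T10_PI_APP_ESCAPE (all ones) is skipped entirely, because the escape values mean checking is disabled for that block (though the expected ref tag still advances). A compact sketch of the per-tuple decision order, with hypothetical names and tags assumed already converted to CPU byte order:

	#include <stdint.h>

	/* Sketch of the per-tuple verification order used above: escape
	 * values first, then guard, ref and app tags.
	 */
	enum bg_err { BG_OK, BG_GUARD, BG_REF, BG_APP };

	static enum bg_err check_tuple(uint16_t guard, uint16_t app, uint32_t ref,
				       uint16_t computed_guard, uint16_t exp_app,
				       uint32_t exp_ref,
				       int chk_guard, int chk_ref, int chk_app)
	{
		if (ref == 0xffffffff || app == 0xffff)
			return BG_OK;		/* escape: do not check block */
		if (chk_guard && guard != computed_guard)
			return BG_GUARD;
		if (chk_ref && ref != exp_ref)
			return BG_REF;
		if (chk_app && app != exp_app)
			return BG_APP;
		return BG_OK;
	}
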
2931
96e209be
JS
2932/*
2933 * This function checks for BlockGuard errors detected by
2934 * the HBA. In case of errors, the ASC/ASCQ fields in the
2935 * sense buffer will be set accordingly, paired with
2936 * ILLEGAL_REQUEST to signal to the kernel that the HBA
2937 * detected corruption.
2938 *
2939 * Returns:
2940 * 0 - No error found
2941 * 1 - BlockGuard error found
2942 * -1 - Internal error (bad profile, etc.)
2943 */
2944static int
2945lpfc_sli4_parse_bg_err(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_cmd,
2946 struct lpfc_wcqe_complete *wcqe)
2947{
2948 struct scsi_cmnd *cmd = lpfc_cmd->pCmd;
2949 int ret = 0;
2950 u32 status = bf_get(lpfc_wcqe_c_status, wcqe);
2951 u32 bghm = 0;
2952 u32 bgstat = 0;
2953 u64 failing_sector = 0;
2954
2955 if (status == CQE_STATUS_DI_ERROR) {
2956 if (bf_get(lpfc_wcqe_c_bg_ge, wcqe)) /* Guard Check failed */
2957 bgstat |= BGS_GUARD_ERR_MASK;
2958 if (bf_get(lpfc_wcqe_c_bg_ae, wcqe)) /* AppTag Check failed */
2959 bgstat |= BGS_APPTAG_ERR_MASK;
2960 if (bf_get(lpfc_wcqe_c_bg_re, wcqe)) /* RefTag Check failed */
2961 bgstat |= BGS_REFTAG_ERR_MASK;
2962
2963 /* Check to see if there was any good data before the error */
2964 if (bf_get(lpfc_wcqe_c_bg_tdpv, wcqe)) {
2965 bgstat |= BGS_HI_WATER_MARK_PRESENT_MASK;
2966 bghm = wcqe->total_data_placed;
2967 }
2968
2969 /*
2970 * Set ALL the error bits to indicate we don't know what
2971 * type of error it is.
2972 */
2973 if (!bgstat)
2974 bgstat |= (BGS_REFTAG_ERR_MASK | BGS_APPTAG_ERR_MASK |
2975 BGS_GUARD_ERR_MASK);
2976 }
2977
2978 if (lpfc_bgs_get_guard_err(bgstat)) {
2979 ret = 1;
2980
f2b1e9c6
HR
2981 scsi_build_sense(cmd, 1, ILLEGAL_REQUEST, 0x10, 0x1);
2982 set_host_byte(cmd, DID_ABORT);
96e209be
JS
2983 phba->bg_guard_err_cnt++;
2984 lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_BG,
2985 "9059 BLKGRD: Guard Tag error in cmd"
2986 " 0x%x lba 0x%llx blk cnt 0x%x "
2987 "bgstat=x%x bghm=x%x\n", cmd->cmnd[0],
2988 (unsigned long long)scsi_get_lba(cmd),
125c12f7 2989 scsi_logical_block_count(cmd), bgstat, bghm);
96e209be
JS
2990 }
2991
2992 if (lpfc_bgs_get_reftag_err(bgstat)) {
2993 ret = 1;
2994
f2b1e9c6
HR
2995 scsi_build_sense(cmd, 1, ILLEGAL_REQUEST, 0x10, 0x3);
2996 set_host_byte(cmd, DID_ABORT);
96e209be
JS
2997
2998 phba->bg_reftag_err_cnt++;
2999 lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_BG,
3000 "9060 BLKGRD: Ref Tag error in cmd"
3001 " 0x%x lba 0x%llx blk cnt 0x%x "
3002 "bgstat=x%x bghm=x%x\n", cmd->cmnd[0],
3003 (unsigned long long)scsi_get_lba(cmd),
125c12f7 3004 scsi_logical_block_count(cmd), bgstat, bghm);
96e209be
JS
3005 }
3006
3007 if (lpfc_bgs_get_apptag_err(bgstat)) {
3008 ret = 1;
3009
f2b1e9c6
HR
3010 scsi_build_sense(cmd, 1, ILLEGAL_REQUEST, 0x10, 0x2);
3011 set_host_byte(cmd, DID_ABORT);
96e209be
JS
3012
3013 phba->bg_apptag_err_cnt++;
3014 lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_BG,
3015 "9062 BLKGRD: App Tag error in cmd"
3016 " 0x%x lba 0x%llx blk cnt 0x%x "
3017 "bgstat=x%x bghm=x%x\n", cmd->cmnd[0],
3018 (unsigned long long)scsi_get_lba(cmd),
125c12f7 3019 scsi_logical_block_count(cmd), bgstat, bghm);
96e209be
JS
3020 }
3021
3022 if (lpfc_bgs_get_hi_water_mark_present(bgstat)) {
3023 /*
3024 * setup sense data descriptor 0 per SPC-4 as an information
3025 * field, and put the failing LBA in it.
3026 * This code assumes there was also a guard/app/ref tag error
3027 * indication.
3028 */
3029 cmd->sense_buffer[7] = 0xc; /* Additional sense length */
3030 cmd->sense_buffer[8] = 0; /* Information descriptor type */
3031 cmd->sense_buffer[9] = 0xa; /* Additional descriptor length */
3032 cmd->sense_buffer[10] = 0x80; /* Validity bit */
3033
3034 /* bghm is an "on the wire" FC frame based count */
3035 switch (scsi_get_prot_op(cmd)) {
3036 case SCSI_PROT_READ_INSERT:
3037 case SCSI_PROT_WRITE_STRIP:
3038 bghm /= cmd->device->sector_size;
3039 break;
3040 case SCSI_PROT_READ_STRIP:
3041 case SCSI_PROT_WRITE_INSERT:
3042 case SCSI_PROT_READ_PASS:
3043 case SCSI_PROT_WRITE_PASS:
3044 bghm /= (cmd->device->sector_size +
3045 sizeof(struct scsi_dif_tuple));
3046 break;
3047 }
3048
3049 failing_sector = scsi_get_lba(cmd);
3050 failing_sector += bghm;
3051
3052 /* Descriptor Information */
3053 put_unaligned_be64(failing_sector, &cmd->sense_buffer[12]);
3054 }
3055
3056 if (!ret) {
3057 /* No error was reported - problem in FW? */
3058 lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_BG,
3059 "9068 BLKGRD: Unknown error in cmd"
3060 " 0x%x lba 0x%llx blk cnt 0x%x "
3061 "bgstat=x%x bghm=x%x\n", cmd->cmnd[0],
3062 (unsigned long long)scsi_get_lba(cmd),
125c12f7 3063 scsi_logical_block_count(cmd), bgstat, bghm);
96e209be 3064
125c12f7 3065 /* Calculate what type of error it was */
96e209be
JS
3066 lpfc_calc_bg_err(phba, lpfc_cmd);
3067 }
3068 return ret;
3069}
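
The bghm-to-LBA conversion above is worth a worked example. bghm counts bytes as they appeared on the wire: for READ_STRIP, WRITE_INSERT and the PASS operations each block travels with its 8-byte DIF tuple attached, so the stride is sector_size + 8, while READ_INSERT/WRITE_STRIP move bare data. With 512-byte sectors, bghm = 3120 on a READ_PASS gives block 3120 / 520 = 6, so the failing sector is scsi_get_lba(cmd) + 6. A sketch (hypothetical helper):

	#include <stdint.h>

	/* Convert the "on the wire" byte mark (bghm) into a failing LBA.
	 * dif_on_wire is true when each block carries its 8-byte DIF
	 * tuple on the wire (STRIP/INSERT at the HBA, and PASS ops).
	 */
	static uint64_t failing_lba(uint64_t start_lba, uint32_t bghm,
				    uint32_t sector_size, int dif_on_wire)
	{
		uint32_t stride = dif_on_wire ? sector_size + 8 : sector_size;

		return start_lba + bghm / stride;	/* 3120 / 520 = 6 */
	}
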
737d4248 3070
e2a0a9d6
JS
3071/*
3072 * This function checks for BlockGuard errors detected by
3073 * the HBA. In case of errors, the ASC/ASCQ fields in the
3074 * sense buffer will be set accordingly, paired with
3075 * ILLEGAL_REQUEST to signal to the kernel that the HBA
3076 * detected corruption.
3077 *
3078 * Returns:
3079 * 0 - No error found
3080 * 1 - BlockGuard error found
3081 * -1 - Internal error (bad profile, etc.)
3082 */
3083static int
c490850a
JS
3084lpfc_parse_bg_err(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_cmd,
3085 struct lpfc_iocbq *pIocbOut)
e2a0a9d6
JS
3086{
3087 struct scsi_cmnd *cmd = lpfc_cmd->pCmd;
3088 struct sli3_bg_fields *bgf = &pIocbOut->iocb.unsli3.sli3_bg;
3089 int ret = 0;
3090 uint32_t bghm = bgf->bghm;
3091 uint32_t bgstat = bgf->bgstat;
3092 uint64_t failing_sector = 0;
3093
e2a0a9d6 3094 if (lpfc_bgs_get_invalid_prof(bgstat)) {
c6668cae 3095 cmd->result = DID_ERROR << 16;
737d4248 3096 lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_BG,
68a6a66c
JS
3097 "9072 BLKGRD: Invalid BG Profile in cmd "
3098 "0x%x reftag 0x%x blk cnt 0x%x "
737d4248 3099 "bgstat=x%x bghm=x%x\n", cmd->cmnd[0],
125c12f7
MP
3100 scsi_prot_ref_tag(cmd),
3101 scsi_logical_block_count(cmd), bgstat, bghm);
e2a0a9d6
JS
3102 ret = (-1);
3103 goto out;
3104 }
3105
3106 if (lpfc_bgs_get_uninit_dif_block(bgstat)) {
c6668cae 3107 cmd->result = DID_ERROR << 16;
737d4248 3108 lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_BG,
68a6a66c
JS
3109 "9073 BLKGRD: Invalid BG PDIF Block in cmd "
3110 "0x%x reftag 0x%x blk cnt 0x%x "
737d4248 3111 "bgstat=x%x bghm=x%x\n", cmd->cmnd[0],
125c12f7
MP
3112 scsi_prot_ref_tag(cmd),
3113 scsi_logical_block_count(cmd), bgstat, bghm);
e2a0a9d6
JS
3114 ret = (-1);
3115 goto out;
3116 }
3117
3118 if (lpfc_bgs_get_guard_err(bgstat)) {
3119 ret = 1;
3120
f2b1e9c6
HR
3121 scsi_build_sense(cmd, 1, ILLEGAL_REQUEST, 0x10, 0x1);
3122 set_host_byte(cmd, DID_ABORT);
e2a0a9d6 3123 phba->bg_guard_err_cnt++;
737d4248 3124 lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_BG,
68a6a66c
JS
3125 "9055 BLKGRD: Guard Tag error in cmd "
3126 "0x%x reftag 0x%x blk cnt 0x%x "
737d4248 3127 "bgstat=x%x bghm=x%x\n", cmd->cmnd[0],
125c12f7
MP
3128 scsi_prot_ref_tag(cmd),
3129 scsi_logical_block_count(cmd), bgstat, bghm);
e2a0a9d6
JS
3130 }
3131
3132 if (lpfc_bgs_get_reftag_err(bgstat)) {
3133 ret = 1;
3134
f2b1e9c6
HR
3135 scsi_build_sense(cmd, 1, ILLEGAL_REQUEST, 0x10, 0x3);
3136 set_host_byte(cmd, DID_ABORT);
e2a0a9d6
JS
3137
3138 phba->bg_reftag_err_cnt++;
737d4248 3139 lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_BG,
68a6a66c
JS
3140 "9056 BLKGRD: Ref Tag error in cmd "
3141 "0x%x reftag 0x%x blk cnt 0x%x "
737d4248 3142 "bgstat=x%x bghm=x%x\n", cmd->cmnd[0],
125c12f7
MP
3143 scsi_prot_ref_tag(cmd),
3144 scsi_logical_block_count(cmd), bgstat, bghm);
e2a0a9d6
JS
3145 }
3146
3147 if (lpfc_bgs_get_apptag_err(bgstat)) {
3148 ret = 1;
3149
f2b1e9c6
HR
3150 scsi_build_sense(cmd, 1, ILLEGAL_REQUEST, 0x10, 0x2);
3151 set_host_byte(cmd, DID_ABORT);
e2a0a9d6
JS
3152
3153 phba->bg_apptag_err_cnt++;
737d4248 3154 lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_BG,
68a6a66c
JS
3155 "9061 BLKGRD: App Tag error in cmd "
3156 "0x%x reftag 0x%x blk cnt 0x%x "
737d4248 3157 "bgstat=x%x bghm=x%x\n", cmd->cmnd[0],
125c12f7
MP
3158 scsi_prot_ref_tag(cmd),
3159 scsi_logical_block_count(cmd), bgstat, bghm);
e2a0a9d6
JS
3160 }
3161
3162 if (lpfc_bgs_get_hi_water_mark_present(bgstat)) {
3163 /*
3164 * setup sense data descriptor 0 per SPC-4 as an information
7c56b9fd
JS
3165 * field, and put the failing LBA in it.
3166 * This code assumes there was also a guard/app/ref tag error
3167 * indication.
e2a0a9d6 3168 */
7c56b9fd
JS
3169 cmd->sense_buffer[7] = 0xc; /* Additional sense length */
3170 cmd->sense_buffer[8] = 0; /* Information descriptor type */
3171 cmd->sense_buffer[9] = 0xa; /* Additional descriptor length */
3172 cmd->sense_buffer[10] = 0x80; /* Validity bit */
acd6859b
JS
3173
3174 /* bghm is an "on the wire" FC frame based count */
3175 switch (scsi_get_prot_op(cmd)) {
3176 case SCSI_PROT_READ_INSERT:
3177 case SCSI_PROT_WRITE_STRIP:
3178 bghm /= cmd->device->sector_size;
3179 break;
3180 case SCSI_PROT_READ_STRIP:
3181 case SCSI_PROT_WRITE_INSERT:
3182 case SCSI_PROT_READ_PASS:
3183 case SCSI_PROT_WRITE_PASS:
3184 bghm /= (cmd->device->sector_size +
3185 sizeof(struct scsi_dif_tuple));
3186 break;
3187 }
e2a0a9d6
JS
3188
3189 failing_sector = scsi_get_lba(cmd);
3190 failing_sector += bghm;
3191
7c56b9fd
JS
3192 /* Descriptor Information */
3193 put_unaligned_be64(failing_sector, &cmd->sense_buffer[12]);
e2a0a9d6
JS
3194 }
3195
3196 if (!ret) {
3197 /* No error was reported - problem in FW? */
737d4248 3198 lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_BG,
68a6a66c
JS
3199 "9057 BLKGRD: Unknown error in cmd "
3200 "0x%x reftag 0x%x blk cnt 0x%x "
737d4248 3201 "bgstat=x%x bghm=x%x\n", cmd->cmnd[0],
125c12f7
MP
3202 scsi_prot_ref_tag(cmd),
3203 scsi_logical_block_count(cmd), bgstat, bghm);
737d4248 3204
125c12f7 3205 /* Calculate what type of error it was */
737d4248 3206 lpfc_calc_bg_err(phba, lpfc_cmd);
e2a0a9d6 3207 }
e2a0a9d6
JS
3208out:
3209 return ret;
dea3101e
JB
3210}
3211
da0436e9
JS
3212/**
3213 * lpfc_scsi_prep_dma_buf_s4 - DMA mapping for scsi buffer to SLI4 IF spec
3214 * @phba: The Hba for which this call is being executed.
3215 * @lpfc_cmd: The scsi buffer which is going to be mapped.
3216 *
3217 * This routine does the pci dma mapping for scatter-gather list of scsi cmnd
3218 * field of @lpfc_cmd for device with SLI-4 interface spec.
3219 *
3220 * Return codes:
5e0e2318
JS
3221 * 2 - Error - Do not retry
3222 * 1 - Error - Retry
6c8eea54 3223 * 0 - Success
da0436e9
JS
3224 **/
3225static int
c490850a 3226lpfc_scsi_prep_dma_buf_s4(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_cmd)
da0436e9
JS
3227{
3228 struct scsi_cmnd *scsi_cmnd = lpfc_cmd->pCmd;
3229 struct scatterlist *sgel = NULL;
3230 struct fcp_cmnd *fcp_cmnd = lpfc_cmd->fcp_cmnd;
0794d601 3231 struct sli4_sge *sgl = (struct sli4_sge *)lpfc_cmd->dma_sgl;
fedd3b7b 3232 struct sli4_sge *first_data_sgl;
da255e2e
JS
3233 struct lpfc_iocbq *pwqeq = &lpfc_cmd->cur_iocbq;
3234 struct lpfc_vport *vport = phba->pport;
3235 union lpfc_wqe128 *wqe = &pwqeq->wqe;
da0436e9 3236 dma_addr_t physaddr;
da0436e9
JS
3237 uint32_t dma_len;
3238 uint32_t dma_offset = 0;
d79c9e9d 3239 int nseg, i, j;
fedd3b7b 3240 struct ulp_bde64 *bde;
d79c9e9d
JS
3241 bool lsp_just_set = false;
3242 struct sli4_hybrid_sgl *sgl_xtra = NULL;
da0436e9
JS
3243
3244 /*
3245 * There are three possibilities here - use scatter-gather segment, use
3246 * the single mapping, or neither. Start the lpfc command prep by
3247 * bumping the bpl beyond the fcp_cmnd and fcp_rsp regions to the first
3248 * data bde entry.
3249 */
3250 if (scsi_sg_count(scsi_cmnd)) {
3251 /*
3252 * The driver stores the segment count returned from scsi_dma_map
3253 * because this is a count of dma-mappings used to map the use_sg
3254 * pages. They are not guaranteed to be the same for those
3255 * architectures that implement an IOMMU.
3256 */
3257
3258 nseg = scsi_dma_map(scsi_cmnd);
5116fbf1 3259 if (unlikely(nseg <= 0))
da0436e9
JS
3260 return 1;
3261 sgl += 1;
3262 /* clear the last flag in the fcp_rsp map entry */
3263 sgl->word2 = le32_to_cpu(sgl->word2);
3264 bf_set(lpfc_sli4_sge_last, sgl, 0);
3265 sgl->word2 = cpu_to_le32(sgl->word2);
3266 sgl += 1;
fedd3b7b 3267 first_data_sgl = sgl;
da0436e9 3268 lpfc_cmd->seg_cnt = nseg;
d79c9e9d
JS
3269 if (!phba->cfg_xpsgl &&
3270 lpfc_cmd->seg_cnt > phba->cfg_sg_seg_cnt) {
372c187b
DK
3271 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
3272 "9074 BLKGRD:"
3273 " %s: Too many sg segments from "
3274 "dma_map_sg. Config %d, seg_cnt %d\n",
3275 __func__, phba->cfg_sg_seg_cnt,
3276 lpfc_cmd->seg_cnt);
5e0e2318 3277 WARN_ON_ONCE(lpfc_cmd->seg_cnt > phba->cfg_sg_seg_cnt);
96f7077f 3278 lpfc_cmd->seg_cnt = 0;
da0436e9 3279 scsi_dma_unmap(scsi_cmnd);
5e0e2318 3280 return 2;
da0436e9
JS
3281 }

		/*
		 * The driver established a maximum scatter-gather segment
		 * count during probe that limits the number of sg elements
		 * in any single scsi command.  Just run through the seg_cnt
		 * and format the sge's.
		 * When using SLI-3 the driver will try to fit all the BDEs
		 * into the IOCB. If it can't then the BDEs get added to a
		 * BPL as it does for SLI-2 mode.
		 */

		/* for tracking segment boundaries */
		sgel = scsi_sglist(scsi_cmnd);
		j = 2;
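		/*
		 * Note: j counts SGEs consumed in the current SGL page,
		 * starting at 2 for the fcp_cmnd and fcp_rsp entries.  When
		 * the next entry would cross a page boundary
		 * (border_sge_num), the loop below chains to an extra
		 * per-hdwq SGL page through an LSP (list segment pointer)
		 * SGE rather than a data SGE.
		 */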
		for (i = 0; i < nseg; i++) {
			sgl->word2 = 0;
			if (nseg == 1) {
				bf_set(lpfc_sli4_sge_last, sgl, 1);
				bf_set(lpfc_sli4_sge_type, sgl,
				       LPFC_SGE_TYPE_DATA);
			} else {
				bf_set(lpfc_sli4_sge_last, sgl, 0);

				/* do we need to expand the segment */
				if (!lsp_just_set &&
				    !((j + 1) % phba->border_sge_num) &&
				    ((nseg - 1) != i)) {
					/* set LSP type */
					bf_set(lpfc_sli4_sge_type, sgl,
					       LPFC_SGE_TYPE_LSP);

					sgl_xtra = lpfc_get_sgl_per_hdwq(
							phba, lpfc_cmd);

					if (unlikely(!sgl_xtra)) {
						lpfc_cmd->seg_cnt = 0;
						scsi_dma_unmap(scsi_cmnd);
						return 1;
					}
					sgl->addr_lo = cpu_to_le32(putPaddrLow(
						       sgl_xtra->dma_phys_sgl));
					sgl->addr_hi = cpu_to_le32(putPaddrHigh(
						       sgl_xtra->dma_phys_sgl));

				} else {
					bf_set(lpfc_sli4_sge_type, sgl,
					       LPFC_SGE_TYPE_DATA);
				}
			}

			if (!(bf_get(lpfc_sli4_sge_type, sgl) &
			      LPFC_SGE_TYPE_LSP)) {
				if ((nseg - 1) == i)
					bf_set(lpfc_sli4_sge_last, sgl, 1);

				physaddr = sg_dma_address(sgel);
				dma_len = sg_dma_len(sgel);
				sgl->addr_lo = cpu_to_le32(putPaddrLow(
						       physaddr));
				sgl->addr_hi = cpu_to_le32(putPaddrHigh(
						       physaddr));

				bf_set(lpfc_sli4_sge_offset, sgl, dma_offset);
				sgl->word2 = cpu_to_le32(sgl->word2);
				sgl->sge_len = cpu_to_le32(dma_len);

				dma_offset += dma_len;
				sgel = sg_next(sgel);

				sgl++;
				lsp_just_set = false;

			} else {
				sgl->word2 = cpu_to_le32(sgl->word2);
				sgl->sge_len = cpu_to_le32(
						phba->cfg_sg_dma_buf_size);

				sgl = (struct sli4_sge *)sgl_xtra->dma_sgl;
				i = i - 1;

				lsp_just_set = true;
			}

			j++;
		}
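
		/*
		 * A PBDE ("performance BDE") lets the adapter take a
		 * single-SGE IO's data buffer directly from WQE words
		 * 13-15 instead of fetching the SGL from host memory,
		 * which is why it only applies when nseg == 1.
		 */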
		/* PBDE support for first data SGE only.
		 * For FCoE, we key off Performance Hints.
		 * For FC, we key off lpfc_enable_pbde.
		 */
		if (nseg == 1 &&
		    ((phba->sli3_options & LPFC_SLI4_PERFH_ENABLED) ||
		     phba->cfg_enable_pbde)) {
			/* Words 13-15 */
			bde = (struct ulp_bde64 *)
				&wqe->words[13];
			bde->addrLow = first_data_sgl->addr_lo;
			bde->addrHigh = first_data_sgl->addr_hi;
			bde->tus.f.bdeSize =
				le32_to_cpu(first_data_sgl->sge_len);
			bde->tus.f.bdeFlags = BUFF_TYPE_BDE_64;
			bde->tus.w = cpu_to_le32(bde->tus.w);

			/* Word 11 - set PBDE bit */
			bf_set(wqe_pbde, &wqe->generic.wqe_com, 1);
		} else {
			memset(&wqe->words[13], 0, (sizeof(uint32_t) * 3));
			/* Word 11 - PBDE bit disabled by default template */
		}
	} else {
		sgl += 1;
		/* set the last flag in the fcp_rsp map entry */
		sgl->word2 = le32_to_cpu(sgl->word2);
		bf_set(lpfc_sli4_sge_last, sgl, 1);
		sgl->word2 = cpu_to_le32(sgl->word2);

		if ((phba->sli3_options & LPFC_SLI4_PERFH_ENABLED) ||
		    phba->cfg_enable_pbde) {
			bde = (struct ulp_bde64 *)
				&wqe->words[13];
			memset(bde, 0, (sizeof(uint32_t) * 3));
		}
	}

	/*
	 * Finish initializing those IOCB fields that are dependent on the
	 * scsi_cmnd request_buffer.  Note that for SLI-2 the bdeSize is
	 * explicitly reinitialized.  All iocb memory resources are reused.
	 */
	fcp_cmnd->fcpDl = cpu_to_be32(scsi_bufflen(scsi_cmnd));
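
	/*
	 * FCP first burst lets a write send some data before the target
	 * returns XFER_RDY; words 4 and 5 of the iwrite WQE below carry
	 * the unsolicited (initial) and total transfer lengths.
	 */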
	/* Set first-burst provided it was successfully negotiated */
	if (!(phba->hba_flag & HBA_FCOE_MODE) &&
	    vport->cfg_first_burst_size &&
	    scsi_cmnd->sc_data_direction == DMA_TO_DEVICE) {
		u32 init_len, total_len;

		total_len = be32_to_cpu(fcp_cmnd->fcpDl);
		init_len = min(total_len, vport->cfg_first_burst_size);

		/* Word 4 & 5 */
		wqe->fcp_iwrite.initial_xfer_len = init_len;
		wqe->fcp_iwrite.total_xfer_len = total_len;
	} else {
		/* Word 4 */
		wqe->fcp_iwrite.total_xfer_len =
			be32_to_cpu(fcp_cmnd->fcpDl);
	}

	/*
	 * If the OAS driver feature is enabled and the lun is enabled for
	 * OAS, set the oas iocb related flags.
	 */
	if ((phba->cfg_fof) && ((struct lpfc_device_data *)
	    scsi_cmnd->device->hostdata)->oas_enabled) {
		lpfc_cmd->cur_iocbq.iocb_flag |= (LPFC_IO_OAS | LPFC_IO_FOF);
		lpfc_cmd->cur_iocbq.priority = ((struct lpfc_device_data *)
			scsi_cmnd->device->hostdata)->priority;

		/* Word 10 */
		bf_set(wqe_oas, &wqe->generic.wqe_com, 1);
		bf_set(wqe_ccpe, &wqe->generic.wqe_com, 1);

		if (lpfc_cmd->cur_iocbq.priority)
			bf_set(wqe_ccp, &wqe->generic.wqe_com,
			       (lpfc_cmd->cur_iocbq.priority << 1));
		else
			bf_set(wqe_ccp, &wqe->generic.wqe_com,
			       (phba->cfg_XLanePriority << 1));
	}

	return 0;
}

/**
 * lpfc_bg_scsi_prep_dma_buf_s4 - DMA mapping for scsi buffer to SLI4 IF spec
 * @phba: The Hba for which this call is being executed.
 * @lpfc_cmd: The scsi buffer which is going to be mapped.
 *
 * This is the protection/DIF aware version of
 * lpfc_scsi_prep_dma_buf(). It may be a good idea to combine the
 * two functions eventually, but for now, it's here.
 *
 * Return codes:
 *	2 - Error - Do not retry
 *	1 - Error - Retry
 *	0 - Success
 **/
static int
lpfc_bg_scsi_prep_dma_buf_s4(struct lpfc_hba *phba,
			     struct lpfc_io_buf *lpfc_cmd)
{
	struct scsi_cmnd *scsi_cmnd = lpfc_cmd->pCmd;
	struct fcp_cmnd *fcp_cmnd = lpfc_cmd->fcp_cmnd;
	struct sli4_sge *sgl = (struct sli4_sge *)(lpfc_cmd->dma_sgl);
	struct lpfc_iocbq *pwqeq = &lpfc_cmd->cur_iocbq;
	union lpfc_wqe128 *wqe = &pwqeq->wqe;
	uint32_t num_sge = 0;
	int datasegcnt, protsegcnt, datadir = scsi_cmnd->sc_data_direction;
	int prot_group_type = 0;
	int fcpdl;
	int ret = 1;
	struct lpfc_vport *vport = phba->pport;

	/*
	 * Start the lpfc command prep by bumping the sgl beyond fcp_cmnd
	 * fcp_rsp regions to the first data sge entry
	 */
	if (scsi_sg_count(scsi_cmnd)) {
		/*
		 * The driver stores the segment count returned from pci_map_sg
		 * because this is a count of dma-mappings used to map the
		 * use_sg pages.  They are not guaranteed to be the same for
		 * those architectures that implement an IOMMU.
		 */
		datasegcnt = dma_map_sg(&phba->pcidev->dev,
					scsi_sglist(scsi_cmnd),
					scsi_sg_count(scsi_cmnd), datadir);
		if (unlikely(!datasegcnt))
			return 1;

		sgl += 1;
		/* clear the last flag in the fcp_rsp map entry */
		sgl->word2 = le32_to_cpu(sgl->word2);
		bf_set(lpfc_sli4_sge_last, sgl, 0);
		sgl->word2 = cpu_to_le32(sgl->word2);

		sgl += 1;
		lpfc_cmd->seg_cnt = datasegcnt;

		/* First check if data segment count from SCSI Layer is good */
		if (lpfc_cmd->seg_cnt > phba->cfg_sg_seg_cnt &&
		    !phba->cfg_xpsgl) {
			WARN_ON_ONCE(lpfc_cmd->seg_cnt > phba->cfg_sg_seg_cnt);
			ret = 2;
			goto err;
		}

		prot_group_type = lpfc_prot_group_type(phba, scsi_cmnd);
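		/*
		 * NO_DIF: the HBA inserts/strips the protection data itself,
		 * so only a DISEED (DIF seed) SGE precedes the data SGEs.
		 * DIF_BUF: the midlayer passed separate protection buffers,
		 * which must be DMA mapped and interleaved with the data.
		 */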
		switch (prot_group_type) {
		case LPFC_PG_TYPE_NO_DIF:
			/* Here we need to add a DISEED to the count */
			if (((lpfc_cmd->seg_cnt + 1) >
			     phba->cfg_total_seg_cnt) &&
			    !phba->cfg_xpsgl) {
				ret = 2;
				goto err;
			}

			num_sge = lpfc_bg_setup_sgl(phba, scsi_cmnd, sgl,
						    datasegcnt, lpfc_cmd);

			/* we should have 2 or more entries in buffer list */
			if (num_sge < 2) {
				ret = 2;
				goto err;
			}
			break;

		case LPFC_PG_TYPE_DIF_BUF:
			/*
			 * This type indicates that protection buffers are
			 * passed to the driver, so that needs to be prepared
			 * for DMA
			 */
			protsegcnt = dma_map_sg(&phba->pcidev->dev,
						scsi_prot_sglist(scsi_cmnd),
						scsi_prot_sg_count(scsi_cmnd),
						datadir);
			if (unlikely(!protsegcnt)) {
				scsi_dma_unmap(scsi_cmnd);
				return 1;
			}

			lpfc_cmd->prot_seg_cnt = protsegcnt;
			/*
			 * There is a minimum of 3 SGEs used for every
			 * protection data segment.
			 */
			if (((lpfc_cmd->prot_seg_cnt * 3) >
			     (phba->cfg_total_seg_cnt - 2)) &&
			    !phba->cfg_xpsgl) {
				ret = 2;
				goto err;
			}

			num_sge = lpfc_bg_setup_sgl_prot(phba, scsi_cmnd, sgl,
							 datasegcnt,
							 protsegcnt, lpfc_cmd);

			/* we should have 3 or more entries in buffer list */
			if (num_sge < 3 ||
			    (num_sge > phba->cfg_total_seg_cnt &&
			     !phba->cfg_xpsgl)) {
				ret = 2;
				goto err;
			}
			break;

		case LPFC_PG_TYPE_INVALID:
		default:
			scsi_dma_unmap(scsi_cmnd);
			lpfc_cmd->seg_cnt = 0;

			lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
					"9083 Unexpected protection group %i\n",
					prot_group_type);
			return 2;
		}
	}

	switch (scsi_get_prot_op(scsi_cmnd)) {
	case SCSI_PROT_WRITE_STRIP:
	case SCSI_PROT_READ_STRIP:
		lpfc_cmd->cur_iocbq.iocb_flag |= LPFC_IO_DIF_STRIP;
		break;
	case SCSI_PROT_WRITE_INSERT:
	case SCSI_PROT_READ_INSERT:
		lpfc_cmd->cur_iocbq.iocb_flag |= LPFC_IO_DIF_INSERT;
		break;
	case SCSI_PROT_WRITE_PASS:
	case SCSI_PROT_READ_PASS:
		lpfc_cmd->cur_iocbq.iocb_flag |= LPFC_IO_DIF_PASS;
		break;
	}

	fcpdl = lpfc_bg_scsi_adjust_dl(phba, lpfc_cmd);
	fcp_cmnd->fcpDl = be32_to_cpu(fcpdl);

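	/*
	 * fcpDl is the length on the wire: lpfc_bg_scsi_adjust_dl() accounts
	 * for the 8-byte DIF tuples inserted or stripped in flight, so it
	 * can differ from scsi_bufflen().
	 */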
	/* Set first-burst provided it was successfully negotiated */
	if (!(phba->hba_flag & HBA_FCOE_MODE) &&
	    vport->cfg_first_burst_size &&
	    scsi_cmnd->sc_data_direction == DMA_TO_DEVICE) {
		u32 init_len, total_len;

		total_len = be32_to_cpu(fcp_cmnd->fcpDl);
		init_len = min(total_len, vport->cfg_first_burst_size);

		/* Word 4 & 5 */
		wqe->fcp_iwrite.initial_xfer_len = init_len;
		wqe->fcp_iwrite.total_xfer_len = total_len;
	} else {
		/* Word 4 */
		wqe->fcp_iwrite.total_xfer_len =
			be32_to_cpu(fcp_cmnd->fcpDl);
	}

	/*
	 * If the OAS driver feature is enabled and the lun is enabled for
	 * OAS, set the oas iocb related flags.
	 */
	if ((phba->cfg_fof) && ((struct lpfc_device_data *)
	    scsi_cmnd->device->hostdata)->oas_enabled) {
		lpfc_cmd->cur_iocbq.iocb_flag |= (LPFC_IO_OAS | LPFC_IO_FOF);

		/* Word 10 */
		bf_set(wqe_oas, &wqe->generic.wqe_com, 1);
		bf_set(wqe_ccpe, &wqe->generic.wqe_com, 1);
		bf_set(wqe_ccp, &wqe->generic.wqe_com,
		       (phba->cfg_XLanePriority << 1));
	}

	/* Word 7. DIF Flags */
	if (lpfc_cmd->cur_iocbq.iocb_flag & LPFC_IO_DIF_PASS)
		bf_set(wqe_dif, &wqe->generic.wqe_com, LPFC_WQE_DIF_PASSTHRU);
	else if (lpfc_cmd->cur_iocbq.iocb_flag & LPFC_IO_DIF_STRIP)
		bf_set(wqe_dif, &wqe->generic.wqe_com, LPFC_WQE_DIF_STRIP);
	else if (lpfc_cmd->cur_iocbq.iocb_flag & LPFC_IO_DIF_INSERT)
		bf_set(wqe_dif, &wqe->generic.wqe_com, LPFC_WQE_DIF_INSERT);

	lpfc_cmd->cur_iocbq.iocb_flag &= ~(LPFC_IO_DIF_PASS |
					   LPFC_IO_DIF_STRIP |
					   LPFC_IO_DIF_INSERT);

	return 0;
err:
	if (lpfc_cmd->seg_cnt)
		scsi_dma_unmap(scsi_cmnd);
	if (lpfc_cmd->prot_seg_cnt)
		dma_unmap_sg(&phba->pcidev->dev, scsi_prot_sglist(scsi_cmnd),
			     scsi_prot_sg_count(scsi_cmnd),
			     scsi_cmnd->sc_data_direction);

	lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
			"9084 Cannot setup S/G List for HBA "
			"IO segs %d/%d SGL %d SCSI %d: %d %d\n",
			lpfc_cmd->seg_cnt, lpfc_cmd->prot_seg_cnt,
			phba->cfg_total_seg_cnt, phba->cfg_sg_seg_cnt,
			prot_group_type, num_sge);

	lpfc_cmd->seg_cnt = 0;
	lpfc_cmd->prot_seg_cnt = 0;
	return ret;
}

/**
 * lpfc_scsi_prep_dma_buf - Wrapper function for DMA mapping of scsi buffer
 * @phba: The Hba for which this call is being executed.
 * @lpfc_cmd: The scsi buffer which is going to be mapped.
 *
 * This routine wraps the actual DMA mapping function pointer from the
 * lpfc_hba struct.
 *
 * Return codes:
 *	1 - Error
 *	0 - Success
 **/
static inline int
lpfc_scsi_prep_dma_buf(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_cmd)
{
	return phba->lpfc_scsi_prep_dma_buf(phba, lpfc_cmd);
}

/**
 * lpfc_bg_scsi_prep_dma_buf - Wrapper function for DMA mapping of scsi buffer
 * using BlockGuard.
 * @phba: The Hba for which this call is being executed.
 * @lpfc_cmd: The scsi buffer which is going to be mapped.
 *
 * This routine wraps the actual DMA mapping function pointer from the
 * lpfc_hba struct.
 *
 * Return codes:
 *	1 - Error
 *	0 - Success
 **/
static inline int
lpfc_bg_scsi_prep_dma_buf(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_cmd)
{
	return phba->lpfc_bg_scsi_prep_dma_buf(phba, lpfc_cmd);
}

/**
 * lpfc_scsi_prep_cmnd_buf - Wrapper function for IOCB/WQE mapping of scsi
 * buffer
 * @vport: Pointer to vport object.
 * @lpfc_cmd: The scsi buffer which is going to be mapped.
 * @tmo: Timeout value for IO
 *
 * This routine initializes IOCB/WQE data structure from scsi command
 *
 * Return codes:
 *	1 - Error
 *	0 - Success
 **/
static inline int
lpfc_scsi_prep_cmnd_buf(struct lpfc_vport *vport, struct lpfc_io_buf *lpfc_cmd,
			uint8_t tmo)
{
	return vport->phba->lpfc_scsi_prep_cmnd_buf(vport, lpfc_cmd, tmo);
}

/**
 * lpfc_send_scsi_error_event - Posts an event when there is SCSI error
 * @phba: Pointer to hba context object.
 * @vport: Pointer to vport object.
 * @lpfc_cmd: Pointer to lpfc scsi command which reported the error.
 * @fcpi_parm: FCP Initiator parameter.
 *
 * This function posts an event when there is a SCSI command reporting
 * error from the scsi device.
 **/
static void
lpfc_send_scsi_error_event(struct lpfc_hba *phba, struct lpfc_vport *vport,
			   struct lpfc_io_buf *lpfc_cmd, uint32_t fcpi_parm) {
	struct scsi_cmnd *cmnd = lpfc_cmd->pCmd;
	struct fcp_rsp *fcprsp = lpfc_cmd->fcp_rsp;
	uint32_t resp_info = fcprsp->rspStatus2;
	uint32_t scsi_status = fcprsp->rspStatus3;
	struct lpfc_fast_path_event *fast_path_evt = NULL;
	struct lpfc_nodelist *pnode = lpfc_cmd->rdata->pnode;
	unsigned long flags;

	if (!pnode)
		return;

	/* If there is queuefull or busy condition send a scsi event */
	if ((cmnd->result == SAM_STAT_TASK_SET_FULL) ||
	    (cmnd->result == SAM_STAT_BUSY)) {
		fast_path_evt = lpfc_alloc_fast_evt(phba);
		if (!fast_path_evt)
			return;
		fast_path_evt->un.scsi_evt.event_type =
			FC_REG_SCSI_EVENT;
		fast_path_evt->un.scsi_evt.subcategory =
			(cmnd->result == SAM_STAT_TASK_SET_FULL) ?
			LPFC_EVENT_QFULL : LPFC_EVENT_DEVBSY;
		fast_path_evt->un.scsi_evt.lun = cmnd->device->lun;
		memcpy(&fast_path_evt->un.scsi_evt.wwpn,
		       &pnode->nlp_portname, sizeof(struct lpfc_name));
		memcpy(&fast_path_evt->un.scsi_evt.wwnn,
		       &pnode->nlp_nodename, sizeof(struct lpfc_name));
	} else if ((resp_info & SNS_LEN_VALID) && fcprsp->rspSnsLen &&
		   ((cmnd->cmnd[0] == READ_10) ||
		    (cmnd->cmnd[0] == WRITE_10))) {
		fast_path_evt = lpfc_alloc_fast_evt(phba);
		if (!fast_path_evt)
			return;
		fast_path_evt->un.check_cond_evt.scsi_event.event_type =
			FC_REG_SCSI_EVENT;
		fast_path_evt->un.check_cond_evt.scsi_event.subcategory =
			LPFC_EVENT_CHECK_COND;
		fast_path_evt->un.check_cond_evt.scsi_event.lun =
			cmnd->device->lun;
		memcpy(&fast_path_evt->un.check_cond_evt.scsi_event.wwpn,
		       &pnode->nlp_portname, sizeof(struct lpfc_name));
		memcpy(&fast_path_evt->un.check_cond_evt.scsi_event.wwnn,
		       &pnode->nlp_nodename, sizeof(struct lpfc_name));
		fast_path_evt->un.check_cond_evt.sense_key =
			cmnd->sense_buffer[2] & 0xf;
		fast_path_evt->un.check_cond_evt.asc = cmnd->sense_buffer[12];
		fast_path_evt->un.check_cond_evt.ascq = cmnd->sense_buffer[13];
	} else if ((cmnd->sc_data_direction == DMA_FROM_DEVICE) &&
		   fcpi_parm &&
		   ((be32_to_cpu(fcprsp->rspResId) != fcpi_parm) ||
		    ((scsi_status == SAM_STAT_GOOD) &&
		     !(resp_info & (RESID_UNDER | RESID_OVER))))) {
		/*
		 * If status is good or resid does not match with fcp_param
		 * and there is valid fcpi_parm, then there is a read_check
		 * error
		 */
		fast_path_evt = lpfc_alloc_fast_evt(phba);
		if (!fast_path_evt)
			return;
		fast_path_evt->un.read_check_error.header.event_type =
			FC_REG_FABRIC_EVENT;
		fast_path_evt->un.read_check_error.header.subcategory =
			LPFC_EVENT_FCPRDCHKERR;
		memcpy(&fast_path_evt->un.read_check_error.header.wwpn,
		       &pnode->nlp_portname, sizeof(struct lpfc_name));
		memcpy(&fast_path_evt->un.read_check_error.header.wwnn,
		       &pnode->nlp_nodename, sizeof(struct lpfc_name));
		fast_path_evt->un.read_check_error.lun = cmnd->device->lun;
		fast_path_evt->un.read_check_error.opcode = cmnd->cmnd[0];
		fast_path_evt->un.read_check_error.fcpiparam =
			fcpi_parm;
	} else
		return;

	fast_path_evt->vport = vport;
	spin_lock_irqsave(&phba->hbalock, flags);
	list_add_tail(&fast_path_evt->work_evt.evt_listp, &phba->work_list);
	spin_unlock_irqrestore(&phba->hbalock, flags);
	lpfc_worker_wake_up(phba);
	return;
}

/**
 * lpfc_scsi_unprep_dma_buf - Un-map DMA mapping of SG-list for dev
 * @phba: The HBA for which this call is being executed.
 * @psb: The scsi buffer which is going to be un-mapped.
 *
 * This routine does DMA un-mapping of scatter gather list of scsi command
 * field of @psb for device with SLI-3 interface spec.
 **/
static void
lpfc_scsi_unprep_dma_buf(struct lpfc_hba *phba, struct lpfc_io_buf *psb)
{
	/*
	 * There are only two special cases to consider.  (1) the scsi command
	 * requested scatter-gather usage or (2) the scsi command allocated
	 * a request buffer, but did not request use_sg.  There is a third
	 * case, but it does not require resource deallocation.
	 */
	if (psb->seg_cnt > 0)
		scsi_dma_unmap(psb->pCmd);
	if (psb->prot_seg_cnt > 0)
		dma_unmap_sg(&phba->pcidev->dev, scsi_prot_sglist(psb->pCmd),
			     scsi_prot_sg_count(psb->pCmd),
			     psb->pCmd->sc_data_direction);
}

/**
 * lpfc_unblock_requests - allow further commands to be queued.
 * @phba: pointer to phba object
 *
 * For single vport, just call scsi_unblock_requests on physical port.
 * For multiple vports, send scsi_unblock_requests for all the vports.
 */
void
lpfc_unblock_requests(struct lpfc_hba *phba)
{
	struct lpfc_vport **vports;
	struct Scsi_Host *shost;
	int i;

	if (phba->sli_rev == LPFC_SLI_REV4 &&
	    !phba->sli4_hba.max_cfg_param.vpi_used) {
		shost = lpfc_shost_from_vport(phba->pport);
		scsi_unblock_requests(shost);
		return;
	}

	vports = lpfc_create_vport_work_array(phba);
	if (vports != NULL)
		for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
			shost = lpfc_shost_from_vport(vports[i]);
			scsi_unblock_requests(shost);
		}
	lpfc_destroy_vport_work_array(phba, vports);
}

/**
 * lpfc_block_requests - prevent further commands from being queued.
 * @phba: pointer to phba object
 *
 * For single vport, just call scsi_block_requests on physical port.
 * For multiple vports, send scsi_block_requests for all the vports.
 */
void
lpfc_block_requests(struct lpfc_hba *phba)
{
	struct lpfc_vport **vports;
	struct Scsi_Host *shost;
	int i;

	if (atomic_read(&phba->cmf_stop_io))
		return;

	if (phba->sli_rev == LPFC_SLI_REV4 &&
	    !phba->sli4_hba.max_cfg_param.vpi_used) {
		shost = lpfc_shost_from_vport(phba->pport);
		scsi_block_requests(shost);
		return;
	}

	vports = lpfc_create_vport_work_array(phba);
	if (vports != NULL)
		for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
			shost = lpfc_shost_from_vport(vports[i]);
			scsi_block_requests(shost);
		}
	lpfc_destroy_vport_work_array(phba, vports);
}

/**
 * lpfc_update_cmf_cmpl - Adjust CMF counters for IO completion
 * @phba: The HBA for which this call is being executed.
 * @time: The latency of the IO that completed (in ns)
 * @size: The size of the IO that completed
 * @shost: SCSI host the IO completed on (NULL for a NVME IO)
 *
 * The routine adjusts the various Burst and Bandwidth counters used in
 * Congestion management and E2E. If time is set to LPFC_CGN_NOT_SENT,
 * that means the IO was never issued to the HBA, so this routine is
 * just being called to cleanup the counter from a previous
 * lpfc_update_cmf_cmd call.
 */
int
lpfc_update_cmf_cmpl(struct lpfc_hba *phba,
		     uint64_t time, uint32_t size, struct Scsi_Host *shost)
{
	struct lpfc_cgn_stat *cgs;

	if (time != LPFC_CGN_NOT_SENT) {
		/* lat is ns coming in, save latency in us */
		if (time < 1000)
			time = 1;
		else
			time = div_u64(time + 500, 1000); /* round it */

		cgs = this_cpu_ptr(phba->cmf_stat);
		atomic64_add(size, &cgs->rcv_bytes);
		atomic64_add(time, &cgs->rx_latency);
		atomic_inc(&cgs->rx_io_cnt);
	}
	return 0;
}

/**
 * lpfc_update_cmf_cmd - Adjust CMF counters for IO submission
 * @phba: The HBA for which this call is being executed.
 * @size: The size of the IO that will be issued
 *
 * The routine adjusts the various Burst and Bandwidth counters used in
 * Congestion management and E2E.
 */
int
lpfc_update_cmf_cmd(struct lpfc_hba *phba, uint32_t size)
{
	uint64_t total;
	struct lpfc_cgn_stat *cgs;
	int cpu;

	/* At this point we are either LPFC_CFG_MANAGED or LPFC_CFG_MONITOR */
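	/*
	 * In managed mode, sum the per-CPU byte counters for the current
	 * interval; once the interval's byte budget is exhausted, block the
	 * SCSI request queues and fail this submit with -EBUSY until
	 * congestion management opens a new interval.
	 */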
	if (phba->cmf_active_mode == LPFC_CFG_MANAGED &&
	    phba->cmf_max_bytes_per_interval) {
		total = 0;
		for_each_present_cpu(cpu) {
			cgs = per_cpu_ptr(phba->cmf_stat, cpu);
			total += atomic64_read(&cgs->total_bytes);
		}
		if (total >= phba->cmf_max_bytes_per_interval) {
			if (!atomic_xchg(&phba->cmf_bw_wait, 1)) {
				lpfc_block_requests(phba);
				phba->cmf_last_ts =
					lpfc_calc_cmf_latency(phba);
			}
			atomic_inc(&phba->cmf_busy);
			return -EBUSY;
		}
		if (size > atomic_read(&phba->rx_max_read_cnt))
			atomic_set(&phba->rx_max_read_cnt, size);
	}

	cgs = this_cpu_ptr(phba->cmf_stat);
	atomic64_add(size, &cgs->total_bytes);
	return 0;
}

/**
 * lpfc_handle_fcp_err - FCP response handler
 * @vport: The virtual port for which this call is being executed.
 * @lpfc_cmd: Pointer to lpfc_io_buf data structure.
 * @fcpi_parm: FCP Initiator parameter.
 *
 * This routine is called to process response IOCB with status field
 * IOSTAT_FCP_RSP_ERROR. This routine sets result field of scsi command
 * based upon SCSI and FCP error.
 **/
static void
lpfc_handle_fcp_err(struct lpfc_vport *vport, struct lpfc_io_buf *lpfc_cmd,
		    uint32_t fcpi_parm)
{
	struct scsi_cmnd *cmnd = lpfc_cmd->pCmd;
	struct fcp_cmnd *fcpcmd = lpfc_cmd->fcp_cmnd;
	struct fcp_rsp *fcprsp = lpfc_cmd->fcp_rsp;
	uint32_t resp_info = fcprsp->rspStatus2;
	uint32_t scsi_status = fcprsp->rspStatus3;
	uint32_t *lp;
	uint32_t host_status = DID_OK;
	uint32_t rsplen = 0;
	uint32_t fcpDl;
	uint32_t logit = LOG_FCP | LOG_FCP_ERROR;

	/*
	 * If this is a task management command, there is no
	 * scsi packet associated with this lpfc_cmd.  The driver
	 * consumes it.
	 */
	if (fcpcmd->fcpCntl2) {
		scsi_status = 0;
		goto out;
	}

	if (resp_info & RSP_LEN_VALID) {
		rsplen = be32_to_cpu(fcprsp->rspRspLen);
		if (rsplen != 0 && rsplen != 4 && rsplen != 8) {
			lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
					 "2719 Invalid response length: "
					 "tgt x%x lun x%llx cmnd x%x rsplen "
					 "x%x\n", cmnd->device->id,
					 cmnd->device->lun, cmnd->cmnd[0],
					 rsplen);
			host_status = DID_ERROR;
			goto out;
		}
		if (fcprsp->rspInfo3 != RSP_NO_FAILURE) {
			lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
					 "2757 Protocol failure detected "
					 "during processing of FCP I/O op: "
					 "tgt x%x lun x%llx cmnd x%x "
					 "rspInfo3 x%x\n",
					 cmnd->device->id,
					 cmnd->device->lun, cmnd->cmnd[0],
					 fcprsp->rspInfo3);
			host_status = DID_ERROR;
			goto out;
		}
	}

	if ((resp_info & SNS_LEN_VALID) && fcprsp->rspSnsLen) {
		uint32_t snslen = be32_to_cpu(fcprsp->rspSnsLen);
		if (snslen > SCSI_SENSE_BUFFERSIZE)
			snslen = SCSI_SENSE_BUFFERSIZE;

		if (resp_info & RSP_LEN_VALID)
			rsplen = be32_to_cpu(fcprsp->rspRspLen);
		memcpy(cmnd->sense_buffer, &fcprsp->rspInfo0 + rsplen, snslen);
	}
	lp = (uint32_t *)cmnd->sense_buffer;

	/* special handling for under run conditions */
	if (!scsi_status && (resp_info & RESID_UNDER)) {
		/* don't log under runs if fcp set... */
		if (vport->cfg_log_verbose & LOG_FCP)
			logit = LOG_FCP_ERROR;
		/* unless operator says so */
		if (vport->cfg_log_verbose & LOG_FCP_UNDER)
			logit = LOG_FCP_UNDER;
	}

	lpfc_printf_vlog(vport, KERN_WARNING, logit,
			 "9024 FCP command x%x failed: x%x SNS x%x x%x "
			 "Data: x%x x%x x%x x%x x%x\n",
			 cmnd->cmnd[0], scsi_status,
			 be32_to_cpu(*lp), be32_to_cpu(*(lp + 3)), resp_info,
			 be32_to_cpu(fcprsp->rspResId),
			 be32_to_cpu(fcprsp->rspSnsLen),
			 be32_to_cpu(fcprsp->rspRspLen),
			 fcprsp->rspInfo3);

	scsi_set_resid(cmnd, 0);
	fcpDl = be32_to_cpu(fcpcmd->fcpDl);
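	/*
	 * fcpi_parm carries the HBA's own view of the transfer (for an
	 * underrun, the residual it measured); comparing it against the
	 * target-reported residual below exposes silently dropped frames.
	 */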
	if (resp_info & RESID_UNDER) {
		scsi_set_resid(cmnd, be32_to_cpu(fcprsp->rspResId));

		lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP_UNDER,
				 "9025 FCP Underrun, expected %d, "
				 "residual %d Data: x%x x%x x%x\n",
				 fcpDl,
				 scsi_get_resid(cmnd), fcpi_parm,
				 cmnd->cmnd[0], cmnd->underflow);

		/*
		 * If there is an under run, check if under run reported by
		 * storage array is same as the under run reported by HBA.
		 * If this is not same, there is a dropped frame.
		 */
		if (fcpi_parm && (scsi_get_resid(cmnd) != fcpi_parm)) {
			lpfc_printf_vlog(vport, KERN_WARNING,
					 LOG_FCP | LOG_FCP_ERROR,
					 "9026 FCP Read Check Error "
					 "and Underrun Data: x%x x%x x%x x%x\n",
					 fcpDl,
					 scsi_get_resid(cmnd), fcpi_parm,
					 cmnd->cmnd[0]);
			scsi_set_resid(cmnd, scsi_bufflen(cmnd));
			host_status = DID_ERROR;
		}
		/*
		 * The cmnd->underflow is the minimum number of bytes that must
		 * be transferred for this command.  Provided a sense condition
		 * is not present, make sure the actual amount transferred is
		 * at least the underflow value or fail.
		 */
		if (!(resp_info & SNS_LEN_VALID) &&
		    (scsi_status == SAM_STAT_GOOD) &&
		    (scsi_bufflen(cmnd) - scsi_get_resid(cmnd)
		     < cmnd->underflow)) {
			lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
					 "9027 FCP command x%x residual "
					 "underrun converted to error "
					 "Data: x%x x%x x%x\n",
					 cmnd->cmnd[0], scsi_bufflen(cmnd),
					 scsi_get_resid(cmnd),
					 cmnd->underflow);
			host_status = DID_ERROR;
		}
	} else if (resp_info & RESID_OVER) {
		lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP,
				 "9028 FCP command x%x residual overrun error. "
				 "Data: x%x x%x\n", cmnd->cmnd[0],
				 scsi_bufflen(cmnd), scsi_get_resid(cmnd));
		host_status = DID_ERROR;

	/*
	 * Check SLI validation that all the transfer was actually done
	 * (fcpi_parm should be zero). Apply check only to reads.
	 */
	} else if (fcpi_parm) {
		lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP | LOG_FCP_ERROR,
				 "9029 FCP %s Check Error Data: "
				 "x%x x%x x%x x%x x%x\n",
				 ((cmnd->sc_data_direction == DMA_FROM_DEVICE) ?
				 "Read" : "Write"),
				 fcpDl, be32_to_cpu(fcprsp->rspResId),
				 fcpi_parm, cmnd->cmnd[0], scsi_status);

		/* There is some issue with the LPe12000 that causes it
		 * to miscalculate the fcpi_parm and falsely trip this
		 * recovery logic. Detect this case and don't error when true.
		 */
		if (fcpi_parm > fcpDl)
			goto out;

		switch (scsi_status) {
		case SAM_STAT_GOOD:
		case SAM_STAT_CHECK_CONDITION:
			/* Fabric dropped a data frame. Fail any successful
			 * command in which we detected dropped frames.
			 * A status of good or some check conditions could
			 * be considered a successful command.
			 */
			host_status = DID_ERROR;
			break;
		}
		scsi_set_resid(cmnd, scsi_bufflen(cmnd));
	}

 out:
	cmnd->result = host_status << 16 | scsi_status;
	lpfc_send_scsi_error_event(vport->phba, vport, lpfc_cmd, fcpi_parm);
}

/**
 * lpfc_fcp_io_cmd_wqe_cmpl - Complete a FCP IO
 * @phba: The hba for which this call is being executed.
 * @pwqeIn: The command WQE for the scsi cmnd.
 * @wcqe: Pointer to driver response CQE object.
 *
 * This routine assigns scsi command result by looking into response WQE
 * status field appropriately. This routine handles QUEUE FULL condition as
 * well by ramping down device queue depth.
 **/
static void
lpfc_fcp_io_cmd_wqe_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pwqeIn,
			 struct lpfc_wcqe_complete *wcqe)
{
	struct lpfc_io_buf *lpfc_cmd =
		(struct lpfc_io_buf *)pwqeIn->context1;
	struct lpfc_vport *vport = pwqeIn->vport;
	struct lpfc_rport_data *rdata;
	struct lpfc_nodelist *ndlp;
	struct scsi_cmnd *cmd;
	unsigned long flags;
	struct lpfc_fast_path_event *fast_path_evt;
	struct Scsi_Host *shost;
	u32 logit = LOG_FCP;
	u32 status, idx;
	unsigned long iflags = 0;
	u32 lat;
	u8 wait_xb_clr = 0;

	/* Sanity check on return of outstanding command */
	if (!lpfc_cmd) {
		lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
				 "9032 Null lpfc_cmd pointer. No "
				 "release, skip completion\n");
		return;
	}

	rdata = lpfc_cmd->rdata;
	ndlp = rdata->pnode;

	if (bf_get(lpfc_wcqe_c_xb, wcqe)) {
		/* TOREMOVE - currently this flag is checked during
		 * the release of lpfc_iocbq. Remove once we move
		 * to lpfc_wqe_job construct.
		 *
		 * This needs to be done outside buf_lock
		 */
		spin_lock_irqsave(&phba->hbalock, iflags);
		lpfc_cmd->cur_iocbq.iocb_flag |= LPFC_EXCHANGE_BUSY;
		spin_unlock_irqrestore(&phba->hbalock, iflags);
	}

	/* Guard against abort handler being called at same time */
	spin_lock(&lpfc_cmd->buf_lock);

	/* Sanity check on return of outstanding command */
	cmd = lpfc_cmd->pCmd;
	if (!cmd) {
		lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
				 "9042 I/O completion: Not an active IO\n");
		spin_unlock(&lpfc_cmd->buf_lock);
		lpfc_release_scsi_buf(phba, lpfc_cmd);
		return;
	}
	idx = lpfc_cmd->cur_iocbq.hba_wqidx;
	if (phba->sli4_hba.hdwq)
		phba->sli4_hba.hdwq[idx].scsi_cstat.io_cmpls++;

#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
	if (unlikely(phba->hdwqstat_on & LPFC_CHECK_SCSI_IO))
		this_cpu_inc(phba->sli4_hba.c_stat->cmpl_io);
#endif
	shost = cmd->device->host;

	status = bf_get(lpfc_wcqe_c_status, wcqe);
	lpfc_cmd->status = (status & LPFC_IOCB_STATUS_MASK);
	lpfc_cmd->result = (wcqe->parameter & IOERR_PARAM_MASK);

	lpfc_cmd->flags &= ~LPFC_SBUF_XBUSY;
	if (bf_get(lpfc_wcqe_c_xb, wcqe)) {
		lpfc_cmd->flags |= LPFC_SBUF_XBUSY;
		if (phba->cfg_fcp_wait_abts_rsp)
			wait_xb_clr = 1;
	}

#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
	if (lpfc_cmd->prot_data_type) {
		struct scsi_dif_tuple *src = NULL;

		src = (struct scsi_dif_tuple *)lpfc_cmd->prot_data_segment;
		/*
		 * Used to restore any changes to protection
		 * data for error injection.
		 */
		switch (lpfc_cmd->prot_data_type) {
		case LPFC_INJERR_REFTAG:
			src->ref_tag =
				lpfc_cmd->prot_data;
			break;
		case LPFC_INJERR_APPTAG:
			src->app_tag =
				(uint16_t)lpfc_cmd->prot_data;
			break;
		case LPFC_INJERR_GUARD:
			src->guard_tag =
				(uint16_t)lpfc_cmd->prot_data;
			break;
		default:
			break;
		}

		lpfc_cmd->prot_data = 0;
		lpfc_cmd->prot_data_type = 0;
		lpfc_cmd->prot_data_segment = NULL;
	}
#endif
	if (unlikely(lpfc_cmd->status)) {
		if (lpfc_cmd->status == IOSTAT_LOCAL_REJECT &&
		    (lpfc_cmd->result & IOERR_DRVR_MASK))
			lpfc_cmd->status = IOSTAT_DRIVER_REJECT;
		else if (lpfc_cmd->status >= IOSTAT_CNT)
			lpfc_cmd->status = IOSTAT_DEFAULT;
		if (lpfc_cmd->status == IOSTAT_FCP_RSP_ERROR &&
		    !lpfc_cmd->fcp_rsp->rspStatus3 &&
		    (lpfc_cmd->fcp_rsp->rspStatus2 & RESID_UNDER) &&
		    !(vport->cfg_log_verbose & LOG_FCP_UNDER))
			logit = 0;
		else
			logit = LOG_FCP | LOG_FCP_UNDER;
		lpfc_printf_vlog(vport, KERN_WARNING, logit,
				 "9034 FCP cmd x%x failed <%d/%lld> "
				 "status: x%x result: x%x "
				 "sid: x%x did: x%x oxid: x%x "
				 "Data: x%x x%x x%x\n",
				 cmd->cmnd[0],
				 cmd->device ? cmd->device->id : 0xffff,
				 cmd->device ? cmd->device->lun : 0xffff,
				 lpfc_cmd->status, lpfc_cmd->result,
				 vport->fc_myDID,
				 (ndlp) ? ndlp->nlp_DID : 0,
				 lpfc_cmd->cur_iocbq.sli4_xritag,
				 wcqe->parameter, wcqe->total_data_placed,
				 lpfc_cmd->cur_iocbq.iotag);
	}

	switch (lpfc_cmd->status) {
	case IOSTAT_SUCCESS:
		cmd->result = DID_OK << 16;
		break;
	case IOSTAT_FCP_RSP_ERROR:
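		/*
		 * total_xfer_len minus total_data_placed is the residual
		 * as computed by the HBA; it is handed to
		 * lpfc_handle_fcp_err() in place of the SLI-3 fcpi_parm.
		 */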
		lpfc_handle_fcp_err(vport, lpfc_cmd,
				    pwqeIn->wqe.fcp_iread.total_xfer_len -
				    wcqe->total_data_placed);
		break;
	case IOSTAT_NPORT_BSY:
	case IOSTAT_FABRIC_BSY:
		cmd->result = DID_TRANSPORT_DISRUPTED << 16;
		fast_path_evt = lpfc_alloc_fast_evt(phba);
		if (!fast_path_evt)
			break;
		fast_path_evt->un.fabric_evt.event_type =
			FC_REG_FABRIC_EVENT;
		fast_path_evt->un.fabric_evt.subcategory =
			(lpfc_cmd->status == IOSTAT_NPORT_BSY) ?
			LPFC_EVENT_PORT_BUSY : LPFC_EVENT_FABRIC_BUSY;
		if (ndlp) {
			memcpy(&fast_path_evt->un.fabric_evt.wwpn,
			       &ndlp->nlp_portname,
			       sizeof(struct lpfc_name));
			memcpy(&fast_path_evt->un.fabric_evt.wwnn,
			       &ndlp->nlp_nodename,
			       sizeof(struct lpfc_name));
		}
		fast_path_evt->vport = vport;
		fast_path_evt->work_evt.evt =
			LPFC_EVT_FASTPATH_MGMT_EVT;
		spin_lock_irqsave(&phba->hbalock, flags);
		list_add_tail(&fast_path_evt->work_evt.evt_listp,
			      &phba->work_list);
		spin_unlock_irqrestore(&phba->hbalock, flags);
		lpfc_worker_wake_up(phba);
		lpfc_printf_vlog(vport, KERN_WARNING, logit,
				 "9035 Fabric/Node busy FCP cmd x%x failed"
				 " <%d/%lld> "
				 "status: x%x result: x%x "
				 "sid: x%x did: x%x oxid: x%x "
				 "Data: x%x x%x x%x\n",
				 cmd->cmnd[0],
				 cmd->device ? cmd->device->id : 0xffff,
				 cmd->device ? cmd->device->lun : 0xffff,
				 lpfc_cmd->status, lpfc_cmd->result,
				 vport->fc_myDID,
				 (ndlp) ? ndlp->nlp_DID : 0,
				 lpfc_cmd->cur_iocbq.sli4_xritag,
				 wcqe->parameter,
				 wcqe->total_data_placed,
				 lpfc_cmd->cur_iocbq.iocb.ulpIoTag);
		break;
	case IOSTAT_REMOTE_STOP:
		if (ndlp) {
			/* This I/O was aborted by the target, we don't
			 * know the rxid and because we did not send the
			 * ABTS we cannot generate an RRQ.
			 */
			lpfc_set_rrq_active(phba, ndlp,
					    lpfc_cmd->cur_iocbq.sli4_lxritag,
					    0, 0);
		}
		fallthrough;
	case IOSTAT_LOCAL_REJECT:
		if (lpfc_cmd->result & IOERR_DRVR_MASK)
			lpfc_cmd->status = IOSTAT_DRIVER_REJECT;
		if (lpfc_cmd->result == IOERR_ELXSEC_KEY_UNWRAP_ERROR ||
		    lpfc_cmd->result ==
		    IOERR_ELXSEC_KEY_UNWRAP_COMPARE_ERROR ||
		    lpfc_cmd->result == IOERR_ELXSEC_CRYPTO_ERROR ||
		    lpfc_cmd->result ==
		    IOERR_ELXSEC_CRYPTO_COMPARE_ERROR) {
			cmd->result = DID_NO_CONNECT << 16;
			break;
		}
		if (lpfc_cmd->result == IOERR_INVALID_RPI ||
		    lpfc_cmd->result == IOERR_NO_RESOURCES ||
		    lpfc_cmd->result == IOERR_ABORT_REQUESTED ||
		    lpfc_cmd->result == IOERR_RPI_SUSPENDED ||
		    lpfc_cmd->result == IOERR_SLER_CMD_RCV_FAILURE) {
			cmd->result = DID_REQUEUE << 16;
			break;
		}
		if ((lpfc_cmd->result == IOERR_RX_DMA_FAILED ||
		     lpfc_cmd->result == IOERR_TX_DMA_FAILED) &&
		    status == CQE_STATUS_DI_ERROR) {
			if (scsi_get_prot_op(cmd) !=
			    SCSI_PROT_NORMAL) {
				/*
				 * This is a response for a BG enabled
				 * cmd. Parse BG error
				 */
				lpfc_sli4_parse_bg_err(phba, lpfc_cmd,
						       wcqe);
				break;
			}
			lpfc_printf_vlog(vport, KERN_WARNING, LOG_BG,
					 "9040 non-zero BGSTAT on unprotected "
					 "cmd\n");
		}
		lpfc_printf_vlog(vport, KERN_WARNING, logit,
				 "9036 Local Reject FCP cmd x%x failed"
				 " <%d/%lld> "
				 "status: x%x result: x%x "
				 "sid: x%x did: x%x oxid: x%x "
				 "Data: x%x x%x x%x\n",
				 cmd->cmnd[0],
				 cmd->device ? cmd->device->id : 0xffff,
				 cmd->device ? cmd->device->lun : 0xffff,
				 lpfc_cmd->status, lpfc_cmd->result,
				 vport->fc_myDID,
				 (ndlp) ? ndlp->nlp_DID : 0,
				 lpfc_cmd->cur_iocbq.sli4_xritag,
				 wcqe->parameter,
				 wcqe->total_data_placed,
				 lpfc_cmd->cur_iocbq.iocb.ulpIoTag);
		fallthrough;
	default:
		if (lpfc_cmd->status >= IOSTAT_CNT)
			lpfc_cmd->status = IOSTAT_DEFAULT;
		cmd->result = DID_ERROR << 16;
		lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_IOERR,
				 "9037 FCP Completion Error: xri %x "
				 "status x%x result x%x [x%x] "
				 "placed x%x\n",
				 lpfc_cmd->cur_iocbq.sli4_xritag,
				 lpfc_cmd->status, lpfc_cmd->result,
				 wcqe->parameter,
				 wcqe->total_data_placed);
	}
	if (cmd->result || lpfc_cmd->fcp_rsp->rspSnsLen) {
		u32 *lp = (u32 *)cmd->sense_buffer;

		lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
				 "9039 Iodone <%d/%llu> cmd x%px, error "
				 "x%x SNS x%x x%x LBA x%llx Data: x%x x%x\n",
				 cmd->device->id, cmd->device->lun, cmd,
				 cmd->result, *lp, *(lp + 3),
				 (u64)scsi_get_lba(cmd),
				 cmd->retries, scsi_get_resid(cmd));
	}

	lpfc_update_stats(vport, lpfc_cmd);

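	/*
	 * Slow-completion throttle: if this IO exceeded
	 * cfg_max_scsicmpl_time, clamp the target queue depth down toward
	 * the number of commands currently pending (never below
	 * LPFC_MIN_TGT_QDEPTH) to ease pressure on a struggling target.
	 */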
	if (vport->cfg_max_scsicmpl_time &&
	    time_after(jiffies, lpfc_cmd->start_time +
	    msecs_to_jiffies(vport->cfg_max_scsicmpl_time))) {
		spin_lock_irqsave(shost->host_lock, flags);
		if (ndlp) {
			if (ndlp->cmd_qdepth >
				atomic_read(&ndlp->cmd_pending) &&
			    (atomic_read(&ndlp->cmd_pending) >
				LPFC_MIN_TGT_QDEPTH) &&
			    (cmd->cmnd[0] == READ_10 ||
				cmd->cmnd[0] == WRITE_10))
				ndlp->cmd_qdepth =
					atomic_read(&ndlp->cmd_pending);

			ndlp->last_change_time = jiffies;
		}
		spin_unlock_irqrestore(shost->host_lock, flags);
	}
	lpfc_scsi_unprep_dma_buf(phba, lpfc_cmd);

#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
	if (lpfc_cmd->ts_cmd_start) {
		lpfc_cmd->ts_isr_cmpl = lpfc_cmd->cur_iocbq.isr_timestamp;
		lpfc_cmd->ts_data_io = ktime_get_ns();
		phba->ktime_last_cmd = lpfc_cmd->ts_data_io;
		lpfc_io_ktime(phba, lpfc_cmd);
	}
#endif
	if (likely(!wait_xb_clr))
		lpfc_cmd->pCmd = NULL;
	spin_unlock(&lpfc_cmd->buf_lock);

	/* Check if IO qualified for CMF */
	if (phba->cmf_active_mode != LPFC_CFG_OFF &&
	    cmd->sc_data_direction == DMA_FROM_DEVICE &&
	    (scsi_sg_count(cmd))) {
		/* Used when calculating average latency */
		lat = ktime_get_ns() - lpfc_cmd->rx_cmd_start;
		lpfc_update_cmf_cmpl(phba, lat, scsi_bufflen(cmd), shost);
	}

	if (wait_xb_clr)
		goto out;

	/* The sdev is not guaranteed to be valid post scsi_done upcall. */
	cmd->scsi_done(cmd);

	/*
	 * If there is an abort thread waiting for command completion
	 * wake up the thread.
	 */
	spin_lock(&lpfc_cmd->buf_lock);
	lpfc_cmd->cur_iocbq.iocb_flag &= ~LPFC_DRIVER_ABORTED;
	if (lpfc_cmd->waitq)
		wake_up(lpfc_cmd->waitq);
	spin_unlock(&lpfc_cmd->buf_lock);
out:
	lpfc_release_scsi_buf(phba, lpfc_cmd);
}

/**
 * lpfc_scsi_cmd_iocb_cmpl - Scsi cmnd IOCB completion routine
 * @phba: The Hba for which this call is being executed.
 * @pIocbIn: The command IOCBQ for the scsi cmnd.
 * @pIocbOut: The response IOCBQ for the scsi cmnd.
 *
 * This routine assigns scsi command result by looking into response IOCB
 * status field appropriately. This routine handles QUEUE FULL condition as
 * well by ramping down device queue depth.
 **/
static void
lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn,
			struct lpfc_iocbq *pIocbOut)
{
	struct lpfc_io_buf *lpfc_cmd =
		(struct lpfc_io_buf *) pIocbIn->context1;
	struct lpfc_vport *vport = pIocbIn->vport;
	struct lpfc_rport_data *rdata = lpfc_cmd->rdata;
	struct lpfc_nodelist *pnode = rdata->pnode;
	struct scsi_cmnd *cmd;
	unsigned long flags;
	struct lpfc_fast_path_event *fast_path_evt;
	struct Scsi_Host *shost;
	int idx;
	uint32_t logit = LOG_FCP;

	/* Guard against abort handler being called at same time */
	spin_lock(&lpfc_cmd->buf_lock);

	/* Sanity check on return of outstanding command */
	cmd = lpfc_cmd->pCmd;
	if (!cmd || !phba) {
		lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
				 "2621 IO completion: Not an active IO\n");
		spin_unlock(&lpfc_cmd->buf_lock);
		return;
	}

	idx = lpfc_cmd->cur_iocbq.hba_wqidx;
	if (phba->sli4_hba.hdwq)
		phba->sli4_hba.hdwq[idx].scsi_cstat.io_cmpls++;

#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
	if (unlikely(phba->hdwqstat_on & LPFC_CHECK_SCSI_IO))
		this_cpu_inc(phba->sli4_hba.c_stat->cmpl_io);
#endif
	shost = cmd->device->host;

	lpfc_cmd->result = (pIocbOut->iocb.un.ulpWord[4] & IOERR_PARAM_MASK);
	lpfc_cmd->status = pIocbOut->iocb.ulpStatus;
	/* pick up SLI4 exchange busy status from HBA */
	lpfc_cmd->flags &= ~LPFC_SBUF_XBUSY;
	if (pIocbOut->iocb_flag & LPFC_EXCHANGE_BUSY)
		lpfc_cmd->flags |= LPFC_SBUF_XBUSY;

#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
	if (lpfc_cmd->prot_data_type) {
		struct scsi_dif_tuple *src = NULL;

		src = (struct scsi_dif_tuple *)lpfc_cmd->prot_data_segment;
		/*
		 * Used to restore any changes to protection
		 * data for error injection.
		 */
		switch (lpfc_cmd->prot_data_type) {
		case LPFC_INJERR_REFTAG:
			src->ref_tag =
				lpfc_cmd->prot_data;
			break;
		case LPFC_INJERR_APPTAG:
			src->app_tag =
				(uint16_t)lpfc_cmd->prot_data;
			break;
		case LPFC_INJERR_GUARD:
			src->guard_tag =
				(uint16_t)lpfc_cmd->prot_data;
			break;
		default:
			break;
		}

		lpfc_cmd->prot_data = 0;
		lpfc_cmd->prot_data_type = 0;
		lpfc_cmd->prot_data_segment = NULL;
	}
#endif

	if (unlikely(lpfc_cmd->status)) {
		if (lpfc_cmd->status == IOSTAT_LOCAL_REJECT &&
		    (lpfc_cmd->result & IOERR_DRVR_MASK))
			lpfc_cmd->status = IOSTAT_DRIVER_REJECT;
		else if (lpfc_cmd->status >= IOSTAT_CNT)
			lpfc_cmd->status = IOSTAT_DEFAULT;
		if (lpfc_cmd->status == IOSTAT_FCP_RSP_ERROR &&
		    !lpfc_cmd->fcp_rsp->rspStatus3 &&
		    (lpfc_cmd->fcp_rsp->rspStatus2 & RESID_UNDER) &&
		    !(vport->cfg_log_verbose & LOG_FCP_UNDER))
			logit = 0;
		else
			logit = LOG_FCP | LOG_FCP_UNDER;
		lpfc_printf_vlog(vport, KERN_WARNING, logit,
				 "9030 FCP cmd x%x failed <%d/%lld> "
				 "status: x%x result: x%x "
				 "sid: x%x did: x%x oxid: x%x "
				 "Data: x%x x%x\n",
				 cmd->cmnd[0],
				 cmd->device ? cmd->device->id : 0xffff,
				 cmd->device ? cmd->device->lun : 0xffff,
				 lpfc_cmd->status, lpfc_cmd->result,
				 vport->fc_myDID,
				 (pnode) ? pnode->nlp_DID : 0,
				 phba->sli_rev == LPFC_SLI_REV4 ?
				 lpfc_cmd->cur_iocbq.sli4_xritag : 0xffff,
				 pIocbOut->iocb.ulpContext,
				 lpfc_cmd->cur_iocbq.iocb.ulpIoTag);

		switch (lpfc_cmd->status) {
		case IOSTAT_FCP_RSP_ERROR:
			/* Call FCP RSP handler to determine result */
			lpfc_handle_fcp_err(vport, lpfc_cmd,
					    pIocbOut->iocb.un.fcpi.fcpi_parm);
			break;
		case IOSTAT_NPORT_BSY:
		case IOSTAT_FABRIC_BSY:
			cmd->result = DID_TRANSPORT_DISRUPTED << 16;
			fast_path_evt = lpfc_alloc_fast_evt(phba);
			if (!fast_path_evt)
				break;
			fast_path_evt->un.fabric_evt.event_type =
				FC_REG_FABRIC_EVENT;
			fast_path_evt->un.fabric_evt.subcategory =
				(lpfc_cmd->status == IOSTAT_NPORT_BSY) ?
				LPFC_EVENT_PORT_BUSY : LPFC_EVENT_FABRIC_BUSY;
			if (pnode) {
				memcpy(&fast_path_evt->un.fabric_evt.wwpn,
				       &pnode->nlp_portname,
				       sizeof(struct lpfc_name));
				memcpy(&fast_path_evt->un.fabric_evt.wwnn,
				       &pnode->nlp_nodename,
				       sizeof(struct lpfc_name));
			}
			fast_path_evt->vport = vport;
			fast_path_evt->work_evt.evt =
				LPFC_EVT_FASTPATH_MGMT_EVT;
			spin_lock_irqsave(&phba->hbalock, flags);
			list_add_tail(&fast_path_evt->work_evt.evt_listp,
				      &phba->work_list);
			spin_unlock_irqrestore(&phba->hbalock, flags);
			lpfc_worker_wake_up(phba);
			break;
		case IOSTAT_LOCAL_REJECT:
		case IOSTAT_REMOTE_STOP:
			if (lpfc_cmd->result == IOERR_ELXSEC_KEY_UNWRAP_ERROR ||
			    lpfc_cmd->result ==
			    IOERR_ELXSEC_KEY_UNWRAP_COMPARE_ERROR ||
			    lpfc_cmd->result == IOERR_ELXSEC_CRYPTO_ERROR ||
			    lpfc_cmd->result ==
			    IOERR_ELXSEC_CRYPTO_COMPARE_ERROR) {
				cmd->result = DID_NO_CONNECT << 16;
				break;
			}
			if (lpfc_cmd->result == IOERR_INVALID_RPI ||
			    lpfc_cmd->result == IOERR_NO_RESOURCES ||
			    lpfc_cmd->result == IOERR_ABORT_REQUESTED ||
			    lpfc_cmd->result == IOERR_SLER_CMD_RCV_FAILURE) {
				cmd->result = DID_REQUEUE << 16;
				break;
			}
			if ((lpfc_cmd->result == IOERR_RX_DMA_FAILED ||
			     lpfc_cmd->result == IOERR_TX_DMA_FAILED) &&
			    pIocbOut->iocb.unsli3.sli3_bg.bgstat) {
				if (scsi_get_prot_op(cmd) !=
				    SCSI_PROT_NORMAL) {
					/*
					 * This is a response for a BG enabled
					 * cmd. Parse BG error
					 */
					lpfc_parse_bg_err(phba, lpfc_cmd,
							  pIocbOut);
					break;
				} else {
					lpfc_printf_vlog(vport, KERN_WARNING,
							 LOG_BG,
							 "9031 non-zero BGSTAT "
							 "on unprotected cmd\n");
				}
			}
			if ((lpfc_cmd->status == IOSTAT_REMOTE_STOP)
			    && (phba->sli_rev == LPFC_SLI_REV4)
			    && pnode) {
				/* This IO was aborted by the target, we don't
				 * know the rxid and because we did not send
				 * the ABTS we cannot generate an RRQ.
				 */
				lpfc_set_rrq_active(phba, pnode,
					lpfc_cmd->cur_iocbq.sli4_lxritag,
					0, 0);
			}
			fallthrough;
		default:
			cmd->result = DID_ERROR << 16;
			break;
		}

		if (!pnode || (pnode->nlp_state != NLP_STE_MAPPED_NODE))
			cmd->result = DID_TRANSPORT_DISRUPTED << 16 |
				      SAM_STAT_BUSY;
	} else
		cmd->result = DID_OK << 16;

	if (cmd->result || lpfc_cmd->fcp_rsp->rspSnsLen) {
		uint32_t *lp = (uint32_t *)cmd->sense_buffer;

		lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
				 "0710 Iodone <%d/%llu> cmd x%px, error "
				 "x%x SNS x%x x%x Data: x%x x%x\n",
				 cmd->device->id, cmd->device->lun, cmd,
				 cmd->result, *lp, *(lp + 3), cmd->retries,
				 scsi_get_resid(cmd));
	}

	lpfc_update_stats(vport, lpfc_cmd);
	if (vport->cfg_max_scsicmpl_time &&
	    time_after(jiffies, lpfc_cmd->start_time +
		msecs_to_jiffies(vport->cfg_max_scsicmpl_time))) {
		spin_lock_irqsave(shost->host_lock, flags);
		if (pnode) {
			if (pnode->cmd_qdepth >
				atomic_read(&pnode->cmd_pending) &&
			    (atomic_read(&pnode->cmd_pending) >
				LPFC_MIN_TGT_QDEPTH) &&
			    ((cmd->cmnd[0] == READ_10) ||
				(cmd->cmnd[0] == WRITE_10)))
				pnode->cmd_qdepth =
					atomic_read(&pnode->cmd_pending);

			pnode->last_change_time = jiffies;
		}
		spin_unlock_irqrestore(shost->host_lock, flags);
	}
	lpfc_scsi_unprep_dma_buf(phba, lpfc_cmd);

	lpfc_cmd->pCmd = NULL;
	spin_unlock(&lpfc_cmd->buf_lock);

#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
	if (lpfc_cmd->ts_cmd_start) {
		lpfc_cmd->ts_isr_cmpl = pIocbIn->isr_timestamp;
		lpfc_cmd->ts_data_io = ktime_get_ns();
		phba->ktime_last_cmd = lpfc_cmd->ts_data_io;
		lpfc_io_ktime(phba, lpfc_cmd);
	}
#endif

	/* The sdev is not guaranteed to be valid post scsi_done upcall. */
	cmd->scsi_done(cmd);

	/*
	 * If there is an abort thread waiting for command completion
	 * wake up the thread.
	 */
	spin_lock(&lpfc_cmd->buf_lock);
	lpfc_cmd->cur_iocbq.iocb_flag &= ~LPFC_DRIVER_ABORTED;
	if (lpfc_cmd->waitq)
		wake_up(lpfc_cmd->waitq);
	spin_unlock(&lpfc_cmd->buf_lock);

	lpfc_release_scsi_buf(phba, lpfc_cmd);
}

8b2564ec 4790/**
da255e2e 4791 * lpfc_scsi_prep_cmnd_buf_s3 - SLI-3 IOCB init for the IO
0bb87e01 4792 * @vport: Pointer to vport object.
da255e2e
JS
4793 * @lpfc_cmd: The scsi buffer which is going to be prep'ed.
4794 * @tmo: timeout value for the IO
8b2564ec 4795 *
da255e2e
JS
4796 * Based on the data-direction of the command, initialize IOCB
4797 * in the I/O buffer. Fill in the IOCB fields which are independent
4798 * of the scsi buffer
9bad7671 4799 *
da255e2e 4800 * RETURNS 0 - SUCCESS,
9bad7671 4801 **/
da255e2e
JS
4802static int lpfc_scsi_prep_cmnd_buf_s3(struct lpfc_vport *vport,
4803 struct lpfc_io_buf *lpfc_cmd,
4804 uint8_t tmo)
dea3101e 4805{
da255e2e
JS
4806 IOCB_t *iocb_cmd = &lpfc_cmd->cur_iocbq.iocb;
4807 struct lpfc_iocbq *piocbq = &lpfc_cmd->cur_iocbq;
dea3101e
JB
4808 struct scsi_cmnd *scsi_cmnd = lpfc_cmd->pCmd;
4809 struct fcp_cmnd *fcp_cmnd = lpfc_cmd->fcp_cmnd;
da255e2e 4810 struct lpfc_nodelist *pnode = lpfc_cmd->ndlp;
dea3101e 4811 int datadir = scsi_cmnd->sc_data_direction;
da255e2e 4812 u32 fcpdl;
58da1ffb 4813
6acb3481 4814 piocbq->iocb.un.fcpi.fcpi_XRdy = 0;
027140ea 4815
dea3101e
JB
4816 /*
4817 * There are three possibilities here - use scatter-gather segment, use
4818 * the single mapping, or neither. Start the lpfc command prep by
4819 * bumping the bpl beyond the fcp_cmnd and fcp_rsp regions to the first
4820 * data bde entry.
4821 */
a0b4f78f 4822 if (scsi_sg_count(scsi_cmnd)) {
dea3101e
JB
4823 if (datadir == DMA_TO_DEVICE) {
4824 iocb_cmd->ulpCommand = CMD_FCP_IWRITE64_CR;
182ba753 4825 iocb_cmd->ulpPU = PARM_READ_CHECK;
3cb01c57
JS
4826 if (vport->cfg_first_burst_size &&
4827 (pnode->nlp_flag & NLP_FIRSTBURST)) {
da255e2e
JS
4828 u32 xrdy_len;
4829
98bbf5f7 4830 fcpdl = scsi_bufflen(scsi_cmnd);
da255e2e
JS
4831 xrdy_len = min(fcpdl,
4832 vport->cfg_first_burst_size);
4833 piocbq->iocb.un.fcpi.fcpi_XRdy = xrdy_len;
3cb01c57 4834 }
dea3101e 4835 fcp_cmnd->fcpCntl3 = WRITE_DATA;
dea3101e
JB
4836 } else {
4837 iocb_cmd->ulpCommand = CMD_FCP_IREAD64_CR;
4838 iocb_cmd->ulpPU = PARM_READ_CHECK;
dea3101e 4839 fcp_cmnd->fcpCntl3 = READ_DATA;
dea3101e
JB
4840 }
4841 } else {
4842 iocb_cmd->ulpCommand = CMD_FCP_ICMND64_CR;
4843 iocb_cmd->un.fcpi.fcpi_parm = 0;
4844 iocb_cmd->ulpPU = 0;
4845 fcp_cmnd->fcpCntl3 = 0;
dea3101e 4846 }
da255e2e 4847
dea3101e
JB
4848 /*
4849 * Finish initializing those IOCB fields that are independent
4850 * of the scsi_cmnd request_buffer
4851 */
4852 piocbq->iocb.ulpContext = pnode->nlp_rpi;
4853 if (pnode->nlp_fcp_info & NLP_FCP_2_DEVICE)
4854 piocbq->iocb.ulpFCP2Rcvy = 1;
09372820
JS
4855 else
4856 piocbq->iocb.ulpFCP2Rcvy = 0;
dea3101e
JB
4857
4858 piocbq->iocb.ulpClass = (pnode->nlp_fcp_info & 0x0f);
4859 piocbq->context1 = lpfc_cmd;
da255e2e
JS
4860 if (!piocbq->iocb_cmpl)
4861 piocbq->iocb_cmpl = lpfc_scsi_cmd_iocb_cmpl;
4862 piocbq->iocb.ulpTimeout = tmo;
2e0fef85 4863 piocbq->vport = vport;
da255e2e
JS
4864 return 0;
4865}
4866
4867/**
4868 * lpfc_scsi_prep_cmnd_buf_s4 - SLI-4 WQE init for the IO
0bb87e01 4869 * @vport: Pointer to vport object.
da255e2e
JS
4870 * @lpfc_cmd: The scsi buffer which is going to be prep'ed.
4871 * @tmo: timeout value for the IO
4872 *
 4873 * Based on the data direction of the command, copy the WQE template
 4874 * to the I/O buffer WQE. Fill in the WQE fields which are independent
 4875 * of the scsi buffer.
4876 *
 4877 * RETURNS 0 - SUCCESS
4878 **/
4879static int lpfc_scsi_prep_cmnd_buf_s4(struct lpfc_vport *vport,
4880 struct lpfc_io_buf *lpfc_cmd,
4881 uint8_t tmo)
4882{
4883 struct lpfc_hba *phba = vport->phba;
4884 struct scsi_cmnd *scsi_cmnd = lpfc_cmd->pCmd;
4885 struct fcp_cmnd *fcp_cmnd = lpfc_cmd->fcp_cmnd;
4886 struct lpfc_sli4_hdw_queue *hdwq = NULL;
4887 struct lpfc_iocbq *pwqeq = &lpfc_cmd->cur_iocbq;
4888 struct lpfc_nodelist *pnode = lpfc_cmd->ndlp;
4889 union lpfc_wqe128 *wqe = &pwqeq->wqe;
4890 u16 idx = lpfc_cmd->hdwq_no;
4891 int datadir = scsi_cmnd->sc_data_direction;
4892
4893 hdwq = &phba->sli4_hba.hdwq[idx];
4894
 4895 /* Initialize the full 128-byte WQE */
4896 memset(wqe, 0, sizeof(union lpfc_wqe128));
4897
4898 /*
4899 * There are three possibilities here - use scatter-gather segment, use
4900 * the single mapping, or neither.
4901 */
4902 if (scsi_sg_count(scsi_cmnd)) {
4903 if (datadir == DMA_TO_DEVICE) {
4904 /* From the iwrite template, initialize words 7 - 11 */
4905 memcpy(&wqe->words[7],
4906 &lpfc_iwrite_cmd_template.words[7],
4907 sizeof(uint32_t) * 5);
4908
4909 fcp_cmnd->fcpCntl3 = WRITE_DATA;
4910 if (hdwq)
4911 hdwq->scsi_cstat.output_requests++;
4912 } else {
4913 /* From the iread template, initialize words 7 - 11 */
4914 memcpy(&wqe->words[7],
4915 &lpfc_iread_cmd_template.words[7],
4916 sizeof(uint32_t) * 5);
4917
4918 /* Word 7 */
4919 bf_set(wqe_tmo, &wqe->fcp_iread.wqe_com, tmo);
4920
4921 fcp_cmnd->fcpCntl3 = READ_DATA;
4922 if (hdwq)
4923 hdwq->scsi_cstat.input_requests++;
02243836
JS
4924
4925 /* For a CMF Managed port, iod must be zero'ed */
4926 if (phba->cmf_active_mode == LPFC_CFG_MANAGED)
4927 bf_set(wqe_iod, &wqe->fcp_iread.wqe_com,
4928 LPFC_WQE_IOD_NONE);
da255e2e
JS
4929 }
4930 } else {
4931 /* From the icmnd template, initialize words 4 - 11 */
4932 memcpy(&wqe->words[4], &lpfc_icmnd_cmd_template.words[4],
4933 sizeof(uint32_t) * 8);
4934
4935 /* Word 7 */
4936 bf_set(wqe_tmo, &wqe->fcp_icmd.wqe_com, tmo);
4937
4938 fcp_cmnd->fcpCntl3 = 0;
4939 if (hdwq)
4940 hdwq->scsi_cstat.control_requests++;
4941 }
4942
4943 /*
4944 * Finish initializing those WQE fields that are independent
4945 * of the request_buffer
4946 */
4947
4948 /* Word 3 */
4949 bf_set(payload_offset_len, &wqe->fcp_icmd,
4950 sizeof(struct fcp_cmnd) + sizeof(struct fcp_rsp));
4951
4952 /* Word 6 */
4953 bf_set(wqe_ctxt_tag, &wqe->generic.wqe_com,
4954 phba->sli4_hba.rpi_ids[pnode->nlp_rpi]);
4955 bf_set(wqe_xri_tag, &wqe->generic.wqe_com, pwqeq->sli4_xritag);
4956
4957 /* Word 7*/
4958 if (pnode->nlp_fcp_info & NLP_FCP_2_DEVICE)
4959 bf_set(wqe_erp, &wqe->generic.wqe_com, 1);
4960
4961 bf_set(wqe_class, &wqe->generic.wqe_com,
4962 (pnode->nlp_fcp_info & 0x0f));
4963
4964 /* Word 8 */
4965 wqe->generic.wqe_com.abort_tag = pwqeq->iotag;
4966
4967 /* Word 9 */
4968 bf_set(wqe_reqtag, &wqe->generic.wqe_com, pwqeq->iotag);
4969
 4970 pwqeq->vport = vport;
4972 pwqeq->context1 = lpfc_cmd;
4973 pwqeq->hba_wqidx = lpfc_cmd->hdwq_no;
96e209be 4974 pwqeq->wqe_cmpl = lpfc_fcp_io_cmd_wqe_cmpl;
da255e2e
JS
4975
4976 return 0;
4977}
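
/*
 * Illustrative userspace sketch of the template-copy idiom above: the
 * 128-byte WQE is zeroed, then a run of words is seeded from a
 * per-opcode template so only the per-I/O fields need setting
 * afterwards. The sizes and template contents below are assumptions
 * for illustration only.
 */
#include <stdint.h>
#include <string.h>

#define EX_WQE_WORDS 32			/* 128-byte WQE = 32 x 32-bit words */

static const uint32_t ex_iread_template[EX_WQE_WORDS] = { 0 };

static void example_seed_wqe(uint32_t *wqe)
{
	memset(wqe, 0, EX_WQE_WORDS * sizeof(uint32_t));
	/* mirrors: memcpy(&wqe->words[7], &template.words[7], 5 * 4) */
	memcpy(&wqe[7], &ex_iread_template[7], sizeof(uint32_t) * 5);
}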
4978
4979/**
 4980 * lpfc_scsi_prep_cmnd - Wrapper func to convert a scsi cmnd to an FCP info unit
4981 * @vport: The virtual port for which this call is being executed.
 4982 * @lpfc_cmd: The scsi buffer holding the command to be sent.
4983 * @pnode: Pointer to lpfc_nodelist.
4984 *
 4985 * This routine initializes the fcp_cmnd and iocb data structures from the
 4986 * scsi command and invokes the per-interface buffer prep routine.
4987 **/
4988static int
4989lpfc_scsi_prep_cmnd(struct lpfc_vport *vport, struct lpfc_io_buf *lpfc_cmd,
4990 struct lpfc_nodelist *pnode)
4991{
4992 struct scsi_cmnd *scsi_cmnd = lpfc_cmd->pCmd;
4993 struct fcp_cmnd *fcp_cmnd = lpfc_cmd->fcp_cmnd;
4994 u8 *ptr;
4995
4996 if (!pnode)
4997 return 0;
4998
4999 lpfc_cmd->fcp_rsp->rspSnsLen = 0;
5000 /* clear task management bits */
5001 lpfc_cmd->fcp_cmnd->fcpCntl2 = 0;
5002
5003 int_to_scsilun(lpfc_cmd->pCmd->device->lun,
5004 &lpfc_cmd->fcp_cmnd->fcp_lun);
5005
5006 ptr = &fcp_cmnd->fcpCdb[0];
5007 memcpy(ptr, scsi_cmnd->cmnd, scsi_cmnd->cmd_len);
5008 if (scsi_cmnd->cmd_len < LPFC_FCP_CDB_LEN) {
5009 ptr += scsi_cmnd->cmd_len;
5010 memset(ptr, 0, (LPFC_FCP_CDB_LEN - scsi_cmnd->cmd_len));
5011 }
5012
5013 fcp_cmnd->fcpCntl1 = SIMPLE_Q;
5014
5015 lpfc_scsi_prep_cmnd_buf(vport, lpfc_cmd, lpfc_cmd->timeout);
5016
5017 return 0;
dea3101e
JB
5018}
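
/*
 * Illustrative userspace sketch of the CDB handling in
 * lpfc_scsi_prep_cmnd() above: copy cmd_len bytes into the fixed-size
 * FCP CDB field and zero-fill the remainder. The 16-byte length is an
 * assumption standing in for LPFC_FCP_CDB_LEN.
 */
#include <string.h>

#define EX_FCP_CDB_LEN 16	/* assumed to match LPFC_FCP_CDB_LEN */

static void example_copy_cdb(unsigned char *fcp_cdb,
			     const unsigned char *cmnd, size_t cmd_len)
{
	memcpy(fcp_cdb, cmnd, cmd_len);
	if (cmd_len < EX_FCP_CDB_LEN)
		memset(fcp_cdb + cmd_len, 0, EX_FCP_CDB_LEN - cmd_len);
}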
5019
da0436e9 5020/**
6d368e53 5021 * lpfc_scsi_prep_task_mgmt_cmd - Convert SLI3 scsi TM cmd to FCP info unit
9bad7671 5022 * @vport: The virtual port for which this call is being executed.
c490850a 5023 * @lpfc_cmd: Pointer to lpfc_io_buf data structure.
9bad7671
JS
5024 * @lun: Logical unit number.
5025 * @task_mgmt_cmd: SCSI task management command.
5026 *
3772a991
JS
5027 * This routine creates FCP information unit corresponding to @task_mgmt_cmd
5028 * for device with SLI-3 interface spec.
9bad7671
JS
5029 *
5030 * Return codes:
5031 * 0 - Error
5032 * 1 - Success
5033 **/
dea3101e 5034static int
f1126688 5035lpfc_scsi_prep_task_mgmt_cmd(struct lpfc_vport *vport,
c490850a 5036 struct lpfc_io_buf *lpfc_cmd,
9cb78c16 5037 uint64_t lun,
dea3101e
JB
5038 uint8_t task_mgmt_cmd)
5039{
dea3101e
JB
5040 struct lpfc_iocbq *piocbq;
5041 IOCB_t *piocb;
5042 struct fcp_cmnd *fcp_cmnd;
0b18ac42 5043 struct lpfc_rport_data *rdata = lpfc_cmd->rdata;
dea3101e
JB
5044 struct lpfc_nodelist *ndlp = rdata->pnode;
5045
307e3380 5046 if (!ndlp || ndlp->nlp_state != NLP_STE_MAPPED_NODE)
dea3101e 5047 return 0;
dea3101e 5048
dea3101e 5049 piocbq = &(lpfc_cmd->cur_iocbq);
2e0fef85
JS
5050 piocbq->vport = vport;
5051
dea3101e
JB
5052 piocb = &piocbq->iocb;
5053
5054 fcp_cmnd = lpfc_cmd->fcp_cmnd;
34b02dcd
JS
5055 /* Clear out any old data in the FCP command area */
5056 memset(fcp_cmnd, 0, sizeof(struct fcp_cmnd));
5057 int_to_scsilun(lun, &fcp_cmnd->fcp_lun);
dea3101e 5058 fcp_cmnd->fcpCntl2 = task_mgmt_cmd;
e2a0a9d6
JS
5059 if (vport->phba->sli_rev == 3 &&
5060 !(vport->phba->sli3_options & LPFC_SLI3_BG_ENABLED))
34b02dcd 5061 lpfc_fcpcmd_to_iocb(piocb->unsli3.fcp_ext.icd, fcp_cmnd);
dea3101e 5062 piocb->ulpCommand = CMD_FCP_ICMND64_CR;
dea3101e 5063 piocb->ulpContext = ndlp->nlp_rpi;
6d368e53
JS
5064 if (vport->phba->sli_rev == LPFC_SLI_REV4) {
5065 piocb->ulpContext =
5066 vport->phba->sli4_hba.rpi_ids[ndlp->nlp_rpi];
5067 }
53151bbb 5068 piocb->ulpFCP2Rcvy = (ndlp->nlp_fcp_info & NLP_FCP_2_DEVICE) ? 1 : 0;
dea3101e 5069 piocb->ulpClass = (ndlp->nlp_fcp_info & 0x0f);
f9226c20
JS
5070 piocb->ulpPU = 0;
5071 piocb->un.fcpi.fcpi_parm = 0;
dea3101e
JB
5072
5073 /* ulpTimeout is only one byte */
5074 if (lpfc_cmd->timeout > 0xff) {
5075 /*
5076 * Do not timeout the command at the firmware level.
5077 * The driver will provide the timeout mechanism.
5078 */
5079 piocb->ulpTimeout = 0;
f1126688 5080 } else
dea3101e 5081 piocb->ulpTimeout = lpfc_cmd->timeout;
da0436e9 5082
f1126688
JS
5083 if (vport->phba->sli_rev == LPFC_SLI_REV4)
5084 lpfc_sli4_set_rsp_sgl_last(vport->phba, lpfc_cmd);
3772a991 5085
f1126688 5086 return 1;
3772a991
JS
5087}
5088
5089/**
25985edc 5090 * lpfc_scsi_api_table_setup - Set up scsi api function jump table
3772a991
JS
5091 * @phba: The hba struct for which this call is being executed.
5092 * @dev_grp: The HBA PCI-Device group number.
5093 *
5094 * This routine sets up the SCSI interface API function jump table in @phba
5095 * struct.
5096 * Returns: 0 - success, -ENODEV - failure.
5097 **/
5098int
5099lpfc_scsi_api_table_setup(struct lpfc_hba *phba, uint8_t dev_grp)
5100{
5101
f1126688 5102 phba->lpfc_scsi_unprep_dma_buf = lpfc_scsi_unprep_dma_buf;
f1126688 5103
3772a991
JS
5104 switch (dev_grp) {
5105 case LPFC_PCI_DEV_LP:
3772a991 5106 phba->lpfc_scsi_prep_dma_buf = lpfc_scsi_prep_dma_buf_s3;
acd6859b 5107 phba->lpfc_bg_scsi_prep_dma_buf = lpfc_bg_scsi_prep_dma_buf_s3;
3772a991 5108 phba->lpfc_release_scsi_buf = lpfc_release_scsi_buf_s3;
19ca7609 5109 phba->lpfc_get_scsi_buf = lpfc_get_scsi_buf_s3;
da255e2e 5110 phba->lpfc_scsi_prep_cmnd_buf = lpfc_scsi_prep_cmnd_buf_s3;
3772a991 5111 break;
da0436e9 5112 case LPFC_PCI_DEV_OC:
da0436e9 5113 phba->lpfc_scsi_prep_dma_buf = lpfc_scsi_prep_dma_buf_s4;
acd6859b 5114 phba->lpfc_bg_scsi_prep_dma_buf = lpfc_bg_scsi_prep_dma_buf_s4;
da0436e9 5115 phba->lpfc_release_scsi_buf = lpfc_release_scsi_buf_s4;
19ca7609 5116 phba->lpfc_get_scsi_buf = lpfc_get_scsi_buf_s4;
da255e2e 5117 phba->lpfc_scsi_prep_cmnd_buf = lpfc_scsi_prep_cmnd_buf_s4;
da0436e9 5118 break;
3772a991 5119 default:
2165bfec 5120 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
3772a991
JS
5121 "1418 Invalid HBA PCI-device group: 0x%x\n",
5122 dev_grp);
5123 return -ENODEV;
3772a991 5124 }
3772a991 5125 phba->lpfc_rampdown_queue_depth = lpfc_rampdown_queue_depth;
84d1b006 5126 phba->lpfc_scsi_cmd_iocb_cmpl = lpfc_scsi_cmd_iocb_cmpl;
3772a991
JS
5127 return 0;
5128}
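
/*
 * Illustrative userspace sketch of the jump-table pattern above: the
 * PCI device group selects between SLI-3 and SLI-4 implementations once
 * at setup, so the I/O hot path calls through a pointer without
 * branching. All names and values below are stand-ins, not driver API.
 */
#include <stdio.h>

struct ex_ops {
	int (*prep_cmnd_buf)(int tmo);
};

static int ex_prep_s3(int tmo) { printf("SLI-3 prep, tmo=%d\n", tmo); return 0; }
static int ex_prep_s4(int tmo) { printf("SLI-4 prep, tmo=%d\n", tmo); return 0; }

static int example_api_setup(struct ex_ops *ops, int dev_grp)
{
	switch (dev_grp) {
	case 0:				/* stands in for LPFC_PCI_DEV_LP */
		ops->prep_cmnd_buf = ex_prep_s3;
		break;
	case 1:				/* stands in for LPFC_PCI_DEV_OC */
		ops->prep_cmnd_buf = ex_prep_s4;
		break;
	default:
		return -1;		/* unknown group, mirrors -ENODEV */
	}
	return 0;
}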
5129
9bad7671 5130/**
0bb87e01 5131 * lpfc_tskmgmt_def_cmpl - IOCB completion routine for task management command
9bad7671
JS
5132 * @phba: The Hba for which this call is being executed.
5133 * @cmdiocbq: Pointer to lpfc_iocbq data structure.
5134 * @rspiocbq: Pointer to lpfc_iocbq data structure.
5135 *
 5136 * This routine is the IOCB completion routine for the device reset and
 5137 * target reset routines. It releases the scsi buffer associated with lpfc_cmd.
5138 **/
7054a606
JS
5139static void
5140lpfc_tskmgmt_def_cmpl(struct lpfc_hba *phba,
5141 struct lpfc_iocbq *cmdiocbq,
5142 struct lpfc_iocbq *rspiocbq)
5143{
c490850a
JS
5144 struct lpfc_io_buf *lpfc_cmd =
5145 (struct lpfc_io_buf *) cmdiocbq->context1;
7054a606
JS
5146 if (lpfc_cmd)
5147 lpfc_release_scsi_buf(phba, lpfc_cmd);
5148 return;
5149}
5150
5021267a
JS
5151/**
5152 * lpfc_check_pci_resettable - Walks list of devices on pci_dev's bus to check
5153 * if issuing a pci_bus_reset is possibly unsafe
5154 * @phba: lpfc_hba pointer.
5155 *
5156 * Description:
 5157 * Walks the bus_list to ensure that only PCI devices with an Emulex
 5158 * vendor id and device ids that support hot reset are present, and that
 5159 * function 0 occurs only once.
5160 *
5161 * Returns:
5162 * -EBADSLT, detected invalid device
5163 * 0, successful
5164 */
5165int
372c187b 5166lpfc_check_pci_resettable(struct lpfc_hba *phba)
5021267a
JS
5167{
5168 const struct pci_dev *pdev = phba->pcidev;
5169 struct pci_dev *ptr = NULL;
5170 u8 counter = 0;
5171
5172 /* Walk the list of devices on the pci_dev's bus */
5173 list_for_each_entry(ptr, &pdev->bus->devices, bus_list) {
5174 /* Check for Emulex Vendor ID */
5175 if (ptr->vendor != PCI_VENDOR_ID_EMULEX) {
5176 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
5177 "8346 Non-Emulex vendor found: "
5178 "0x%04x\n", ptr->vendor);
5179 return -EBADSLT;
5180 }
5181
5182 /* Check for valid Emulex Device ID */
f6c5e6c4
JS
5183 if (phba->sli_rev != LPFC_SLI_REV4 ||
5184 phba->hba_flag & HBA_FCOE_MODE) {
5021267a 5185 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
a94a40eb 5186 "8347 Incapable PCI reset device: "
5021267a
JS
5187 "0x%04x\n", ptr->device);
5188 return -EBADSLT;
5189 }
5190
5191 /* Check for only one function 0 ID to ensure only one HBA on
5192 * secondary bus
5193 */
5194 if (ptr->devfn == 0) {
5195 if (++counter > 1) {
5196 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
5197 "8348 More than one device on "
5198 "secondary bus found\n");
5199 return -EBADSLT;
5200 }
5201 }
5202 }
5203
5204 return 0;
5205}
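
/*
 * Illustrative userspace sketch of the resettable check above: walk a
 * list of (vendor, devfn) pairs and fail when a non-Emulex vendor is
 * seen or function 0 occurs more than once. 0x10df is the Emulex PCI
 * vendor id; the struct is a stand-in, not the kernel's pci_dev.
 */
#include <stddef.h>

#define EX_VENDOR_EMULEX 0x10df		/* PCI_VENDOR_ID_EMULEX */

struct ex_pci_dev {
	unsigned short vendor;
	unsigned char devfn;
};

static int example_check_resettable(const struct ex_pci_dev *devs, size_t n)
{
	unsigned int fn0 = 0;

	for (size_t i = 0; i < n; i++) {
		if (devs[i].vendor != EX_VENDOR_EMULEX)
			return -1;	/* mirrors -EBADSLT */
		if (devs[i].devfn == 0 && ++fn0 > 1)
			return -1;	/* more than one function 0 */
	}
	return 0;
}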
5206
9bad7671 5207/**
3621a710 5208 * lpfc_info - Info entry point of scsi_host_template data structure
9bad7671
JS
5209 * @host: The scsi host for which this call is being executed.
5210 *
5211 * This routine provides module information about hba.
5212 *
 5213 * Return code:
5214 * Pointer to char - Success.
5215 **/
dea3101e
JB
5216const char *
5217lpfc_info(struct Scsi_Host *host)
5218{
2e0fef85
JS
5219 struct lpfc_vport *vport = (struct lpfc_vport *) host->hostdata;
5220 struct lpfc_hba *phba = vport->phba;
5021267a
JS
5221 int link_speed = 0;
5222 static char lpfcinfobuf[384];
5223 char tmp[384] = {0};
dea3101e 5224
5021267a 5225 memset(lpfcinfobuf, 0, sizeof(lpfcinfobuf));
dea3101e 5226 if (phba && phba->pcidev){
5021267a
JS
5227 /* Model Description */
 5228 scnprintf(tmp, sizeof(tmp), "%s", phba->ModelDesc);
5229 if (strlcat(lpfcinfobuf, tmp, sizeof(lpfcinfobuf)) >=
5230 sizeof(lpfcinfobuf))
5231 goto buffer_done;
5232
5233 /* PCI Info */
5234 scnprintf(tmp, sizeof(tmp),
5235 " on PCI bus %02x device %02x irq %d",
5236 phba->pcidev->bus->number, phba->pcidev->devfn,
5237 phba->pcidev->irq);
5238 if (strlcat(lpfcinfobuf, tmp, sizeof(lpfcinfobuf)) >=
5239 sizeof(lpfcinfobuf))
5240 goto buffer_done;
5241
5242 /* Port Number */
dea3101e 5243 if (phba->Port[0]) {
5021267a
JS
5244 scnprintf(tmp, sizeof(tmp), " port %s", phba->Port);
5245 if (strlcat(lpfcinfobuf, tmp, sizeof(lpfcinfobuf)) >=
5246 sizeof(lpfcinfobuf))
5247 goto buffer_done;
dea3101e 5248 }
5021267a
JS
5249
5250 /* Link Speed */
a085e87c 5251 link_speed = lpfc_sli_port_speed_get(phba);
5021267a
JS
5252 if (link_speed != 0) {
5253 scnprintf(tmp, sizeof(tmp),
5254 " Logical Link Speed: %d Mbps", link_speed);
5255 if (strlcat(lpfcinfobuf, tmp, sizeof(lpfcinfobuf)) >=
5256 sizeof(lpfcinfobuf))
5257 goto buffer_done;
5258 }
5259
5260 /* PCI resettable */
5261 if (!lpfc_check_pci_resettable(phba)) {
5262 scnprintf(tmp, sizeof(tmp), " PCI resettable");
5263 strlcat(lpfcinfobuf, tmp, sizeof(lpfcinfobuf));
5264 }
dea3101e 5265 }
5021267a
JS
5266
5267buffer_done:
dea3101e
JB
5268 return lpfcinfobuf;
5269}
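
/*
 * Illustrative userspace sketch of the bounded string assembly in
 * lpfc_info() above: build each fragment, append it, and stop the
 * moment the buffer would truncate. glibc has no strlcat, so this
 * sketch tracks the write offset with snprintf return values instead.
 */
#include <stdio.h>

static void example_build_info(char *buf, size_t sz)
{
	size_t off = 0;
	int n;

	n = snprintf(buf, sz, "Model XYZ");	/* stands in for ModelDesc */
	if (n < 0 || (size_t)n >= sz)
		return;				/* truncated: stop appending */
	off += (size_t)n;

	n = snprintf(buf + off, sz - off, " on PCI bus %02x", 0x03);
	if (n < 0 || (size_t)n >= sz - off)
		return;				/* mirrors goto buffer_done */
}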
5270
9bad7671 5271/**
0bb87e01 5272 * lpfc_poll_rearm_timer - Routine to modify fcp_poll timer of hba
9bad7671
JS
5273 * @phba: The Hba for which this call is being executed.
5274 *
 5275 * This routine rearms the fcp_poll_timer of @phba using cfg_poll_tmo.
5276 * The default value of cfg_poll_tmo is 10 milliseconds.
5277 **/
875fbdfe
JSEC
5278static __inline__ void lpfc_poll_rearm_timer(struct lpfc_hba * phba)
5279{
5280 unsigned long poll_tmo_expires =
5281 (jiffies + msecs_to_jiffies(phba->cfg_poll_tmo));
5282
895427bd 5283 if (!list_empty(&phba->sli.sli3_ring[LPFC_FCP_RING].txcmplq))
875fbdfe
JSEC
5284 mod_timer(&phba->fcp_poll_timer,
5285 poll_tmo_expires);
5286}
5287
9bad7671 5288/**
3621a710 5289 * lpfc_poll_start_timer - Routine to start fcp_poll_timer of HBA
9bad7671
JS
5290 * @phba: The Hba for which this call is being executed.
5291 *
5292 * This routine starts the fcp_poll_timer of @phba.
5293 **/
875fbdfe
JSEC
5294void lpfc_poll_start_timer(struct lpfc_hba * phba)
5295{
5296 lpfc_poll_rearm_timer(phba);
5297}
5298
9bad7671 5299/**
3621a710 5300 * lpfc_poll_timeout - Restart polling timer
eceee00e 5301 * @t: Timer construct where lpfc_hba data structure pointer is obtained.
9bad7671
JS
5302 *
 5303 * This routine restarts the fcp_poll timer when FCP ring polling is enabled
 5304 * and the FCP ring interrupt is disabled.
5305 **/
f22eb4d3 5306void lpfc_poll_timeout(struct timer_list *t)
875fbdfe 5307{
f22eb4d3 5308 struct lpfc_hba *phba = from_timer(phba, t, fcp_poll_timer);
875fbdfe
JSEC
5309
5310 if (phba->cfg_poll & ENABLE_FCP_RING_POLLING) {
45ed1190 5311 lpfc_sli_handle_fast_ring_event(phba,
895427bd 5312 &phba->sli.sli3_ring[LPFC_FCP_RING], HA_R0RE_REQ);
45ed1190 5313
875fbdfe
JSEC
5314 if (phba->cfg_poll & DISABLE_FCP_RING_INT)
5315 lpfc_poll_rearm_timer(phba);
5316 }
875fbdfe
JSEC
5317}
5318
dc50715e
GS
5319/*
5320 * lpfc_get_vmid_from_hashtable - search the UUID in the hash table
5321 * @vport: The virtual port for which this call is being executed.
5322 * @hash: calculated hash value
5323 * @buf: uuid associated with the VE
5324 * Return the VMID entry associated with the UUID
5325 * Make sure to acquire the appropriate lock before invoking this routine.
5326 */
5327struct lpfc_vmid *lpfc_get_vmid_from_hashtable(struct lpfc_vport *vport,
5328 u32 hash, u8 *buf)
5329{
5330 struct lpfc_vmid *vmp;
5331
5332 hash_for_each_possible(vport->hash_table, vmp, hnode, hash) {
5333 if (memcmp(&vmp->host_vmid[0], buf, 16) == 0)
5334 return vmp;
5335 }
5336 return NULL;
5337}
5338
5339/*
5340 * lpfc_put_vmid_in_hashtable - put the VMID in the hash table
5341 * @vport: The virtual port for which this call is being executed.
 5342 * @hash: calculated hash value
5343 * @vmp: Pointer to a VMID entry representing a VM sending I/O
5344 *
5345 * This routine will insert the newly acquired VMID entity in the hash table.
5346 * Make sure to acquire the appropriate lock before invoking this routine.
5347 */
5348static void
5349lpfc_put_vmid_in_hashtable(struct lpfc_vport *vport, u32 hash,
5350 struct lpfc_vmid *vmp)
5351{
5352 hash_add(vport->hash_table, &vmp->hnode, hash);
5353}
5354
5355/*
5356 * lpfc_vmid_hash_fn - create a hash value of the UUID
5357 * @vmid: uuid associated with the VE
5358 * @len: length of the VMID string
5359 * Returns the calculated hash value
5360 */
5361int lpfc_vmid_hash_fn(const char *vmid, int len)
5362{
5363 int c;
5364 int hash = 0;
5365
5366 if (len == 0)
5367 return 0;
5368 while (len--) {
5369 c = *vmid++;
5370 if (c >= 'A' && c <= 'Z')
5371 c += 'a' - 'A';
5372
5373 hash = (hash + (c << LPFC_VMID_HASH_SHIFT) +
5374 (c >> LPFC_VMID_HASH_SHIFT)) * 19;
5375 }
5376
5377 return hash & LPFC_VMID_HASH_MASK;
5378}
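
/*
 * Self-contained userspace replica of lpfc_vmid_hash_fn() above. The
 * shift and mask below are assumed values standing in for
 * LPFC_VMID_HASH_SHIFT and LPFC_VMID_HASH_MASK; the driver's real
 * constants live in its headers.
 */
#include <stdio.h>
#include <string.h>

#define EX_HASH_SHIFT 6			/* assumed value */
#define EX_HASH_MASK 0xff		/* assumed value */

static int example_vmid_hash(const char *vmid, int len)
{
	int c, hash = 0;

	while (len--) {
		c = *vmid++;
		if (c >= 'A' && c <= 'Z')
			c += 'a' - 'A';	/* fold to lower case */
		hash = (hash + (c << EX_HASH_SHIFT) +
			(c >> EX_HASH_SHIFT)) * 19;
	}
	return hash & EX_HASH_MASK;
}

int main(void)
{
	const char *uuid = "example-uuid";

	printf("bucket=%d\n", example_vmid_hash(uuid, (int)strlen(uuid)));
	return 0;
}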
5379
5380/*
5381 * lpfc_vmid_update_entry - update the vmid entry in the hash table
5382 * @vport: The virtual port for which this call is being executed.
5383 * @cmd: address of scsi cmd descriptor
5384 * @vmp: Pointer to a VMID entry representing a VM sending I/O
5385 * @tag: VMID tag
5386 */
5387static void lpfc_vmid_update_entry(struct lpfc_vport *vport, struct scsi_cmnd
5388 *cmd, struct lpfc_vmid *vmp,
5389 union lpfc_vmid_io_tag *tag)
5390{
5391 u64 *lta;
5392
5393 if (vport->vmid_priority_tagging)
5394 tag->cs_ctl_vmid = vmp->un.cs_ctl_vmid;
5395 else
5396 tag->app_id = vmp->un.app_id;
5397
5398 if (cmd->sc_data_direction == DMA_TO_DEVICE)
5399 vmp->io_wr_cnt++;
5400 else
5401 vmp->io_rd_cnt++;
5402
5403 /* update the last access timestamp in the table */
5404 lta = per_cpu_ptr(vmp->last_io_time, raw_smp_processor_id());
5405 *lta = jiffies;
5406}
5407
5408static void lpfc_vmid_assign_cs_ctl(struct lpfc_vport *vport,
5409 struct lpfc_vmid *vmid)
5410{
5411 u32 hash;
5412 struct lpfc_vmid *pvmid;
5413
5414 if (vport->port_type == LPFC_PHYSICAL_PORT) {
5415 vmid->un.cs_ctl_vmid = lpfc_vmid_get_cs_ctl(vport);
5416 } else {
5417 hash = lpfc_vmid_hash_fn(vmid->host_vmid, vmid->vmid_len);
5418 pvmid =
5419 lpfc_get_vmid_from_hashtable(vport->phba->pport, hash,
5420 vmid->host_vmid);
5421 if (pvmid)
5422 vmid->un.cs_ctl_vmid = pvmid->un.cs_ctl_vmid;
5423 else
5424 vmid->un.cs_ctl_vmid = lpfc_vmid_get_cs_ctl(vport);
5425 }
5426}
5427
33c79741
GS
5428/*
5429 * lpfc_vmid_get_appid - get the VMID associated with the UUID
5430 * @vport: The virtual port for which this call is being executed.
5431 * @uuid: UUID associated with the VE
5432 * @cmd: address of scsi_cmd descriptor
5433 * @tag: VMID tag
5434 * Returns status of the function
5435 */
5436static int lpfc_vmid_get_appid(struct lpfc_vport *vport, char *uuid, struct
5437 scsi_cmnd * cmd, union lpfc_vmid_io_tag *tag)
5438{
5439 struct lpfc_vmid *vmp = NULL;
5440 int hash, len, rc, i;
5441
5442 /* check if QFPA is complete */
5443 if (lpfc_vmid_is_type_priority_tag(vport) && !(vport->vmid_flag &
5444 LPFC_VMID_QFPA_CMPL)) {
5445 vport->work_port_events |= WORKER_CHECK_VMID_ISSUE_QFPA;
5446 return -EAGAIN;
5447 }
5448
5449 /* search if the UUID has already been mapped to the VMID */
5450 len = strlen(uuid);
5451 hash = lpfc_vmid_hash_fn(uuid, len);
5452
5453 /* search for the VMID in the table */
5454 read_lock(&vport->vmid_lock);
5455 vmp = lpfc_get_vmid_from_hashtable(vport, hash, uuid);
5456
 5457 /* if found, check if it's already registered */
5458 if (vmp && vmp->flag & LPFC_VMID_REGISTERED) {
5459 read_unlock(&vport->vmid_lock);
5460 lpfc_vmid_update_entry(vport, cmd, vmp, tag);
5461 rc = 0;
5462 } else if (vmp && (vmp->flag & LPFC_VMID_REQ_REGISTER ||
5463 vmp->flag & LPFC_VMID_DE_REGISTER)) {
5464 /* else if register or dereg request has already been sent */
5465 /* Hence VMID tag will not be added for this I/O */
5466 read_unlock(&vport->vmid_lock);
5467 rc = -EBUSY;
5468 } else {
5469 /* The VMID was not found in the hashtable. At this point, */
5470 /* drop the read lock first before proceeding further */
5471 read_unlock(&vport->vmid_lock);
5472 /* start the process to obtain one as per the */
5473 /* type of the VMID indicated */
5474 write_lock(&vport->vmid_lock);
5475 vmp = lpfc_get_vmid_from_hashtable(vport, hash, uuid);
5476
5477 /* while the read lock was released, in case the entry was */
5478 /* added by other context or is in process of being added */
5479 if (vmp && vmp->flag & LPFC_VMID_REGISTERED) {
5480 lpfc_vmid_update_entry(vport, cmd, vmp, tag);
5481 write_unlock(&vport->vmid_lock);
5482 return 0;
5483 } else if (vmp && vmp->flag & LPFC_VMID_REQ_REGISTER) {
5484 write_unlock(&vport->vmid_lock);
5485 return -EBUSY;
5486 }
5487
5488 /* else search and allocate a free slot in the hash table */
5489 if (vport->cur_vmid_cnt < vport->max_vmid) {
5490 for (i = 0; i < vport->max_vmid; i++) {
5491 vmp = vport->vmid + i;
5492 if (vmp->flag == LPFC_VMID_SLOT_FREE)
5493 break;
5494 }
5495 if (i == vport->max_vmid)
5496 vmp = NULL;
5497 } else {
5498 vmp = NULL;
5499 }
5500
5501 if (!vmp) {
5502 write_unlock(&vport->vmid_lock);
5503 return -ENOMEM;
5504 }
5505
5506 /* Add the vmid and register */
5507 lpfc_put_vmid_in_hashtable(vport, hash, vmp);
5508 vmp->vmid_len = len;
5509 memcpy(vmp->host_vmid, uuid, vmp->vmid_len);
5510 vmp->io_rd_cnt = 0;
5511 vmp->io_wr_cnt = 0;
5512 vmp->flag = LPFC_VMID_SLOT_USED;
5513
5514 vmp->delete_inactive =
5515 vport->vmid_inactivity_timeout ? 1 : 0;
5516
5517 /* if type priority tag, get next available VMID */
5518 if (lpfc_vmid_is_type_priority_tag(vport))
5519 lpfc_vmid_assign_cs_ctl(vport, vmp);
5520
5521 /* allocate the per cpu variable for holding */
5522 /* the last access time stamp only if VMID is enabled */
5523 if (!vmp->last_io_time)
5524 vmp->last_io_time = __alloc_percpu(sizeof(u64),
5525 __alignof__(struct
5526 lpfc_vmid));
5527 if (!vmp->last_io_time) {
5528 hash_del(&vmp->hnode);
5529 vmp->flag = LPFC_VMID_SLOT_FREE;
5530 write_unlock(&vport->vmid_lock);
5531 return -EIO;
5532 }
5533
5534 write_unlock(&vport->vmid_lock);
5535
5536 /* complete transaction with switch */
5537 if (lpfc_vmid_is_type_priority_tag(vport))
5538 rc = lpfc_vmid_uvem(vport, vmp, true);
5539 else
5540 rc = lpfc_vmid_cmd(vport, SLI_CTAS_RAPP_IDENT, vmp);
5541 if (!rc) {
5542 write_lock(&vport->vmid_lock);
5543 vport->cur_vmid_cnt++;
5544 vmp->flag |= LPFC_VMID_REQ_REGISTER;
5545 write_unlock(&vport->vmid_lock);
5546 } else {
5547 write_lock(&vport->vmid_lock);
5548 hash_del(&vmp->hnode);
5549 vmp->flag = LPFC_VMID_SLOT_FREE;
5550 free_percpu(vmp->last_io_time);
5551 write_unlock(&vport->vmid_lock);
5552 return -EIO;
5553 }
5554
5555 /* finally, enable the idle timer once */
5556 if (!(vport->phba->pport->vmid_flag & LPFC_VMID_TIMER_ENBLD)) {
5557 mod_timer(&vport->phba->inactive_vmid_poll,
5558 jiffies +
5559 msecs_to_jiffies(1000 * LPFC_VMID_TIMER));
5560 vport->phba->pport->vmid_flag |= LPFC_VMID_TIMER_ENBLD;
5561 }
5562 }
5563 return rc;
5564}
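
/*
 * Illustrative pthread sketch of the lock-upgrade dance in
 * lpfc_vmid_get_appid() above: take the read lock for the common
 * lookup; only when the entry is missing, drop it, take the write lock,
 * and re-check before inserting, because another thread may have raced
 * in between. The table and types are stand-ins, not driver structures.
 */
#include <pthread.h>
#include <string.h>

struct ex_entry {
	char key[32];
	int used;
};

static pthread_rwlock_t ex_lock = PTHREAD_RWLOCK_INITIALIZER;
static struct ex_entry ex_table[8];

static struct ex_entry *ex_lookup(const char *key)
{
	for (size_t i = 0; i < 8; i++)
		if (ex_table[i].used && !strcmp(ex_table[i].key, key))
			return &ex_table[i];
	return NULL;
}

static struct ex_entry *ex_insert(const char *key)
{
	for (size_t i = 0; i < 8; i++) {
		if (!ex_table[i].used) {
			strncpy(ex_table[i].key, key,
				sizeof(ex_table[i].key) - 1);
			ex_table[i].used = 1;
			return &ex_table[i];
		}
	}
	return NULL;			/* table full, mirrors -ENOMEM */
}

static struct ex_entry *example_get_or_add(const char *key)
{
	struct ex_entry *e;

	pthread_rwlock_rdlock(&ex_lock);
	e = ex_lookup(key);
	pthread_rwlock_unlock(&ex_lock);
	if (e)
		return e;

	pthread_rwlock_wrlock(&ex_lock);
	e = ex_lookup(key);		/* re-check after reacquiring */
	if (!e)
		e = ex_insert(key);
	pthread_rwlock_unlock(&ex_lock);
	return e;
}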
5565
5566/*
5567 * lpfc_is_command_vm_io - get the UUID from blk cgroup
5568 * @cmd: Pointer to scsi_cmnd data structure
5569 * Returns UUID if present, otherwise NULL
5570 */
5571static char *lpfc_is_command_vm_io(struct scsi_cmnd *cmd)
5572{
4221c8a4 5573 struct bio *bio = scsi_cmd_to_rq(cmd)->bio;
33c79741 5574
4221c8a4 5575 return bio ? blkcg_get_fc_appid(bio) : NULL;
33c79741
GS
5576}
5577
9bad7671 5578/**
3621a710 5579 * lpfc_queuecommand - scsi_host_template queuecommand entry point
eceee00e 5580 * @shost: kernel scsi host pointer.
9bad7671 5581 * @cmnd: Pointer to scsi_cmnd data structure.
9bad7671
JS
5582 *
 5583 * The driver registers this routine with the scsi midlayer to submit a
 5584 * @cmnd for processing. It prepares an IOCB from the scsi command and hands
 5585 * it to the firmware. The @done callback is invoked when processing completes.
5586 *
5587 * Return value :
5588 * 0 - Success
5589 * SCSI_MLQUEUE_HOST_BUSY - Block all devices served by this host temporarily.
5590 **/
dea3101e 5591static int
b9a7c631 5592lpfc_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *cmnd)
dea3101e 5593{
2e0fef85
JS
5594 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
5595 struct lpfc_hba *phba = vport->phba;
1ba981fd 5596 struct lpfc_rport_data *rdata;
1c6f4ef5 5597 struct lpfc_nodelist *ndlp;
c490850a 5598 struct lpfc_io_buf *lpfc_cmd;
19a7b4ae 5599 struct fc_rport *rport = starget_to_rport(scsi_target(cmnd->device));
4c47efc1 5600 int err, idx;
66b4d63b 5601 u8 *uuid = NULL;
37e38409 5602 uint64_t start;
2fcbc569 5603
02243836 5604 start = ktime_get_ns();
1ba981fd 5605 rdata = lpfc_rport_data_from_scsi_device(cmnd->device);
b0e83012
JS
5606
5607 /* sanity check on references */
5608 if (unlikely(!rdata) || unlikely(!rport))
5609 goto out_fail_command;
5610
19a7b4ae
JSEC
5611 err = fc_remote_port_chkready(rport);
5612 if (err) {
5613 cmnd->result = err;
dea3101e
JB
5614 goto out_fail_command;
5615 }
1c6f4ef5 5616 ndlp = rdata->pnode;
dea3101e 5617
bf08611b 5618 if ((scsi_get_prot_op(cmnd) != SCSI_PROT_NORMAL) &&
acd6859b 5619 (!(phba->sli3_options & LPFC_SLI3_BG_ENABLED))) {
e2a0a9d6 5620
372c187b 5621 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
6a9c52cf
JS
5622 "9058 BLKGRD: ERROR: rcvd protected cmd:%02x"
5623 " op:%02x str=%s without registering for"
5624 " BlockGuard - Rejecting command\n",
e2a0a9d6
JS
5625 cmnd->cmnd[0], scsi_get_prot_op(cmnd),
5626 dif_op_str[scsi_get_prot_op(cmnd)]);
5627 goto out_fail_command;
5628 }
5629
dea3101e 5630 /*
19a7b4ae
JSEC
5631 * Catch race where our node has transitioned, but the
5632 * transport is still transitioning.
dea3101e 5633 */
307e3380 5634 if (!ndlp)
02243836
JS
5635 goto out_tgt_busy1;
5636
5637 /* Check if IO qualifies for CMF */
5638 if (phba->cmf_active_mode != LPFC_CFG_OFF &&
5639 cmnd->sc_data_direction == DMA_FROM_DEVICE &&
5640 (scsi_sg_count(cmnd))) {
5641 /* Latency start time saved in rx_cmd_start later in routine */
5642 err = lpfc_update_cmf_cmd(phba, scsi_bufflen(cmnd));
5643 if (err)
5644 goto out_tgt_busy1;
5645 }
5646
2a5b7d62
JS
5647 if (lpfc_ndlp_check_qdepth(phba, ndlp)) {
5648 if (atomic_read(&ndlp->cmd_pending) >= ndlp->cmd_qdepth) {
5649 lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP_ERROR,
5650 "3377 Target Queue Full, scsi Id:%d "
5651 "Qdepth:%d Pending command:%d"
5652 " WWNN:%02x:%02x:%02x:%02x:"
5653 "%02x:%02x:%02x:%02x, "
5654 " WWPN:%02x:%02x:%02x:%02x:"
5655 "%02x:%02x:%02x:%02x",
5656 ndlp->nlp_sid, ndlp->cmd_qdepth,
5657 atomic_read(&ndlp->cmd_pending),
5658 ndlp->nlp_nodename.u.wwn[0],
5659 ndlp->nlp_nodename.u.wwn[1],
5660 ndlp->nlp_nodename.u.wwn[2],
5661 ndlp->nlp_nodename.u.wwn[3],
5662 ndlp->nlp_nodename.u.wwn[4],
5663 ndlp->nlp_nodename.u.wwn[5],
5664 ndlp->nlp_nodename.u.wwn[6],
5665 ndlp->nlp_nodename.u.wwn[7],
5666 ndlp->nlp_portname.u.wwn[0],
5667 ndlp->nlp_portname.u.wwn[1],
5668 ndlp->nlp_portname.u.wwn[2],
5669 ndlp->nlp_portname.u.wwn[3],
5670 ndlp->nlp_portname.u.wwn[4],
5671 ndlp->nlp_portname.u.wwn[5],
5672 ndlp->nlp_portname.u.wwn[6],
5673 ndlp->nlp_portname.u.wwn[7]);
02243836 5674 goto out_tgt_busy2;
2a5b7d62 5675 }
64bf0099 5676 }
f91bc594 5677
ace44e48 5678 lpfc_cmd = lpfc_get_scsi_buf(phba, ndlp, cmnd);
dea3101e 5679 if (lpfc_cmd == NULL) {
eaf15d5b 5680 lpfc_rampdown_queue_depth(phba);
92d7f7b0 5681
895427bd 5682 lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP_ERROR,
e8b62011
JS
5683 "0707 driver's buffer pool is empty, "
5684 "IO busied\n");
dea3101e
JB
5685 goto out_host_busy;
5686 }
02243836 5687 lpfc_cmd->rx_cmd_start = start;
dea3101e
JB
5688
5689 /*
5690 * Store the midlayer's command structure for the completion phase
5691 * and complete the command initialization.
5692 */
5693 lpfc_cmd->pCmd = cmnd;
5694 lpfc_cmd->rdata = rdata;
2a5b7d62 5695 lpfc_cmd->ndlp = ndlp;
da255e2e 5696 lpfc_cmd->cur_iocbq.iocb_cmpl = NULL;
dea3101e 5697 cmnd->host_scribble = (unsigned char *)lpfc_cmd;
dea3101e 5698
da255e2e
JS
5699 err = lpfc_scsi_prep_cmnd(vport, lpfc_cmd, ndlp);
5700 if (err)
5701 goto out_host_busy_release_buf;
5702
e2a0a9d6 5703 if (scsi_get_prot_op(cmnd) != SCSI_PROT_NORMAL) {
6a9c52cf 5704 if (vport->phba->cfg_enable_bg) {
737d4248
JS
5705 lpfc_printf_vlog(vport,
5706 KERN_INFO, LOG_SCSI_CMD,
2613470a 5707 "9033 BLKGRD: rcvd %s cmd:x%x "
68a6a66c 5708 "reftag x%x cnt %u pt %x\n",
2613470a
JS
5709 dif_op_str[scsi_get_prot_op(cmnd)],
5710 cmnd->cmnd[0],
125c12f7
MP
5711 scsi_prot_ref_tag(cmnd),
5712 scsi_logical_block_count(cmnd),
2613470a 5713 (cmnd->cmnd[1]>>5));
6a9c52cf 5714 }
e2a0a9d6
JS
5715 err = lpfc_bg_scsi_prep_dma_buf(phba, lpfc_cmd);
5716 } else {
6a9c52cf 5717 if (vport->phba->cfg_enable_bg) {
737d4248
JS
5718 lpfc_printf_vlog(vport,
5719 KERN_INFO, LOG_SCSI_CMD,
2613470a 5720 "9038 BLKGRD: rcvd PROT_NORMAL cmd: "
68a6a66c 5721 "x%x reftag x%x cnt %u pt %x\n",
2613470a 5722 cmnd->cmnd[0],
125c12f7
MP
5723 scsi_prot_ref_tag(cmnd),
5724 scsi_logical_block_count(cmnd),
2613470a 5725 (cmnd->cmnd[1]>>5));
6a9c52cf 5726 }
e2a0a9d6
JS
5727 err = lpfc_scsi_prep_dma_buf(phba, lpfc_cmd);
5728 }
5729
22770cba
JS
5730 if (unlikely(err)) {
5731 if (err == 2) {
5732 cmnd->result = DID_ERROR << 16;
5733 goto out_fail_command_release_buf;
5734 }
dea3101e 5735 goto out_host_busy_free_buf;
5e0e2318 5736 }
dea3101e 5737
dea3101e 5738
33c79741
GS
5739 /* check the necessary and sufficient condition to support VMID */
5740 if (lpfc_is_vmid_enabled(phba) &&
5741 (ndlp->vmid_support ||
5742 phba->pport->vmid_priority_tagging ==
5743 LPFC_VMID_PRIO_TAG_ALL_TARGETS)) {
 5744 /* if the I/O is generated by a VM, get the associated virtual */
 5745 /* entity id */
5746 uuid = lpfc_is_command_vm_io(cmnd);
5747
5748 if (uuid) {
5749 err = lpfc_vmid_get_appid(vport, uuid, cmnd,
5750 (union lpfc_vmid_io_tag *)
5751 &lpfc_cmd->cur_iocbq.vmid_tag);
5752 if (!err)
5753 lpfc_cmd->cur_iocbq.iocb_flag |= LPFC_IO_VMID;
5754 }
5755 }
5756
5757 atomic_inc(&ndlp->cmd_pending);
6a828b0f 5758#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
840eda96
JS
5759 if (unlikely(phba->hdwqstat_on & LPFC_CHECK_SCSI_IO))
5760 this_cpu_inc(phba->sli4_hba.c_stat->xmt_io);
6a828b0f 5761#endif
47ff4c51
JS
5762 /* Issue I/O to adapter */
5763 err = lpfc_sli_issue_fcp_io(phba, LPFC_FCP_RING,
5764 &lpfc_cmd->cur_iocbq,
5765 SLI_IOCB_RET_IOCB);
2fcbc569
JS
5766#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
5767 if (start) {
5768 lpfc_cmd->ts_cmd_start = start;
5769 lpfc_cmd->ts_last_cmd = phba->ktime_last_cmd;
5770 lpfc_cmd->ts_cmd_wqput = ktime_get_ns();
5771 } else {
5772 lpfc_cmd->ts_cmd_start = 0;
5773 }
5774#endif
eaf15d5b 5775 if (err) {
76f96b6d 5776 lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
da255e2e
JS
5777 "3376 FCP could not issue IOCB err %x "
5778 "FCP cmd x%x <%d/%llu> "
5779 "sid: x%x did: x%x oxid: x%x "
5780 "Data: x%x x%x x%x x%x\n",
5781 err, cmnd->cmnd[0],
5782 cmnd->device ? cmnd->device->id : 0xffff,
5783 cmnd->device ? cmnd->device->lun : (u64)-1,
5784 vport->fc_myDID, ndlp->nlp_DID,
5785 phba->sli_rev == LPFC_SLI_REV4 ?
5786 lpfc_cmd->cur_iocbq.sli4_xritag : 0xffff,
5787 phba->sli_rev == LPFC_SLI_REV4 ?
5788 phba->sli4_hba.rpi_ids[ndlp->nlp_rpi] :
5789 lpfc_cmd->cur_iocbq.iocb.ulpContext,
5790 lpfc_cmd->cur_iocbq.iotag,
5791 phba->sli_rev == LPFC_SLI_REV4 ?
5792 bf_get(wqe_tmo,
5793 &lpfc_cmd->cur_iocbq.wqe.generic.wqe_com) :
5794 lpfc_cmd->cur_iocbq.iocb.ulpTimeout,
4221c8a4 5795 (uint32_t)(scsi_cmd_to_rq(cmnd)->timeout / 1000));
76f96b6d 5796
dea3101e 5797 goto out_host_busy_free_buf;
eaf15d5b 5798 }
da255e2e 5799
875fbdfe 5800 if (phba->cfg_poll & ENABLE_FCP_RING_POLLING) {
45ed1190 5801 lpfc_sli_handle_fast_ring_event(phba,
895427bd 5802 &phba->sli.sli3_ring[LPFC_FCP_RING], HA_R0RE_REQ);
45ed1190 5803
875fbdfe
JSEC
5804 if (phba->cfg_poll & DISABLE_FCP_RING_INT)
5805 lpfc_poll_rearm_timer(phba);
5806 }
5807
c490850a
JS
5808 if (phba->cfg_xri_rebalancing)
5809 lpfc_keep_pvt_pool_above_lowwm(phba, lpfc_cmd->hdwq_no);
5810
dea3101e
JB
5811 return 0;
5812
5813 out_host_busy_free_buf:
1fbf9742 5814 idx = lpfc_cmd->hdwq_no;
bcf4dbfa 5815 lpfc_scsi_unprep_dma_buf(phba, lpfc_cmd);
4c47efc1
JS
5816 if (phba->sli4_hba.hdwq) {
5817 switch (lpfc_cmd->fcp_cmnd->fcpCntl3) {
5818 case WRITE_DATA:
5819 phba->sli4_hba.hdwq[idx].scsi_cstat.output_requests--;
5820 break;
5821 case READ_DATA:
5822 phba->sli4_hba.hdwq[idx].scsi_cstat.input_requests--;
5823 break;
5824 default:
5825 phba->sli4_hba.hdwq[idx].scsi_cstat.control_requests--;
5826 }
5827 }
da255e2e 5828 out_host_busy_release_buf:
0bd4ca25 5829 lpfc_release_scsi_buf(phba, lpfc_cmd);
dea3101e 5830 out_host_busy:
02243836
JS
5831 lpfc_update_cmf_cmpl(phba, LPFC_CGN_NOT_SENT, scsi_bufflen(cmnd),
5832 shost);
dea3101e
JB
5833 return SCSI_MLQUEUE_HOST_BUSY;
5834
02243836
JS
5835 out_tgt_busy2:
5836 lpfc_update_cmf_cmpl(phba, LPFC_CGN_NOT_SENT, scsi_bufflen(cmnd),
5837 shost);
5838 out_tgt_busy1:
3496343d
MC
5839 return SCSI_MLQUEUE_TARGET_BUSY;
5840
5e0e2318
JS
5841 out_fail_command_release_buf:
5842 lpfc_release_scsi_buf(phba, lpfc_cmd);
02243836
JS
5843 lpfc_update_cmf_cmpl(phba, LPFC_CGN_NOT_SENT, scsi_bufflen(cmnd),
5844 shost);
5e0e2318 5845
dea3101e 5846 out_fail_command:
b9a7c631 5847 cmnd->scsi_done(cmnd);
dea3101e
JB
5848 return 0;
5849}
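
/*
 * The failure paths above encode the host status into the upper bytes
 * of cmnd->result (e.g. DID_ERROR << 16). A standalone sketch of that
 * packing; 0x07 for DID_ERROR matches the SCSI midlayer definition, but
 * treat the constants here as illustrative.
 */
#include <stdint.h>

#define EX_DID_OK    0x00		/* mirrors DID_OK */
#define EX_DID_ERROR 0x07		/* mirrors DID_ERROR */

/* the host byte occupies bits 16-23 of the SCSI result word */
static inline uint32_t ex_host_byte(uint8_t did)
{
	return (uint32_t)did << 16;
}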
5850
5e633302
GS
5851/*
5852 * lpfc_vmid_vport_cleanup - cleans up the resources associated with a vport
5853 * @vport: The virtual port for which this call is being executed.
5854 */
5855void lpfc_vmid_vport_cleanup(struct lpfc_vport *vport)
5856{
5857 u32 bucket;
5858 struct lpfc_vmid *cur;
5859
5860 if (vport->port_type == LPFC_PHYSICAL_PORT)
5861 del_timer_sync(&vport->phba->inactive_vmid_poll);
5862
5863 kfree(vport->qfpa_res);
5864 kfree(vport->vmid_priority.vmid_range);
5865 kfree(vport->vmid);
5866
5867 if (!hash_empty(vport->hash_table))
5868 hash_for_each(vport->hash_table, bucket, cur, hnode)
5869 hash_del(&cur->hnode);
5870
5871 vport->qfpa_res = NULL;
5872 vport->vmid_priority.vmid_range = NULL;
5873 vport->vmid = NULL;
5874 vport->cur_vmid_cnt = 0;
5875}
f281233d 5876
9bad7671 5877/**
3621a710 5878 * lpfc_abort_handler - scsi_host_template eh_abort_handler entry point
9bad7671
JS
5879 * @cmnd: Pointer to scsi_cmnd data structure.
5880 *
 5881 * This routine aborts the @cmnd pending in the base driver.
5882 *
5883 * Return code :
5884 * 0x2003 - Error
5885 * 0x2002 - Success
5886 **/
dea3101e 5887static int
63c59c3b 5888lpfc_abort_handler(struct scsi_cmnd *cmnd)
dea3101e 5889{
2e0fef85
JS
5890 struct Scsi_Host *shost = cmnd->device->host;
5891 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
5892 struct lpfc_hba *phba = vport->phba;
0bd4ca25 5893 struct lpfc_iocbq *iocb;
c490850a 5894 struct lpfc_io_buf *lpfc_cmd;
3a70730a 5895 int ret = SUCCESS, status = 0;
8931c73b 5896 struct lpfc_sli_ring *pring_s4 = NULL;
db7531d2 5897 struct lpfc_sli_ring *pring = NULL;
895427bd 5898 int ret_val;
59c68eaa 5899 unsigned long flags;
fa61a54e 5900 DECLARE_WAIT_QUEUE_HEAD_ONSTACK(waitq);
dea3101e 5901
3a70730a 5902 status = fc_block_scsi_eh(cmnd);
908e18e4 5903 if (status != 0 && status != SUCCESS)
3a70730a 5904 return status;
4f2e66c6 5905
c2017260
JS
5906 lpfc_cmd = (struct lpfc_io_buf *)cmnd->host_scribble;
5907 if (!lpfc_cmd)
5908 return ret;
5909
876dd7d0 5910 spin_lock_irqsave(&phba->hbalock, flags);
4f2e66c6 5911 /* driver queued commands are in process of being flushed */
c00f62e6 5912 if (phba->hba_flag & HBA_IOQ_FLUSH) {
4f2e66c6
JS
5913 lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP,
5914 "3168 SCSI Layer abort requested I/O has been "
5915 "flushed by LLD.\n");
c2017260
JS
5916 ret = FAILED;
5917 goto out_unlock;
4f2e66c6
JS
5918 }
5919
c2017260
JS
5920 /* Guard against IO completion being called at same time */
5921 spin_lock(&lpfc_cmd->buf_lock);
5922
5923 if (!lpfc_cmd->pCmd) {
eee8877e
JS
5924 lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP,
5925 "2873 SCSI Layer I/O Abort Request IO CMPL Status "
9cb78c16 5926 "x%x ID %d LUN %llu\n",
3a70730a 5927 SUCCESS, cmnd->device->id, cmnd->device->lun);
c2017260 5928 goto out_unlock_buf;
eee8877e 5929 }
dea3101e 5930
4f2e66c6 5931 iocb = &lpfc_cmd->cur_iocbq;
8931c73b 5932 if (phba->sli_rev == LPFC_SLI_REV4) {
c00f62e6 5933 pring_s4 = phba->sli4_hba.hdwq[iocb->hba_wqidx].io_wq->pring;
8931c73b
JS
5934 if (!pring_s4) {
5935 ret = FAILED;
c2017260 5936 goto out_unlock_buf;
8931c73b
JS
5937 }
5938 spin_lock(&pring_s4->ring_lock);
5939 }
4f2e66c6
JS
5940 /* the command is in process of being cancelled */
5941 if (!(iocb->iocb_flag & LPFC_IO_ON_TXCMPLQ)) {
4f2e66c6
JS
5942 lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP,
5943 "3169 SCSI Layer abort requested I/O has been "
5944 "cancelled by LLD.\n");
c2017260
JS
5945 ret = FAILED;
5946 goto out_unlock_ring;
4f2e66c6 5947 }
0bd4ca25 5948 /*
c490850a 5949 * If pCmd field of the corresponding lpfc_io_buf structure
0bd4ca25
JSEC
5950 * points to a different SCSI command, then the driver has
5951 * already completed this command, but the midlayer did not
4f2e66c6 5952 * see the completion before the eh fired. Just return SUCCESS.
0bd4ca25 5953 */
4f2e66c6
JS
5954 if (lpfc_cmd->pCmd != cmnd) {
5955 lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP,
5956 "3170 SCSI Layer abort requested I/O has been "
5957 "completed by LLD.\n");
c2017260 5958 goto out_unlock_ring;
4f2e66c6 5959 }
dea3101e 5960
0bd4ca25 5961 BUG_ON(iocb->context1 != lpfc_cmd);
dea3101e 5962
ee62021a
JS
5963 /* abort issued in recovery is still in progress */
5964 if (iocb->iocb_flag & LPFC_DRIVER_ABORTED) {
5965 lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP,
5966 "3389 SCSI Layer I/O Abort Request is pending\n");
8931c73b
JS
5967 if (phba->sli_rev == LPFC_SLI_REV4)
5968 spin_unlock(&pring_s4->ring_lock);
c2017260 5969 spin_unlock(&lpfc_cmd->buf_lock);
ee62021a
JS
5970 spin_unlock_irqrestore(&phba->hbalock, flags);
5971 goto wait_for_cmpl;
5972 }
5973
8931c73b 5974 lpfc_cmd->waitq = &waitq;
98912dda 5975 if (phba->sli_rev == LPFC_SLI_REV4) {
59c68eaa 5976 spin_unlock(&pring_s4->ring_lock);
db7531d2
JS
5977 ret_val = lpfc_sli4_issue_abort_iotag(phba, iocb,
5978 lpfc_sli4_abort_fcp_cmpl);
98912dda 5979 } else {
db7531d2
JS
5980 pring = &phba->sli.sli3_ring[LPFC_FCP_RING];
5981 ret_val = lpfc_sli_issue_abort_iotag(phba, pring, iocb,
5982 lpfc_sli_abort_fcp_cmpl);
98912dda 5983 }
4f2e66c6 5984
a22d73b6
JS
5985 /* Make sure HBA is alive */
5986 lpfc_issue_hb_tmo(phba);
5987
db7531d2 5988 if (ret_val != IOCB_SUCCESS) {
8931c73b 5989 /* Indicate the IO is not being aborted by the driver. */
8931c73b 5990 lpfc_cmd->waitq = NULL;
c2017260 5991 spin_unlock(&lpfc_cmd->buf_lock);
91a52b61 5992 spin_unlock_irqrestore(&phba->hbalock, flags);
0bd4ca25
JSEC
5993 ret = FAILED;
5994 goto out;
5995 }
dea3101e 5996
91a52b61 5997 /* no longer need the lock after this point */
c2017260 5998 spin_unlock(&lpfc_cmd->buf_lock);
91a52b61 5999 spin_unlock_irqrestore(&phba->hbalock, flags);
c2017260 6000
875fbdfe 6001 if (phba->cfg_poll & DISABLE_FCP_RING_INT)
45ed1190 6002 lpfc_sli_handle_fast_ring_event(phba,
895427bd 6003 &phba->sli.sli3_ring[LPFC_FCP_RING], HA_R0RE_REQ);
875fbdfe 6004
ee62021a 6005wait_for_cmpl:
db7531d2
JS
6006 /*
6007 * iocb_flag is set to LPFC_DRIVER_ABORTED before we wait
6008 * for abort to complete.
6009 */
fa61a54e
JS
6010 wait_event_timeout(waitq,
6011 (lpfc_cmd->pCmd != cmnd),
256ec0d0 6012 msecs_to_jiffies(2*vport->cfg_devloss_tmo*1000));
ee62021a 6013
c2017260 6014 spin_lock(&lpfc_cmd->buf_lock);
dea3101e 6015
0bd4ca25
JSEC
6016 if (lpfc_cmd->pCmd == cmnd) {
6017 ret = FAILED;
372c187b 6018 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
e8b62011 6019 "0748 abort handler timed out waiting "
4b160ae8 6020 "for aborting I/O (xri:x%x) to complete: "
9cb78c16 6021 "ret %#x, ID %d, LUN %llu\n",
247ca945
JS
6022 iocb->sli4_xritag, ret,
6023 cmnd->device->id, cmnd->device->lun);
dea3101e 6024 }
b9e5a2d9
JS
6025
6026 lpfc_cmd->waitq = NULL;
6027
c2017260 6028 spin_unlock(&lpfc_cmd->buf_lock);
4f2e66c6 6029 goto out;
dea3101e 6030
c2017260
JS
6031out_unlock_ring:
6032 if (phba->sli_rev == LPFC_SLI_REV4)
6033 spin_unlock(&pring_s4->ring_lock);
6034out_unlock_buf:
6035 spin_unlock(&lpfc_cmd->buf_lock);
4f2e66c6 6036out_unlock:
876dd7d0 6037 spin_unlock_irqrestore(&phba->hbalock, flags);
4f2e66c6 6038out:
e8b62011
JS
6039 lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP,
6040 "0749 SCSI Layer I/O Abort Request Status x%x ID %d "
9cb78c16 6041 "LUN %llu\n", ret, cmnd->device->id,
5cd049a5 6042 cmnd->device->lun);
63c59c3b 6043 return ret;
8fa728a2
JG
6044}
6045
bbb9d180
JS
6046static char *
6047lpfc_taskmgmt_name(uint8_t task_mgmt_cmd)
6048{
6049 switch (task_mgmt_cmd) {
6050 case FCP_ABORT_TASK_SET:
6051 return "ABORT_TASK_SET";
6052 case FCP_CLEAR_TASK_SET:
6053 return "FCP_CLEAR_TASK_SET";
6054 case FCP_BUS_RESET:
6055 return "FCP_BUS_RESET";
6056 case FCP_LUN_RESET:
6057 return "FCP_LUN_RESET";
6058 case FCP_TARGET_RESET:
6059 return "FCP_TARGET_RESET";
6060 case FCP_CLEAR_ACA:
6061 return "FCP_CLEAR_ACA";
6062 case FCP_TERMINATE_TASK:
6063 return "FCP_TERMINATE_TASK";
6064 default:
6065 return "unknown";
6066 }
6067}
6068
53151bbb
JS
6069
6070/**
6071 * lpfc_check_fcp_rsp - check the returned fcp_rsp to see if task failed
6072 * @vport: The virtual port for which this call is being executed.
c490850a 6073 * @lpfc_cmd: Pointer to lpfc_io_buf data structure.
53151bbb
JS
6074 *
 6075 * This routine checks the FCP RSP INFO to see if the tsk mgmt command succeeded
6076 *
6077 * Return code :
6078 * 0x2003 - Error
6079 * 0x2002 - Success
6080 **/
6081static int
c490850a 6082lpfc_check_fcp_rsp(struct lpfc_vport *vport, struct lpfc_io_buf *lpfc_cmd)
53151bbb
JS
6083{
6084 struct fcp_rsp *fcprsp = lpfc_cmd->fcp_rsp;
6085 uint32_t rsp_info;
6086 uint32_t rsp_len;
6087 uint8_t rsp_info_code;
6088 int ret = FAILED;
6089
6090
6091 if (fcprsp == NULL)
6092 lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
6093 "0703 fcp_rsp is missing\n");
6094 else {
6095 rsp_info = fcprsp->rspStatus2;
6096 rsp_len = be32_to_cpu(fcprsp->rspRspLen);
6097 rsp_info_code = fcprsp->rspInfo3;
6098
6099
6100 lpfc_printf_vlog(vport, KERN_INFO,
6101 LOG_FCP,
6102 "0706 fcp_rsp valid 0x%x,"
6103 " rsp len=%d code 0x%x\n",
6104 rsp_info,
6105 rsp_len, rsp_info_code);
6106
996a02ae
JS
6107 /* If FCP_RSP_LEN_VALID bit is one, then the FCP_RSP_LEN
6108 * field specifies the number of valid bytes of FCP_RSP_INFO.
6109 * The FCP_RSP_LEN field shall be set to 0x04 or 0x08
6110 */
6111 if ((fcprsp->rspStatus2 & RSP_LEN_VALID) &&
6112 ((rsp_len == 8) || (rsp_len == 4))) {
53151bbb
JS
6113 switch (rsp_info_code) {
6114 case RSP_NO_FAILURE:
6115 lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
6116 "0715 Task Mgmt No Failure\n");
6117 ret = SUCCESS;
6118 break;
6119 case RSP_TM_NOT_SUPPORTED: /* TM rejected */
6120 lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
6121 "0716 Task Mgmt Target "
6122 "reject\n");
6123 break;
6124 case RSP_TM_NOT_COMPLETED: /* TM failed */
6125 lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
6126 "0717 Task Mgmt Target "
6127 "failed TM\n");
6128 break;
6129 case RSP_TM_INVALID_LU: /* TM to invalid LU! */
6130 lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
6131 "0718 Task Mgmt to invalid "
6132 "LUN\n");
6133 break;
6134 }
6135 }
6136 }
6137 return ret;
6138}
6139
6140
9bad7671 6141/**
bbb9d180
JS
6142 * lpfc_send_taskmgmt - Generic SCSI Task Mgmt Handler
6143 * @vport: The virtual port for which this call is being executed.
eceee00e 6144 * @cmnd: Pointer to scsi_cmnd data structure.
bbb9d180
JS
6145 * @tgt_id: Target ID of remote device.
6146 * @lun_id: Lun number for the TMF
6147 * @task_mgmt_cmd: type of TMF to send
9bad7671 6148 *
bbb9d180
JS
6149 * This routine builds and sends a TMF (SCSI Task Mgmt Function) to
6150 * a remote port.
9bad7671 6151 *
bbb9d180
JS
6152 * Return Code:
6153 * 0x2003 - Error
6154 * 0x2002 - Success.
9bad7671 6155 **/
dea3101e 6156static int
eed695d7
JS
6157lpfc_send_taskmgmt(struct lpfc_vport *vport, struct scsi_cmnd *cmnd,
6158 unsigned int tgt_id, uint64_t lun_id,
6159 uint8_t task_mgmt_cmd)
dea3101e 6160{
2e0fef85 6161 struct lpfc_hba *phba = vport->phba;
c490850a 6162 struct lpfc_io_buf *lpfc_cmd;
bbb9d180
JS
6163 struct lpfc_iocbq *iocbq;
6164 struct lpfc_iocbq *iocbqrsp;
eed695d7
JS
6165 struct lpfc_rport_data *rdata;
6166 struct lpfc_nodelist *pnode;
bbb9d180 6167 int ret;
915caaaf 6168 int status;
dea3101e 6169
eed695d7 6170 rdata = lpfc_rport_data_from_scsi_device(cmnd->device);
307e3380 6171 if (!rdata || !rdata->pnode)
915caaaf 6172 return FAILED;
eed695d7 6173 pnode = rdata->pnode;
bbb9d180 6174
ace44e48 6175 lpfc_cmd = lpfc_get_scsi_buf(phba, pnode, NULL);
dea3101e 6176 if (lpfc_cmd == NULL)
915caaaf 6177 return FAILED;
0c411222 6178 lpfc_cmd->timeout = phba->cfg_task_mgmt_tmo;
0b18ac42 6179 lpfc_cmd->rdata = rdata;
eed695d7 6180 lpfc_cmd->pCmd = cmnd;
2a5b7d62 6181 lpfc_cmd->ndlp = pnode;
dea3101e 6182
bbb9d180
JS
6183 status = lpfc_scsi_prep_task_mgmt_cmd(vport, lpfc_cmd, lun_id,
6184 task_mgmt_cmd);
915caaaf
JS
6185 if (!status) {
6186 lpfc_release_scsi_buf(phba, lpfc_cmd);
6187 return FAILED;
6188 }
dea3101e 6189
bbb9d180 6190 iocbq = &lpfc_cmd->cur_iocbq;
0bd4ca25 6191 iocbqrsp = lpfc_sli_get_iocbq(phba);
915caaaf
JS
6192 if (iocbqrsp == NULL) {
6193 lpfc_release_scsi_buf(phba, lpfc_cmd);
6194 return FAILED;
6195 }
5a0916b4 6196 iocbq->iocb_cmpl = lpfc_tskmgmt_def_cmpl;
bbb9d180 6197
e8b62011 6198 lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
9cb78c16 6199 "0702 Issue %s to TGT %d LUN %llu "
6d368e53 6200 "rpi x%x nlp_flag x%x Data: x%x x%x\n",
bbb9d180 6201 lpfc_taskmgmt_name(task_mgmt_cmd), tgt_id, lun_id,
6d368e53
JS
6202 pnode->nlp_rpi, pnode->nlp_flag, iocbq->sli4_xritag,
6203 iocbq->iocb_flag);
bbb9d180 6204
3772a991 6205 status = lpfc_sli_issue_iocb_wait(phba, LPFC_FCP_RING,
915caaaf 6206 iocbq, iocbqrsp, lpfc_cmd->timeout);
53151bbb
JS
6207 if ((status != IOCB_SUCCESS) ||
6208 (iocbqrsp->iocb.ulpStatus != IOSTAT_SUCCESS)) {
ae374a30
JS
6209 if (status != IOCB_SUCCESS ||
6210 iocbqrsp->iocb.ulpStatus != IOSTAT_FCP_RSP_ERROR)
372c187b 6211 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
ae374a30
JS
6212 "0727 TMF %s to TGT %d LUN %llu "
6213 "failed (%d, %d) iocb_flag x%x\n",
6214 lpfc_taskmgmt_name(task_mgmt_cmd),
6215 tgt_id, lun_id,
6216 iocbqrsp->iocb.ulpStatus,
6217 iocbqrsp->iocb.un.ulpWord[4],
6218 iocbq->iocb_flag);
53151bbb
JS
6219 /* if ulpStatus != IOCB_SUCCESS, then status == IOCB_SUCCESS */
6220 if (status == IOCB_SUCCESS) {
6221 if (iocbqrsp->iocb.ulpStatus == IOSTAT_FCP_RSP_ERROR)
6222 /* Something in the FCP_RSP was invalid.
6223 * Check conditions */
6224 ret = lpfc_check_fcp_rsp(vport, lpfc_cmd);
6225 else
6226 ret = FAILED;
6227 } else if (status == IOCB_TIMEDOUT) {
6228 ret = TIMEOUT_ERROR;
6229 } else {
6230 ret = FAILED;
6231 }
53151bbb 6232 } else
bbb9d180
JS
6233 ret = SUCCESS;
6234
6175c02a 6235 lpfc_sli_release_iocbq(phba, iocbqrsp);
bbb9d180
JS
6236
6237 if (ret != TIMEOUT_ERROR)
6238 lpfc_release_scsi_buf(phba, lpfc_cmd);
6239
6240 return ret;
6241}
6242
6243/**
 6244 * lpfc_chk_tgt_mapped - Wait for the scsi target to become mapped
6245 * @vport: The virtual port to check on
6246 * @cmnd: Pointer to scsi_cmnd data structure.
6247 *
6248 * This routine delays until the scsi target (aka rport) for the
6249 * command exists (is present and logged in) or we declare it non-existent.
6250 *
6251 * Return code :
6252 * 0x2003 - Error
6253 * 0x2002 - Success
6254 **/
6255static int
6256lpfc_chk_tgt_mapped(struct lpfc_vport *vport, struct scsi_cmnd *cmnd)
6257{
1ba981fd 6258 struct lpfc_rport_data *rdata;
1c6f4ef5 6259 struct lpfc_nodelist *pnode;
bbb9d180
JS
6260 unsigned long later;
6261
1ba981fd 6262 rdata = lpfc_rport_data_from_scsi_device(cmnd->device);
1c6f4ef5
JS
6263 if (!rdata) {
6264 lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
32350664 6265 "0797 Tgt Map rport failure: rdata x%px\n", rdata);
1c6f4ef5
JS
6266 return FAILED;
6267 }
6268 pnode = rdata->pnode;
bbb9d180
JS
6269 /*
6270 * If target is not in a MAPPED state, delay until
6271 * target is rediscovered or devloss timeout expires.
6272 */
6273 later = msecs_to_jiffies(2 * vport->cfg_devloss_tmo * 1000) + jiffies;
6274 while (time_after(later, jiffies)) {
307e3380 6275 if (!pnode)
bbb9d180
JS
6276 return FAILED;
6277 if (pnode->nlp_state == NLP_STE_MAPPED_NODE)
6278 return SUCCESS;
6279 schedule_timeout_uninterruptible(msecs_to_jiffies(500));
1ba981fd 6280 rdata = lpfc_rport_data_from_scsi_device(cmnd->device);
bbb9d180
JS
6281 if (!rdata)
6282 return FAILED;
6283 pnode = rdata->pnode;
6284 }
307e3380 6285 if (!pnode || (pnode->nlp_state != NLP_STE_MAPPED_NODE))
bbb9d180
JS
6286 return FAILED;
6287 return SUCCESS;
6288}
6289
6290/**
 6291 * lpfc_reset_flush_io_context - Flush outstanding I/O contexts after a reset
6292 * @vport: The virtual port (scsi_host) for the flush context
 6293 * @tgt_id: If aborting by Target context - specifies the target id
6294 * @lun_id: If aborting by Lun context - specifies the lun id
6295 * @context: specifies the context level to flush at.
6296 *
6297 * After a reset condition via TMF, we need to flush orphaned i/o
6298 * contexts from the adapter. This routine aborts any contexts
6299 * outstanding, then waits for their completions. The wait is
 6300 * bounded by devloss_tmo.
6301 *
6302 * Return code :
6303 * 0x2003 - Error
6304 * 0x2002 - Success
6305 **/
6306static int
6307lpfc_reset_flush_io_context(struct lpfc_vport *vport, uint16_t tgt_id,
6308 uint64_t lun_id, lpfc_ctx_cmd context)
6309{
6310 struct lpfc_hba *phba = vport->phba;
6311 unsigned long later;
6312 int cnt;
6313
6314 cnt = lpfc_sli_sum_iocb(vport, tgt_id, lun_id, context);
6175c02a 6315 if (cnt)
98912dda 6316 lpfc_sli_abort_taskmgmt(vport,
895427bd 6317 &phba->sli.sli3_ring[LPFC_FCP_RING],
98912dda 6318 tgt_id, lun_id, context);
915caaaf
JS
6319 later = msecs_to_jiffies(2 * vport->cfg_devloss_tmo * 1000) + jiffies;
6320 while (time_after(later, jiffies) && cnt) {
6321 schedule_timeout_uninterruptible(msecs_to_jiffies(20));
bbb9d180 6322 cnt = lpfc_sli_sum_iocb(vport, tgt_id, lun_id, context);
dea3101e 6323 }
dea3101e 6324 if (cnt) {
372c187b 6325 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
bbb9d180
JS
6326 "0724 I/O flush failure for context %s : cnt x%x\n",
6327 ((context == LPFC_CTX_LUN) ? "LUN" :
6328 ((context == LPFC_CTX_TGT) ? "TGT" :
6329 ((context == LPFC_CTX_HOST) ? "HOST" : "Unknown"))),
6330 cnt);
6331 return FAILED;
dea3101e 6332 }
bbb9d180
JS
6333 return SUCCESS;
6334}
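
/*
 * Illustrative userspace sketch of the bounded polling used by
 * lpfc_reset_flush_io_context() above: re-check a condition with a
 * short sleep until it holds or a deadline (2 x devloss_tmo in the
 * driver) expires. time() and nanosleep() stand in for jiffies and
 * schedule_timeout_uninterruptible().
 */
#include <stdbool.h>
#include <time.h>

static bool example_wait_until(bool (*done)(void), int timeout_secs)
{
	time_t deadline = time(NULL) + timeout_secs;
	struct timespec ts = { .tv_sec = 0, .tv_nsec = 20 * 1000 * 1000 };

	while (time(NULL) < deadline) {
		if (done())
			return true;
		nanosleep(&ts, NULL);	/* 20 ms, like the driver's poll */
	}
	return done();
}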
6335
6336/**
6337 * lpfc_device_reset_handler - scsi_host_template eh_device_reset entry point
6338 * @cmnd: Pointer to scsi_cmnd data structure.
6339 *
6340 * This routine does a device reset by sending a LUN_RESET task management
6341 * command.
6342 *
6343 * Return code :
6344 * 0x2003 - Error
6345 * 0x2002 - Success
6346 **/
6347static int
6348lpfc_device_reset_handler(struct scsi_cmnd *cmnd)
6349{
6350 struct Scsi_Host *shost = cmnd->device->host;
6351 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
1ba981fd 6352 struct lpfc_rport_data *rdata;
1c6f4ef5 6353 struct lpfc_nodelist *pnode;
bbb9d180 6354 unsigned tgt_id = cmnd->device->id;
9cb78c16 6355 uint64_t lun_id = cmnd->device->lun;
bbb9d180 6356 struct lpfc_scsi_event_header scsi_event;
53151bbb 6357 int status;
da09ae48 6358 u32 logit = LOG_FCP;
bbb9d180 6359
1ba981fd 6360 rdata = lpfc_rport_data_from_scsi_device(cmnd->device);
ad490b6e 6361 if (!rdata || !rdata->pnode) {
372c187b 6362 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
32350664 6363 "0798 Device Reset rdata failure: rdata x%px\n",
ad490b6e 6364 rdata);
1c6f4ef5
JS
6365 return FAILED;
6366 }
6367 pnode = rdata->pnode;
589a52d6 6368 status = fc_block_scsi_eh(cmnd);
908e18e4 6369 if (status != 0 && status != SUCCESS)
589a52d6 6370 return status;
bbb9d180
JS
6371
6372 status = lpfc_chk_tgt_mapped(vport, cmnd);
6373 if (status == FAILED) {
372c187b 6374 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
32350664 6375 "0721 Device Reset rport failure: rdata x%px\n", rdata);
bbb9d180
JS
6376 return FAILED;
6377 }
6378
6379 scsi_event.event_type = FC_REG_SCSI_EVENT;
6380 scsi_event.subcategory = LPFC_EVENT_LUNRESET;
6381 scsi_event.lun = lun_id;
6382 memcpy(scsi_event.wwpn, &pnode->nlp_portname, sizeof(struct lpfc_name));
6383 memcpy(scsi_event.wwnn, &pnode->nlp_nodename, sizeof(struct lpfc_name));
6384
6385 fc_host_post_vendor_event(shost, fc_get_event_number(),
6386 sizeof(scsi_event), (char *)&scsi_event, LPFC_NL_VENDOR_ID);
6387
eed695d7 6388 status = lpfc_send_taskmgmt(vport, cmnd, tgt_id, lun_id,
bbb9d180 6389 FCP_LUN_RESET);
da09ae48
JS
6390 if (status != SUCCESS)
6391 logit = LOG_TRACE_EVENT;
bbb9d180 6392
da09ae48 6393 lpfc_printf_vlog(vport, KERN_ERR, logit,
9cb78c16 6394 "0713 SCSI layer issued Device Reset (%d, %llu) "
bbb9d180
JS
6395 "return x%x\n", tgt_id, lun_id, status);
6396
6397 /*
 6398 * We have to clean up the i/o: it may have been orphaned by the TMF,
 6399 * or, if the TMF failed, it may be in an indeterminate state.
6400 * So, continue on.
6401 * We will report success if all the i/o aborts successfully.
6402 */
53151bbb
JS
6403 if (status == SUCCESS)
6404 status = lpfc_reset_flush_io_context(vport, tgt_id, lun_id,
bbb9d180 6405 LPFC_CTX_LUN);
53151bbb
JS
6406
6407 return status;
bbb9d180
JS
6408}
6409
6410/**
 * lpfc_target_reset_handler - scsi_host_template eh_target_reset entry point
 * @cmnd: Pointer to scsi_cmnd data structure.
 *
 * This routine does a target reset by sending a TARGET_RESET task management
 * command.
 *
 * Return code :
 *  0x2003 - Error
 *  0x2002 - Success
 **/
static int
lpfc_target_reset_handler(struct scsi_cmnd *cmnd)
{
	struct Scsi_Host  *shost = cmnd->device->host;
	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
	struct lpfc_rport_data *rdata;
	struct lpfc_nodelist *pnode;
	unsigned tgt_id = cmnd->device->id;
	uint64_t lun_id = cmnd->device->lun;
	struct lpfc_scsi_event_header scsi_event;
	int status;
	u32 logit = LOG_FCP;
	u32 dev_loss_tmo = vport->cfg_devloss_tmo;
	unsigned long flags;
	DECLARE_WAIT_QUEUE_HEAD_ONSTACK(waitq);

	rdata = lpfc_rport_data_from_scsi_device(cmnd->device);
	if (!rdata || !rdata->pnode) {
		lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
				 "0799 Target Reset rdata failure: rdata x%px\n",
				 rdata);
		return FAILED;
	}
	pnode = rdata->pnode;
	status = fc_block_scsi_eh(cmnd);
	if (status != 0 && status != SUCCESS)
		return status;

	status = lpfc_chk_tgt_mapped(vport, cmnd);
	if (status == FAILED) {
		lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
			"0722 Target Reset rport failure: rdata x%px\n", rdata);
		if (pnode) {
			spin_lock_irqsave(&pnode->lock, flags);
			pnode->nlp_flag &= ~NLP_NPR_ADISC;
			pnode->nlp_fcp_info &= ~NLP_FCP_2_DEVICE;
			spin_unlock_irqrestore(&pnode->lock, flags);
		}
		lpfc_reset_flush_io_context(vport, tgt_id, lun_id,
					    LPFC_CTX_TGT);
		return FAST_IO_FAIL;
	}

	scsi_event.event_type = FC_REG_SCSI_EVENT;
	scsi_event.subcategory = LPFC_EVENT_TGTRESET;
	scsi_event.lun = 0;
	memcpy(scsi_event.wwpn, &pnode->nlp_portname, sizeof(struct lpfc_name));
	memcpy(scsi_event.wwnn, &pnode->nlp_nodename, sizeof(struct lpfc_name));

	fc_host_post_vendor_event(shost, fc_get_event_number(),
		sizeof(scsi_event), (char *)&scsi_event, LPFC_NL_VENDOR_ID);

	status = lpfc_send_taskmgmt(vport, cmnd, tgt_id, lun_id,
				    FCP_TARGET_RESET);
	if (status != SUCCESS) {
		logit = LOG_TRACE_EVENT;

		/* Issue LOGO, if no LOGO is outstanding */
		spin_lock_irqsave(&pnode->lock, flags);
		if (!(pnode->save_flags & NLP_WAIT_FOR_LOGO) &&
		    !pnode->logo_waitq) {
			pnode->logo_waitq = &waitq;
			pnode->nlp_fcp_info &= ~NLP_FCP_2_DEVICE;
			pnode->nlp_flag |= NLP_ISSUE_LOGO;
			pnode->save_flags |= NLP_WAIT_FOR_LOGO;
			spin_unlock_irqrestore(&pnode->lock, flags);
			lpfc_unreg_rpi(vport, pnode);
			wait_event_timeout(waitq,
					   (!(pnode->save_flags &
					      NLP_WAIT_FOR_LOGO)),
					   msecs_to_jiffies(dev_loss_tmo *
							    1000));

			if (pnode->save_flags & NLP_WAIT_FOR_LOGO) {
				lpfc_printf_vlog(vport, KERN_ERR, logit,
						 "0725 SCSI layer TGTRST "
						 "failed & LOGO TMO (%d, %llu) "
						 "return x%x\n",
						 tgt_id, lun_id, status);
				spin_lock_irqsave(&pnode->lock, flags);
				pnode->save_flags &= ~NLP_WAIT_FOR_LOGO;
			} else {
				spin_lock_irqsave(&pnode->lock, flags);
			}
			pnode->logo_waitq = NULL;
			spin_unlock_irqrestore(&pnode->lock, flags);
			status = SUCCESS;

		} else {
			spin_unlock_irqrestore(&pnode->lock, flags);
			status = FAILED;
		}
	}

	lpfc_printf_vlog(vport, KERN_ERR, logit,
			 "0723 SCSI layer issued Target Reset (%d, %llu) "
			 "return x%x\n", tgt_id, lun_id, status);

	/*
	 * We have to clean up the i/o: it may have been orphaned by the
	 * TMF, or, if the TMF failed, it may be in an indeterminate state.
	 * So, continue on.
	 * We will report success if all the i/o aborts successfully.
	 */
	if (status == SUCCESS)
		status = lpfc_reset_flush_io_context(vport, tgt_id, lun_id,
						     LPFC_CTX_TGT);
	return status;
}

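/*
 * Note on the return codes documented above: 0x2002 and 0x2003 are the
 * SCSI midlayer's SUCCESS and FAILED verdicts from <scsi/scsi.h>, and
 * FAST_IO_FAIL (0x2009) tells the error handler to skip further
 * escalation for this context. A simplified sketch of how the midlayer
 * is assumed to consume this entry point (shape only, not the exact
 * scsi_error.c code):
 *
 *	rtn = shost->hostt->eh_target_reset_handler(scmd);
 *	if (rtn == SUCCESS || rtn == FAST_IO_FAIL)
 *		... finish or requeue the commands pending on this target ...
 *	else
 *		... escalate to eh_bus_reset_handler ...
 */
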
/**
 * lpfc_bus_reset_handler - scsi_host_template eh_bus_reset_handler entry point
 * @cmnd: Pointer to scsi_cmnd data structure.
 *
 * This routine does a target reset to all targets on @cmnd->device->host.
 * This emulates Parallel SCSI Bus Reset Semantics.
 *
 * Return code :
 *  0x2003 - Error
 *  0x2002 - Success
 **/
static int
lpfc_bus_reset_handler(struct scsi_cmnd *cmnd)
{
	struct Scsi_Host  *shost = cmnd->device->host;
	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
	struct lpfc_nodelist *ndlp = NULL;
	struct lpfc_scsi_event_header scsi_event;
	int match;
	int ret = SUCCESS, status, i;
	u32 logit = LOG_FCP;

	scsi_event.event_type = FC_REG_SCSI_EVENT;
	scsi_event.subcategory = LPFC_EVENT_BUSRESET;
	scsi_event.lun = 0;
	memcpy(scsi_event.wwpn, &vport->fc_portname, sizeof(struct lpfc_name));
	memcpy(scsi_event.wwnn, &vport->fc_nodename, sizeof(struct lpfc_name));

	fc_host_post_vendor_event(shost, fc_get_event_number(),
		sizeof(scsi_event), (char *)&scsi_event, LPFC_NL_VENDOR_ID);

	status = fc_block_scsi_eh(cmnd);
	if (status != 0 && status != SUCCESS)
		return status;

	/*
	 * Since the driver manages a single bus device, reset all
	 * targets known to the driver.  Should any target reset
	 * fail, this routine returns failure to the midlayer.
	 */
	for (i = 0; i < LPFC_MAX_TARGET; i++) {
		/* Search for mapped node by target ID */
		match = 0;
		spin_lock_irq(shost->host_lock);
		list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) {

			if (vport->phba->cfg_fcp2_no_tgt_reset &&
			    (ndlp->nlp_fcp_info & NLP_FCP_2_DEVICE))
				continue;
			if (ndlp->nlp_state == NLP_STE_MAPPED_NODE &&
			    ndlp->nlp_sid == i &&
			    ndlp->rport &&
			    ndlp->nlp_type & NLP_FCP_TARGET) {
				match = 1;
				break;
			}
		}
		spin_unlock_irq(shost->host_lock);
		if (!match)
			continue;

		status = lpfc_send_taskmgmt(vport, cmnd,
					    i, 0, FCP_TARGET_RESET);

		if (status != SUCCESS) {
			lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
					 "0700 Bus Reset on target %d failed\n",
					 i);
			ret = FAILED;
		}
	}
	/*
	 * We have to clean up the i/o: it may have been orphaned by the
	 * TMFs above, or, if any of the TMFs failed, it may be in an
	 * indeterminate state.
	 * We will report success if all the i/o aborts successfully.
	 */

	status = lpfc_reset_flush_io_context(vport, 0, 0, LPFC_CTX_HOST);
	if (status != SUCCESS)
		ret = FAILED;
	if (ret == FAILED)
		logit = LOG_TRACE_EVENT;

	lpfc_printf_vlog(vport, KERN_ERR, logit,
			 "0714 SCSI layer issued Bus Reset Data: x%x\n", ret);
	return ret;
}

/**
 * lpfc_host_reset_handler - scsi_host_template eh_host_reset_handler entry point
 * @cmnd: Pointer to scsi_cmnd data structure.
 *
 * This routine does a host reset of the adapter port. It brings the HBA
 * offline, performs a board restart, and then brings the board back online.
 * lpfc_offline calls lpfc_sli_hba_down, which will abort and locally reject
 * all outstanding SCSI commands on the host, with the errors returned to the
 * SCSI mid-level. As this is the SCSI mid-level's last resort of error
 * handling, it only returns an error if resetting the adapter is
 * unsuccessful; in all other cases it returns success.
 *
 * Return code :
 *  0x2003 - Error
 *  0x2002 - Success
 **/
static int
lpfc_host_reset_handler(struct scsi_cmnd *cmnd)
{
	struct Scsi_Host *shost = cmnd->device->host;
	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
	struct lpfc_hba *phba = vport->phba;
	int rc, ret = SUCCESS;

	lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
			 "3172 SCSI layer issued Host Reset Data:\n");

	lpfc_offline_prep(phba, LPFC_MBX_WAIT);
	lpfc_offline(phba);
	rc = lpfc_sli_brdrestart(phba);
	if (rc)
		goto error;

	/* Wait for successful restart of adapter */
	if (phba->sli_rev < LPFC_SLI_REV4) {
		rc = lpfc_sli_chipset_init(phba);
		if (rc)
			goto error;
	}

	rc = lpfc_online(phba);
	if (rc)
		goto error;

	lpfc_unblock_mgmt_io(phba);

	return ret;
error:
	lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
			 "3323 Failed host reset\n");
	lpfc_unblock_mgmt_io(phba);
	return FAILED;
}

/**
 * lpfc_slave_alloc - scsi_host_template slave_alloc entry point
 * @sdev: Pointer to scsi_device.
 *
 * This routine populates cmds_per_lun count + 2 scsi_bufs into this host's
 * globally available list of scsi buffers. This routine also makes sure no
 * more scsi buffers are allocated than the HBA limit conveyed to the
 * midlayer. This list of scsi buffers exists for the lifetime of the driver.
 *
 * Return codes:
 *   non-0 - Error
 *   0 - Success
 **/
static int
lpfc_slave_alloc(struct scsi_device *sdev)
{
	struct lpfc_vport *vport = (struct lpfc_vport *) sdev->host->hostdata;
	struct lpfc_hba   *phba = vport->phba;
	struct fc_rport *rport = starget_to_rport(scsi_target(sdev));
	uint32_t total = 0;
	uint32_t num_to_alloc = 0;
	int num_allocated = 0;
	uint32_t sdev_cnt;
	struct lpfc_device_data *device_data;
	unsigned long flags;
	struct lpfc_name target_wwpn;

	if (!rport || fc_remote_port_chkready(rport))
		return -ENXIO;

	if (phba->cfg_fof) {

		/*
		 * Check to see if the device data structure for the lun
		 * exists. If not, create one.
		 */

		u64_to_wwn(rport->port_name, target_wwpn.u.wwn);
		spin_lock_irqsave(&phba->devicelock, flags);
		device_data = __lpfc_get_device_data(phba,
						     &phba->luns,
						     &vport->fc_portname,
						     &target_wwpn,
						     sdev->lun);
		if (!device_data) {
			spin_unlock_irqrestore(&phba->devicelock, flags);
			device_data = lpfc_create_device_data(phba,
							&vport->fc_portname,
							&target_wwpn,
							sdev->lun,
							phba->cfg_XLanePriority,
							true);
			if (!device_data)
				return -ENOMEM;
			spin_lock_irqsave(&phba->devicelock, flags);
			list_add_tail(&device_data->listentry, &phba->luns);
		}
		device_data->rport_data = rport->dd_data;
		device_data->available = true;
		spin_unlock_irqrestore(&phba->devicelock, flags);
		sdev->hostdata = device_data;
	} else {
		sdev->hostdata = rport->dd_data;
	}
	sdev_cnt = atomic_inc_return(&phba->sdev_cnt);

	/* For SLI4, all IO buffers are pre-allocated */
	if (phba->sli_rev == LPFC_SLI_REV4)
		return 0;

	/* This code path is now ONLY for SLI3 adapters */

	/*
	 * Populate the cmds_per_lun count scsi_bufs into this host's globally
	 * available list of scsi buffers. Don't allocate more than the
	 * HBA limit conveyed to the midlayer via the host structure. The
	 * formula accounts for the lun_queue_depth + error handlers + 1
	 * extra. This list of scsi bufs exists for the lifetime of the driver.
	 */
	total = phba->total_scsi_bufs;
	num_to_alloc = vport->cfg_lun_queue_depth + 2;

	/* If the allocated buffers are enough, do nothing */
	if ((sdev_cnt * (vport->cfg_lun_queue_depth + 2)) < total)
		return 0;

	/* Allow some exchanges to be available always to complete discovery */
	if (total >= phba->cfg_hba_queue_depth - LPFC_DISC_IOCB_BUFF_COUNT) {
		lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP,
				 "0704 At limitation of %d preallocated "
				 "command buffers\n", total);
		return 0;
	/* Allow some exchanges to be available always to complete discovery */
	} else if (total + num_to_alloc >
		phba->cfg_hba_queue_depth - LPFC_DISC_IOCB_BUFF_COUNT) {
		lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP,
				 "0705 Allocation request of %d "
				 "command buffers will exceed max of %d.  "
				 "Reducing allocation request to %d.\n",
				 num_to_alloc, phba->cfg_hba_queue_depth,
				 (phba->cfg_hba_queue_depth - total));
		num_to_alloc = phba->cfg_hba_queue_depth - total;
	}
	num_allocated = lpfc_new_scsi_buf_s3(vport, num_to_alloc);
	if (num_to_alloc != num_allocated) {
		lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
				 "0708 Allocation request of %d "
				 "command buffers did not succeed.  "
				 "Allocated %d buffers.\n",
				 num_to_alloc, num_allocated);
	}
	if (num_allocated > 0)
		phba->total_scsi_bufs += num_allocated;
	return 0;
}

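/*
 * Worked example of the SLI3 sizing rule above (illustrative numbers
 * only; both depths are configurable): with cfg_lun_queue_depth = 30,
 * each new scsi_device asks for num_to_alloc = 30 + 2 = 32 buffers.
 * With sdev_cnt = 10 devices, nothing more is allocated while
 * total_scsi_bufs is already >= 10 * 32 = 320; otherwise the request is
 * clipped so the pool never crosses cfg_hba_queue_depth -
 * LPFC_DISC_IOCB_BUFF_COUNT, leaving exchanges free for discovery:
 *
 *	num_to_alloc = vport->cfg_lun_queue_depth + 2;
 *	if (sdev_cnt * num_to_alloc < phba->total_scsi_bufs)
 *		return 0;		// enough buffers already exist
 *	if (total + num_to_alloc >
 *	    phba->cfg_hba_queue_depth - LPFC_DISC_IOCB_BUFF_COUNT)
 *		num_to_alloc = phba->cfg_hba_queue_depth - total;
 */
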
/**
 * lpfc_slave_configure - scsi_host_template slave_configure entry point
 * @sdev: Pointer to scsi_device.
 *
 * This routine configures the following items:
 *   - Tag command queuing support for @sdev if supported.
 *   - Enable SLI polling for fcp ring if ENABLE_FCP_RING_POLLING flag is set.
 *
 * Return codes:
 *   0 - Success
 **/
static int
lpfc_slave_configure(struct scsi_device *sdev)
{
	struct lpfc_vport *vport = (struct lpfc_vport *) sdev->host->hostdata;
	struct lpfc_hba   *phba = vport->phba;

	scsi_change_queue_depth(sdev, vport->cfg_lun_queue_depth);

	if (phba->cfg_poll & ENABLE_FCP_RING_POLLING) {
		lpfc_sli_handle_fast_ring_event(phba,
			&phba->sli.sli3_ring[LPFC_FCP_RING], HA_R0RE_REQ);
		if (phba->cfg_poll & DISABLE_FCP_RING_INT)
			lpfc_poll_rearm_timer(phba);
	}

	return 0;
}

/**
 * lpfc_slave_destroy - slave_destroy entry point of SHT data structure
 * @sdev: Pointer to scsi_device.
 *
 * This routine sets the @sdev hostdata field to null.
 **/
static void
lpfc_slave_destroy(struct scsi_device *sdev)
{
	struct lpfc_vport *vport = (struct lpfc_vport *) sdev->host->hostdata;
	struct lpfc_hba   *phba = vport->phba;
	unsigned long flags;
	struct lpfc_device_data *device_data = sdev->hostdata;

	atomic_dec(&phba->sdev_cnt);
	if ((phba->cfg_fof) && (device_data)) {
		spin_lock_irqsave(&phba->devicelock, flags);
		device_data->available = false;
		if (!device_data->oas_enabled)
			lpfc_delete_device_data(phba, device_data);
		spin_unlock_irqrestore(&phba->devicelock, flags);
	}
	sdev->hostdata = NULL;
	return;
}

/**
 * lpfc_create_device_data - creates and initializes device data structure for OAS
 * @phba: Pointer to host bus adapter structure.
 * @vport_wwpn: Pointer to vport's wwpn information
 * @target_wwpn: Pointer to target's wwpn information
 * @lun: Lun on target
 * @pri: Priority
 * @atomic_create: Flag to indicate if memory should be allocated using the
 *		   GFP_ATOMIC flag or not.
 *
 * This routine creates a device data structure which will contain identifying
 * information for the device (host wwpn, target wwpn, lun), the state of OAS,
 * whether or not the corresponding lun is available to the system,
 * and a pointer to the rport data.
 *
 * Return codes:
 *   NULL - Error
 *   Pointer to lpfc_device_data - Success
 **/
struct lpfc_device_data*
lpfc_create_device_data(struct lpfc_hba *phba, struct lpfc_name *vport_wwpn,
			struct lpfc_name *target_wwpn, uint64_t lun,
			uint32_t pri, bool atomic_create)
{

	struct lpfc_device_data *lun_info;
	int memory_flags;

	if (unlikely(!phba) || !vport_wwpn || !target_wwpn ||
	    !(phba->cfg_fof))
		return NULL;

	/* Attempt to create the device data to contain lun info */

	if (atomic_create)
		memory_flags = GFP_ATOMIC;
	else
		memory_flags = GFP_KERNEL;
	lun_info = mempool_alloc(phba->device_data_mem_pool, memory_flags);
	if (!lun_info)
		return NULL;
	INIT_LIST_HEAD(&lun_info->listentry);
	lun_info->rport_data = NULL;
	memcpy(&lun_info->device_id.vport_wwpn, vport_wwpn,
	       sizeof(struct lpfc_name));
	memcpy(&lun_info->device_id.target_wwpn, target_wwpn,
	       sizeof(struct lpfc_name));
	lun_info->device_id.lun = lun;
	lun_info->oas_enabled = false;
	lun_info->priority = pri;
	lun_info->available = false;
	return lun_info;
}

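/*
 * Usage sketch, mirroring lpfc_slave_alloc() above: the structure is
 * created outside phba->devicelock (mempool_alloc may sleep when
 * atomic_create is false), then the lock is retaken to publish the
 * entry on phba->luns:
 *
 *	device_data = lpfc_create_device_data(phba, &vport->fc_portname,
 *					      &target_wwpn, sdev->lun,
 *					      phba->cfg_XLanePriority, true);
 *	if (!device_data)
 *		return -ENOMEM;
 *	spin_lock_irqsave(&phba->devicelock, flags);
 *	list_add_tail(&device_data->listentry, &phba->luns);
 *	spin_unlock_irqrestore(&phba->devicelock, flags);
 */
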
/**
 * lpfc_delete_device_data - frees a device data structure for OAS
 * @phba: Pointer to host bus adapter structure.
 * @lun_info: Pointer to device data structure to free.
 *
 * This routine frees the previously allocated device data structure
 * passed in.
 *
 **/
void
lpfc_delete_device_data(struct lpfc_hba *phba,
			struct lpfc_device_data *lun_info)
{

	if (unlikely(!phba) || !lun_info ||
	    !(phba->cfg_fof))
		return;

	if (!list_empty(&lun_info->listentry))
		list_del(&lun_info->listentry);
	mempool_free(lun_info, phba->device_data_mem_pool);
	return;
}

/**
 * __lpfc_get_device_data - returns the device data for the specified lun
 * @phba: Pointer to host bus adapter structure.
 * @list: Pointer to list to search.
 * @vport_wwpn: Pointer to vport's wwpn information
 * @target_wwpn: Pointer to target's wwpn information
 * @lun: Lun on target
 *
 * This routine searches the list passed for the specified lun's device data.
 * This function does not hold locks; it is the responsibility of the caller
 * to ensure the proper lock is held before calling the function.
 *
 * Return codes:
 *   NULL - Error
 *   Pointer to lpfc_device_data - Success
 **/
struct lpfc_device_data*
__lpfc_get_device_data(struct lpfc_hba *phba, struct list_head *list,
		       struct lpfc_name *vport_wwpn,
		       struct lpfc_name *target_wwpn, uint64_t lun)
{

	struct lpfc_device_data *lun_info;

	if (unlikely(!phba) || !list || !vport_wwpn || !target_wwpn ||
	    !phba->cfg_fof)
		return NULL;

	/* Check to see if the lun is already enabled for OAS. */

	list_for_each_entry(lun_info, list, listentry) {
		if ((memcmp(&lun_info->device_id.vport_wwpn, vport_wwpn,
			    sizeof(struct lpfc_name)) == 0) &&
		    (memcmp(&lun_info->device_id.target_wwpn, target_wwpn,
			    sizeof(struct lpfc_name)) == 0) &&
		    (lun_info->device_id.lun == lun))
			return lun_info;
	}

	return NULL;
}

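/*
 * Locking sketch: the double-underscore prefix marks this as the
 * unlocked variant, so callers wrap it in phba->devicelock, as
 * lpfc_slave_alloc() and lpfc_enable_oas_lun() do:
 *
 *	spin_lock_irqsave(&phba->devicelock, flags);
 *	lun_info = __lpfc_get_device_data(phba, &phba->luns,
 *					  &vport->fc_portname,
 *					  &target_wwpn, sdev->lun);
 *	if (lun_info)
 *		... read or update the entry while the lock is held ...
 *	spin_unlock_irqrestore(&phba->devicelock, flags);
 */
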
/**
 * lpfc_find_next_oas_lun - searches for the next oas lun
 * @phba: Pointer to host bus adapter structure.
 * @vport_wwpn: Pointer to vport's wwpn information
 * @target_wwpn: Pointer to target's wwpn information
 * @starting_lun: Pointer to the lun to start searching for
 * @found_vport_wwpn: Pointer to the found lun's vport wwpn information
 * @found_target_wwpn: Pointer to the found lun's target wwpn information
 * @found_lun: Pointer to the found lun.
 * @found_lun_status: Pointer to status of the found lun.
 * @found_lun_pri: Pointer to priority of the found lun.
 *
 * This routine searches the luns list for the specified lun
 * or the first lun for the vport/target. If the vport wwpn contains
 * a zero value then a specific vport is not specified. In this case
 * any vport which contains the lun will be considered a match. If the
 * target wwpn contains a zero value then a specific target is not specified.
 * In this case any target which contains the lun will be considered a
 * match. If the lun is found, the lun, vport wwpn, target wwpn and lun status
 * are returned. The function will also return the next lun if available.
 * If the next lun is not found, the starting_lun parameter will be set to
 * NO_MORE_OAS_LUN.
 *
 * Return codes:
 *   true - A matching lun was found
 *   false - No matching lun was found
 **/
bool
lpfc_find_next_oas_lun(struct lpfc_hba *phba, struct lpfc_name *vport_wwpn,
		       struct lpfc_name *target_wwpn, uint64_t *starting_lun,
		       struct lpfc_name *found_vport_wwpn,
		       struct lpfc_name *found_target_wwpn,
		       uint64_t *found_lun,
		       uint32_t *found_lun_status,
		       uint32_t *found_lun_pri)
{

	unsigned long flags;
	struct lpfc_device_data *lun_info;
	struct lpfc_device_id *device_id;
	uint64_t lun;
	bool found = false;

	if (unlikely(!phba) || !vport_wwpn || !target_wwpn ||
	    !starting_lun || !found_vport_wwpn ||
	    !found_target_wwpn || !found_lun || !found_lun_status ||
	    (*starting_lun == NO_MORE_OAS_LUN) ||
	    !phba->cfg_fof)
		return false;

	lun = *starting_lun;
	*found_lun = NO_MORE_OAS_LUN;
	*starting_lun = NO_MORE_OAS_LUN;

	/* Search for the lun, or the lun closest in value */

	spin_lock_irqsave(&phba->devicelock, flags);
	list_for_each_entry(lun_info, &phba->luns, listentry) {
		if (((wwn_to_u64(vport_wwpn->u.wwn) == 0) ||
		     (memcmp(&lun_info->device_id.vport_wwpn, vport_wwpn,
			     sizeof(struct lpfc_name)) == 0)) &&
		    ((wwn_to_u64(target_wwpn->u.wwn) == 0) ||
		     (memcmp(&lun_info->device_id.target_wwpn, target_wwpn,
			     sizeof(struct lpfc_name)) == 0)) &&
		    (lun_info->oas_enabled)) {
			device_id = &lun_info->device_id;
			if ((!found) &&
			    ((lun == FIND_FIRST_OAS_LUN) ||
			     (device_id->lun == lun))) {
				*found_lun = device_id->lun;
				memcpy(found_vport_wwpn,
				       &device_id->vport_wwpn,
				       sizeof(struct lpfc_name));
				memcpy(found_target_wwpn,
				       &device_id->target_wwpn,
				       sizeof(struct lpfc_name));
				if (lun_info->available)
					*found_lun_status =
						OAS_LUN_STATUS_EXISTS;
				else
					*found_lun_status = 0;
				*found_lun_pri = lun_info->priority;
				if (phba->cfg_oas_flags & OAS_FIND_ANY_VPORT)
					memset(vport_wwpn, 0x0,
					       sizeof(struct lpfc_name));
				if (phba->cfg_oas_flags & OAS_FIND_ANY_TARGET)
					memset(target_wwpn, 0x0,
					       sizeof(struct lpfc_name));
				found = true;
			} else if (found) {
				*starting_lun = device_id->lun;
				memcpy(vport_wwpn, &device_id->vport_wwpn,
				       sizeof(struct lpfc_name));
				memcpy(target_wwpn, &device_id->target_wwpn,
				       sizeof(struct lpfc_name));
				break;
			}
		}
	}
	spin_unlock_irqrestore(&phba->devicelock, flags);
	return found;
}

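/*
 * Iteration sketch (assumed caller shape, derived from the kernel-doc
 * above): seed *starting_lun with FIND_FIRST_OAS_LUN and loop until it
 * reads back NO_MORE_OAS_LUN; a zeroed vport or target wwpn acts as a
 * wildcard:
 *
 *	uint64_t lun = FIND_FIRST_OAS_LUN;
 *
 *	while (lun != NO_MORE_OAS_LUN &&
 *	       lpfc_find_next_oas_lun(phba, &vport_wwpn, &target_wwpn, &lun,
 *				      &found_vport_wwpn, &found_target_wwpn,
 *				      &found_lun, &found_lun_status,
 *				      &found_lun_pri))
 *		... consume found_lun; lun now holds the next candidate ...
 */
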
/**
 * lpfc_enable_oas_lun - enables a lun for OAS operations
 * @phba: Pointer to host bus adapter structure.
 * @vport_wwpn: Pointer to vport's wwpn information
 * @target_wwpn: Pointer to target's wwpn information
 * @lun: Lun
 * @pri: Priority
 *
 * This routine enables a lun for oas operations. The routine does so as
 * follows:
 *
 *   1) Checks to see if the device data for the lun has been created.
 *   2) If found, sets the OAS enabled flag if not set and returns.
 *   3) Otherwise, creates a device data structure.
 *   4) If successfully created, indicates the device data is for an OAS lun,
 *      indicates the lun is not available and adds it to the list of luns.
 *
 * Return codes:
 *   false - Error
 *   true - Success
 **/
bool
lpfc_enable_oas_lun(struct lpfc_hba *phba, struct lpfc_name *vport_wwpn,
		    struct lpfc_name *target_wwpn, uint64_t lun, uint8_t pri)
{

	struct lpfc_device_data *lun_info;
	unsigned long flags;

	if (unlikely(!phba) || !vport_wwpn || !target_wwpn ||
	    !phba->cfg_fof)
		return false;

	spin_lock_irqsave(&phba->devicelock, flags);

	/* Check to see if the device data for the lun has been created */
	lun_info = __lpfc_get_device_data(phba, &phba->luns, vport_wwpn,
					  target_wwpn, lun);
	if (lun_info) {
		if (!lun_info->oas_enabled)
			lun_info->oas_enabled = true;
		lun_info->priority = pri;
		spin_unlock_irqrestore(&phba->devicelock, flags);
		return true;
	}

	/* Create a lun info structure and add it to the list of luns */
	lun_info = lpfc_create_device_data(phba, vport_wwpn, target_wwpn, lun,
					   pri, true);
	if (lun_info) {
		lun_info->oas_enabled = true;
		lun_info->priority = pri;
		lun_info->available = false;
		list_add_tail(&lun_info->listentry, &phba->luns);
		spin_unlock_irqrestore(&phba->devicelock, flags);
		return true;
	}
	spin_unlock_irqrestore(&phba->devicelock, flags);
	return false;
}

/**
 * lpfc_disable_oas_lun - disables a lun for OAS operations
 * @phba: Pointer to host bus adapter structure.
 * @vport_wwpn: Pointer to vport's wwpn information
 * @target_wwpn: Pointer to target's wwpn information
 * @lun: Lun
 * @pri: Priority
 *
 * This routine disables a lun for oas operations. The routine does so as
 * follows:
 *
 *   1) Checks to see if the device data for the lun is created.
 *   2) If present, clears the flag indicating this lun is for OAS.
 *   3) If the lun is not available to the system, the device data is
 *      freed.
 *
 * Return codes:
 *   false - Error
 *   true - Success
 **/
bool
lpfc_disable_oas_lun(struct lpfc_hba *phba, struct lpfc_name *vport_wwpn,
		     struct lpfc_name *target_wwpn, uint64_t lun, uint8_t pri)
{

	struct lpfc_device_data *lun_info;
	unsigned long flags;

	if (unlikely(!phba) || !vport_wwpn || !target_wwpn ||
	    !phba->cfg_fof)
		return false;

	spin_lock_irqsave(&phba->devicelock, flags);

	/* Check to see if the lun is available. */
	lun_info = __lpfc_get_device_data(phba,
					  &phba->luns, vport_wwpn,
					  target_wwpn, lun);
	if (lun_info) {
		lun_info->oas_enabled = false;
		lun_info->priority = pri;
		if (!lun_info->available)
			lpfc_delete_device_data(phba, lun_info);
		spin_unlock_irqrestore(&phba->devicelock, flags);
		return true;
	}

	spin_unlock_irqrestore(&phba->devicelock, flags);
	return false;
}

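/*
 * Usage sketch for the pair above (assumed caller shape; in this driver
 * the calls originate from the OAS configuration path): enable marks or
 * creates the lun entry, disable clears the flag and frees the entry
 * once no scsi_device still references it.
 *
 *	if (!lpfc_enable_oas_lun(phba, &vport_wwpn, &target_wwpn,
 *				 lun, pri))
 *		... report failure to the caller ...
 *
 *	if (!lpfc_disable_oas_lun(phba, &vport_wwpn, &target_wwpn,
 *				  lun, pri))
 *		... lun was not found, or OAS is not configured ...
 */
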
static int
lpfc_no_command(struct Scsi_Host *shost, struct scsi_cmnd *cmnd)
{
	return SCSI_MLQUEUE_HOST_BUSY;
}

static int
lpfc_no_handler(struct scsi_cmnd *cmnd)
{
	return FAILED;
}

static int
lpfc_no_slave(struct scsi_device *sdev)
{
	return -ENODEV;
}

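/*
 * These stubs back lpfc_template_nvme below. A port configured for NVMe
 * only still registers a SCSI host, but every SCSI entry point either
 * pushes back or fails outright: SCSI_MLQUEUE_HOST_BUSY makes the
 * midlayer requeue and retry the command later, FAILED rejects error
 * recovery, and -ENODEV from slave_alloc keeps LUN scanning from
 * attaching any devices.
 */
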
struct scsi_host_template lpfc_template_nvme = {
	.module			= THIS_MODULE,
	.name			= LPFC_DRIVER_NAME,
	.proc_name		= LPFC_DRIVER_NAME,
	.info			= lpfc_info,
	.queuecommand		= lpfc_no_command,
	.eh_abort_handler	= lpfc_no_handler,
	.eh_device_reset_handler = lpfc_no_handler,
	.eh_target_reset_handler = lpfc_no_handler,
	.eh_bus_reset_handler	= lpfc_no_handler,
	.eh_host_reset_handler	= lpfc_no_handler,
	.slave_alloc		= lpfc_no_slave,
	.slave_configure	= lpfc_no_slave,
	.scan_finished		= lpfc_scan_finished,
	.this_id		= -1,
	.sg_tablesize		= 1,
	.cmd_per_lun		= 1,
	.shost_attrs		= lpfc_hba_attrs,
	.max_sectors		= 0xFFFFFFFF,
	.vendor_id		= LPFC_NL_VENDOR_ID,
	.track_queue_depth	= 0,
};

struct scsi_host_template lpfc_template = {
	.module			= THIS_MODULE,
	.name			= LPFC_DRIVER_NAME,
	.proc_name		= LPFC_DRIVER_NAME,
	.info			= lpfc_info,
	.queuecommand		= lpfc_queuecommand,
	.eh_timed_out		= fc_eh_timed_out,
	.eh_should_retry_cmd	= fc_eh_should_retry_cmd,
	.eh_abort_handler	= lpfc_abort_handler,
	.eh_device_reset_handler = lpfc_device_reset_handler,
	.eh_target_reset_handler = lpfc_target_reset_handler,
	.eh_bus_reset_handler	= lpfc_bus_reset_handler,
	.eh_host_reset_handler	= lpfc_host_reset_handler,
	.slave_alloc		= lpfc_slave_alloc,
	.slave_configure	= lpfc_slave_configure,
	.slave_destroy		= lpfc_slave_destroy,
	.scan_finished		= lpfc_scan_finished,
	.this_id		= -1,
	.sg_tablesize		= LPFC_DEFAULT_SG_SEG_CNT,
	.cmd_per_lun		= LPFC_CMD_PER_LUN,
	.shost_attrs		= lpfc_hba_attrs,
	.max_sectors		= 0xFFFFFFFF,
	.vendor_id		= LPFC_NL_VENDOR_ID,
	.change_queue_depth	= scsi_change_queue_depth,
	.track_queue_depth	= 1,
};
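
/*
 * A minimal sketch of how a template like lpfc_template is consumed
 * (assumed shape, following the standard scsi_host_alloc() /
 * scsi_add_host_with_dma() contract; the driver's actual call sites
 * live in lpfc_init.c):
 *
 *	struct Scsi_Host *shost;
 *
 *	shost = scsi_host_alloc(&lpfc_template, sizeof(struct lpfc_vport));
 *	if (!shost)
 *		return NULL;
 *	...
 *	error = scsi_add_host_with_dma(shost, dev, &phba->pcidev->dev);
 */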