/*******************************************************************
 * This file is part of the Emulex Linux Device Driver for         *
 * Fibre Channel Host Bus Adapters.                                *
 * Copyright (C) 2004-2012 Emulex.  All rights reserved.           *
 * EMULEX and SLI are trademarks of Emulex.                        *
 * www.emulex.com                                                  *
 * Portions Copyright (C) 2004-2005 Christoph Hellwig              *
 *                                                                 *
 * This program is free software; you can redistribute it and/or   *
 * modify it under the terms of version 2 of the GNU General       *
 * Public License as published by the Free Software Foundation.    *
 * This program is distributed in the hope that it will be useful. *
 * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND          *
 * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY,  *
 * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE      *
 * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
 * TO BE LEGALLY INVALID.  See the GNU General Public License for  *
 * more details, a copy of which can be found in the file COPYING  *
 * included with this package.                                     *
 *******************************************************************/
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/export.h>
#include <linux/delay.h>
#include <asm/unaligned.h>

#include <scsi/scsi.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_eh.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_tcq.h>
#include <scsi/scsi_transport_fc.h>

#include "lpfc_version.h"
#include "lpfc_hw4.h"
#include "lpfc_hw.h"
#include "lpfc_sli.h"
#include "lpfc_sli4.h"
#include "lpfc_nl.h"
#include "lpfc_disc.h"
#include "lpfc.h"
#include "lpfc_scsi.h"
#include "lpfc_logmsg.h"
#include "lpfc_crtn.h"
#include "lpfc_vport.h"

#define LPFC_RESET_WAIT  2
#define LPFC_ABORT_WAIT  2

int _dump_buf_done;

static char *dif_op_str[] = {
	"PROT_NORMAL",
	"PROT_READ_INSERT",
	"PROT_WRITE_STRIP",
	"PROT_READ_STRIP",
	"PROT_WRITE_INSERT",
	"PROT_READ_PASS",
	"PROT_WRITE_PASS",
};

static char *dif_grd_str[] = {
	"NO_GUARD",
	"DIF_CRC",
	"DIX_IP",
};

struct scsi_dif_tuple {
	__be16 guard_tag;	/* Checksum */
	__be16 app_tag;		/* Opaque storage */
	__be32 ref_tag;		/* Target LBA or indirect LBA */
};

static void
lpfc_release_scsi_buf_s4(struct lpfc_hba *phba, struct lpfc_scsi_buf *psb);
static void
lpfc_release_scsi_buf_s3(struct lpfc_hba *phba, struct lpfc_scsi_buf *psb);

static void
lpfc_debug_save_data(struct lpfc_hba *phba, struct scsi_cmnd *cmnd)
{
	void *src, *dst;
	struct scatterlist *sgde = scsi_sglist(cmnd);

	if (!_dump_buf_data) {
		lpfc_printf_log(phba, KERN_ERR, LOG_BG,
			"9050 BLKGRD: ERROR %s _dump_buf_data is NULL\n",
				__func__);
		return;
	}

	if (!sgde) {
		lpfc_printf_log(phba, KERN_ERR, LOG_BG,
			"9051 BLKGRD: ERROR: data scatterlist is null\n");
		return;
	}

	dst = (void *) _dump_buf_data;
	while (sgde) {
		src = sg_virt(sgde);
		memcpy(dst, src, sgde->length);
		dst += sgde->length;
		sgde = sg_next(sgde);
	}
}

static void
lpfc_debug_save_dif(struct lpfc_hba *phba, struct scsi_cmnd *cmnd)
{
	void *src, *dst;
	struct scatterlist *sgde = scsi_prot_sglist(cmnd);

	if (!_dump_buf_dif) {
		lpfc_printf_log(phba, KERN_ERR, LOG_BG,
			"9052 BLKGRD: ERROR %s _dump_buf_dif is NULL\n",
				__func__);
		return;
	}

	if (!sgde) {
		lpfc_printf_log(phba, KERN_ERR, LOG_BG,
			"9053 BLKGRD: ERROR: prot scatterlist is null\n");
		return;
	}

	dst = _dump_buf_dif;
	while (sgde) {
		src = sg_virt(sgde);
		memcpy(dst, src, sgde->length);
		dst += sgde->length;
		sgde = sg_next(sgde);
	}
}

/**
 * lpfc_sli4_set_rsp_sgl_last - Set the last bit in the response sge.
 * @phba: Pointer to HBA object.
 * @lpfc_cmd: lpfc scsi command object pointer.
 *
 * This function is called from the lpfc_prep_task_mgmt_cmd function to
 * set the last bit in the response sge entry.
 **/
static void
lpfc_sli4_set_rsp_sgl_last(struct lpfc_hba *phba,
			   struct lpfc_scsi_buf *lpfc_cmd)
{
	struct sli4_sge *sgl = (struct sli4_sge *)lpfc_cmd->fcp_bpl;
	if (sgl) {
		sgl += 1;
		sgl->word2 = le32_to_cpu(sgl->word2);
		bf_set(lpfc_sli4_sge_last, sgl, 1);
		sgl->word2 = cpu_to_le32(sgl->word2);
	}
}

/**
 * lpfc_update_stats - Update statistical data for the command completion
 * @phba: Pointer to HBA object.
 * @lpfc_cmd: lpfc scsi command object pointer.
 *
 * This function is called on command completion and updates the
 * statistical data for that completion.
 **/
static void
lpfc_update_stats(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd)
{
	struct lpfc_rport_data *rdata = lpfc_cmd->rdata;
	struct lpfc_nodelist *pnode = rdata->pnode;
	struct scsi_cmnd *cmd = lpfc_cmd->pCmd;
	unsigned long flags;
	struct Scsi_Host *shost = cmd->device->host;
	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
	unsigned long latency;
	int i;

	if (cmd->result)
		return;

	latency = jiffies_to_msecs((long)jiffies - (long)lpfc_cmd->start_time);

	spin_lock_irqsave(shost->host_lock, flags);
	if (!vport->stat_data_enabled ||
	    vport->stat_data_blocked ||
	    !pnode ||
	    !pnode->lat_data ||
	    (phba->bucket_type == LPFC_NO_BUCKET)) {
		spin_unlock_irqrestore(shost->host_lock, flags);
		return;
	}

	if (phba->bucket_type == LPFC_LINEAR_BUCKET) {
		i = (latency + phba->bucket_step - 1 - phba->bucket_base)/
			phba->bucket_step;
		/* check array subscript bounds */
		if (i < 0)
			i = 0;
		else if (i >= LPFC_MAX_BUCKET_COUNT)
			i = LPFC_MAX_BUCKET_COUNT - 1;
	} else {
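		/*
		 * Power-of-2 buckets: bucket i holds latencies up to
		 * bucket_base + (2^i * bucket_step); the last bucket
		 * catches everything beyond the largest boundary.
		 */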
		for (i = 0; i < LPFC_MAX_BUCKET_COUNT-1; i++)
			if (latency <= (phba->bucket_base +
				((1<<i)*phba->bucket_step)))
				break;
	}

	pnode->lat_data[i].cmd_count++;
	spin_unlock_irqrestore(shost->host_lock, flags);
}

/**
 * lpfc_send_sdev_queuedepth_change_event - Posts a queuedepth change event
 * @phba: Pointer to HBA context object.
 * @vport: Pointer to vport object.
 * @ndlp: Pointer to FC node associated with the target.
 * @lun: Lun number of the scsi device.
 * @old_val: Old value of the queue depth.
 * @new_val: New value of the queue depth.
 *
 * This function sends an event to the mgmt application indicating
 * there is a change in the scsi device queue depth.
 **/
static void
lpfc_send_sdev_queuedepth_change_event(struct lpfc_hba *phba,
		struct lpfc_vport *vport,
		struct lpfc_nodelist *ndlp,
		uint32_t lun,
		uint32_t old_val,
		uint32_t new_val)
{
	struct lpfc_fast_path_event *fast_path_evt;
	unsigned long flags;

	fast_path_evt = lpfc_alloc_fast_evt(phba);
	if (!fast_path_evt)
		return;

	fast_path_evt->un.queue_depth_evt.scsi_event.event_type =
		FC_REG_SCSI_EVENT;
	fast_path_evt->un.queue_depth_evt.scsi_event.subcategory =
		LPFC_EVENT_VARQUEDEPTH;

	/* Report all luns with change in queue depth */
	fast_path_evt->un.queue_depth_evt.scsi_event.lun = lun;
	if (ndlp && NLP_CHK_NODE_ACT(ndlp)) {
		memcpy(&fast_path_evt->un.queue_depth_evt.scsi_event.wwpn,
			&ndlp->nlp_portname, sizeof(struct lpfc_name));
		memcpy(&fast_path_evt->un.queue_depth_evt.scsi_event.wwnn,
			&ndlp->nlp_nodename, sizeof(struct lpfc_name));
	}

	fast_path_evt->un.queue_depth_evt.oldval = old_val;
	fast_path_evt->un.queue_depth_evt.newval = new_val;
	fast_path_evt->vport = vport;

	fast_path_evt->work_evt.evt = LPFC_EVT_FASTPATH_MGMT_EVT;
	spin_lock_irqsave(&phba->hbalock, flags);
	list_add_tail(&fast_path_evt->work_evt.evt_listp, &phba->work_list);
	spin_unlock_irqrestore(&phba->hbalock, flags);
	lpfc_worker_wake_up(phba);

	return;
}

/**
 * lpfc_change_queue_depth - Alter scsi device queue depth
 * @sdev: Pointer to the scsi device on which to change the queue depth.
 * @qdepth: New queue depth to set the sdev to.
 * @reason: The reason for the queue depth change.
 *
 * This function is called by the midlayer and the LLD to alter the queue
 * depth for a scsi device. This function sets the queue depth to the new
 * value and sends an event out to log the queue depth change.
 **/
int
lpfc_change_queue_depth(struct scsi_device *sdev, int qdepth, int reason)
{
	struct lpfc_vport *vport = (struct lpfc_vport *) sdev->host->hostdata;
	struct lpfc_hba *phba = vport->phba;
	struct lpfc_rport_data *rdata;
	unsigned long new_queue_depth, old_queue_depth;

	old_queue_depth = sdev->queue_depth;
	scsi_adjust_queue_depth(sdev, scsi_get_tag_type(sdev), qdepth);
	new_queue_depth = sdev->queue_depth;
	rdata = sdev->hostdata;
	if (rdata)
		lpfc_send_sdev_queuedepth_change_event(phba, vport,
						       rdata->pnode, sdev->lun,
						       old_queue_depth,
						       new_queue_depth);
	return sdev->queue_depth;
}

/**
 * lpfc_rampdown_queue_depth - Post RAMP_DOWN_QUEUE event to worker thread
 * @phba: The Hba for which this call is being executed.
 *
 * This routine is called when there is a resource error in the driver or
 * firmware. It posts a WORKER_RAMP_DOWN_QUEUE event for @phba, at most one
 * event each second, and wakes up the worker thread of @phba to process
 * the WORKER_RAMP_DOWN_QUEUE event.
 *
 * This routine should be called with no lock held.
 **/
void
lpfc_rampdown_queue_depth(struct lpfc_hba *phba)
{
	unsigned long flags;
	uint32_t evt_posted;

	spin_lock_irqsave(&phba->hbalock, flags);
	atomic_inc(&phba->num_rsrc_err);
	phba->last_rsrc_error_time = jiffies;

	if ((phba->last_ramp_down_time + QUEUE_RAMP_DOWN_INTERVAL) > jiffies) {
		spin_unlock_irqrestore(&phba->hbalock, flags);
		return;
	}

	phba->last_ramp_down_time = jiffies;

	spin_unlock_irqrestore(&phba->hbalock, flags);

	spin_lock_irqsave(&phba->pport->work_port_lock, flags);
	evt_posted = phba->pport->work_port_events & WORKER_RAMP_DOWN_QUEUE;
	if (!evt_posted)
		phba->pport->work_port_events |= WORKER_RAMP_DOWN_QUEUE;
	spin_unlock_irqrestore(&phba->pport->work_port_lock, flags);

	if (!evt_posted)
		lpfc_worker_wake_up(phba);
	return;
}

/**
 * lpfc_rampup_queue_depth - Post RAMP_UP_QUEUE event for worker thread
 * @vport: The virtual port for which this call is being executed.
 * @queue_depth: The current queue depth of the scsi device.
 *
 * This routine posts a WORKER_RAMP_UP_QUEUE event for the @vport's phba, at
 * most one event every 5 minutes after last_ramp_up_time or
 * last_rsrc_error_time, and wakes up the worker thread to process the
 * WORKER_RAMP_UP_QUEUE event.
 *
 * This routine should be called with no lock held.
 **/
static inline void
lpfc_rampup_queue_depth(struct lpfc_vport *vport,
			uint32_t queue_depth)
{
	unsigned long flags;
	struct lpfc_hba *phba = vport->phba;
	uint32_t evt_posted;
	atomic_inc(&phba->num_cmd_success);

	if (vport->cfg_lun_queue_depth <= queue_depth)
		return;
	spin_lock_irqsave(&phba->hbalock, flags);
	if (time_before(jiffies,
			phba->last_ramp_up_time + QUEUE_RAMP_UP_INTERVAL) ||
	    time_before(jiffies,
			phba->last_rsrc_error_time + QUEUE_RAMP_UP_INTERVAL)) {
		spin_unlock_irqrestore(&phba->hbalock, flags);
		return;
	}
	phba->last_ramp_up_time = jiffies;
	spin_unlock_irqrestore(&phba->hbalock, flags);

	spin_lock_irqsave(&phba->pport->work_port_lock, flags);
	evt_posted = phba->pport->work_port_events & WORKER_RAMP_UP_QUEUE;
	if (!evt_posted)
		phba->pport->work_port_events |= WORKER_RAMP_UP_QUEUE;
	spin_unlock_irqrestore(&phba->pport->work_port_lock, flags);

	if (!evt_posted)
		lpfc_worker_wake_up(phba);
	return;
}

/**
 * lpfc_ramp_down_queue_handler - WORKER_RAMP_DOWN_QUEUE event handler
 * @phba: The Hba for which this call is being executed.
 *
 * This routine is called to process the WORKER_RAMP_DOWN_QUEUE event for the
 * worker thread. It reduces the queue depth for all scsi devices on each
 * vport associated with @phba.
 **/
void
lpfc_ramp_down_queue_handler(struct lpfc_hba *phba)
{
	struct lpfc_vport **vports;
	struct Scsi_Host *shost;
	struct scsi_device *sdev;
	unsigned long new_queue_depth;
	unsigned long num_rsrc_err, num_cmd_success;
	int i;

	num_rsrc_err = atomic_read(&phba->num_rsrc_err);
	num_cmd_success = atomic_read(&phba->num_cmd_success);

	vports = lpfc_create_vport_work_array(phba);
	if (vports != NULL)
		for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
			shost = lpfc_shost_from_vport(vports[i]);
			shost_for_each_device(sdev, shost) {
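				/*
				 * Scale the depth down in proportion to the
				 * resource-error rate: the depth is reduced
				 * by depth * num_rsrc_err / (num_rsrc_err +
				 * num_cmd_success), and by at least one.
				 */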
				new_queue_depth =
					sdev->queue_depth * num_rsrc_err /
					(num_rsrc_err + num_cmd_success);
				if (!new_queue_depth)
					new_queue_depth = sdev->queue_depth - 1;
				else
					new_queue_depth = sdev->queue_depth -
								new_queue_depth;
				lpfc_change_queue_depth(sdev, new_queue_depth,
							SCSI_QDEPTH_DEFAULT);
			}
		}
	lpfc_destroy_vport_work_array(phba, vports);
	atomic_set(&phba->num_rsrc_err, 0);
	atomic_set(&phba->num_cmd_success, 0);
}

/**
 * lpfc_ramp_up_queue_handler - WORKER_RAMP_UP_QUEUE event handler
 * @phba: The Hba for which this call is being executed.
 *
 * This routine is called to process the WORKER_RAMP_UP_QUEUE event for the
 * worker thread. It increases the queue depth for all scsi devices on each
 * vport associated with @phba by 1. This routine also sets @phba
 * num_rsrc_err and num_cmd_success to zero.
 **/
void
lpfc_ramp_up_queue_handler(struct lpfc_hba *phba)
{
	struct lpfc_vport **vports;
	struct Scsi_Host *shost;
	struct scsi_device *sdev;
	int i;

	vports = lpfc_create_vport_work_array(phba);
	if (vports != NULL)
		for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
			shost = lpfc_shost_from_vport(vports[i]);
			shost_for_each_device(sdev, shost) {
				if (vports[i]->cfg_lun_queue_depth <=
				    sdev->queue_depth)
					continue;
				lpfc_change_queue_depth(sdev,
							sdev->queue_depth+1,
							SCSI_QDEPTH_RAMP_UP);
			}
		}
	lpfc_destroy_vport_work_array(phba, vports);
	atomic_set(&phba->num_rsrc_err, 0);
	atomic_set(&phba->num_cmd_success, 0);
}

/**
 * lpfc_scsi_dev_block - set all scsi hosts to block state
 * @phba: Pointer to HBA context object.
 *
 * This function walks the vport list and sets each SCSI host to block state
 * by invoking the fc_remote_port_delete() routine. This function is invoked
 * with EEH when a device's PCI slot has been permanently disabled.
 **/
void
lpfc_scsi_dev_block(struct lpfc_hba *phba)
{
	struct lpfc_vport **vports;
	struct Scsi_Host *shost;
	struct scsi_device *sdev;
	struct fc_rport *rport;
	int i;

	vports = lpfc_create_vport_work_array(phba);
	if (vports != NULL)
		for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
			shost = lpfc_shost_from_vport(vports[i]);
			shost_for_each_device(sdev, shost) {
				rport = starget_to_rport(scsi_target(sdev));
				fc_remote_port_delete(rport);
			}
		}
	lpfc_destroy_vport_work_array(phba, vports);
}

/**
 * lpfc_new_scsi_buf_s3 - Scsi buffer allocator for HBA with SLI3 IF spec
 * @vport: The virtual port for which this call is being executed.
 * @num_to_alloc: The requested number of buffers to allocate.
 *
 * This routine allocates scsi buffers for a device with SLI-3 interface spec.
 * The scsi buffer contains all the necessary information needed to initiate
 * a SCSI I/O. The non-DMAable buffer region contains information to build
 * the IOCB. The DMAable region contains memory for the FCP CMND, FCP RSP,
 * and the initial BPL. In addition to allocating memory, the FCP CMND and
 * FCP RSP BDEs are setup in the BPL and the BPL BDE is setup in the IOCB.
 *
 * Return codes:
 *   int - number of scsi buffers that were allocated.
 *   0 = failure, less than num_to_alloc is a partial failure.
 **/
static int
lpfc_new_scsi_buf_s3(struct lpfc_vport *vport, int num_to_alloc)
{
	struct lpfc_hba *phba = vport->phba;
	struct lpfc_scsi_buf *psb;
	struct ulp_bde64 *bpl;
	IOCB_t *iocb;
	dma_addr_t pdma_phys_fcp_cmd;
	dma_addr_t pdma_phys_fcp_rsp;
	dma_addr_t pdma_phys_bpl;
	uint16_t iotag;
	int bcnt;

	for (bcnt = 0; bcnt < num_to_alloc; bcnt++) {
		psb = kzalloc(sizeof(struct lpfc_scsi_buf), GFP_KERNEL);
		if (!psb)
			break;

		/*
		 * Get memory from the pci pool to map the virt space to pci
		 * bus space for an I/O.  The DMA buffer includes space for the
		 * struct fcp_cmnd, struct fcp_rsp and the number of bde's
		 * necessary to support the sg_tablesize.
		 */
		psb->data = pci_pool_alloc(phba->lpfc_scsi_dma_buf_pool,
					   GFP_KERNEL, &psb->dma_handle);
		if (!psb->data) {
			kfree(psb);
			break;
		}

		/* Initialize virtual ptrs to dma_buf region. */
		memset(psb->data, 0, phba->cfg_sg_dma_buf_size);

		/* Allocate iotag for psb->cur_iocbq. */
		iotag = lpfc_sli_next_iotag(phba, &psb->cur_iocbq);
		if (iotag == 0) {
			pci_pool_free(phba->lpfc_scsi_dma_buf_pool,
				      psb->data, psb->dma_handle);
			kfree(psb);
			break;
		}
		psb->cur_iocbq.iocb_flag |= LPFC_IO_FCP;

		psb->fcp_cmnd = psb->data;
		psb->fcp_rsp = psb->data + sizeof(struct fcp_cmnd);
		psb->fcp_bpl = psb->data + sizeof(struct fcp_cmnd) +
			sizeof(struct fcp_rsp);

		/* Initialize local short-hand pointers. */
		bpl = psb->fcp_bpl;
		pdma_phys_fcp_cmd = psb->dma_handle;
		pdma_phys_fcp_rsp = psb->dma_handle + sizeof(struct fcp_cmnd);
		pdma_phys_bpl = psb->dma_handle + sizeof(struct fcp_cmnd) +
			sizeof(struct fcp_rsp);

		/*
		 * The first two bdes are the FCP_CMD and FCP_RSP. The balance
		 * are sg list bdes.  Initialize the first two and leave the
		 * rest for queuecommand.
		 */
		bpl[0].addrHigh = le32_to_cpu(putPaddrHigh(pdma_phys_fcp_cmd));
		bpl[0].addrLow = le32_to_cpu(putPaddrLow(pdma_phys_fcp_cmd));
		bpl[0].tus.f.bdeSize = sizeof(struct fcp_cmnd);
		bpl[0].tus.f.bdeFlags = BUFF_TYPE_BDE_64;
		bpl[0].tus.w = le32_to_cpu(bpl[0].tus.w);

		/* Setup the physical region for the FCP RSP */
		bpl[1].addrHigh = le32_to_cpu(putPaddrHigh(pdma_phys_fcp_rsp));
		bpl[1].addrLow = le32_to_cpu(putPaddrLow(pdma_phys_fcp_rsp));
		bpl[1].tus.f.bdeSize = sizeof(struct fcp_rsp);
		bpl[1].tus.f.bdeFlags = BUFF_TYPE_BDE_64;
		bpl[1].tus.w = le32_to_cpu(bpl[1].tus.w);

		/*
		 * Since the IOCB for the FCP I/O is built into this
		 * lpfc_scsi_buf, initialize it with all known data now.
		 */
		iocb = &psb->cur_iocbq.iocb;
		iocb->un.fcpi64.bdl.ulpIoTag32 = 0;
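		/*
		 * On SLI-3 without BlockGuard, the extended IOCB can carry
		 * the FCP_CMND as immediate data plus a dedicated response
		 * BDE; otherwise fall back to a BPL describing both.
		 */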
		if ((phba->sli_rev == 3) &&
		    !(phba->sli3_options & LPFC_SLI3_BG_ENABLED)) {
			/* fill in immediate fcp command BDE */
			iocb->un.fcpi64.bdl.bdeFlags = BUFF_TYPE_BDE_IMMED;
			iocb->un.fcpi64.bdl.bdeSize = sizeof(struct fcp_cmnd);
			iocb->un.fcpi64.bdl.addrLow = offsetof(IOCB_t,
							       unsli3.fcp_ext.icd);
			iocb->un.fcpi64.bdl.addrHigh = 0;
			iocb->ulpBdeCount = 0;
			iocb->ulpLe = 0;
			/* fill in response BDE */
			iocb->unsli3.fcp_ext.rbde.tus.f.bdeFlags =
							BUFF_TYPE_BDE_64;
			iocb->unsli3.fcp_ext.rbde.tus.f.bdeSize =
							sizeof(struct fcp_rsp);
			iocb->unsli3.fcp_ext.rbde.addrLow =
							putPaddrLow(pdma_phys_fcp_rsp);
			iocb->unsli3.fcp_ext.rbde.addrHigh =
							putPaddrHigh(pdma_phys_fcp_rsp);
		} else {
			iocb->un.fcpi64.bdl.bdeFlags = BUFF_TYPE_BLP_64;
			iocb->un.fcpi64.bdl.bdeSize =
					(2 * sizeof(struct ulp_bde64));
			iocb->un.fcpi64.bdl.addrLow =
					putPaddrLow(pdma_phys_bpl);
			iocb->un.fcpi64.bdl.addrHigh =
					putPaddrHigh(pdma_phys_bpl);
			iocb->ulpBdeCount = 1;
			iocb->ulpLe = 1;
		}
		iocb->ulpClass = CLASS3;
		psb->status = IOSTAT_SUCCESS;
		/* Put it back into the SCSI buffer list */
		psb->cur_iocbq.context1 = psb;
		lpfc_release_scsi_buf_s3(phba, psb);

	}

	return bcnt;
}

/**
 * lpfc_sli4_vport_delete_fcp_xri_aborted - Remove all ndlp references for vport
 * @vport: pointer to lpfc vport data structure.
 *
 * This routine is invoked by the vport cleanup for deletions and the cleanup
 * for an ndlp on removal.
 **/
void
lpfc_sli4_vport_delete_fcp_xri_aborted(struct lpfc_vport *vport)
{
	struct lpfc_hba *phba = vport->phba;
	struct lpfc_scsi_buf *psb, *next_psb;
	unsigned long iflag = 0;

	spin_lock_irqsave(&phba->hbalock, iflag);
	spin_lock(&phba->sli4_hba.abts_scsi_buf_list_lock);
	list_for_each_entry_safe(psb, next_psb,
				&phba->sli4_hba.lpfc_abts_scsi_buf_list, list) {
		if (psb->rdata && psb->rdata->pnode
			&& psb->rdata->pnode->vport == vport)
			psb->rdata = NULL;
	}
	spin_unlock(&phba->sli4_hba.abts_scsi_buf_list_lock);
	spin_unlock_irqrestore(&phba->hbalock, iflag);
}

/**
 * lpfc_sli4_fcp_xri_aborted - Fast-path process of fcp xri abort
 * @phba: pointer to lpfc hba data structure.
 * @axri: pointer to the fcp xri abort wcqe structure.
 *
 * This routine is invoked by the worker thread to process a SLI4 fast-path
 * FCP aborted xri.
 **/
void
lpfc_sli4_fcp_xri_aborted(struct lpfc_hba *phba,
			  struct sli4_wcqe_xri_aborted *axri)
{
	uint16_t xri = bf_get(lpfc_wcqe_xa_xri, axri);
	uint16_t rxid = bf_get(lpfc_wcqe_xa_remote_xid, axri);
	struct lpfc_scsi_buf *psb, *next_psb;
	unsigned long iflag = 0;
	struct lpfc_iocbq *iocbq;
	int i;
	struct lpfc_nodelist *ndlp;
	int rrq_empty = 0;
	struct lpfc_sli_ring *pring = &phba->sli.ring[LPFC_ELS_RING];

	spin_lock_irqsave(&phba->hbalock, iflag);
	spin_lock(&phba->sli4_hba.abts_scsi_buf_list_lock);
	list_for_each_entry_safe(psb, next_psb,
		&phba->sli4_hba.lpfc_abts_scsi_buf_list, list) {
		if (psb->cur_iocbq.sli4_xritag == xri) {
			list_del(&psb->list);
			psb->exch_busy = 0;
			psb->status = IOSTAT_SUCCESS;
			spin_unlock(
				&phba->sli4_hba.abts_scsi_buf_list_lock);
			if (psb->rdata && psb->rdata->pnode)
				ndlp = psb->rdata->pnode;
			else
				ndlp = NULL;

			rrq_empty = list_empty(&phba->active_rrq_list);
			spin_unlock_irqrestore(&phba->hbalock, iflag);
			if (ndlp) {
				lpfc_set_rrq_active(phba, ndlp, xri, rxid, 1);
				lpfc_sli4_abts_err_handler(phba, ndlp, axri);
			}
			lpfc_release_scsi_buf_s4(phba, psb);
			if (rrq_empty)
				lpfc_worker_wake_up(phba);
			return;
		}
	}
	spin_unlock(&phba->sli4_hba.abts_scsi_buf_list_lock);
	for (i = 1; i <= phba->sli.last_iotag; i++) {
		iocbq = phba->sli.iocbq_lookup[i];

		if (!(iocbq->iocb_flag & LPFC_IO_FCP) ||
		    (iocbq->iocb_flag & LPFC_IO_LIBDFC))
			continue;
		if (iocbq->sli4_xritag != xri)
			continue;
		psb = container_of(iocbq, struct lpfc_scsi_buf, cur_iocbq);
		psb->exch_busy = 0;
		spin_unlock_irqrestore(&phba->hbalock, iflag);
		if (pring->txq_cnt)
			lpfc_worker_wake_up(phba);
		return;

	}
	spin_unlock_irqrestore(&phba->hbalock, iflag);
}

/**
 * lpfc_sli4_post_scsi_sgl_list - Post blocks of scsi buffer sgls from a list
 * @phba: pointer to lpfc hba data structure.
 * @post_sblist: pointer to the scsi buffer list.
 * @sb_count: number of scsi buffers on the list.
 *
 * This routine walks a list of scsi buffers that was passed in. It attempts
 * to construct blocks of scsi buffer sgls which contain contiguous xris and
 * uses the non-embedded SGL block post mailbox commands to post them to the
 * port. For a single SCSI buffer sgl with a non-contiguous xri, if any, it
 * shall use the embedded SGL post mailbox command for posting. The
 * @post_sblist passed in must be a local list, thus no lock is needed when
 * manipulating the list.
 *
 * Returns: 0 = failure, non-zero number of successfully posted buffers.
 **/
int
lpfc_sli4_post_scsi_sgl_list(struct lpfc_hba *phba,
			     struct list_head *post_sblist, int sb_count)
{
	struct lpfc_scsi_buf *psb, *psb_next;
	int status;
	int post_cnt = 0, block_cnt = 0, num_posting = 0, num_posted = 0;
	dma_addr_t pdma_phys_bpl1;
	int last_xritag = NO_XRI;
	LIST_HEAD(prep_sblist);
	LIST_HEAD(blck_sblist);
	LIST_HEAD(scsi_sblist);

	/* sanity check */
	if (sb_count <= 0)
		return -EINVAL;

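	/*
	 * Walk the list accumulating buffers with consecutive xris into
	 * prep_sblist (block_cnt tracks its size, last_xritag the previous
	 * xri).  A gap in the xri sequence, a full non-embedded mailbox
	 * (LPFC_NEMBED_MBOX_SGL_CNT), or the end of the list moves the
	 * accumulated buffers to blck_sblist and sets post_cnt to trigger
	 * a block post below.
	 */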
	list_for_each_entry_safe(psb, psb_next, post_sblist, list) {
		list_del_init(&psb->list);
		block_cnt++;
		if ((last_xritag != NO_XRI) &&
		    (psb->cur_iocbq.sli4_xritag != last_xritag + 1)) {
			/* a hole in xri block, form a sgl posting block */
			list_splice_init(&prep_sblist, &blck_sblist);
			post_cnt = block_cnt - 1;
			/* prepare list for next posting block */
			list_add_tail(&psb->list, &prep_sblist);
			block_cnt = 1;
		} else {
			/* prepare list for next posting block */
			list_add_tail(&psb->list, &prep_sblist);
			/* enough sgls for non-embed sgl mbox command */
			if (block_cnt == LPFC_NEMBED_MBOX_SGL_CNT) {
				list_splice_init(&prep_sblist, &blck_sblist);
				post_cnt = block_cnt;
				block_cnt = 0;
			}
		}
		num_posting++;
		last_xritag = psb->cur_iocbq.sli4_xritag;

		/* end of repost sgl list condition for SCSI buffers */
		if (num_posting == sb_count) {
			if (post_cnt == 0) {
				/* last sgl posting block */
				list_splice_init(&prep_sblist, &blck_sblist);
				post_cnt = block_cnt;
			} else if (block_cnt == 1) {
				/* last single sgl with non-contiguous xri */
				if (phba->cfg_sg_dma_buf_size > SGL_PAGE_SIZE)
					pdma_phys_bpl1 = psb->dma_phys_bpl +
								SGL_PAGE_SIZE;
				else
					pdma_phys_bpl1 = 0;
				status = lpfc_sli4_post_sgl(phba,
						psb->dma_phys_bpl,
						pdma_phys_bpl1,
						psb->cur_iocbq.sli4_xritag);
				if (status) {
					/* failure, put on abort scsi list */
					psb->exch_busy = 1;
				} else {
					/* success, put on SCSI buffer list */
					psb->exch_busy = 0;
					psb->status = IOSTAT_SUCCESS;
					num_posted++;
				}
				/* success, put on SCSI buffer sgl list */
				list_add_tail(&psb->list, &scsi_sblist);
			}
		}

		/* continue until a nembed page worth of sgls */
		if (post_cnt == 0)
			continue;

		/* post block of SCSI buffer list sgls */
		status = lpfc_sli4_post_scsi_sgl_block(phba, &blck_sblist,
						       post_cnt);

		/* don't reset xritag due to hole in xri block */
		if (block_cnt == 0)
			last_xritag = NO_XRI;

		/* reset SCSI buffer post count for next round of posting */
		post_cnt = 0;

		/* put block-posted SCSI buffer sgls on the SCSI buffer sgl list */
		while (!list_empty(&blck_sblist)) {
			list_remove_head(&blck_sblist, psb,
					 struct lpfc_scsi_buf, list);
			if (status) {
				/* failure, put on abort scsi list */
				psb->exch_busy = 1;
			} else {
				/* success, put on SCSI buffer list */
				psb->exch_busy = 0;
				psb->status = IOSTAT_SUCCESS;
				num_posted++;
			}
			list_add_tail(&psb->list, &scsi_sblist);
		}
	}
	/* Push SCSI buffers with sgl posted to the available list */
	while (!list_empty(&scsi_sblist)) {
		list_remove_head(&scsi_sblist, psb,
				 struct lpfc_scsi_buf, list);
		lpfc_release_scsi_buf_s4(phba, psb);
	}
	return num_posted;
}

/**
 * lpfc_sli4_repost_scsi_sgl_list - Repost all the allocated scsi buffer sgls
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine walks the list of scsi buffers that have been allocated and
 * reposts them to the port by using SGL block post. This is needed after a
 * pci_function_reset/warm_start or start. The lpfc_hba_down_post_s4 routine
 * is responsible for moving all scsi buffers on the lpfc_abts_scsi_sgl_list
 * to the lpfc_scsi_buf_list. If the repost fails, reject all scsi buffers.
 *
 * Returns: 0 = success, non-zero failure.
 **/
int
lpfc_sli4_repost_scsi_sgl_list(struct lpfc_hba *phba)
{
	LIST_HEAD(post_sblist);
	int num_posted, rc = 0;

	/* get all SCSI buffers that need to be reposted to a local list */
	spin_lock(&phba->scsi_buf_list_lock);
	list_splice_init(&phba->lpfc_scsi_buf_list, &post_sblist);
	spin_unlock(&phba->scsi_buf_list_lock);

	/* post the list of scsi buffer sgls to port if available */
	if (!list_empty(&post_sblist)) {
		num_posted = lpfc_sli4_post_scsi_sgl_list(phba, &post_sblist,
						phba->sli4_hba.scsi_xri_cnt);
		/* failed to post any scsi buffer, return error */
		if (num_posted == 0)
			rc = -EIO;
	}
	return rc;
}

/**
 * lpfc_new_scsi_buf_s4 - Scsi buffer allocator for HBA with SLI4 IF spec
 * @vport: The virtual port for which this call is being executed.
 * @num_to_alloc: The requested number of buffers to allocate.
 *
 * This routine allocates scsi buffers for a device with SLI-4 interface spec.
 * The scsi buffer contains all the necessary information needed to initiate
 * a SCSI I/O. After allocating up to @num_to_alloc SCSI buffers and putting
 * them on a list, it posts them to the port by using SGL block post.
 *
 * Return codes:
 *   int - number of scsi buffers that were allocated and posted.
 *   0 = failure, less than num_to_alloc is a partial failure.
 **/
static int
lpfc_new_scsi_buf_s4(struct lpfc_vport *vport, int num_to_alloc)
{
	struct lpfc_hba *phba = vport->phba;
	struct lpfc_scsi_buf *psb;
	struct sli4_sge *sgl;
	IOCB_t *iocb;
	dma_addr_t pdma_phys_fcp_cmd;
	dma_addr_t pdma_phys_fcp_rsp;
	dma_addr_t pdma_phys_bpl, pdma_phys_bpl1;
	uint16_t iotag, lxri = 0;
	int bcnt, num_posted;
	LIST_HEAD(prep_sblist);
	LIST_HEAD(post_sblist);
	LIST_HEAD(scsi_sblist);

	for (bcnt = 0; bcnt < num_to_alloc; bcnt++) {
		psb = kzalloc(sizeof(struct lpfc_scsi_buf), GFP_KERNEL);
		if (!psb)
			break;
		/*
		 * Get memory from the pci pool to map the virt space to
		 * pci bus space for an I/O. The DMA buffer includes space
		 * for the struct fcp_cmnd, struct fcp_rsp and the number
		 * of bde's necessary to support the sg_tablesize.
		 */
		psb->data = pci_pool_alloc(phba->lpfc_scsi_dma_buf_pool,
					   GFP_KERNEL, &psb->dma_handle);
		if (!psb->data) {
			kfree(psb);
			break;
		}
		memset(psb->data, 0, phba->cfg_sg_dma_buf_size);

		/* Allocate iotag for psb->cur_iocbq. */
		iotag = lpfc_sli_next_iotag(phba, &psb->cur_iocbq);
		if (iotag == 0) {
			pci_pool_free(phba->lpfc_scsi_dma_buf_pool,
				      psb->data, psb->dma_handle);
			kfree(psb);
			break;
		}

		lxri = lpfc_sli4_next_xritag(phba);
		if (lxri == NO_XRI) {
			pci_pool_free(phba->lpfc_scsi_dma_buf_pool,
				      psb->data, psb->dma_handle);
			kfree(psb);
			break;
		}
		psb->cur_iocbq.sli4_lxritag = lxri;
		psb->cur_iocbq.sli4_xritag = phba->sli4_hba.xri_ids[lxri];
		psb->cur_iocbq.iocb_flag |= LPFC_IO_FCP;
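		/*
		 * Unlike the SLI-3 layout, the sgl sits at the start of the
		 * DMA buffer while the fcp_cmnd and fcp_rsp are carved out
		 * of its tail end.
		 */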
		psb->fcp_bpl = psb->data;
		psb->fcp_cmnd = (psb->data + phba->cfg_sg_dma_buf_size)
			- (sizeof(struct fcp_cmnd) + sizeof(struct fcp_rsp));
		psb->fcp_rsp = (struct fcp_rsp *)((uint8_t *)psb->fcp_cmnd +
					sizeof(struct fcp_cmnd));

		/* Initialize local short-hand pointers. */
		sgl = (struct sli4_sge *)psb->fcp_bpl;
		pdma_phys_bpl = psb->dma_handle;
		pdma_phys_fcp_cmd =
			(psb->dma_handle + phba->cfg_sg_dma_buf_size)
			- (sizeof(struct fcp_cmnd) + sizeof(struct fcp_rsp));
		pdma_phys_fcp_rsp = pdma_phys_fcp_cmd + sizeof(struct fcp_cmnd);

		/*
		 * The first two bdes are the FCP_CMD and FCP_RSP.
		 * The balance are sg list bdes. Initialize the
		 * first two and leave the rest for queuecommand.
		 */
		sgl->addr_hi = cpu_to_le32(putPaddrHigh(pdma_phys_fcp_cmd));
		sgl->addr_lo = cpu_to_le32(putPaddrLow(pdma_phys_fcp_cmd));
		sgl->word2 = le32_to_cpu(sgl->word2);
		bf_set(lpfc_sli4_sge_last, sgl, 0);
		sgl->word2 = cpu_to_le32(sgl->word2);
		sgl->sge_len = cpu_to_le32(sizeof(struct fcp_cmnd));
		sgl++;

		/* Setup the physical region for the FCP RSP */
		sgl->addr_hi = cpu_to_le32(putPaddrHigh(pdma_phys_fcp_rsp));
		sgl->addr_lo = cpu_to_le32(putPaddrLow(pdma_phys_fcp_rsp));
		sgl->word2 = le32_to_cpu(sgl->word2);
		bf_set(lpfc_sli4_sge_last, sgl, 1);
		sgl->word2 = cpu_to_le32(sgl->word2);
		sgl->sge_len = cpu_to_le32(sizeof(struct fcp_rsp));

		/*
		 * Since the IOCB for the FCP I/O is built into this
		 * lpfc_scsi_buf, initialize it with all known data now.
		 */
		iocb = &psb->cur_iocbq.iocb;
		iocb->un.fcpi64.bdl.ulpIoTag32 = 0;
		iocb->un.fcpi64.bdl.bdeFlags = BUFF_TYPE_BDE_64;
		/* setting the BLP size to 2 * sizeof BDE may not be correct.
		 * We are setting the bpl to point to our sgl. An sgl's
		 * entries are 16 bytes, bpl entries are 12 bytes.
		 */
		iocb->un.fcpi64.bdl.bdeSize = sizeof(struct fcp_cmnd);
		iocb->un.fcpi64.bdl.addrLow = putPaddrLow(pdma_phys_fcp_cmd);
		iocb->un.fcpi64.bdl.addrHigh = putPaddrHigh(pdma_phys_fcp_cmd);
		iocb->ulpBdeCount = 1;
		iocb->ulpLe = 1;
		iocb->ulpClass = CLASS3;
		psb->cur_iocbq.context1 = psb;
		if (phba->cfg_sg_dma_buf_size > SGL_PAGE_SIZE)
			pdma_phys_bpl1 = pdma_phys_bpl + SGL_PAGE_SIZE;
		else
			pdma_phys_bpl1 = 0;
		psb->dma_phys_bpl = pdma_phys_bpl;

		/* add the scsi buffer to a post list */
		list_add_tail(&psb->list, &post_sblist);
		spin_lock_irq(&phba->scsi_buf_list_lock);
		phba->sli4_hba.scsi_xri_cnt++;
		spin_unlock_irq(&phba->scsi_buf_list_lock);
	}
	lpfc_printf_log(phba, KERN_INFO, LOG_BG,
			"3021 Allocate %d out of %d requested new SCSI "
			"buffers\n", bcnt, num_to_alloc);

	/* post the list of scsi buffer sgls to port if available */
	if (!list_empty(&post_sblist))
		num_posted = lpfc_sli4_post_scsi_sgl_list(phba,
							  &post_sblist, bcnt);
	else
		num_posted = 0;

	return num_posted;
}

/**
 * lpfc_new_scsi_buf - Wrapper function for scsi buffer allocator
 * @vport: The virtual port for which this call is being executed.
 * @num_to_alloc: The requested number of buffers to allocate.
 *
 * This routine wraps the actual SCSI buffer allocator function pointer from
 * the lpfc_hba struct.
 *
 * Return codes:
 *   int - number of scsi buffers that were allocated.
 *   0 = failure, less than num_to_alloc is a partial failure.
 **/
static inline int
lpfc_new_scsi_buf(struct lpfc_vport *vport, int num_to_alloc)
{
	return vport->phba->lpfc_new_scsi_buf(vport, num_to_alloc);
}

/**
 * lpfc_get_scsi_buf_s3 - Get a scsi buffer from lpfc_scsi_buf_list of the HBA
 * @phba: The HBA for which this call is being executed.
 * @ndlp: Pointer to the FC node for the I/O (not used by the SLI-3 path).
 *
 * This routine removes a scsi buffer from the head of the @phba
 * lpfc_scsi_buf_list list and returns it to the caller.
 *
 * Return codes:
 *   NULL - Error
 *   Pointer to lpfc_scsi_buf - Success
 **/
static struct lpfc_scsi_buf*
lpfc_get_scsi_buf_s3(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp)
{
	struct lpfc_scsi_buf *lpfc_cmd = NULL;
	struct list_head *scsi_buf_list = &phba->lpfc_scsi_buf_list;
	unsigned long iflag = 0;

	spin_lock_irqsave(&phba->scsi_buf_list_lock, iflag);
	list_remove_head(scsi_buf_list, lpfc_cmd, struct lpfc_scsi_buf, list);
	if (lpfc_cmd) {
		lpfc_cmd->seg_cnt = 0;
		lpfc_cmd->nonsg_phys = 0;
		lpfc_cmd->prot_seg_cnt = 0;
	}
	spin_unlock_irqrestore(&phba->scsi_buf_list_lock, iflag);
	return lpfc_cmd;
}
/**
 * lpfc_get_scsi_buf_s4 - Get a scsi buffer from lpfc_scsi_buf_list of the HBA
 * @phba: The HBA for which this call is being executed.
 * @ndlp: Pointer to the FC node for the I/O.
 *
 * This routine removes a scsi buffer from the head of the @phba
 * lpfc_scsi_buf_list list and returns it to the caller.
 *
 * Return codes:
 *   NULL - Error
 *   Pointer to lpfc_scsi_buf - Success
 **/
static struct lpfc_scsi_buf*
lpfc_get_scsi_buf_s4(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp)
{
	struct lpfc_scsi_buf *lpfc_cmd;
	unsigned long iflag = 0;
	int found = 0;

	spin_lock_irqsave(&phba->scsi_buf_list_lock, iflag);
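	/*
	 * Skip buffers whose XRI is still on an active RRQ for this node;
	 * an aborted XRI cannot be reused for at least RA_TOV (see
	 * lpfc_release_scsi_buf_s4), so take the first buffer that is safe.
	 */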
	list_for_each_entry(lpfc_cmd, &phba->lpfc_scsi_buf_list,
			    list) {
		if (lpfc_test_rrq_active(phba, ndlp,
					 lpfc_cmd->cur_iocbq.sli4_xritag))
			continue;
		list_del(&lpfc_cmd->list);
		found = 1;
		lpfc_cmd->seg_cnt = 0;
		lpfc_cmd->nonsg_phys = 0;
		lpfc_cmd->prot_seg_cnt = 0;
		break;
	}
	spin_unlock_irqrestore(&phba->scsi_buf_list_lock,
			       iflag);
	if (!found)
		return NULL;
	else
		return lpfc_cmd;
}
/**
 * lpfc_get_scsi_buf - Get a scsi buffer from lpfc_scsi_buf_list of the HBA
 * @phba: The HBA for which this call is being executed.
 * @ndlp: Pointer to the FC node for the I/O.
 *
 * This routine removes a scsi buffer from the head of the @phba
 * lpfc_scsi_buf_list list and returns it to the caller.
 *
 * Return codes:
 *   NULL - Error
 *   Pointer to lpfc_scsi_buf - Success
 **/
static struct lpfc_scsi_buf*
lpfc_get_scsi_buf(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp)
{
	return phba->lpfc_get_scsi_buf(phba, ndlp);
}

/**
 * lpfc_release_scsi_buf_s3 - Return a scsi buffer back to hba scsi buf list
 * @phba: The Hba for which this call is being executed.
 * @psb: The scsi buffer which is being released.
 *
 * This routine releases @psb scsi buffer by adding it to the tail of the
 * @phba lpfc_scsi_buf_list list.
 **/
static void
lpfc_release_scsi_buf_s3(struct lpfc_hba *phba, struct lpfc_scsi_buf *psb)
{
	unsigned long iflag = 0;

	spin_lock_irqsave(&phba->scsi_buf_list_lock, iflag);
	psb->pCmd = NULL;
	list_add_tail(&psb->list, &phba->lpfc_scsi_buf_list);
	spin_unlock_irqrestore(&phba->scsi_buf_list_lock, iflag);
}

/**
 * lpfc_release_scsi_buf_s4 - Return a scsi buffer back to hba scsi buf list
 * @phba: The Hba for which this call is being executed.
 * @psb: The scsi buffer which is being released.
 *
 * This routine releases @psb scsi buffer by adding it to the tail of the
 * @phba lpfc_scsi_buf_list list. For SLI4, XRIs are tied to the scsi buffer
 * and cannot be reused for at least RA_TOV if the exchange was aborted, so
 * such buffers go on the aborted list instead.
 **/
static void
lpfc_release_scsi_buf_s4(struct lpfc_hba *phba, struct lpfc_scsi_buf *psb)
{
	unsigned long iflag = 0;

	if (psb->exch_busy) {
		spin_lock_irqsave(&phba->sli4_hba.abts_scsi_buf_list_lock,
					iflag);
		psb->pCmd = NULL;
		list_add_tail(&psb->list,
			&phba->sli4_hba.lpfc_abts_scsi_buf_list);
		spin_unlock_irqrestore(&phba->sli4_hba.abts_scsi_buf_list_lock,
					iflag);
	} else {
		spin_lock_irqsave(&phba->scsi_buf_list_lock, iflag);
		psb->pCmd = NULL;
		list_add_tail(&psb->list, &phba->lpfc_scsi_buf_list);
		spin_unlock_irqrestore(&phba->scsi_buf_list_lock, iflag);
	}
}

/**
 * lpfc_release_scsi_buf - Return a scsi buffer back to hba scsi buf list
 * @phba: The Hba for which this call is being executed.
 * @psb: The scsi buffer which is being released.
 *
 * This routine releases @psb scsi buffer by adding it to the tail of the
 * @phba lpfc_scsi_buf_list list.
 **/
static void
lpfc_release_scsi_buf(struct lpfc_hba *phba, struct lpfc_scsi_buf *psb)
{
	phba->lpfc_release_scsi_buf(phba, psb);
}

/**
 * lpfc_scsi_prep_dma_buf_s3 - DMA mapping for scsi buffer to SLI3 IF spec
 * @phba: The Hba for which this call is being executed.
 * @lpfc_cmd: The scsi buffer which is going to be mapped.
 *
 * This routine does the pci dma mapping for the scatter-gather list of the
 * scsi cmnd field of @lpfc_cmd for a device with SLI-3 interface spec. It
 * scans through the sg elements and formats the bdes. This routine also
 * initializes all IOCB fields which are dependent on the scsi command
 * request buffer.
 *
 * Return codes:
 *   1 - Error
 *   0 - Success
 **/
static int
lpfc_scsi_prep_dma_buf_s3(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd)
{
	struct scsi_cmnd *scsi_cmnd = lpfc_cmd->pCmd;
	struct scatterlist *sgel = NULL;
	struct fcp_cmnd *fcp_cmnd = lpfc_cmd->fcp_cmnd;
	struct ulp_bde64 *bpl = lpfc_cmd->fcp_bpl;
	struct lpfc_iocbq *iocbq = &lpfc_cmd->cur_iocbq;
	IOCB_t *iocb_cmd = &lpfc_cmd->cur_iocbq.iocb;
	struct ulp_bde64 *data_bde = iocb_cmd->unsli3.fcp_ext.dbde;
	dma_addr_t physaddr;
	uint32_t num_bde = 0;
	int nseg, datadir = scsi_cmnd->sc_data_direction;

	/*
	 * There are three possibilities here - use scatter-gather segment, use
	 * the single mapping, or neither.  Start the lpfc command prep by
	 * bumping the bpl beyond the fcp_cmnd and fcp_rsp regions to the first
	 * data bde entry.
	 */
	bpl += 2;
	if (scsi_sg_count(scsi_cmnd)) {
		/*
		 * The driver stores the segment count returned from pci_map_sg
		 * because this a count of dma-mappings used to map the use_sg
		 * pages.  They are not guaranteed to be the same for those
		 * architectures that implement an IOMMU.
		 */

		nseg = dma_map_sg(&phba->pcidev->dev, scsi_sglist(scsi_cmnd),
				  scsi_sg_count(scsi_cmnd), datadir);
		if (unlikely(!nseg))
			return 1;

		lpfc_cmd->seg_cnt = nseg;
		if (lpfc_cmd->seg_cnt > phba->cfg_sg_seg_cnt) {
			lpfc_printf_log(phba, KERN_ERR, LOG_BG,
				"9064 BLKGRD: %s: Too many sg segments from "
				"dma_map_sg.  Config %d, seg_cnt %d\n",
				__func__, phba->cfg_sg_seg_cnt,
				lpfc_cmd->seg_cnt);
			scsi_dma_unmap(scsi_cmnd);
			return 1;
		}

		/*
		 * The driver established a maximum scatter-gather segment count
		 * during probe that limits the number of sg elements in any
		 * single scsi command.  Just run through the seg_cnt and format
		 * the bde's.
		 * When using SLI-3 the driver will try to fit all the BDEs into
		 * the IOCB.  If it can't then the BDEs get added to a BPL as it
		 * does for SLI-2 mode.
		 */
		scsi_for_each_sg(scsi_cmnd, sgel, nseg, num_bde) {
			physaddr = sg_dma_address(sgel);
			if (phba->sli_rev == 3 &&
			    !(phba->sli3_options & LPFC_SLI3_BG_ENABLED) &&
			    !(iocbq->iocb_flag & DSS_SECURITY_OP) &&
			    nseg <= LPFC_EXT_DATA_BDE_COUNT) {
				data_bde->tus.f.bdeFlags = BUFF_TYPE_BDE_64;
				data_bde->tus.f.bdeSize = sg_dma_len(sgel);
				data_bde->addrLow = putPaddrLow(physaddr);
				data_bde->addrHigh = putPaddrHigh(physaddr);
				data_bde++;
			} else {
				bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64;
				bpl->tus.f.bdeSize = sg_dma_len(sgel);
				bpl->tus.w = le32_to_cpu(bpl->tus.w);
				bpl->addrLow =
					le32_to_cpu(putPaddrLow(physaddr));
				bpl->addrHigh =
					le32_to_cpu(putPaddrHigh(physaddr));
				bpl++;
			}
		}
	}

	/*
	 * Finish initializing those IOCB fields that are dependent on the
	 * scsi_cmnd request_buffer.  Note that for SLI-2 the bdeSize is
	 * explicitly reinitialized and for SLI-3 the extended bde count is
	 * explicitly reinitialized since all iocb memory resources are reused.
	 */
	if (phba->sli_rev == 3 &&
	    !(phba->sli3_options & LPFC_SLI3_BG_ENABLED) &&
	    !(iocbq->iocb_flag & DSS_SECURITY_OP)) {
		if (num_bde > LPFC_EXT_DATA_BDE_COUNT) {
			/*
			 * The extended IOCB format can only fit 3 BDE or a BPL.
			 * This I/O has more than 3 BDE so the 1st data bde will
			 * be a BPL that is filled in here.
			 */
			physaddr = lpfc_cmd->dma_handle;
			data_bde->tus.f.bdeFlags = BUFF_TYPE_BLP_64;
			data_bde->tus.f.bdeSize = (num_bde *
						   sizeof(struct ulp_bde64));
			physaddr += (sizeof(struct fcp_cmnd) +
				     sizeof(struct fcp_rsp) +
				     (2 * sizeof(struct ulp_bde64)));
			data_bde->addrHigh = putPaddrHigh(physaddr);
			data_bde->addrLow = putPaddrLow(physaddr);
			/* ebde count includes the response bde and data bpl */
			iocb_cmd->unsli3.fcp_ext.ebde_count = 2;
		} else {
			/* ebde count includes the response bde and data bdes */
			iocb_cmd->unsli3.fcp_ext.ebde_count = (num_bde + 1);
		}
	} else {
		iocb_cmd->un.fcpi64.bdl.bdeSize =
			((num_bde + 2) * sizeof(struct ulp_bde64));
		iocb_cmd->unsli3.fcp_ext.ebde_count = (num_bde + 1);
	}
	fcp_cmnd->fcpDl = cpu_to_be32(scsi_bufflen(scsi_cmnd));

	/*
	 * Due to difference in data length between DIF/non-DIF paths,
	 * we need to set word 4 of IOCB here
	 */
	iocb_cmd->un.fcpi.fcpi_parm = scsi_bufflen(scsi_cmnd);
	return 0;
}

static inline unsigned
lpfc_cmd_blksize(struct scsi_cmnd *sc)
{
	return sc->device->sector_size;
}

#ifdef CONFIG_SCSI_LPFC_DEBUG_FS

/* Returned if error injection is detected by the Initiator */
#define BG_ERR_INIT	0x1
/* Returned if error injection is detected by the Target */
#define BG_ERR_TGT	0x2
/* Returned if swapping CSUM<-->CRC is required for error injection */
#define BG_ERR_SWAP	0x10
/* Returned if disabling Guard/Ref/App checking is required for error injection */
#define BG_ERR_CHECK	0x20
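/*
 * These are bit flags and may be combined; for example, the injection
 * paths below return (BG_ERR_TGT | BG_ERR_CHECK) when the error must go
 * out on the wire with checking relaxed so the target detects it.
 */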

/**
 * lpfc_bg_err_inject - Determine if we should inject an error
 * @phba: The Hba for which this call is being executed.
 * @sc: The SCSI command to examine
 * @reftag: (out) BlockGuard reference tag for transmitted data
 * @apptag: (out) BlockGuard application tag for transmitted data
 * @new_guard: (in) Value to replace CRC with if needed
 *
 * Returns BG_ERR_* bit mask or 0 if request ignored
 **/
static int
lpfc_bg_err_inject(struct lpfc_hba *phba, struct scsi_cmnd *sc,
		uint32_t *reftag, uint16_t *apptag, uint32_t new_guard)
{
	struct scatterlist *sgpe; /* s/g prot entry */
	struct scatterlist *sgde; /* s/g data entry */
	struct lpfc_scsi_buf *lpfc_cmd = NULL;
	struct scsi_dif_tuple *src = NULL;
	struct lpfc_nodelist *ndlp;
	struct lpfc_rport_data *rdata;
	uint32_t op = scsi_get_prot_op(sc);
	uint32_t blksize;
	uint32_t numblks;
	sector_t lba;
	int rc = 0;
	int blockoff = 0;

	if (op == SCSI_PROT_NORMAL)
		return 0;

	sgpe = scsi_prot_sglist(sc);
	sgde = scsi_sglist(sc);
	lba = scsi_get_lba(sc);

	/* First check if we need to match the LBA */
	if (phba->lpfc_injerr_lba != LPFC_INJERR_LBA_OFF) {
		blksize = lpfc_cmd_blksize(sc);
		numblks = (scsi_bufflen(sc) + blksize - 1) / blksize;

		/* Make sure we have the right LBA if one is specified */
		if ((phba->lpfc_injerr_lba < lba) ||
		    (phba->lpfc_injerr_lba >= (lba + numblks)))
			return 0;
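		/*
		 * blockoff is the offset of the target LBA within this IO's
		 * protection data, clamped to the number of DIF tuples
		 * actually present in the first protection sg entry.
		 */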
1390 if (sgpe) {
1391 blockoff = phba->lpfc_injerr_lba - lba;
1392 numblks = sg_dma_len(sgpe) /
1393 sizeof(struct scsi_dif_tuple);
1394 if (numblks < blockoff)
1395 blockoff = numblks;
acd6859b 1396 }
f9bb2da1
JS
1397 }
1398
4ac9b226
JS
1399 /* Next check if we need to match the remote NPortID or WWPN */
1400 rdata = sc->device->hostdata;
1401 if (rdata && rdata->pnode) {
1402 ndlp = rdata->pnode;
1403
1404 /* Make sure we have the right NPortID if one is specified */
1405 if (phba->lpfc_injerr_nportid &&
1406 (phba->lpfc_injerr_nportid != ndlp->nlp_DID))
1407 return 0;
1408
1409 /*
1410 * Make sure we have the right WWPN if one is specified.
1411 * wwn[0] should be a non-zero NAA in a good WWPN.
1412 */
1413 if (phba->lpfc_injerr_wwpn.u.wwn[0] &&
1414 (memcmp(&ndlp->nlp_portname, &phba->lpfc_injerr_wwpn,
1415 sizeof(struct lpfc_name)) != 0))
1416 return 0;
1417 }
1418
1419 /* Setup a ptr to the protection data if the SCSI host provides it */
1420 if (sgpe) {
1421 src = (struct scsi_dif_tuple *)sg_virt(sgpe);
1422 src += blockoff;
1423 lpfc_cmd = (struct lpfc_scsi_buf *)sc->host_scribble;
1424 }
1425
f9bb2da1
JS
1426 /* Should we change the Reference Tag */
1427 if (reftag) {
acd6859b
JS
1428 if (phba->lpfc_injerr_wref_cnt) {
1429 switch (op) {
1430 case SCSI_PROT_WRITE_PASS:
9a6b09c0
JS
1431 if (src) {
1432 /*
1433 * For WRITE_PASS, force the error
1434 * to be sent on the wire. It should
1435 * be detected by the Target.
1436 * If blockoff != 0 error will be
1437 * inserted in middle of the IO.
1438 */
acd6859b
JS
1439
1440 lpfc_printf_log(phba, KERN_ERR, LOG_BG,
1441 "9076 BLKGRD: Injecting reftag error: "
1442 "write lba x%lx + x%x oldrefTag x%x\n",
1443 (unsigned long)lba, blockoff,
9a6b09c0 1444 be32_to_cpu(src->ref_tag));
f9bb2da1 1445
acd6859b 1446 /*
9a6b09c0
JS
1447 * Save the old ref_tag so we can
1448 * restore it on completion.
acd6859b 1449 */
9a6b09c0
JS
1450 if (lpfc_cmd) {
1451 lpfc_cmd->prot_data_type =
1452 LPFC_INJERR_REFTAG;
1453 lpfc_cmd->prot_data_segment =
1454 src;
1455 lpfc_cmd->prot_data =
1456 src->ref_tag;
1457 }
1458 src->ref_tag = cpu_to_be32(0xDEADBEEF);
acd6859b 1459 phba->lpfc_injerr_wref_cnt--;
4ac9b226
JS
1460 if (phba->lpfc_injerr_wref_cnt == 0) {
1461 phba->lpfc_injerr_nportid = 0;
1462 phba->lpfc_injerr_lba =
1463 LPFC_INJERR_LBA_OFF;
1464 memset(&phba->lpfc_injerr_wwpn,
1465 0, sizeof(struct lpfc_name));
1466 }
9a6b09c0
JS
1467 rc = BG_ERR_TGT | BG_ERR_CHECK;
1468
acd6859b
JS
1469 break;
1470 }
1471 /* Drop thru */
9a6b09c0 1472 case SCSI_PROT_WRITE_INSERT:
acd6859b 1473 /*
9a6b09c0
JS
1474 * For WRITE_INSERT, force the error
1475 * to be sent on the wire. It should be
1476 * detected by the Target.
acd6859b 1477 */
9a6b09c0 1478 /* DEADBEEF will be the reftag on the wire */
acd6859b
JS
1479 *reftag = 0xDEADBEEF;
1480 phba->lpfc_injerr_wref_cnt--;
4ac9b226
JS
1481 if (phba->lpfc_injerr_wref_cnt == 0) {
1482 phba->lpfc_injerr_nportid = 0;
1483 phba->lpfc_injerr_lba =
1484 LPFC_INJERR_LBA_OFF;
1485 memset(&phba->lpfc_injerr_wwpn,
1486 0, sizeof(struct lpfc_name));
1487 }
9a6b09c0 1488 rc = BG_ERR_TGT | BG_ERR_CHECK;
acd6859b
JS
1489
1490 lpfc_printf_log(phba, KERN_ERR, LOG_BG,
9a6b09c0 1491 "9078 BLKGRD: Injecting reftag error: "
acd6859b
JS
1492 "write lba x%lx\n", (unsigned long)lba);
1493 break;
9a6b09c0 1494 case SCSI_PROT_WRITE_STRIP:
acd6859b 1495 /*
9a6b09c0
JS
1496 * For WRITE_STRIP and WRITE_PASS,
1497 * force the error on data
1498 * being copied from SLI-Host to SLI-Port.
acd6859b 1499 */
f9bb2da1
JS
1500 *reftag = 0xDEADBEEF;
1501 phba->lpfc_injerr_wref_cnt--;
4ac9b226
JS
1502 if (phba->lpfc_injerr_wref_cnt == 0) {
1503 phba->lpfc_injerr_nportid = 0;
1504 phba->lpfc_injerr_lba =
1505 LPFC_INJERR_LBA_OFF;
1506 memset(&phba->lpfc_injerr_wwpn,
1507 0, sizeof(struct lpfc_name));
1508 }
9a6b09c0 1509 rc = BG_ERR_INIT;
f9bb2da1
JS
1510
1511 lpfc_printf_log(phba, KERN_ERR, LOG_BG,
9a6b09c0 1512 "9077 BLKGRD: Injecting reftag error: "
f9bb2da1 1513 "write lba x%lx\n", (unsigned long)lba);
acd6859b 1514 break;
f9bb2da1 1515 }
acd6859b
JS
1516 }
1517 if (phba->lpfc_injerr_rref_cnt) {
1518 switch (op) {
1519 case SCSI_PROT_READ_INSERT:
acd6859b
JS
1520 case SCSI_PROT_READ_STRIP:
1521 case SCSI_PROT_READ_PASS:
1522 /*
1523 * For READ_STRIP and READ_PASS, force the
1524 * error on data being read off the wire. It
1525 * should force an IO error to the driver.
1526 */
f9bb2da1
JS
1527 *reftag = 0xDEADBEEF;
1528 phba->lpfc_injerr_rref_cnt--;
4ac9b226
JS
1529 if (phba->lpfc_injerr_rref_cnt == 0) {
1530 phba->lpfc_injerr_nportid = 0;
1531 phba->lpfc_injerr_lba =
1532 LPFC_INJERR_LBA_OFF;
1533 memset(&phba->lpfc_injerr_wwpn,
1534 0, sizeof(struct lpfc_name));
1535 }
acd6859b 1536 rc = BG_ERR_INIT;
f9bb2da1
JS
1537
1538 lpfc_printf_log(phba, KERN_ERR, LOG_BG,
acd6859b 1539 "9079 BLKGRD: Injecting reftag error: "
f9bb2da1 1540 "read lba x%lx\n", (unsigned long)lba);
acd6859b 1541 break;
f9bb2da1
JS
1542 }
1543 }
1544 }
1545
1546 /* Should we change the Application Tag */
1547 if (apptag) {
acd6859b
JS
1548 if (phba->lpfc_injerr_wapp_cnt) {
1549 switch (op) {
1550 case SCSI_PROT_WRITE_PASS:
4ac9b226 1551 if (src) {
9a6b09c0
JS
1552 /*
1553 * For WRITE_PASS, force the error
1554 * to be sent on the wire. It should
1555 * be detected by the Target.
1556 * If blockoff != 0 error will be
1557 * inserted in middle of the IO.
1558 */
1559
acd6859b
JS
1560 lpfc_printf_log(phba, KERN_ERR, LOG_BG,
1561 "9080 BLKGRD: Injecting apptag error: "
1562 "write lba x%lx + x%x oldappTag x%x\n",
1563 (unsigned long)lba, blockoff,
9a6b09c0 1564 be16_to_cpu(src->app_tag));
acd6859b
JS
1565
1566 /*
9a6b09c0
JS
1567 * Save the old app_tag so we can
1568 * restore it on completion.
acd6859b 1569 */
9a6b09c0
JS
1570 if (lpfc_cmd) {
1571 lpfc_cmd->prot_data_type =
1572 LPFC_INJERR_APPTAG;
1573 lpfc_cmd->prot_data_segment =
1574 src;
1575 lpfc_cmd->prot_data =
1576 src->app_tag;
1577 }
1578 src->app_tag = cpu_to_be16(0xDEAD);
acd6859b 1579 phba->lpfc_injerr_wapp_cnt--;
4ac9b226
JS
1580 if (phba->lpfc_injerr_wapp_cnt == 0) {
1581 phba->lpfc_injerr_nportid = 0;
1582 phba->lpfc_injerr_lba =
1583 LPFC_INJERR_LBA_OFF;
1584 memset(&phba->lpfc_injerr_wwpn,
1585 0, sizeof(struct lpfc_name));
1586 }
9a6b09c0 1587 rc = BG_ERR_TGT | BG_ERR_CHECK;
acd6859b
JS
1588 break;
1589 }
1590 /* Drop thru */
9a6b09c0 1591 case SCSI_PROT_WRITE_INSERT:
acd6859b 1592 /*
9a6b09c0
JS
1593 * For WRITE_INSERT, force the
1594 * error to be sent on the wire. It should be
1595 * detected by the Target.
acd6859b 1596 */
9a6b09c0 1597 /* DEAD will be the apptag on the wire */
acd6859b
JS
1598 *apptag = 0xDEAD;
1599 phba->lpfc_injerr_wapp_cnt--;
4ac9b226
JS
1600 if (phba->lpfc_injerr_wapp_cnt == 0) {
1601 phba->lpfc_injerr_nportid = 0;
1602 phba->lpfc_injerr_lba =
1603 LPFC_INJERR_LBA_OFF;
1604 memset(&phba->lpfc_injerr_wwpn,
1605 0, sizeof(struct lpfc_name));
1606 }
9a6b09c0 1607 rc = BG_ERR_TGT | BG_ERR_CHECK;
f9bb2da1 1608
acd6859b 1609 lpfc_printf_log(phba, KERN_ERR, LOG_BG,
9a6b09c0 1610 "0813 BLKGRD: Injecting apptag error: "
acd6859b
JS
1611 "write lba x%lx\n", (unsigned long)lba);
1612 break;
9a6b09c0 1613 case SCSI_PROT_WRITE_STRIP:
acd6859b 1614 /*
9a6b09c0
JS
1615 * For WRITE_STRIP and WRITE_PASS,
1616 * force the error on data
1617 * being copied from SLI-Host to SLI-Port.
acd6859b 1618 */
f9bb2da1
JS
1619 *apptag = 0xDEAD;
1620 phba->lpfc_injerr_wapp_cnt--;
4ac9b226
JS
1621 if (phba->lpfc_injerr_wapp_cnt == 0) {
1622 phba->lpfc_injerr_nportid = 0;
1623 phba->lpfc_injerr_lba =
1624 LPFC_INJERR_LBA_OFF;
1625 memset(&phba->lpfc_injerr_wwpn,
1626 0, sizeof(struct lpfc_name));
1627 }
9a6b09c0 1628 rc = BG_ERR_INIT;
f9bb2da1
JS
1629
1630 lpfc_printf_log(phba, KERN_ERR, LOG_BG,
9a6b09c0 1631 "0812 BLKGRD: Injecting apptag error: "
f9bb2da1 1632 "write lba x%lx\n", (unsigned long)lba);
acd6859b 1633 break;
f9bb2da1 1634 }
acd6859b
JS
1635 }
1636 if (phba->lpfc_injerr_rapp_cnt) {
1637 switch (op) {
1638 case SCSI_PROT_READ_INSERT:
acd6859b
JS
1639 case SCSI_PROT_READ_STRIP:
1640 case SCSI_PROT_READ_PASS:
1641 /*
1642 * For READ_STRIP and READ_PASS, force the
1643 * error on data being read off the wire. It
1644 * should force an IO error to the driver.
1645 */
f9bb2da1
JS
1646 *apptag = 0xDEAD;
1647 phba->lpfc_injerr_rapp_cnt--;
4ac9b226
JS
1648 if (phba->lpfc_injerr_rapp_cnt == 0) {
1649 phba->lpfc_injerr_nportid = 0;
1650 phba->lpfc_injerr_lba =
1651 LPFC_INJERR_LBA_OFF;
1652 memset(&phba->lpfc_injerr_wwpn,
1653 0, sizeof(struct lpfc_name));
1654 }
acd6859b 1655 rc = BG_ERR_INIT;
f9bb2da1
JS
1656
1657 lpfc_printf_log(phba, KERN_ERR, LOG_BG,
acd6859b 1658 "0814 BLKGRD: Injecting apptag error: "
f9bb2da1 1659 "read lba x%lx\n", (unsigned long)lba);
acd6859b 1660 break;
f9bb2da1
JS
1661 }
1662 }
1663 }
1664
acd6859b 1665
f9bb2da1 1666 /* Should we change the Guard Tag */
acd6859b
JS
1667 if (new_guard) {
1668 if (phba->lpfc_injerr_wgrd_cnt) {
1669 switch (op) {
1670 case SCSI_PROT_WRITE_PASS:
9a6b09c0 1671 rc = BG_ERR_CHECK;
acd6859b 1672 /* Drop thru */
9a6b09c0
JS
1673
1674 case SCSI_PROT_WRITE_INSERT:
acd6859b 1675 /*
9a6b09c0
JS
1676 * For WRITE_INSERT, force the
1677 * error to be sent on the wire. It should be
1678 * detected by the Target.
acd6859b
JS
1679 */
1680 phba->lpfc_injerr_wgrd_cnt--;
4ac9b226
JS
1681 if (phba->lpfc_injerr_wgrd_cnt == 0) {
1682 phba->lpfc_injerr_nportid = 0;
1683 phba->lpfc_injerr_lba =
1684 LPFC_INJERR_LBA_OFF;
1685 memset(&phba->lpfc_injerr_wwpn,
1686 0, sizeof(struct lpfc_name));
1687 }
f9bb2da1 1688
9a6b09c0 1689 rc |= BG_ERR_TGT | BG_ERR_SWAP;
acd6859b 1690 /* Signals the caller to swap CRC->CSUM */
f9bb2da1 1691
acd6859b 1692 lpfc_printf_log(phba, KERN_ERR, LOG_BG,
9a6b09c0 1693 "0817 BLKGRD: Injecting guard error: "
acd6859b
JS
1694 "write lba x%lx\n", (unsigned long)lba);
1695 break;
9a6b09c0 1696 case SCSI_PROT_WRITE_STRIP:
acd6859b 1697 /*
9a6b09c0
JS
1698 * For WRITE_STRIP and WRITE_PASS,
1699 * force the error on data
1700 * being copied from SLI-Host to SLI-Port.
acd6859b
JS
1701 */
1702 phba->lpfc_injerr_wgrd_cnt--;
4ac9b226
JS
1703 if (phba->lpfc_injerr_wgrd_cnt == 0) {
1704 phba->lpfc_injerr_nportid = 0;
1705 phba->lpfc_injerr_lba =
1706 LPFC_INJERR_LBA_OFF;
1707 memset(&phba->lpfc_injerr_wwpn,
1708 0, sizeof(struct lpfc_name));
1709 }
f9bb2da1 1710
9a6b09c0 1711 rc = BG_ERR_INIT | BG_ERR_SWAP;
acd6859b
JS
1712 /* Signals the caller to swap CRC->CSUM */
1713
1714 lpfc_printf_log(phba, KERN_ERR, LOG_BG,
9a6b09c0 1715 "0816 BLKGRD: Injecting guard error: "
acd6859b
JS
1716 "write lba x%lx\n", (unsigned long)lba);
1717 break;
1718 }
1719 }
1720 if (phba->lpfc_injerr_rgrd_cnt) {
1721 switch (op) {
1722 case SCSI_PROT_READ_INSERT:
acd6859b
JS
1723 case SCSI_PROT_READ_STRIP:
1724 case SCSI_PROT_READ_PASS:
1725 /*
1726 * For READ_STRIP and READ_PASS, force the
1727 * error on data being read off the wire. It
1728 * should force an IO error to the driver.
1729 */
acd6859b 1730 phba->lpfc_injerr_rgrd_cnt--;
4ac9b226
JS
1731 if (phba->lpfc_injerr_rgrd_cnt == 0) {
1732 phba->lpfc_injerr_nportid = 0;
1733 phba->lpfc_injerr_lba =
1734 LPFC_INJERR_LBA_OFF;
1735 memset(&phba->lpfc_injerr_wwpn,
1736 0, sizeof(struct lpfc_name));
1737 }
acd6859b 1738
9a6b09c0 1739 rc = BG_ERR_INIT | BG_ERR_SWAP;
acd6859b
JS
1740 /* Signals the caller to swap CRC->CSUM */
1741
1742 lpfc_printf_log(phba, KERN_ERR, LOG_BG,
1743 "0818 BLKGRD: Injecting guard error: "
1744 "read lba x%lx\n", (unsigned long)lba);
1745 }
f9bb2da1
JS
1746 }
1747 }
acd6859b 1748
f9bb2da1
JS
1749 return rc;
1750}
1751#endif
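/*
 * Editor's note (illustrative summary, not part of the driver): the
 * BG_ERR_* bits returned by the injection helper above are consumed by
 * the DMA-prep callers below roughly as follows:
 *
 *   BG_ERR_TGT   - corrupt the tag on the wire so the Target detects it
 *   BG_ERR_INIT  - corrupt host-side data so the Initiator detects it
 *   BG_ERR_SWAP  - caller re-derives opcodes via lpfc_bg_err_opcodes(),
 *                  swapping CRC<->CSUM guards to force a guard miscompare
 *   BG_ERR_CHECK - caller clears its "checking" flag so the local HBA
 *                  does not reject the deliberately corrupted frame itself
 */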
1752
acd6859b
JS
1753/**
1754 * lpfc_sc_to_bg_opcodes - Determine the BlockGuard opcodes to be used with
1755 * the specified SCSI command.
1756 * @phba: The Hba for which this call is being executed.
6c8eea54
JS
1757 * @sc: The SCSI command to examine
1758 * @txop: (out) BlockGuard operation for transmitted data
1759 * @rxop: (out) BlockGuard operation for received data
1760 *
1761 * Returns: zero on success; non-zero if tx and/or rx op cannot be determined
1762 *
acd6859b 1763 **/
e2a0a9d6 1764static int
6c8eea54
JS
1765lpfc_sc_to_bg_opcodes(struct lpfc_hba *phba, struct scsi_cmnd *sc,
1766 uint8_t *txop, uint8_t *rxop)
e2a0a9d6
JS
1767{
1768 uint8_t guard_type = scsi_host_get_guard(sc->device->host);
6c8eea54 1769 uint8_t ret = 0;
e2a0a9d6
JS
1770
1771 if (guard_type == SHOST_DIX_GUARD_IP) {
1772 switch (scsi_get_prot_op(sc)) {
1773 case SCSI_PROT_READ_INSERT:
1774 case SCSI_PROT_WRITE_STRIP:
6c8eea54 1775 *rxop = BG_OP_IN_NODIF_OUT_CSUM;
4ac9b226 1776 *txop = BG_OP_IN_CSUM_OUT_NODIF;
e2a0a9d6
JS
1777 break;
1778
1779 case SCSI_PROT_READ_STRIP:
1780 case SCSI_PROT_WRITE_INSERT:
6c8eea54 1781 *rxop = BG_OP_IN_CRC_OUT_NODIF;
4ac9b226 1782 *txop = BG_OP_IN_NODIF_OUT_CRC;
e2a0a9d6
JS
1783 break;
1784
c6af4042
MP
1785 case SCSI_PROT_READ_PASS:
1786 case SCSI_PROT_WRITE_PASS:
6c8eea54 1787 *rxop = BG_OP_IN_CRC_OUT_CSUM;
4ac9b226 1788 *txop = BG_OP_IN_CSUM_OUT_CRC;
e2a0a9d6
JS
1789 break;
1790
e2a0a9d6
JS
1791 case SCSI_PROT_NORMAL:
1792 default:
6a9c52cf 1793 lpfc_printf_log(phba, KERN_ERR, LOG_BG,
7c56b9fd
JS
1794 "9063 BLKGRD: Bad op/guard:%d/IP combination\n",
1795 scsi_get_prot_op(sc));
6c8eea54 1796 ret = 1;
e2a0a9d6
JS
1797 break;
1798
1799 }
7c56b9fd 1800 } else {
e2a0a9d6
JS
1801 switch (scsi_get_prot_op(sc)) {
1802 case SCSI_PROT_READ_STRIP:
1803 case SCSI_PROT_WRITE_INSERT:
6c8eea54 1804 *rxop = BG_OP_IN_CRC_OUT_NODIF;
4ac9b226 1805 *txop = BG_OP_IN_NODIF_OUT_CRC;
e2a0a9d6
JS
1806 break;
1807
1808 case SCSI_PROT_READ_PASS:
1809 case SCSI_PROT_WRITE_PASS:
6c8eea54 1810 *rxop = BG_OP_IN_CRC_OUT_CRC;
4ac9b226 1811 *txop = BG_OP_IN_CRC_OUT_CRC;
e2a0a9d6
JS
1812 break;
1813
e2a0a9d6
JS
1814 case SCSI_PROT_READ_INSERT:
1815 case SCSI_PROT_WRITE_STRIP:
7c56b9fd 1816 *rxop = BG_OP_IN_NODIF_OUT_CRC;
4ac9b226 1817 *txop = BG_OP_IN_CRC_OUT_NODIF;
7c56b9fd
JS
1818 break;
1819
e2a0a9d6
JS
1820 case SCSI_PROT_NORMAL:
1821 default:
6a9c52cf 1822 lpfc_printf_log(phba, KERN_ERR, LOG_BG,
7c56b9fd
JS
1823 "9075 BLKGRD: Bad op/guard:%d/CRC combination\n",
1824 scsi_get_prot_op(sc));
6c8eea54 1825 ret = 1;
e2a0a9d6
JS
1826 break;
1827 }
e2a0a9d6
JS
1828 }
1829
6c8eea54 1830 return ret;
e2a0a9d6
JS
1831}
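/*
 * Illustrative sketch (editor's addition, not in the original source):
 * how a caller typically consumes lpfc_sc_to_bg_opcodes(). For a
 * WRITE_PASS command under SHOST_DIX_GUARD_IP this yields
 * rxop = BG_OP_IN_CRC_OUT_CSUM and txop = BG_OP_IN_CSUM_OUT_CRC, per
 * the switch above. The function name below is hypothetical.
 */
static int example_get_bg_ops(struct lpfc_hba *phba, struct scsi_cmnd *sc)
{
	uint8_t txop = 0, rxop = 0;

	if (lpfc_sc_to_bg_opcodes(phba, sc, &txop, &rxop))
		return 1;	/* unsupported op/guard combination */

	/* txop/rxop would now be programmed into a PDE6 or DISEED */
	return 0;
}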
1832
acd6859b
JS
1833#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
1834/**
1835 * lpfc_bg_err_opcodes - re-determine the BlockGuard opcodes to be used with
1836 * the specified SCSI command in order to force a guard tag error.
1837 * @phba: The Hba for which this call is being executed.
1838 * @sc: The SCSI command to examine
1839 * @txop: (out) BlockGuard operation for transmitted data
1840 * @rxop: (out) BlockGuard operation for received data
1841 *
1842 * Returns: zero on success; non-zero if tx and/or rx op cannot be determined
1843 *
1844 **/
1845static int
1846lpfc_bg_err_opcodes(struct lpfc_hba *phba, struct scsi_cmnd *sc,
1847 uint8_t *txop, uint8_t *rxop)
1848{
1849 uint8_t guard_type = scsi_host_get_guard(sc->device->host);
1850 uint8_t ret = 0;
1851
1852 if (guard_type == SHOST_DIX_GUARD_IP) {
1853 switch (scsi_get_prot_op(sc)) {
1854 case SCSI_PROT_READ_INSERT:
1855 case SCSI_PROT_WRITE_STRIP:
acd6859b 1856 *rxop = BG_OP_IN_NODIF_OUT_CRC;
4ac9b226 1857 *txop = BG_OP_IN_CRC_OUT_NODIF;
acd6859b
JS
1858 break;
1859
1860 case SCSI_PROT_READ_STRIP:
1861 case SCSI_PROT_WRITE_INSERT:
acd6859b 1862 *rxop = BG_OP_IN_CSUM_OUT_NODIF;
4ac9b226 1863 *txop = BG_OP_IN_NODIF_OUT_CSUM;
acd6859b
JS
1864 break;
1865
1866 case SCSI_PROT_READ_PASS:
1867 case SCSI_PROT_WRITE_PASS:
4ac9b226 1868 *rxop = BG_OP_IN_CSUM_OUT_CRC;
9a6b09c0 1869 *txop = BG_OP_IN_CRC_OUT_CSUM;
acd6859b
JS
1870 break;
1871
1872 case SCSI_PROT_NORMAL:
1873 default:
1874 break;
1875
1876 }
1877 } else {
1878 switch (scsi_get_prot_op(sc)) {
1879 case SCSI_PROT_READ_STRIP:
1880 case SCSI_PROT_WRITE_INSERT:
acd6859b 1881 *rxop = BG_OP_IN_CSUM_OUT_NODIF;
4ac9b226 1882 *txop = BG_OP_IN_NODIF_OUT_CSUM;
acd6859b
JS
1883 break;
1884
1885 case SCSI_PROT_READ_PASS:
1886 case SCSI_PROT_WRITE_PASS:
4ac9b226 1887 *rxop = BG_OP_IN_CSUM_OUT_CSUM;
9a6b09c0 1888 *txop = BG_OP_IN_CSUM_OUT_CSUM;
acd6859b
JS
1889 break;
1890
1891 case SCSI_PROT_READ_INSERT:
1892 case SCSI_PROT_WRITE_STRIP:
acd6859b 1893 *rxop = BG_OP_IN_NODIF_OUT_CSUM;
4ac9b226 1894 *txop = BG_OP_IN_CSUM_OUT_NODIF;
acd6859b
JS
1895 break;
1896
1897 case SCSI_PROT_NORMAL:
1898 default:
1899 break;
1900 }
1901 }
1902
1903 return ret;
1904}
1905#endif
1906
1907/**
1908 * lpfc_bg_setup_bpl - Setup BlockGuard BPL with no protection data
1909 * @phba: The Hba for which this call is being executed.
1910 * @sc: pointer to scsi command we're working on
1911 * @bpl: pointer to buffer list for protection groups
1912 * @datasegcnt: number of segments of data that have been dma mapped
1913 *
1914 * This function sets up BPL buffer list for protection groups of
e2a0a9d6
JS
1915 * type LPFC_PG_TYPE_NO_DIF
1916 *
1917 * This is usually used when the HBA is instructed to generate
1918 * DIFs and insert them into the data stream (or strip DIF from
1919 * the incoming data stream).
1920 *
1921 * The buffer list consists of just one protection group described
1922 * below:
1923 * +-------------------------+
6c8eea54
JS
1924 * start of prot group --> | PDE_5 |
1925 * +-------------------------+
1926 * | PDE_6 |
e2a0a9d6
JS
1927 * +-------------------------+
1928 * | Data BDE |
1929 * +-------------------------+
1930 * |more Data BDE's ... (opt)|
1931 * +-------------------------+
1932 *
e2a0a9d6
JS
1933 *
1934 * Note: Data s/g buffers have been dma mapped
acd6859b
JS
1935 *
1936 * Returns the number of BDEs added to the BPL.
1937 **/
e2a0a9d6
JS
1938static int
1939lpfc_bg_setup_bpl(struct lpfc_hba *phba, struct scsi_cmnd *sc,
1940 struct ulp_bde64 *bpl, int datasegcnt)
1941{
1942 struct scatterlist *sgde = NULL; /* s/g data entry */
6c8eea54
JS
1943 struct lpfc_pde5 *pde5 = NULL;
1944 struct lpfc_pde6 *pde6 = NULL;
e2a0a9d6 1945 dma_addr_t physaddr;
6c8eea54 1946 int i = 0, num_bde = 0, status;
e2a0a9d6 1947 int datadir = sc->sc_data_direction;
acd6859b
JS
1948 uint32_t rc;
1949 uint32_t checking = 1;
e2a0a9d6 1950 uint32_t reftag;
7c56b9fd 1951 unsigned blksize;
6c8eea54 1952 uint8_t txop, rxop;
e2a0a9d6 1953
6c8eea54
JS
1954 status = lpfc_sc_to_bg_opcodes(phba, sc, &txop, &rxop);
1955 if (status)
e2a0a9d6
JS
1956 goto out;
1957
6c8eea54 1958 /* extract some info from the scsi command for pde*/
e2a0a9d6 1959 blksize = lpfc_cmd_blksize(sc);
acd6859b 1960 reftag = (uint32_t)scsi_get_lba(sc); /* Truncate LBA */
e2a0a9d6 1961
f9bb2da1 1962#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
4ac9b226 1963 rc = lpfc_bg_err_inject(phba, sc, &reftag, NULL, 1);
acd6859b 1964 if (rc) {
9a6b09c0 1965 if (rc & BG_ERR_SWAP)
acd6859b 1966 lpfc_bg_err_opcodes(phba, sc, &txop, &rxop);
9a6b09c0 1967 if (rc & BG_ERR_CHECK)
acd6859b
JS
1968 checking = 0;
1969 }
f9bb2da1
JS
1970#endif
1971
6c8eea54
JS
1972 /* setup PDE5 with what we have */
1973 pde5 = (struct lpfc_pde5 *) bpl;
1974 memset(pde5, 0, sizeof(struct lpfc_pde5));
1975 bf_set(pde5_type, pde5, LPFC_PDE5_DESCRIPTOR);
6c8eea54 1976
bc73905a 1977 /* Endianness conversion if necessary for PDE5 */
589a52d6 1978 pde5->word0 = cpu_to_le32(pde5->word0);
7c56b9fd 1979 pde5->reftag = cpu_to_le32(reftag);
589a52d6 1980
6c8eea54
JS
1981 /* advance bpl and increment bde count */
1982 num_bde++;
1983 bpl++;
1984 pde6 = (struct lpfc_pde6 *) bpl;
1985
1986 /* setup PDE6 with the rest of the info */
1987 memset(pde6, 0, sizeof(struct lpfc_pde6));
1988 bf_set(pde6_type, pde6, LPFC_PDE6_DESCRIPTOR);
1989 bf_set(pde6_optx, pde6, txop);
1990 bf_set(pde6_oprx, pde6, rxop);
1991 if (datadir == DMA_FROM_DEVICE) {
acd6859b
JS
1992 bf_set(pde6_ce, pde6, checking);
1993 bf_set(pde6_re, pde6, checking);
6c8eea54
JS
1994 }
1995 bf_set(pde6_ai, pde6, 1);
7c56b9fd
JS
1996 bf_set(pde6_ae, pde6, 0);
1997 bf_set(pde6_apptagval, pde6, 0);
e2a0a9d6 1998
bc73905a 1999 /* Endianness conversion if necessary for PDE6 */
589a52d6
JS
2000 pde6->word0 = cpu_to_le32(pde6->word0);
2001 pde6->word1 = cpu_to_le32(pde6->word1);
2002 pde6->word2 = cpu_to_le32(pde6->word2);
2003
6c8eea54 2004 /* advance bpl and increment bde count */
e2a0a9d6
JS
2005 num_bde++;
2006 bpl++;
2007
2008 /* assumption: caller has already run dma_map_sg on command data */
2009 scsi_for_each_sg(sc, sgde, datasegcnt, i) {
2010 physaddr = sg_dma_address(sgde);
2011 bpl->addrLow = le32_to_cpu(putPaddrLow(physaddr));
2012 bpl->addrHigh = le32_to_cpu(putPaddrHigh(physaddr));
2013 bpl->tus.f.bdeSize = sg_dma_len(sgde);
2014 if (datadir == DMA_TO_DEVICE)
2015 bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64;
2016 else
2017 bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64I;
2018 bpl->tus.w = le32_to_cpu(bpl->tus.w);
2019 bpl++;
2020 num_bde++;
2021 }
2022
2023out:
2024 return num_bde;
2025}
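/*
 * Worked example (editor's addition): a write that maps to two data
 * segments with no separate protection buffers produces PDE5 + PDE6 +
 * two data BDEs, so lpfc_bg_setup_bpl() returns num_bde = 4. The
 * "2 or more entries" check in lpfc_bg_scsi_prep_dma_buf_s3() below
 * reflects the mandatory PDE5/PDE6 pair.
 */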
2026
acd6859b
JS
2027/**
2028 * lpfc_bg_setup_bpl_prot - Setup BlockGuard BPL with protection data
2029 * @phba: The Hba for which this call is being executed.
2030 * @sc: pointer to scsi command we're working on
2031 * @bpl: pointer to buffer list for protection groups
2032 * @datacnt: number of segments of data that have been dma mapped
2033 * @protcnt: number of segments of protection data that have been dma mapped
2034 *
2035 * This function sets up BPL buffer list for protection groups of
2036 * type LPFC_PG_TYPE_DIF
e2a0a9d6
JS
2037 *
2038 * This is usually used when DIFs are in their own buffers,
2039 * separate from the data. The HBA can then be instructed
2040 * to place the DIFs in the outgoing stream. For read operations,
2041 * the HBA can extract the DIFs and place them in the DIF buffers.
2042 *
2043 * The buffer list for this type consists of one or more of the
2044 * protection groups described below:
2045 * +-------------------------+
6c8eea54 2046 * start of first prot group --> | PDE_5 |
e2a0a9d6 2047 * +-------------------------+
6c8eea54
JS
2048 * | PDE_6 |
2049 * +-------------------------+
2050 * | PDE_7 (Prot BDE) |
e2a0a9d6
JS
2051 * +-------------------------+
2052 * | Data BDE |
2053 * +-------------------------+
2054 * |more Data BDE's ... (opt)|
2055 * +-------------------------+
6c8eea54 2056 * start of new prot group --> | PDE_5 |
e2a0a9d6
JS
2057 * +-------------------------+
2058 * | ... |
2059 * +-------------------------+
2060 *
e2a0a9d6
JS
2061 * Note: It is assumed that both data and protection s/g buffers have been
2062 * mapped for DMA
acd6859b
JS
2063 *
2064 * Returns the number of BDEs added to the BPL.
2065 **/
e2a0a9d6
JS
2066static int
2067lpfc_bg_setup_bpl_prot(struct lpfc_hba *phba, struct scsi_cmnd *sc,
2068 struct ulp_bde64 *bpl, int datacnt, int protcnt)
2069{
2070 struct scatterlist *sgde = NULL; /* s/g data entry */
2071 struct scatterlist *sgpe = NULL; /* s/g prot entry */
6c8eea54
JS
2072 struct lpfc_pde5 *pde5 = NULL;
2073 struct lpfc_pde6 *pde6 = NULL;
7f86059a 2074 struct lpfc_pde7 *pde7 = NULL;
e2a0a9d6
JS
2075 dma_addr_t dataphysaddr, protphysaddr;
2076 unsigned short curr_data = 0, curr_prot = 0;
7f86059a
JS
2077 unsigned int split_offset;
2078 unsigned int protgroup_len, protgroup_offset = 0, protgroup_remainder;
e2a0a9d6
JS
2079 unsigned int protgrp_blks, protgrp_bytes;
2080 unsigned int remainder, subtotal;
6c8eea54 2081 int status;
e2a0a9d6
JS
2082 int datadir = sc->sc_data_direction;
2083 unsigned char pgdone = 0, alldone = 0;
2084 unsigned blksize;
acd6859b
JS
2085 uint32_t rc;
2086 uint32_t checking = 1;
e2a0a9d6 2087 uint32_t reftag;
6c8eea54 2088 uint8_t txop, rxop;
e2a0a9d6
JS
2089 int num_bde = 0;
2090
2091 sgpe = scsi_prot_sglist(sc);
2092 sgde = scsi_sglist(sc);
2093
2094 if (!sgpe || !sgde) {
2095 lpfc_printf_log(phba, KERN_ERR, LOG_FCP,
acd6859b
JS
2096 "9020 Invalid s/g entry: data=0x%p prot=0x%p\n",
2097 sgpe, sgde);
2098 return 0;
2099 }
2100
2101 status = lpfc_sc_to_bg_opcodes(phba, sc, &txop, &rxop);
2102 if (status)
2103 goto out;
2104
2105 /* extract some info from the scsi command */
2106 blksize = lpfc_cmd_blksize(sc);
2107 reftag = (uint32_t)scsi_get_lba(sc); /* Truncate LBA */
2108
2109#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
4ac9b226 2110 rc = lpfc_bg_err_inject(phba, sc, &reftag, NULL, 1);
acd6859b 2111 if (rc) {
9a6b09c0 2112 if (rc & BG_ERR_SWAP)
acd6859b 2113 lpfc_bg_err_opcodes(phba, sc, &txop, &rxop);
9a6b09c0 2114 if (rc & BG_ERR_CHECK)
acd6859b
JS
2115 checking = 0;
2116 }
2117#endif
2118
2119 split_offset = 0;
2120 do {
2121 /* setup PDE5 with what we have */
2122 pde5 = (struct lpfc_pde5 *) bpl;
2123 memset(pde5, 0, sizeof(struct lpfc_pde5));
2124 bf_set(pde5_type, pde5, LPFC_PDE5_DESCRIPTOR);
2125
2126 /* Endianness conversion if necessary for PDE5 */
2127 pde5->word0 = cpu_to_le32(pde5->word0);
2128 pde5->reftag = cpu_to_le32(reftag);
2129
2130 /* advance bpl and increment bde count */
2131 num_bde++;
2132 bpl++;
2133 pde6 = (struct lpfc_pde6 *) bpl;
2134
2135 /* setup PDE6 with the rest of the info */
2136 memset(pde6, 0, sizeof(struct lpfc_pde6));
2137 bf_set(pde6_type, pde6, LPFC_PDE6_DESCRIPTOR);
2138 bf_set(pde6_optx, pde6, txop);
2139 bf_set(pde6_oprx, pde6, rxop);
2140 bf_set(pde6_ce, pde6, checking);
2141 bf_set(pde6_re, pde6, checking);
2142 bf_set(pde6_ai, pde6, 1);
2143 bf_set(pde6_ae, pde6, 0);
2144 bf_set(pde6_apptagval, pde6, 0);
2145
2146 /* Endianness conversion if necessary for PDE6 */
2147 pde6->word0 = cpu_to_le32(pde6->word0);
2148 pde6->word1 = cpu_to_le32(pde6->word1);
2149 pde6->word2 = cpu_to_le32(pde6->word2);
2150
2151 /* advance bpl and increment bde count */
2152 num_bde++;
2153 bpl++;
2154
2155 /* setup the first BDE that points to protection buffer */
2156 protphysaddr = sg_dma_address(sgpe) + protgroup_offset;
2157 protgroup_len = sg_dma_len(sgpe) - protgroup_offset;
2158
2159 /* must be integer multiple of the DIF block length */
2160 BUG_ON(protgroup_len % 8);
2161
2162 pde7 = (struct lpfc_pde7 *) bpl;
2163 memset(pde7, 0, sizeof(struct lpfc_pde7));
2164 bf_set(pde7_type, pde7, LPFC_PDE7_DESCRIPTOR);
2165
2166 pde7->addrHigh = le32_to_cpu(putPaddrHigh(protphysaddr));
2167 pde7->addrLow = le32_to_cpu(putPaddrLow(protphysaddr));
2168
2169 protgrp_blks = protgroup_len / 8;
2170 protgrp_bytes = protgrp_blks * blksize;
2171
2172 /* check if this pde is crossing the 4K boundary; if so split */
2173 if ((pde7->addrLow & 0xfff) + protgroup_len > 0x1000) {
2174 protgroup_remainder = 0x1000 - (pde7->addrLow & 0xfff);
2175 protgroup_offset += protgroup_remainder;
2176 protgrp_blks = protgroup_remainder / 8;
2177 protgrp_bytes = protgrp_blks * blksize;
2178 } else {
2179 protgroup_offset = 0;
2180 curr_prot++;
2181 }
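		/*
		 * Worked example (editor's addition): with 512-byte blocks,
		 * a protection buffer at offset 0xf80 of a 4K page with
		 * protgroup_len = 0x100 crosses the boundary, so only
		 * protgroup_remainder = 0x80 bytes (16 DIF tuples, i.e.
		 * 16 blocks = 8192 data bytes) are consumed this pass; the
		 * loop re-enters with protgroup_offset = 0x80 for the rest.
		 */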
2182
2183 num_bde++;
2184
2185 /* setup BDE's for data blocks associated with DIF data */
2186 pgdone = 0;
2187 subtotal = 0; /* total bytes processed for current prot grp */
2188 while (!pgdone) {
2189 if (!sgde) {
2190 lpfc_printf_log(phba, KERN_ERR, LOG_BG,
2191 "9065 BLKGRD:%s Invalid data segment\n",
2192 __func__);
2193 return 0;
2194 }
2195 bpl++;
2196 dataphysaddr = sg_dma_address(sgde) + split_offset;
2197 bpl->addrLow = le32_to_cpu(putPaddrLow(dataphysaddr));
2198 bpl->addrHigh = le32_to_cpu(putPaddrHigh(dataphysaddr));
2199
2200 remainder = sg_dma_len(sgde) - split_offset;
2201
2202 if ((subtotal + remainder) <= protgrp_bytes) {
2203 /* we can use this whole buffer */
2204 bpl->tus.f.bdeSize = remainder;
2205 split_offset = 0;
2206
2207 if ((subtotal + remainder) == protgrp_bytes)
2208 pgdone = 1;
2209 } else {
2210 /* must split this buffer with next prot grp */
2211 bpl->tus.f.bdeSize = protgrp_bytes - subtotal;
2212 split_offset += bpl->tus.f.bdeSize;
2213 }
2214
2215 subtotal += bpl->tus.f.bdeSize;
2216
2217 if (datadir == DMA_TO_DEVICE)
2218 bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64;
2219 else
2220 bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64I;
2221 bpl->tus.w = le32_to_cpu(bpl->tus.w);
2222
2223 num_bde++;
2224 curr_data++;
2225
2226 if (split_offset)
2227 break;
2228
2229 /* Move to the next s/g segment if possible */
2230 sgde = sg_next(sgde);
2231
2232 }
2233
2234 if (protgroup_offset) {
2235 /* update the reference tag */
2236 reftag += protgrp_blks;
2237 bpl++;
2238 continue;
2239 }
2240
2241 /* are we done ? */
2242 if (curr_prot == protcnt) {
2243 alldone = 1;
2244 } else if (curr_prot < protcnt) {
2245 /* advance to next prot buffer */
2246 sgpe = sg_next(sgpe);
2247 bpl++;
2248
2249 /* update the reference tag */
2250 reftag += protgrp_blks;
2251 } else {
2252 /* if we're here, we have a bug */
2253 lpfc_printf_log(phba, KERN_ERR, LOG_BG,
2254 "9054 BLKGRD: bug in %s\n", __func__);
2255 }
2256
2257 } while (!alldone);
2258out:
2259
2260 return num_bde;
2261}
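/*
 * Editor's note (illustrative): each protection group built above costs
 * three descriptor BDEs (PDE5 + PDE6 + PDE7) plus its data BDEs, which
 * is why the DIF-buffer path in lpfc_bg_scsi_prep_dma_buf_s3() below
 * insists on num_bde >= 3.
 */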
2262
2263/**
2264 * lpfc_bg_setup_sgl - Setup BlockGuard SGL with no protection data
2265 * @phba: The Hba for which this call is being executed.
2266 * @sc: pointer to scsi command we're working on
2267 * @sgl: pointer to buffer list for protection groups
2268 * @datasegcnt: number of segments of data that have been dma mapped
2269 *
2270 * This function sets up SGL buffer list for protection groups of
2271 * type LPFC_PG_TYPE_NO_DIF
2272 *
2273 * This is usually used when the HBA is instructed to generate
2274 * DIFs and insert them into the data stream (or strip DIF from
2275 * the incoming data stream).
2276 *
2277 * The buffer list consists of just one protection group described
2278 * below:
2279 * +-------------------------+
2280 * start of prot group --> | DISEED |
2281 * +-------------------------+
2282 * | Data SGE |
2283 * +-------------------------+
2284 * |more Data SGE's ... (opt)|
2285 * +-------------------------+
2286 *
2287 *
2288 * Note: Data s/g buffers have been dma mapped
2289 *
2290 * Returns the number of SGEs added to the SGL.
2291 **/
2292static int
2293lpfc_bg_setup_sgl(struct lpfc_hba *phba, struct scsi_cmnd *sc,
2294 struct sli4_sge *sgl, int datasegcnt)
2295{
2296 struct scatterlist *sgde = NULL; /* s/g data entry */
2297 struct sli4_sge_diseed *diseed = NULL;
2298 dma_addr_t physaddr;
2299 int i = 0, num_sge = 0, status;
2300 int datadir = sc->sc_data_direction;
2301 uint32_t reftag;
2302 unsigned blksize;
2303 uint8_t txop, rxop;
2304 uint32_t rc;
2305 uint32_t checking = 1;
2306 uint32_t dma_len;
2307 uint32_t dma_offset = 0;
2308
2309 status = lpfc_sc_to_bg_opcodes(phba, sc, &txop, &rxop);
2310 if (status)
2311 goto out;
2312
2313 /* extract some info from the scsi command for pde*/
2314 blksize = lpfc_cmd_blksize(sc);
2315 reftag = (uint32_t)scsi_get_lba(sc); /* Truncate LBA */
2316
2317#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
4ac9b226 2318 rc = lpfc_bg_err_inject(phba, sc, &reftag, NULL, 1);
acd6859b 2319 if (rc) {
9a6b09c0 2320 if (rc & BG_ERR_SWAP)
acd6859b 2321 lpfc_bg_err_opcodes(phba, sc, &txop, &rxop);
9a6b09c0 2322 if (rc & BG_ERR_CHECK)
acd6859b
JS
2323 checking = 0;
2324 }
2325#endif
2326
2327 /* setup DISEED with what we have */
2328 diseed = (struct sli4_sge_diseed *) sgl;
2329 memset(diseed, 0, sizeof(struct sli4_sge_diseed));
2330 bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_DISEED);
2331
2332 /* Endianness conversion if necessary */
2333 diseed->ref_tag = cpu_to_le32(reftag);
2334 diseed->ref_tag_tran = diseed->ref_tag;
2335
2336 /* setup DISEED with the rest of the info */
2337 bf_set(lpfc_sli4_sge_dif_optx, diseed, txop);
2338 bf_set(lpfc_sli4_sge_dif_oprx, diseed, rxop);
2339 if (datadir == DMA_FROM_DEVICE) {
2340 bf_set(lpfc_sli4_sge_dif_ce, diseed, checking);
2341 bf_set(lpfc_sli4_sge_dif_re, diseed, checking);
2342 }
2343 bf_set(lpfc_sli4_sge_dif_ai, diseed, 1);
2344 bf_set(lpfc_sli4_sge_dif_me, diseed, 0);
2345
2346 /* Endianness conversion if necessary for DISEED */
2347 diseed->word2 = cpu_to_le32(diseed->word2);
2348 diseed->word3 = cpu_to_le32(diseed->word3);
2349
2350 /* advance bpl and increment sge count */
2351 num_sge++;
2352 sgl++;
2353
2354 /* assumption: caller has already run dma_map_sg on command data */
2355 scsi_for_each_sg(sc, sgde, datasegcnt, i) {
2356 physaddr = sg_dma_address(sgde);
2357 dma_len = sg_dma_len(sgde);
2358 sgl->addr_lo = cpu_to_le32(putPaddrLow(physaddr));
2359 sgl->addr_hi = cpu_to_le32(putPaddrHigh(physaddr));
2360 if ((i + 1) == datasegcnt)
2361 bf_set(lpfc_sli4_sge_last, sgl, 1);
2362 else
2363 bf_set(lpfc_sli4_sge_last, sgl, 0);
2364 bf_set(lpfc_sli4_sge_offset, sgl, dma_offset);
2365 bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_DATA);
2366
2367 sgl->sge_len = cpu_to_le32(dma_len);
2368 dma_offset += dma_len;
2369
2370 sgl++;
2371 num_sge++;
2372 }
2373
2374out:
2375 return num_sge;
2376}
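/*
 * Editor's note (illustrative): the SLI-4 no-DIF layout is one DISEED
 * SGE followed by the data SGEs, so even a single-segment command
 * returns num_sge = 2 - the minimum that lpfc_bg_scsi_prep_dma_buf_s4()
 * accepts for this protection group type.
 */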
2377
2378/**
2379 * lpfc_bg_setup_sgl_prot - Setup BlockGuard SGL with protection data
2380 * @phba: The Hba for which this call is being executed.
2381 * @sc: pointer to scsi command we're working on
2382 * @sgl: pointer to buffer list for protection groups
2383 * @datacnt: number of segments of data that have been dma mapped
2384 * @protcnt: number of segments of protection data that have been dma mapped
2385 *
2386 * This function sets up SGL buffer list for protection groups of
2387 * type LPFC_PG_TYPE_DIF
2388 *
2389 * This is usually used when DIFs are in their own buffers,
2390 * separate from the data. The HBA can then be instructed
2391 * to place the DIFs in the outgoing stream. For read operations,
2392 * the HBA can extract the DIFs and place them in the DIF buffers.
2393 *
2394 * The buffer list for this type consists of one or more of the
2395 * protection groups described below:
2396 * +-------------------------+
2397 * start of first prot group --> | DISEED |
2398 * +-------------------------+
2399 * | DIF (Prot SGE) |
2400 * +-------------------------+
2401 * | Data SGE |
2402 * +-------------------------+
2403 * |more Data SGE's ... (opt)|
2404 * +-------------------------+
2405 * start of new prot group --> | DISEED |
2406 * +-------------------------+
2407 * | ... |
2408 * +-------------------------+
2409 *
2410 * Note: It is assumed that both data and protection s/g buffers have been
2411 * mapped for DMA
2412 *
2413 * Returns the number of SGEs added to the SGL.
2414 **/
2415static int
2416lpfc_bg_setup_sgl_prot(struct lpfc_hba *phba, struct scsi_cmnd *sc,
2417 struct sli4_sge *sgl, int datacnt, int protcnt)
2418{
2419 struct scatterlist *sgde = NULL; /* s/g data entry */
2420 struct scatterlist *sgpe = NULL; /* s/g prot entry */
2421 struct sli4_sge_diseed *diseed = NULL;
2422 dma_addr_t dataphysaddr, protphysaddr;
2423 unsigned short curr_data = 0, curr_prot = 0;
2424 unsigned int split_offset;
2425 unsigned int protgroup_len, protgroup_offset = 0, protgroup_remainder;
2426 unsigned int protgrp_blks, protgrp_bytes;
2427 unsigned int remainder, subtotal;
2428 int status;
2429 unsigned char pgdone = 0, alldone = 0;
2430 unsigned blksize;
2431 uint32_t reftag;
2432 uint8_t txop, rxop;
2433 uint32_t dma_len;
2434 uint32_t rc;
2435 uint32_t checking = 1;
2436 uint32_t dma_offset = 0;
2437 int num_sge = 0;
2438
2439 sgpe = scsi_prot_sglist(sc);
2440 sgde = scsi_sglist(sc);
2441
2442 if (!sgpe || !sgde) {
2443 lpfc_printf_log(phba, KERN_ERR, LOG_FCP,
2444 "9082 Invalid s/g entry: data=0x%p prot=0x%p\n",
e2a0a9d6
JS
2445 sgpe, sgde);
2446 return 0;
2447 }
2448
6c8eea54
JS
2449 status = lpfc_sc_to_bg_opcodes(phba, sc, &txop, &rxop);
2450 if (status)
e2a0a9d6
JS
2451 goto out;
2452
6c8eea54 2453 /* extract some info from the scsi command */
e2a0a9d6 2454 blksize = lpfc_cmd_blksize(sc);
acd6859b 2455 reftag = (uint32_t)scsi_get_lba(sc); /* Truncate LBA */
e2a0a9d6 2456
f9bb2da1 2457#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
4ac9b226 2458 rc = lpfc_bg_err_inject(phba, sc, &reftag, NULL, 1);
acd6859b 2459 if (rc) {
9a6b09c0 2460 if (rc & BG_ERR_SWAP)
acd6859b 2461 lpfc_bg_err_opcodes(phba, sc, &txop, &rxop);
9a6b09c0 2462 if (rc & BG_ERR_CHECK)
acd6859b
JS
2463 checking = 0;
2464 }
f9bb2da1
JS
2465#endif
2466
e2a0a9d6
JS
2467 split_offset = 0;
2468 do {
acd6859b
JS
2469 /* setup DISEED with what we have */
2470 diseed = (struct sli4_sge_diseed *) sgl;
2471 memset(diseed, 0, sizeof(struct sli4_sge_diseed));
2472 bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_DISEED);
2473
2474 /* Endianness conversion if necessary */
2475 diseed->ref_tag = cpu_to_le32(reftag);
2476 diseed->ref_tag_tran = diseed->ref_tag;
2477
2478 /* setup DISEED with the rest of the info */
2479 bf_set(lpfc_sli4_sge_dif_optx, diseed, txop);
2480 bf_set(lpfc_sli4_sge_dif_oprx, diseed, rxop);
2481 bf_set(lpfc_sli4_sge_dif_ce, diseed, checking);
2482 bf_set(lpfc_sli4_sge_dif_re, diseed, checking);
2483 bf_set(lpfc_sli4_sge_dif_ai, diseed, 1);
2484 bf_set(lpfc_sli4_sge_dif_me, diseed, 0);
2485
2486 /* Endianness conversion if necessary for DISEED */
2487 diseed->word2 = cpu_to_le32(diseed->word2);
2488 diseed->word3 = cpu_to_le32(diseed->word3);
2489
2490 /* advance sgl and increment bde count */
2491 num_sge++;
2492 sgl++;
e2a0a9d6
JS
2493
2494 /* setup the first BDE that points to protection buffer */
7f86059a
JS
2495 protphysaddr = sg_dma_address(sgpe) + protgroup_offset;
2496 protgroup_len = sg_dma_len(sgpe) - protgroup_offset;
e2a0a9d6 2497
e2a0a9d6
JS
2498 /* must be integer multiple of the DIF block length */
2499 BUG_ON(protgroup_len % 8);
2500
acd6859b
JS
2501 /* Now setup DIF SGE */
2502 sgl->word2 = 0;
2503 bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_DIF);
2504 sgl->addr_hi = le32_to_cpu(putPaddrHigh(protphysaddr));
2505 sgl->addr_lo = le32_to_cpu(putPaddrLow(protphysaddr));
2506 sgl->word2 = cpu_to_le32(sgl->word2);
7f86059a 2507
e2a0a9d6
JS
2508 protgrp_blks = protgroup_len / 8;
2509 protgrp_bytes = protgrp_blks * blksize;
2510
acd6859b
JS
2511 /* check if DIF SGE is crossing the 4K boundary; if so split */
2512 if ((sgl->addr_lo & 0xfff) + protgroup_len > 0x1000) {
2513 protgroup_remainder = 0x1000 - (sgl->addr_lo & 0xfff);
7f86059a
JS
2514 protgroup_offset += protgroup_remainder;
2515 protgrp_blks = protgroup_remainder / 8;
7c56b9fd 2516 protgrp_bytes = protgrp_blks * blksize;
7f86059a
JS
2517 } else {
2518 protgroup_offset = 0;
2519 curr_prot++;
2520 }
e2a0a9d6 2521
acd6859b 2522 num_sge++;
e2a0a9d6 2523
acd6859b 2524 /* setup SGE's for data blocks associated with DIF data */
e2a0a9d6
JS
2525 pgdone = 0;
2526 subtotal = 0; /* total bytes processed for current prot grp */
2527 while (!pgdone) {
2528 if (!sgde) {
6a9c52cf 2529 lpfc_printf_log(phba, KERN_ERR, LOG_BG,
acd6859b 2530 "9086 BLKGRD:%s Invalid data segment\n",
e2a0a9d6
JS
2531 __func__);
2532 return 0;
2533 }
acd6859b 2534 sgl++;
e2a0a9d6 2535 dataphysaddr = sg_dma_address(sgde) + split_offset;
e2a0a9d6
JS
2536
2537 remainder = sg_dma_len(sgde) - split_offset;
2538
2539 if ((subtotal + remainder) <= protgrp_bytes) {
2540 /* we can use this whole buffer */
acd6859b 2541 dma_len = remainder;
e2a0a9d6
JS
2542 split_offset = 0;
2543
2544 if ((subtotal + remainder) == protgrp_bytes)
2545 pgdone = 1;
2546 } else {
2547 /* must split this buffer with next prot grp */
acd6859b
JS
2548 dma_len = protgrp_bytes - subtotal;
2549 split_offset += dma_len;
e2a0a9d6
JS
2550 }
2551
acd6859b 2552 subtotal += dma_len;
e2a0a9d6 2553
acd6859b
JS
2554 sgl->addr_lo = cpu_to_le32(putPaddrLow(dataphysaddr));
2555 sgl->addr_hi = cpu_to_le32(putPaddrHigh(dataphysaddr));
2556 bf_set(lpfc_sli4_sge_last, sgl, 0);
2557 bf_set(lpfc_sli4_sge_offset, sgl, dma_offset);
2558 bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_DATA);
e2a0a9d6 2559
acd6859b
JS
2560 sgl->sge_len = cpu_to_le32(dma_len);
2561 dma_offset += dma_len;
2562
2563 num_sge++;
e2a0a9d6
JS
2564 curr_data++;
2565
2566 if (split_offset)
2567 break;
2568
2569 /* Move to the next s/g segment if possible */
2570 sgde = sg_next(sgde);
2571 }
2572
7f86059a
JS
2573 if (protgroup_offset) {
2574 /* update the reference tag */
2575 reftag += protgrp_blks;
acd6859b 2576 sgl++;
7f86059a
JS
2577 continue;
2578 }
2579
e2a0a9d6
JS
2580 /* are we done ? */
2581 if (curr_prot == protcnt) {
acd6859b 2582 bf_set(lpfc_sli4_sge_last, sgl, 1);
e2a0a9d6
JS
2583 alldone = 1;
2584 } else if (curr_prot < protcnt) {
2585 /* advance to next prot buffer */
2586 sgpe = sg_next(sgpe);
acd6859b 2587 sgl++;
e2a0a9d6
JS
2588
2589 /* update the reference tag */
2590 reftag += protgrp_blks;
2591 } else {
2592 /* if we're here, we have a bug */
6a9c52cf 2593 lpfc_printf_log(phba, KERN_ERR, LOG_BG,
acd6859b 2594 "9085 BLKGRD: bug in %s\n", __func__);
e2a0a9d6
JS
2595 }
2596
2597 } while (!alldone);
acd6859b 2598
e2a0a9d6
JS
2599out:
2600
acd6859b 2601 return num_sge;
e2a0a9d6 2602}
7f86059a 2603
acd6859b
JS
2604/**
2605 * lpfc_prot_group_type - Get protection group type of SCSI command
2606 * @phba: The Hba for which this call is being executed.
2607 * @sc: pointer to scsi command we're working on
2608 *
e2a0a9d6
JS
2609 * Given a SCSI command that supports DIF, determine the composition of the
2610 * protection groups involved in setting up the buffer lists.
2611 *
acd6859b
JS
2612 * Returns: Protection group type (with or without DIF)
2613 *
2614 **/
e2a0a9d6
JS
2615static int
2616lpfc_prot_group_type(struct lpfc_hba *phba, struct scsi_cmnd *sc)
2617{
2618 int ret = LPFC_PG_TYPE_INVALID;
2619 unsigned char op = scsi_get_prot_op(sc);
2620
2621 switch (op) {
2622 case SCSI_PROT_READ_STRIP:
2623 case SCSI_PROT_WRITE_INSERT:
2624 ret = LPFC_PG_TYPE_NO_DIF;
2625 break;
2626 case SCSI_PROT_READ_INSERT:
2627 case SCSI_PROT_WRITE_STRIP:
2628 case SCSI_PROT_READ_PASS:
2629 case SCSI_PROT_WRITE_PASS:
e2a0a9d6
JS
2630 ret = LPFC_PG_TYPE_DIF_BUF;
2631 break;
2632 default:
2633 lpfc_printf_log(phba, KERN_ERR, LOG_FCP,
2634 "9021 Unsupported protection op:%d\n", op);
2635 break;
2636 }
2637
2638 return ret;
2639}
2640
acd6859b
JS
2641/**
2642 * lpfc_bg_scsi_prep_dma_buf_s3 - DMA mapping for scsi buffer to SLI3 IF spec
2643 * @phba: The Hba for which this call is being executed.
2644 * @lpfc_cmd: The scsi buffer which is going to be prep'ed.
2645 *
e2a0a9d6
JS
2646 * This is the protection/DIF aware version of
2647 * lpfc_scsi_prep_dma_buf(). It may be a good idea to combine the
2648 * two functions eventually, but for now, it's here.
acd6859b 2649 **/
e2a0a9d6 2650static int
acd6859b 2651lpfc_bg_scsi_prep_dma_buf_s3(struct lpfc_hba *phba,
e2a0a9d6
JS
2652 struct lpfc_scsi_buf *lpfc_cmd)
2653{
2654 struct scsi_cmnd *scsi_cmnd = lpfc_cmd->pCmd;
2655 struct fcp_cmnd *fcp_cmnd = lpfc_cmd->fcp_cmnd;
2656 struct ulp_bde64 *bpl = lpfc_cmd->fcp_bpl;
2657 IOCB_t *iocb_cmd = &lpfc_cmd->cur_iocbq.iocb;
2658 uint32_t num_bde = 0;
2659 int datasegcnt, protsegcnt, datadir = scsi_cmnd->sc_data_direction;
2660 int prot_group_type = 0;
2661 int diflen, fcpdl;
2662 unsigned blksize;
2663
2664 /*
2665 * Start the lpfc command prep by bumping the bpl beyond fcp_cmnd
2666 * fcp_rsp regions to the first data bde entry
2667 */
2668 bpl += 2;
2669 if (scsi_sg_count(scsi_cmnd)) {
2670 /*
2671 * The driver stores the segment count returned from pci_map_sg
2672 * because this is a count of dma-mappings used to map the use_sg
2673 * pages. They are not guaranteed to be the same for those
2674 * architectures that implement an IOMMU.
2675 */
2676 datasegcnt = dma_map_sg(&phba->pcidev->dev,
2677 scsi_sglist(scsi_cmnd),
2678 scsi_sg_count(scsi_cmnd), datadir);
2679 if (unlikely(!datasegcnt))
2680 return 1;
2681
2682 lpfc_cmd->seg_cnt = datasegcnt;
2683 if (lpfc_cmd->seg_cnt > phba->cfg_sg_seg_cnt) {
6a9c52cf
JS
2684 lpfc_printf_log(phba, KERN_ERR, LOG_BG,
2685 "9067 BLKGRD: %s: Too many sg segments"
2686 " from dma_map_sg. Config %d, seg_cnt"
2687 " %d\n",
e2a0a9d6
JS
2688 __func__, phba->cfg_sg_seg_cnt,
2689 lpfc_cmd->seg_cnt);
2690 scsi_dma_unmap(scsi_cmnd);
2691 return 1;
2692 }
2693
2694 prot_group_type = lpfc_prot_group_type(phba, scsi_cmnd);
2695
2696 switch (prot_group_type) {
2697 case LPFC_PG_TYPE_NO_DIF:
2698 num_bde = lpfc_bg_setup_bpl(phba, scsi_cmnd, bpl,
2699 datasegcnt);
c9404c9c 2700 /* we should have 2 or more entries in buffer list */
e2a0a9d6
JS
2701 if (num_bde < 2)
2702 goto err;
2703 break;
2704 case LPFC_PG_TYPE_DIF_BUF:{
2705 /*
2706 * This type indicates that protection buffers are
2707 * passed to the driver, so that needs to be prepared
2708 * for DMA
2709 */
2710 protsegcnt = dma_map_sg(&phba->pcidev->dev,
2711 scsi_prot_sglist(scsi_cmnd),
2712 scsi_prot_sg_count(scsi_cmnd), datadir);
2713 if (unlikely(!protsegcnt)) {
2714 scsi_dma_unmap(scsi_cmnd);
2715 return 1;
2716 }
2717
2718 lpfc_cmd->prot_seg_cnt = protsegcnt;
2719 if (lpfc_cmd->prot_seg_cnt
2720 > phba->cfg_prot_sg_seg_cnt) {
6a9c52cf
JS
2721 lpfc_printf_log(phba, KERN_ERR, LOG_BG,
2722 "9068 BLKGRD: %s: Too many prot sg "
2723 "segments from dma_map_sg. Config %d,"
e2a0a9d6
JS
2724 "prot_seg_cnt %d\n", __func__,
2725 phba->cfg_prot_sg_seg_cnt,
2726 lpfc_cmd->prot_seg_cnt);
2727 dma_unmap_sg(&phba->pcidev->dev,
2728 scsi_prot_sglist(scsi_cmnd),
2729 scsi_prot_sg_count(scsi_cmnd),
2730 datadir);
2731 scsi_dma_unmap(scsi_cmnd);
2732 return 1;
2733 }
2734
2735 num_bde = lpfc_bg_setup_bpl_prot(phba, scsi_cmnd, bpl,
2736 datasegcnt, protsegcnt);
c9404c9c 2737 /* we should have 3 or more entries in buffer list */
e2a0a9d6
JS
2738 if (num_bde < 3)
2739 goto err;
2740 break;
2741 }
2742 case LPFC_PG_TYPE_INVALID:
2743 default:
2744 lpfc_printf_log(phba, KERN_ERR, LOG_FCP,
2745 "9022 Unexpected protection group %i\n",
2746 prot_group_type);
2747 return 1;
2748 }
2749 }
2750
2751 /*
2752 * Finish initializing those IOCB fields that are dependent on the
2753 * scsi_cmnd request_buffer. Note that the bdeSize is explicitly
2754 * reinitialized since all iocb memory resources are used many times
2755 * for transmit, receive, and continuation bpl's.
2756 */
2757 iocb_cmd->un.fcpi64.bdl.bdeSize = (2 * sizeof(struct ulp_bde64));
2758 iocb_cmd->un.fcpi64.bdl.bdeSize += (num_bde * sizeof(struct ulp_bde64));
2759 iocb_cmd->ulpBdeCount = 1;
2760 iocb_cmd->ulpLe = 1;
2761
2762 fcpdl = scsi_bufflen(scsi_cmnd);
2763
2764 if (scsi_get_prot_type(scsi_cmnd) == SCSI_PROT_DIF_TYPE1) {
2765 /*
2766 * We are in DIF Type 1 mode.
2767 * Every data block has an 8 byte DIF (trailer)
2768 * attached to it. Must adjust the FCP data length.
2769 */
2770 blksize = lpfc_cmd_blksize(scsi_cmnd);
2771 diflen = (fcpdl / blksize) * 8;
2772 fcpdl += diflen;
2773 }
2774 fcp_cmnd->fcpDl = be32_to_cpu(fcpdl);
2775
2776 /*
2777 * Due to difference in data length between DIF/non-DIF paths,
2778 * we need to set word 4 of IOCB here
2779 */
2780 iocb_cmd->un.fcpi.fcpi_parm = fcpdl;
2781
dea3101e 2782 return 0;
e2a0a9d6
JS
2783err:
2784 lpfc_printf_log(phba, KERN_ERR, LOG_FCP,
2785 "9023 Could not setup all needed BDE's"
2786 "prot_group_type=%d, num_bde=%d\n",
2787 prot_group_type, num_bde);
2788 return 1;
2789}
2790
2791/*
2792 * This function checks for BlockGuard errors detected by
2793 * the HBA. In case of errors, the ASC/ASCQ fields in the
2794 * sense buffer will be set accordingly, paired with
2795 * ILLEGAL_REQUEST to signal to the kernel that the HBA
2796 * detected corruption.
2797 *
2798 * Returns:
2799 * 0 - No error found
2800 * 1 - BlockGuard error found
2801 * -1 - Internal error (bad profile, ...etc)
2802 */
2803static int
2804lpfc_parse_bg_err(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd,
2805 struct lpfc_iocbq *pIocbOut)
2806{
2807 struct scsi_cmnd *cmd = lpfc_cmd->pCmd;
2808 struct sli3_bg_fields *bgf = &pIocbOut->iocb.unsli3.sli3_bg;
2809 int ret = 0;
2810 uint32_t bghm = bgf->bghm;
2811 uint32_t bgstat = bgf->bgstat;
2812 uint64_t failing_sector = 0;
2813
6a9c52cf
JS
2814 lpfc_printf_log(phba, KERN_ERR, LOG_BG, "9069 BLKGRD: BG ERROR in cmd"
2815 " 0x%x lba 0x%llx blk cnt 0x%x "
e2a0a9d6 2816 "bgstat=0x%x bghm=0x%x\n",
87b5c328 2817 cmd->cmnd[0], (unsigned long long)scsi_get_lba(cmd),
83096ebf 2818 blk_rq_sectors(cmd->request), bgstat, bghm);
e2a0a9d6
JS
2819
2820 spin_lock(&_dump_buf_lock);
2821 if (!_dump_buf_done) {
6a9c52cf
JS
2822 lpfc_printf_log(phba, KERN_ERR, LOG_BG, "9070 BLKGRD: Saving"
2823 " Data for %u blocks to debugfs\n",
e2a0a9d6 2824 (cmd->cmnd[7] << 8 | cmd->cmnd[8]));
6a9c52cf 2825 lpfc_debug_save_data(phba, cmd);
e2a0a9d6
JS
2826
2827 /* If we have a prot sgl, save the DIF buffer */
2828 if (lpfc_prot_group_type(phba, cmd) ==
2829 LPFC_PG_TYPE_DIF_BUF) {
6a9c52cf
JS
2830 lpfc_printf_log(phba, KERN_ERR, LOG_BG, "9071 BLKGRD: "
2831 "Saving DIF for %u blocks to debugfs\n",
2832 (cmd->cmnd[7] << 8 | cmd->cmnd[8]));
2833 lpfc_debug_save_dif(phba, cmd);
e2a0a9d6
JS
2834 }
2835
2836 _dump_buf_done = 1;
2837 }
2838 spin_unlock(&_dump_buf_lock);
2839
2840 if (lpfc_bgs_get_invalid_prof(bgstat)) {
2841 cmd->result = ScsiResult(DID_ERROR, 0);
6a9c52cf
JS
2842 lpfc_printf_log(phba, KERN_ERR, LOG_BG, "9072 BLKGRD: Invalid"
2843 " BlockGuard profile. bgstat:0x%x\n",
2844 bgstat);
e2a0a9d6
JS
2845 ret = (-1);
2846 goto out;
2847 }
2848
2849 if (lpfc_bgs_get_uninit_dif_block(bgstat)) {
2850 cmd->result = ScsiResult(DID_ERROR, 0);
6a9c52cf
JS
2851 lpfc_printf_log(phba, KERN_ERR, LOG_BG, "9073 BLKGRD: "
2852 "Invalid BlockGuard DIF Block. bgstat:0x%x\n",
e2a0a9d6
JS
2853 bgstat);
2854 ret = (-1);
2855 goto out;
2856 }
2857
2858 if (lpfc_bgs_get_guard_err(bgstat)) {
2859 ret = 1;
2860
2861 scsi_build_sense_buffer(1, cmd->sense_buffer, ILLEGAL_REQUEST,
2862 0x10, 0x1);
1c9fbafc 2863 cmd->result = DRIVER_SENSE << 24
e2a0a9d6
JS
2864 | ScsiResult(DID_ABORT, SAM_STAT_CHECK_CONDITION);
2865 phba->bg_guard_err_cnt++;
6a9c52cf
JS
2866 lpfc_printf_log(phba, KERN_ERR, LOG_BG,
2867 "9055 BLKGRD: guard_tag error\n");
e2a0a9d6
JS
2868 }
2869
2870 if (lpfc_bgs_get_reftag_err(bgstat)) {
2871 ret = 1;
2872
2873 scsi_build_sense_buffer(1, cmd->sense_buffer, ILLEGAL_REQUEST,
2874 0x10, 0x3);
1c9fbafc 2875 cmd->result = DRIVER_SENSE << 24
e2a0a9d6
JS
2876 | ScsiResult(DID_ABORT, SAM_STAT_CHECK_CONDITION);
2877
2878 phba->bg_reftag_err_cnt++;
6a9c52cf
JS
2879 lpfc_printf_log(phba, KERN_ERR, LOG_BG,
2880 "9056 BLKGRD: ref_tag error\n");
e2a0a9d6
JS
2881 }
2882
2883 if (lpfc_bgs_get_apptag_err(bgstat)) {
2884 ret = 1;
2885
2886 scsi_build_sense_buffer(1, cmd->sense_buffer, ILLEGAL_REQUEST,
2887 0x10, 0x2);
1c9fbafc 2888 cmd->result = DRIVER_SENSE << 24
e2a0a9d6
JS
2889 | ScsiResult(DID_ABORT, SAM_STAT_CHECK_CONDITION);
2890
2891 phba->bg_apptag_err_cnt++;
6a9c52cf
JS
2892 lpfc_printf_log(phba, KERN_ERR, LOG_BG,
2893 "9061 BLKGRD: app_tag error\n");
e2a0a9d6
JS
2894 }
2895
2896 if (lpfc_bgs_get_hi_water_mark_present(bgstat)) {
2897 /*
2898 * setup sense data descriptor 0 per SPC-4 as an information
7c56b9fd
JS
2899 * field, and put the failing LBA in it.
2900 * This code assumes there was also a guard/app/ref tag error
2901 * indication.
e2a0a9d6 2902 */
7c56b9fd
JS
2903 cmd->sense_buffer[7] = 0xc; /* Additional sense length */
2904 cmd->sense_buffer[8] = 0; /* Information descriptor type */
2905 cmd->sense_buffer[9] = 0xa; /* Additional descriptor length */
2906 cmd->sense_buffer[10] = 0x80; /* Validity bit */
acd6859b
JS
2907
2908 /* bghm is an "on the wire" FC frame based count */
2909 switch (scsi_get_prot_op(cmd)) {
2910 case SCSI_PROT_READ_INSERT:
2911 case SCSI_PROT_WRITE_STRIP:
2912 bghm /= cmd->device->sector_size;
2913 break;
2914 case SCSI_PROT_READ_STRIP:
2915 case SCSI_PROT_WRITE_INSERT:
2916 case SCSI_PROT_READ_PASS:
2917 case SCSI_PROT_WRITE_PASS:
2918 bghm /= (cmd->device->sector_size +
2919 sizeof(struct scsi_dif_tuple));
2920 break;
2921 }
e2a0a9d6
JS
2922
2923 failing_sector = scsi_get_lba(cmd);
2924 failing_sector += bghm;
2925
7c56b9fd
JS
2926 /* Descriptor Information */
2927 put_unaligned_be64(failing_sector, &cmd->sense_buffer[12]);
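		/*
		 * Worked example (editor's addition): for READ_PASS with
		 * 512-byte sectors the protection tuples travel inline, so
		 * each wire block is 512 + sizeof(struct scsi_dif_tuple) =
		 * 520 bytes; bghm = 2600 converts to 5 blocks, and the LBA
		 * placed in the descriptor is scsi_get_lba(cmd) + 5.
		 */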
e2a0a9d6
JS
2928 }
2929
2930 if (!ret) {
2931 /* No error was reported - problem in FW? */
2932 cmd->result = ScsiResult(DID_ERROR, 0);
6a9c52cf 2933 lpfc_printf_log(phba, KERN_ERR, LOG_BG,
4ac9b226 2934 "9057 BLKGRD: Unknown error reported!\n");
e2a0a9d6
JS
2935 }
2936
2937out:
2938 return ret;
dea3101e
JB
2939}
2940
da0436e9
JS
2941/**
2942 * lpfc_scsi_prep_dma_buf_s4 - DMA mapping for scsi buffer to SLI4 IF spec
2943 * @phba: The Hba for which this call is being executed.
2944 * @lpfc_cmd: The scsi buffer which is going to be mapped.
2945 *
2946 * This routine does the pci dma mapping for the scatter-gather list of the
2947 * scsi cmnd field of @lpfc_cmd for a device with the SLI-4 interface spec.
2948 *
2949 * Return codes:
6c8eea54
JS
2950 * 1 - Error
2951 * 0 - Success
da0436e9
JS
2952 **/
2953static int
2954lpfc_scsi_prep_dma_buf_s4(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd)
2955{
2956 struct scsi_cmnd *scsi_cmnd = lpfc_cmd->pCmd;
2957 struct scatterlist *sgel = NULL;
2958 struct fcp_cmnd *fcp_cmnd = lpfc_cmd->fcp_cmnd;
2959 struct sli4_sge *sgl = (struct sli4_sge *)lpfc_cmd->fcp_bpl;
fedd3b7b 2960 struct sli4_sge *first_data_sgl;
da0436e9
JS
2961 IOCB_t *iocb_cmd = &lpfc_cmd->cur_iocbq.iocb;
2962 dma_addr_t physaddr;
2963 uint32_t num_bde = 0;
2964 uint32_t dma_len;
2965 uint32_t dma_offset = 0;
2966 int nseg;
fedd3b7b 2967 struct ulp_bde64 *bde;
da0436e9
JS
2968
2969 /*
2970 * There are three possibilities here - use scatter-gather segment, use
2971 * the single mapping, or neither. Start the lpfc command prep by
2972 * bumping the bpl beyond the fcp_cmnd and fcp_rsp regions to the first
2973 * data bde entry.
2974 */
2975 if (scsi_sg_count(scsi_cmnd)) {
2976 /*
2977 * The driver stores the segment count returned from pci_map_sg
2978 * because this is a count of dma-mappings used to map the use_sg
2979 * pages. They are not guaranteed to be the same for those
2980 * architectures that implement an IOMMU.
2981 */
2982
2983 nseg = scsi_dma_map(scsi_cmnd);
2984 if (unlikely(!nseg))
2985 return 1;
2986 sgl += 1;
2987 /* clear the last flag in the fcp_rsp map entry */
2988 sgl->word2 = le32_to_cpu(sgl->word2);
2989 bf_set(lpfc_sli4_sge_last, sgl, 0);
2990 sgl->word2 = cpu_to_le32(sgl->word2);
2991 sgl += 1;
fedd3b7b 2992 first_data_sgl = sgl;
da0436e9
JS
2993 lpfc_cmd->seg_cnt = nseg;
2994 if (lpfc_cmd->seg_cnt > phba->cfg_sg_seg_cnt) {
6a9c52cf
JS
2995 lpfc_printf_log(phba, KERN_ERR, LOG_BG, "9074 BLKGRD:"
2996 " %s: Too many sg segments from "
2997 "dma_map_sg. Config %d, seg_cnt %d\n",
2998 __func__, phba->cfg_sg_seg_cnt,
da0436e9
JS
2999 lpfc_cmd->seg_cnt);
3000 scsi_dma_unmap(scsi_cmnd);
3001 return 1;
3002 }
3003
3004 /*
3005 * The driver established a maximum scatter-gather segment count
3006 * during probe that limits the number of sg elements in any
3007 * single scsi command. Just run through the seg_cnt and format
3008 * the sge's.
3009 * When using SLI-3 the driver will try to fit all the BDEs into
3010 * the IOCB. If it can't then the BDEs get added to a BPL as it
3011 * does for SLI-2 mode.
3012 */
3013 scsi_for_each_sg(scsi_cmnd, sgel, nseg, num_bde) {
3014 physaddr = sg_dma_address(sgel);
3015 dma_len = sg_dma_len(sgel);
da0436e9
JS
3016 sgl->addr_lo = cpu_to_le32(putPaddrLow(physaddr));
3017 sgl->addr_hi = cpu_to_le32(putPaddrHigh(physaddr));
0558056c 3018 sgl->word2 = le32_to_cpu(sgl->word2);
da0436e9
JS
3019 if ((num_bde + 1) == nseg)
3020 bf_set(lpfc_sli4_sge_last, sgl, 1);
3021 else
3022 bf_set(lpfc_sli4_sge_last, sgl, 0);
3023 bf_set(lpfc_sli4_sge_offset, sgl, dma_offset);
f9bb2da1 3024 bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_DATA);
da0436e9 3025 sgl->word2 = cpu_to_le32(sgl->word2);
28baac74 3026 sgl->sge_len = cpu_to_le32(dma_len);
da0436e9
JS
3027 dma_offset += dma_len;
3028 sgl++;
3029 }
fedd3b7b
JS
3030 /* setup the performance hint (first data BDE) if enabled */
3031 if (phba->sli3_options & LPFC_SLI4_PERFH_ENABLED) {
3032 bde = (struct ulp_bde64 *)
3033 &(iocb_cmd->unsli3.sli3Words[5]);
3034 bde->addrLow = first_data_sgl->addr_lo;
3035 bde->addrHigh = first_data_sgl->addr_hi;
3036 bde->tus.f.bdeSize =
3037 le32_to_cpu(first_data_sgl->sge_len);
3038 bde->tus.f.bdeFlags = BUFF_TYPE_BDE_64;
3039 bde->tus.w = cpu_to_le32(bde->tus.w);
3040 }
da0436e9
JS
3041 } else {
3042 sgl += 1;
3043 /* clear the last flag in the fcp_rsp map entry */
3044 sgl->word2 = le32_to_cpu(sgl->word2);
3045 bf_set(lpfc_sli4_sge_last, sgl, 1);
3046 sgl->word2 = cpu_to_le32(sgl->word2);
3047 }
3048
3049 /*
3050 * Finish initializing those IOCB fields that are dependent on the
3051 * scsi_cmnd request_buffer. Note that for SLI-2 the bdeSize is
3052 * explicitly reinitialized since
3053 * all iocb memory resources are reused.
3054 */
3055 fcp_cmnd->fcpDl = cpu_to_be32(scsi_bufflen(scsi_cmnd));
3056
3057 /*
3058 * Due to difference in data length between DIF/non-DIF paths,
3059 * we need to set word 4 of IOCB here
3060 */
3061 iocb_cmd->un.fcpi.fcpi_parm = scsi_bufflen(scsi_cmnd);
3062 return 0;
3063}
3064
acd6859b
JS
3065/**
3066 * lpfc_bg_scsi_adjust_dl - Adjust SCSI data length for BlockGuard
3067 * @phba: The Hba for which this call is being executed.
3068 * @lpfc_cmd: The scsi buffer which is going to be adjusted.
3069 *
3070 * Adjust the data length to account for how much data
3071 * is actually on the wire.
3072 *
3073 * Returns the adjusted data length.
3074 **/
3075static int
3076lpfc_bg_scsi_adjust_dl(struct lpfc_hba *phba,
3077 struct lpfc_scsi_buf *lpfc_cmd)
3078{
3079 struct scsi_cmnd *sc = lpfc_cmd->pCmd;
3080 int diflen, fcpdl;
3081 unsigned blksize;
3082
3083 fcpdl = scsi_bufflen(sc);
3084
3085 /* Check if there is protection data on the wire */
3086 if (sc->sc_data_direction == DMA_FROM_DEVICE) {
3087 /* Read */
3088 if (scsi_get_prot_op(sc) == SCSI_PROT_READ_INSERT)
3089 return fcpdl;
3090
3091 } else {
3092 /* Write */
3093 if (scsi_get_prot_op(sc) == SCSI_PROT_WRITE_STRIP)
3094 return fcpdl;
3095 }
3096
3097 /* If protection data on the wire, adjust the count accordingly */
3098 blksize = lpfc_cmd_blksize(sc);
3099 diflen = (fcpdl / blksize) * 8;
3100 fcpdl += diflen;
3101 return fcpdl;
3102}
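/*
 * Worked example (editor's addition): a 4096-byte READ_PASS with
 * 512-byte blocks carries (4096 / 512) * 8 = 64 bytes of inline DIF,
 * so lpfc_bg_scsi_adjust_dl() returns fcpdl = 4160. READ_INSERT and
 * WRITE_STRIP return the unadjusted 4096, since for those ops no
 * protection data is on the wire.
 */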
3103
3104/**
3105 * lpfc_bg_scsi_prep_dma_buf_s4 - DMA mapping for scsi buffer to SLI4 IF spec
3106 * @phba: The Hba for which this call is being executed.
3107 * @lpfc_cmd: The scsi buffer which is going to be mapped.
3108 *
3109 * This is the protection/DIF aware version of
3110 * lpfc_scsi_prep_dma_buf(). It may be a good idea to combine the
3111 * two functions eventually, but for now, it's here.
3112 **/
3113static int
3114lpfc_bg_scsi_prep_dma_buf_s4(struct lpfc_hba *phba,
3115 struct lpfc_scsi_buf *lpfc_cmd)
3116{
3117 struct scsi_cmnd *scsi_cmnd = lpfc_cmd->pCmd;
3118 struct fcp_cmnd *fcp_cmnd = lpfc_cmd->fcp_cmnd;
3119 struct sli4_sge *sgl = (struct sli4_sge *)(lpfc_cmd->fcp_bpl);
3120 IOCB_t *iocb_cmd = &lpfc_cmd->cur_iocbq.iocb;
3121 uint32_t num_bde = 0;
3122 int datasegcnt, protsegcnt, datadir = scsi_cmnd->sc_data_direction;
3123 int prot_group_type = 0;
3124 int fcpdl;
3125
3126 /*
3127 * Start the lpfc command prep by bumping the sgl beyond fcp_cmnd
3128 * fcp_rsp regions to the first data bde entry
3129 */
3130 if (scsi_sg_count(scsi_cmnd)) {
3131 /*
3132 * The driver stores the segment count returned from pci_map_sg
3133 * because this is a count of dma-mappings used to map the use_sg
3134 * pages. They are not guaranteed to be the same for those
3135 * architectures that implement an IOMMU.
3136 */
3137 datasegcnt = dma_map_sg(&phba->pcidev->dev,
3138 scsi_sglist(scsi_cmnd),
3139 scsi_sg_count(scsi_cmnd), datadir);
3140 if (unlikely(!datasegcnt))
3141 return 1;
3142
3143 sgl += 1;
3144 /* clear the last flag in the fcp_rsp map entry */
3145 sgl->word2 = le32_to_cpu(sgl->word2);
3146 bf_set(lpfc_sli4_sge_last, sgl, 0);
3147 sgl->word2 = cpu_to_le32(sgl->word2);
3148
3149 sgl += 1;
3150 lpfc_cmd->seg_cnt = datasegcnt;
3151 if (lpfc_cmd->seg_cnt > phba->cfg_sg_seg_cnt) {
3152 lpfc_printf_log(phba, KERN_ERR, LOG_BG,
3153 "9087 BLKGRD: %s: Too many sg segments"
3154 " from dma_map_sg. Config %d, seg_cnt"
3155 " %d\n",
3156 __func__, phba->cfg_sg_seg_cnt,
3157 lpfc_cmd->seg_cnt);
3158 scsi_dma_unmap(scsi_cmnd);
3159 return 1;
3160 }
3161
3162 prot_group_type = lpfc_prot_group_type(phba, scsi_cmnd);
3163
3164 switch (prot_group_type) {
3165 case LPFC_PG_TYPE_NO_DIF:
3166 num_bde = lpfc_bg_setup_sgl(phba, scsi_cmnd, sgl,
3167 datasegcnt);
3168 /* we should have 2 or more entries in buffer list */
3169 if (num_bde < 2)
3170 goto err;
3171 break;
3172 case LPFC_PG_TYPE_DIF_BUF:{
3173 /*
3174 * This type indicates that protection buffers are
3175 * passed to the driver, so that needs to be prepared
3176 * for DMA
3177 */
3178 protsegcnt = dma_map_sg(&phba->pcidev->dev,
3179 scsi_prot_sglist(scsi_cmnd),
3180 scsi_prot_sg_count(scsi_cmnd), datadir);
3181 if (unlikely(!protsegcnt)) {
3182 scsi_dma_unmap(scsi_cmnd);
3183 return 1;
3184 }
3185
3186 lpfc_cmd->prot_seg_cnt = protsegcnt;
3187 if (lpfc_cmd->prot_seg_cnt
3188 > phba->cfg_prot_sg_seg_cnt) {
3189 lpfc_printf_log(phba, KERN_ERR, LOG_BG,
3190 "9088 BLKGRD: %s: Too many prot sg "
3191 "segments from dma_map_sg. Config %d,"
3192 "prot_seg_cnt %d\n", __func__,
3193 phba->cfg_prot_sg_seg_cnt,
3194 lpfc_cmd->prot_seg_cnt);
3195 dma_unmap_sg(&phba->pcidev->dev,
3196 scsi_prot_sglist(scsi_cmnd),
3197 scsi_prot_sg_count(scsi_cmnd),
3198 datadir);
3199 scsi_dma_unmap(scsi_cmnd);
3200 return 1;
3201 }
3202
3203 num_bde = lpfc_bg_setup_sgl_prot(phba, scsi_cmnd, sgl,
3204 datasegcnt, protsegcnt);
3205 /* we should have 3 or more entries in buffer list */
3206 if (num_bde < 3)
3207 goto err;
3208 break;
3209 }
3210 case LPFC_PG_TYPE_INVALID:
3211 default:
3212 lpfc_printf_log(phba, KERN_ERR, LOG_FCP,
3213 "9083 Unexpected protection group %i\n",
3214 prot_group_type);
3215 return 1;
3216 }
3217 }
3218
3219 fcpdl = lpfc_bg_scsi_adjust_dl(phba, lpfc_cmd);
3220
3221 fcp_cmnd->fcpDl = be32_to_cpu(fcpdl);
3222
3223 /*
3224 * Due to difference in data length between DIF/non-DIF paths,
3225 * we need to set word 4 of IOCB here
3226 */
3227 iocb_cmd->un.fcpi.fcpi_parm = fcpdl;
3228 lpfc_cmd->cur_iocbq.iocb_flag |= LPFC_IO_DIF;
3229
3230 return 0;
3231err:
3232 lpfc_printf_log(phba, KERN_ERR, LOG_FCP,
3233 "9084 Could not setup all needed BDE's"
3234 "prot_group_type=%d, num_bde=%d\n",
3235 prot_group_type, num_bde);
3236 return 1;
3237}
3238
3772a991
JS
3239/**
3240 * lpfc_scsi_prep_dma_buf - Wrapper function for DMA mapping of scsi buffer
3241 * @phba: The Hba for which this call is being executed.
3242 * @lpfc_cmd: The scsi buffer which is going to be mapped.
3243 *
3244 * This routine wraps the actual DMA mapping function pointer from the
3245 * lpfc_hba struct.
3246 *
3247 * Return codes:
6c8eea54
JS
3248 * 1 - Error
3249 * 0 - Success
3772a991
JS
3250 **/
3251static inline int
3252lpfc_scsi_prep_dma_buf(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd)
3253{
3254 return phba->lpfc_scsi_prep_dma_buf(phba, lpfc_cmd);
3255}
3256
acd6859b
JS
3257/**
3258 * lpfc_bg_scsi_prep_dma_buf - Wrapper function for DMA mapping of scsi buffer
3259 * using BlockGuard.
3260 * @phba: The Hba for which this call is being executed.
3261 * @lpfc_cmd: The scsi buffer which is going to be mapped.
3262 *
3263 * This routine wraps the actual DMA mapping function pointer from the
3264 * lpfc_hba struct.
3265 *
3266 * Return codes:
3267 * 1 - Error
3268 * 0 - Success
3269 **/
3270static inline int
3271lpfc_bg_scsi_prep_dma_buf(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd)
3272{
3273 return phba->lpfc_bg_scsi_prep_dma_buf(phba, lpfc_cmd);
3274}
3275
ea2151b4 3276/**
3621a710 3277 * lpfc_send_scsi_error_event - Posts an event when there is a SCSI error
ea2151b4
JS
3278 * @phba: Pointer to hba context object.
3279 * @vport: Pointer to vport object.
3280 * @lpfc_cmd: Pointer to lpfc scsi command which reported the error.
3281 * @rsp_iocb: Pointer to response iocb object which reported error.
3282 *
3283 * This function posts an event when there is a SCSI command reporting
3284 * error from the scsi device.
3285 **/
3286static void
3287lpfc_send_scsi_error_event(struct lpfc_hba *phba, struct lpfc_vport *vport,
3288 struct lpfc_scsi_buf *lpfc_cmd, struct lpfc_iocbq *rsp_iocb) {
3289 struct scsi_cmnd *cmnd = lpfc_cmd->pCmd;
3290 struct fcp_rsp *fcprsp = lpfc_cmd->fcp_rsp;
3291 uint32_t resp_info = fcprsp->rspStatus2;
3292 uint32_t scsi_status = fcprsp->rspStatus3;
3293 uint32_t fcpi_parm = rsp_iocb->iocb.un.fcpi.fcpi_parm;
3294 struct lpfc_fast_path_event *fast_path_evt = NULL;
3295 struct lpfc_nodelist *pnode = lpfc_cmd->rdata->pnode;
3296 unsigned long flags;
3297
5989b8d4
JS
3298 if (!pnode || !NLP_CHK_NODE_ACT(pnode))
3299 return;
3300
ea2151b4
JS
3301 /* If there is a queue-full or busy condition, send a scsi event */
3302 if ((cmnd->result == SAM_STAT_TASK_SET_FULL) ||
3303 (cmnd->result == SAM_STAT_BUSY)) {
3304 fast_path_evt = lpfc_alloc_fast_evt(phba);
3305 if (!fast_path_evt)
3306 return;
3307 fast_path_evt->un.scsi_evt.event_type =
3308 FC_REG_SCSI_EVENT;
3309 fast_path_evt->un.scsi_evt.subcategory =
3310 (cmnd->result == SAM_STAT_TASK_SET_FULL) ?
3311 LPFC_EVENT_QFULL : LPFC_EVENT_DEVBSY;
3312 fast_path_evt->un.scsi_evt.lun = cmnd->device->lun;
3313 memcpy(&fast_path_evt->un.scsi_evt.wwpn,
3314 &pnode->nlp_portname, sizeof(struct lpfc_name));
3315 memcpy(&fast_path_evt->un.scsi_evt.wwnn,
3316 &pnode->nlp_nodename, sizeof(struct lpfc_name));
3317 } else if ((resp_info & SNS_LEN_VALID) && fcprsp->rspSnsLen &&
3318 ((cmnd->cmnd[0] == READ_10) || (cmnd->cmnd[0] == WRITE_10))) {
3319 fast_path_evt = lpfc_alloc_fast_evt(phba);
3320 if (!fast_path_evt)
3321 return;
3322 fast_path_evt->un.check_cond_evt.scsi_event.event_type =
3323 FC_REG_SCSI_EVENT;
3324 fast_path_evt->un.check_cond_evt.scsi_event.subcategory =
3325 LPFC_EVENT_CHECK_COND;
3326 fast_path_evt->un.check_cond_evt.scsi_event.lun =
3327 cmnd->device->lun;
3328 memcpy(&fast_path_evt->un.check_cond_evt.scsi_event.wwpn,
3329 &pnode->nlp_portname, sizeof(struct lpfc_name));
3330 memcpy(&fast_path_evt->un.check_cond_evt.scsi_event.wwnn,
3331 &pnode->nlp_nodename, sizeof(struct lpfc_name));
3332 fast_path_evt->un.check_cond_evt.sense_key =
3333 cmnd->sense_buffer[2] & 0xf;
3334 fast_path_evt->un.check_cond_evt.asc = cmnd->sense_buffer[12];
3335 fast_path_evt->un.check_cond_evt.ascq = cmnd->sense_buffer[13];
3336 } else if ((cmnd->sc_data_direction == DMA_FROM_DEVICE) &&
3337 fcpi_parm &&
3338 ((be32_to_cpu(fcprsp->rspResId) != fcpi_parm) ||
3339 ((scsi_status == SAM_STAT_GOOD) &&
3340 !(resp_info & (RESID_UNDER | RESID_OVER))))) {
3341 /*
3342 * If status is good or the resid does not match fcpi_parm, and
3343 * there is a valid fcpi_parm, then there is a read_check error
3344 */
3345 fast_path_evt = lpfc_alloc_fast_evt(phba);
3346 if (!fast_path_evt)
3347 return;
3348 fast_path_evt->un.read_check_error.header.event_type =
3349 FC_REG_FABRIC_EVENT;
3350 fast_path_evt->un.read_check_error.header.subcategory =
3351 LPFC_EVENT_FCPRDCHKERR;
3352 memcpy(&fast_path_evt->un.read_check_error.header.wwpn,
3353 &pnode->nlp_portname, sizeof(struct lpfc_name));
3354 memcpy(&fast_path_evt->un.read_check_error.header.wwnn,
3355 &pnode->nlp_nodename, sizeof(struct lpfc_name));
3356 fast_path_evt->un.read_check_error.lun = cmnd->device->lun;
3357 fast_path_evt->un.read_check_error.opcode = cmnd->cmnd[0];
3358 fast_path_evt->un.read_check_error.fcpiparam =
3359 fcpi_parm;
3360 } else
3361 return;
3362
3363 fast_path_evt->vport = vport;
3364 spin_lock_irqsave(&phba->hbalock, flags);
3365 list_add_tail(&fast_path_evt->work_evt.evt_listp, &phba->work_list);
3366 spin_unlock_irqrestore(&phba->hbalock, flags);
3367 lpfc_worker_wake_up(phba);
3368 return;
3369}
9bad7671
JS
3370
3371/**
f1126688 3372 * lpfc_scsi_unprep_dma_buf - Un-map DMA mapping of SG-list for a device
3772a991 3373 * @phba: The HBA for which this call is being executed.
9bad7671
JS
3374 * @psb: The scsi buffer which is going to be un-mapped.
3375 *
3376 * This routine does DMA un-mapping of the scatter-gather list of the scsi
3772a991 3377 * command field of @psb for a device with the SLI-3 interface spec.
9bad7671 3378 **/
bcf4dbfa 3379static void
f1126688 3380lpfc_scsi_unprep_dma_buf(struct lpfc_hba *phba, struct lpfc_scsi_buf *psb)
bcf4dbfa
JS
3381{
3382 /*
3383 * There are only two special cases to consider. (1) the scsi command
3384 * requested scatter-gather usage or (2) the scsi command allocated
3385 * a request buffer, but did not request use_sg. There is a third
3386 * case, but it does not require resource deallocation.
3387 */
a0b4f78f
FT
3388 if (psb->seg_cnt > 0)
3389 scsi_dma_unmap(psb->pCmd);
e2a0a9d6
JS
3390 if (psb->prot_seg_cnt > 0)
3391 dma_unmap_sg(&phba->pcidev->dev, scsi_prot_sglist(psb->pCmd),
3392 scsi_prot_sg_count(psb->pCmd),
3393 psb->pCmd->sc_data_direction);
bcf4dbfa
JS
3394}
3395
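/*
 * For reference, this undoes the two mappings made in the prep path
 * earlier in this file: scsi_dma_unmap() releases the data SG list,
 * and the explicit dma_unmap_sg() releases the protection SG list
 * that was mapped with:
 *
 *	protsegcnt = dma_map_sg(&phba->pcidev->dev,
 *				scsi_prot_sglist(scsi_cmnd),
 *				scsi_prot_sg_count(scsi_cmnd), datadir);
 *
 * Both seg_cnt fields are tested above, so a command that never
 * mapped anything is a no-op here.
 */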
9bad7671 3396/**
3621a710 3397 * lpfc_handle_fcp_err - FCP response handler
9bad7671
JS
3398 * @vport: The virtual port for which this call is being executed.
3399 * @lpfc_cmd: Pointer to lpfc_scsi_buf data structure.
3400 * @rsp_iocb: The response IOCB which contains FCP error.
3401 *
3402 * This routine is called to process response IOCB with status field
3403 * IOSTAT_FCP_RSP_ERROR. This routine sets result field of scsi command
3404 * based upon SCSI and FCP error.
3405 **/
dea3101e 3406static void
2e0fef85
JS
3407lpfc_handle_fcp_err(struct lpfc_vport *vport, struct lpfc_scsi_buf *lpfc_cmd,
3408 struct lpfc_iocbq *rsp_iocb)
dea3101e
JB
3409{
3410 struct scsi_cmnd *cmnd = lpfc_cmd->pCmd;
3411 struct fcp_cmnd *fcpcmd = lpfc_cmd->fcp_cmnd;
3412 struct fcp_rsp *fcprsp = lpfc_cmd->fcp_rsp;
7054a606 3413 uint32_t fcpi_parm = rsp_iocb->iocb.un.fcpi.fcpi_parm;
dea3101e
JB
3414 uint32_t resp_info = fcprsp->rspStatus2;
3415 uint32_t scsi_status = fcprsp->rspStatus3;
c7743956 3416 uint32_t *lp;
dea3101e
JB
3417 uint32_t host_status = DID_OK;
3418 uint32_t rsplen = 0;
c7743956 3419 uint32_t logit = LOG_FCP | LOG_FCP_ERROR;
dea3101e 3420
ea2151b4 3421
dea3101e
JB
3422 /*
3423 * If this is a task management command, there is no
3424 * scsi packet associated with this lpfc_cmd. The driver
3425 * consumes it.
3426 */
3427 if (fcpcmd->fcpCntl2) {
3428 scsi_status = 0;
3429 goto out;
3430 }
3431
6a9c52cf
JS
3432 if (resp_info & RSP_LEN_VALID) {
3433 rsplen = be32_to_cpu(fcprsp->rspRspLen);
e40a02c1 3434 if (rsplen != 0 && rsplen != 4 && rsplen != 8) {
6a9c52cf
JS
3435 lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
3436 "2719 Invalid response length: "
3437 "tgt x%x lun x%x cmnd x%x rsplen x%x\n",
3438 cmnd->device->id,
3439 cmnd->device->lun, cmnd->cmnd[0],
3440 rsplen);
3441 host_status = DID_ERROR;
3442 goto out;
3443 }
e40a02c1
JS
3444 if (fcprsp->rspInfo3 != RSP_NO_FAILURE) {
3445 lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
3446 "2757 Protocol failure detected during "
3447 "processing of FCP I/O op: "
3448 "tgt x%x lun x%x cmnd x%x rspInfo3 x%x\n",
3449 cmnd->device->id,
3450 cmnd->device->lun, cmnd->cmnd[0],
3451 fcprsp->rspInfo3);
3452 host_status = DID_ERROR;
3453 goto out;
3454 }
6a9c52cf
JS
3455 }
3456
c7743956
JS
3457 if ((resp_info & SNS_LEN_VALID) && fcprsp->rspSnsLen) {
3458 uint32_t snslen = be32_to_cpu(fcprsp->rspSnsLen);
3459 if (snslen > SCSI_SENSE_BUFFERSIZE)
3460 snslen = SCSI_SENSE_BUFFERSIZE;
3461
3462 if (resp_info & RSP_LEN_VALID)
3463 rsplen = be32_to_cpu(fcprsp->rspRspLen);
3464 memcpy(cmnd->sense_buffer, &fcprsp->rspInfo0 + rsplen, snslen);
3465 }
3466 lp = (uint32_t *)cmnd->sense_buffer;
3467
73d91e50
JS
3468 if (!scsi_status && (resp_info & RESID_UNDER) &&
3469 vport->cfg_log_verbose & LOG_FCP_UNDER)
3470 logit = LOG_FCP_UNDER;
c7743956 3471
e8b62011 3472 lpfc_printf_vlog(vport, KERN_WARNING, logit,
e2a0a9d6 3473 "9024 FCP command x%x failed: x%x SNS x%x x%x "
e8b62011
JS
3474 "Data: x%x x%x x%x x%x x%x\n",
3475 cmnd->cmnd[0], scsi_status,
3476 be32_to_cpu(*lp), be32_to_cpu(*(lp + 3)), resp_info,
3477 be32_to_cpu(fcprsp->rspResId),
3478 be32_to_cpu(fcprsp->rspSnsLen),
3479 be32_to_cpu(fcprsp->rspRspLen),
3480 fcprsp->rspInfo3);
dea3101e 3481
a0b4f78f 3482 scsi_set_resid(cmnd, 0);
dea3101e 3483 if (resp_info & RESID_UNDER) {
a0b4f78f 3484 scsi_set_resid(cmnd, be32_to_cpu(fcprsp->rspResId));
dea3101e 3485
73d91e50 3486 lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP_UNDER,
e2a0a9d6 3487 "9025 FCP Read Underrun, expected %d, "
e8b62011
JS
3488 "residual %d Data: x%x x%x x%x\n",
3489 be32_to_cpu(fcpcmd->fcpDl),
3490 scsi_get_resid(cmnd), fcpi_parm, cmnd->cmnd[0],
3491 cmnd->underflow);
dea3101e 3492
7054a606
JS
3493 /*
3494 * If there is an underrun, check whether the underrun reported
3495 * by the storage array is the same as the underrun reported by
3496 * the HBA. If they differ, a frame was dropped.
3497 */
3498 if ((cmnd->sc_data_direction == DMA_FROM_DEVICE) &&
3499 fcpi_parm &&
a0b4f78f 3500 (scsi_get_resid(cmnd) != fcpi_parm)) {
e8b62011
JS
3501 lpfc_printf_vlog(vport, KERN_WARNING,
3502 LOG_FCP | LOG_FCP_ERROR,
e2a0a9d6 3503 "9026 FCP Read Check Error "
e8b62011
JS
3504 "and Underrun Data: x%x x%x x%x x%x\n",
3505 be32_to_cpu(fcpcmd->fcpDl),
3506 scsi_get_resid(cmnd), fcpi_parm,
3507 cmnd->cmnd[0]);
a0b4f78f 3508 scsi_set_resid(cmnd, scsi_bufflen(cmnd));
7054a606
JS
3509 host_status = DID_ERROR;
3510 }
dea3101e
JB
3511 /*
3512 * The cmnd->underflow is the minimum number of bytes that must
25985edc 3513 * be transferred for this command. Provided a sense condition
dea3101e
JB
3514 * is not present, make sure the actual amount transferred is at
3515 * least the underflow value or fail.
3516 */
3517 if (!(resp_info & SNS_LEN_VALID) &&
3518 (scsi_status == SAM_STAT_GOOD) &&
a0b4f78f
FT
3519 (scsi_bufflen(cmnd) - scsi_get_resid(cmnd)
3520 < cmnd->underflow)) {
e8b62011 3521 lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
e2a0a9d6 3522 "9027 FCP command x%x residual "
e8b62011
JS
3523 "underrun converted to error "
3524 "Data: x%x x%x x%x\n",
66dbfbe6 3525 cmnd->cmnd[0], scsi_bufflen(cmnd),
e8b62011 3526 scsi_get_resid(cmnd), cmnd->underflow);
dea3101e
JB
3527 host_status = DID_ERROR;
3528 }
3529 } else if (resp_info & RESID_OVER) {
e8b62011 3530 lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP,
e2a0a9d6 3531 "9028 FCP command x%x residual overrun error. "
e4e74273 3532 "Data: x%x x%x\n", cmnd->cmnd[0],
e8b62011 3533 scsi_bufflen(cmnd), scsi_get_resid(cmnd));
dea3101e
JB
3534 host_status = DID_ERROR;
3535
3536 /*
3537 * Check SLI validation that all the transfer was actually done
3538 * (fcpi_parm should be zero). Apply check only to reads.
3539 */
eee8877e 3540 } else if (fcpi_parm && (cmnd->sc_data_direction == DMA_FROM_DEVICE)) {
e8b62011 3541 lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP | LOG_FCP_ERROR,
e2a0a9d6 3542 "9029 FCP Read Check Error Data: "
eee8877e 3543 "x%x x%x x%x x%x x%x\n",
e8b62011
JS
3544 be32_to_cpu(fcpcmd->fcpDl),
3545 be32_to_cpu(fcprsp->rspResId),
eee8877e
JS
3546 fcpi_parm, cmnd->cmnd[0], scsi_status);
3547 switch (scsi_status) {
3548 case SAM_STAT_GOOD:
3549 case SAM_STAT_CHECK_CONDITION:
3550 /* Fabric dropped a data frame. Fail any successful
3551 * command in which we detected dropped frames.
3552 * A status of good or some check conditions could
3553 * be considered a successful command.
3554 */
3555 host_status = DID_ERROR;
3556 break;
3557 }
a0b4f78f 3558 scsi_set_resid(cmnd, scsi_bufflen(cmnd));
dea3101e
JB
3559 }
3560
3561 out:
3562 cmnd->result = ScsiResult(host_status, scsi_status);
ea2151b4 3563 lpfc_send_scsi_error_event(vport->phba, vport, lpfc_cmd, rsp_iocb);
dea3101e
JB
3564}
3565
9bad7671 3566/**
3621a710 3567 * lpfc_scsi_cmd_iocb_cmpl - Scsi cmnd IOCB completion routine
9bad7671
JS
3568 * @phba: The Hba for which this call is being executed.
3569 * @pIocbIn: The command IOCBQ for the scsi cmnd.
3772a991 3570 * @pIocbOut: The response IOCBQ for the scsi cmnd.
9bad7671
JS
3571 *
3572 * This routine assigns the scsi command result by examining the response
3573 * IOCB status field. It also handles the QUEUE FULL condition by ramping
3574 * down the device queue depth.
3575 **/
dea3101e
JB
3576static void
3577lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn,
3578 struct lpfc_iocbq *pIocbOut)
3579{
3580 struct lpfc_scsi_buf *lpfc_cmd =
3581 (struct lpfc_scsi_buf *) pIocbIn->context1;
2e0fef85 3582 struct lpfc_vport *vport = pIocbIn->vport;
dea3101e
JB
3583 struct lpfc_rport_data *rdata = lpfc_cmd->rdata;
3584 struct lpfc_nodelist *pnode = rdata->pnode;
75baf696 3585 struct scsi_cmnd *cmd;
445cf4f4 3586 int result;
a257bf90 3587 struct scsi_device *tmp_sdev;
5ffc266e 3588 int depth;
fa61a54e 3589 unsigned long flags;
ea2151b4 3590 struct lpfc_fast_path_event *fast_path_evt;
75baf696 3591 struct Scsi_Host *shost;
a257bf90 3592 uint32_t queue_depth, scsi_id;
73d91e50 3593 uint32_t logit = LOG_FCP;
dea3101e 3594
75baf696
JS
3595 /* Sanity check on return of outstanding command */
3596 if (!(lpfc_cmd->pCmd))
3597 return;
3598 cmd = lpfc_cmd->pCmd;
3599 shost = cmd->device->host;
3600
dea3101e
JB
3601 lpfc_cmd->result = pIocbOut->iocb.un.ulpWord[4];
3602 lpfc_cmd->status = pIocbOut->iocb.ulpStatus;
341af102
JS
3603 /* pick up SLI4 exchange busy status from HBA */
3604 lpfc_cmd->exch_busy = pIocbOut->iocb_flag & LPFC_EXCHANGE_BUSY;
3605
9a6b09c0
JS
3606#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
3607 if (lpfc_cmd->prot_data_type) {
3608 struct scsi_dif_tuple *src = NULL;
3609
3610 src = (struct scsi_dif_tuple *)lpfc_cmd->prot_data_segment;
3611 /*
3612 * Used to restore any changes to protection
3613 * data for error injection.
3614 */
3615 switch (lpfc_cmd->prot_data_type) {
3616 case LPFC_INJERR_REFTAG:
3617 src->ref_tag =
3618 lpfc_cmd->prot_data;
3619 break;
3620 case LPFC_INJERR_APPTAG:
3621 src->app_tag =
3622 (uint16_t)lpfc_cmd->prot_data;
3623 break;
3624 case LPFC_INJERR_GUARD:
3625 src->guard_tag =
3626 (uint16_t)lpfc_cmd->prot_data;
3627 break;
3628 default:
3629 break;
3630 }
3631
3632 lpfc_cmd->prot_data = 0;
3633 lpfc_cmd->prot_data_type = 0;
3634 lpfc_cmd->prot_data_segment = NULL;
3635 }
3636#endif
109f6ed0
JS
3637 if (pnode && NLP_CHK_NODE_ACT(pnode))
3638 atomic_dec(&pnode->cmd_pending);
dea3101e
JB
3639
3640 if (lpfc_cmd->status) {
3641 if (lpfc_cmd->status == IOSTAT_LOCAL_REJECT &&
3642 (lpfc_cmd->result & IOERR_DRVR_MASK))
3643 lpfc_cmd->status = IOSTAT_DRIVER_REJECT;
3644 else if (lpfc_cmd->status >= IOSTAT_CNT)
3645 lpfc_cmd->status = IOSTAT_DEFAULT;
73d91e50
JS
3646 if (lpfc_cmd->status == IOSTAT_FCP_RSP_ERROR
3647 && !lpfc_cmd->fcp_rsp->rspStatus3
3648 && (lpfc_cmd->fcp_rsp->rspStatus2 & RESID_UNDER)
3649 && !(phba->cfg_log_verbose & LOG_FCP_UNDER))
3650 logit = 0;
3651 else
3652 logit = LOG_FCP | LOG_FCP_UNDER;
3653 lpfc_printf_vlog(vport, KERN_WARNING, logit,
3654 "9030 FCP cmd x%x failed <%d/%d> "
3655 "status: x%x result: x%x Data: x%x x%x\n",
3656 cmd->cmnd[0],
3657 cmd->device ? cmd->device->id : 0xffff,
3658 cmd->device ? cmd->device->lun : 0xffff,
3659 lpfc_cmd->status, lpfc_cmd->result,
3660 pIocbOut->iocb.ulpContext,
3661 lpfc_cmd->cur_iocbq.iocb.ulpIoTag);
dea3101e
JB
3662
3663 switch (lpfc_cmd->status) {
3664 case IOSTAT_FCP_RSP_ERROR:
3665 /* Call FCP RSP handler to determine result */
2e0fef85 3666 lpfc_handle_fcp_err(vport, lpfc_cmd, pIocbOut);
dea3101e
JB
3667 break;
3668 case IOSTAT_NPORT_BSY:
3669 case IOSTAT_FABRIC_BSY:
0f1f53a7 3670 cmd->result = ScsiResult(DID_TRANSPORT_DISRUPTED, 0);
ea2151b4
JS
3671 fast_path_evt = lpfc_alloc_fast_evt(phba);
3672 if (!fast_path_evt)
3673 break;
3674 fast_path_evt->un.fabric_evt.event_type =
3675 FC_REG_FABRIC_EVENT;
3676 fast_path_evt->un.fabric_evt.subcategory =
3677 (lpfc_cmd->status == IOSTAT_NPORT_BSY) ?
3678 LPFC_EVENT_PORT_BUSY : LPFC_EVENT_FABRIC_BUSY;
3679 if (pnode && NLP_CHK_NODE_ACT(pnode)) {
3680 memcpy(&fast_path_evt->un.fabric_evt.wwpn,
3681 &pnode->nlp_portname,
3682 sizeof(struct lpfc_name));
3683 memcpy(&fast_path_evt->un.fabric_evt.wwnn,
3684 &pnode->nlp_nodename,
3685 sizeof(struct lpfc_name));
3686 }
3687 fast_path_evt->vport = vport;
3688 fast_path_evt->work_evt.evt =
3689 LPFC_EVT_FASTPATH_MGMT_EVT;
3690 spin_lock_irqsave(&phba->hbalock, flags);
3691 list_add_tail(&fast_path_evt->work_evt.evt_listp,
3692 &phba->work_list);
3693 spin_unlock_irqrestore(&phba->hbalock, flags);
3694 lpfc_worker_wake_up(phba);
dea3101e 3695 break;
92d7f7b0 3696 case IOSTAT_LOCAL_REJECT:
1151e3ec 3697 case IOSTAT_REMOTE_STOP:
ab56dc2e
JS
3698 if (lpfc_cmd->result == IOERR_ELXSEC_KEY_UNWRAP_ERROR ||
3699 lpfc_cmd->result ==
3700 IOERR_ELXSEC_KEY_UNWRAP_COMPARE_ERROR ||
3701 lpfc_cmd->result == IOERR_ELXSEC_CRYPTO_ERROR ||
3702 lpfc_cmd->result ==
3703 IOERR_ELXSEC_CRYPTO_COMPARE_ERROR) {
3704 cmd->result = ScsiResult(DID_NO_CONNECT, 0);
3705 break;
3706 }
d7c255b2 3707 if (lpfc_cmd->result == IOERR_INVALID_RPI ||
92d7f7b0 3708 lpfc_cmd->result == IOERR_NO_RESOURCES ||
b92938b4
JS
3709 lpfc_cmd->result == IOERR_ABORT_REQUESTED ||
3710 lpfc_cmd->result == IOERR_SLER_CMD_RCV_FAILURE) {
92d7f7b0 3711 cmd->result = ScsiResult(DID_REQUEUE, 0);
58da1ffb 3712 break;
e2a0a9d6 3713 }
e2a0a9d6
JS
3714 if ((lpfc_cmd->result == IOERR_RX_DMA_FAILED ||
3715 lpfc_cmd->result == IOERR_TX_DMA_FAILED) &&
3716 pIocbOut->iocb.unsli3.sli3_bg.bgstat) {
3717 if (scsi_get_prot_op(cmd) != SCSI_PROT_NORMAL) {
3718 /*
3719 * This is a response for a BG enabled
3720 * cmd. Parse BG error
3721 */
3722 lpfc_parse_bg_err(phba, lpfc_cmd,
3723 pIocbOut);
3724 break;
3725 } else {
3726 lpfc_printf_vlog(vport, KERN_WARNING,
3727 LOG_BG,
3728 "9031 non-zero BGSTAT "
6a9c52cf 3729 "on unprotected cmd\n");
e2a0a9d6
JS
3730 }
3731 }
1151e3ec
JS
3732 if ((lpfc_cmd->status == IOSTAT_REMOTE_STOP)
3733 && (phba->sli_rev == LPFC_SLI_REV4)
3734 && (pnode && NLP_CHK_NODE_ACT(pnode))) {
3735 /* This IO was aborted by the target; we don't
3736 * know the rxid, and because we did not send the
3737 * ABTS we cannot generate an RRQ.
3738 */
3739 lpfc_set_rrq_active(phba, pnode,
3740 lpfc_cmd->cur_iocbq.sli4_xritag,
3741 0, 0);
3742 }
e2a0a9d6 3743 /* else: fall through */
dea3101e
JB
3744 default:
3745 cmd->result = ScsiResult(DID_ERROR, 0);
3746 break;
3747 }
3748
58da1ffb 3749 if (!pnode || !NLP_CHK_NODE_ACT(pnode)
19a7b4ae 3750 || (pnode->nlp_state != NLP_STE_MAPPED_NODE))
0f1f53a7
JS
3751 cmd->result = ScsiResult(DID_TRANSPORT_DISRUPTED,
3752 SAM_STAT_BUSY);
ab56dc2e 3753 } else
dea3101e 3754 cmd->result = ScsiResult(DID_OK, 0);
dea3101e
JB
3755
3756 if (cmd->result || lpfc_cmd->fcp_rsp->rspSnsLen) {
3757 uint32_t *lp = (uint32_t *)cmd->sense_buffer;
3758
e8b62011
JS
3759 lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
3760 "0710 Iodone <%d/%d> cmd %p, error "
3761 "x%x SNS x%x x%x Data: x%x x%x\n",
3762 cmd->device->id, cmd->device->lun, cmd,
3763 cmd->result, *lp, *(lp + 3), cmd->retries,
3764 scsi_get_resid(cmd));
dea3101e
JB
3765 }
3766
ea2151b4 3767 lpfc_update_stats(phba, lpfc_cmd);
445cf4f4 3768 result = cmd->result;
977b5a0a
JS
3769 if (vport->cfg_max_scsicmpl_time &&
3770 time_after(jiffies, lpfc_cmd->start_time +
3771 msecs_to_jiffies(vport->cfg_max_scsicmpl_time))) {
a257bf90 3772 spin_lock_irqsave(shost->host_lock, flags);
109f6ed0
JS
3773 if (pnode && NLP_CHK_NODE_ACT(pnode)) {
3774 if (pnode->cmd_qdepth >
3775 atomic_read(&pnode->cmd_pending) &&
3776 (atomic_read(&pnode->cmd_pending) >
3777 LPFC_MIN_TGT_QDEPTH) &&
3778 ((cmd->cmnd[0] == READ_10) ||
3779 (cmd->cmnd[0] == WRITE_10)))
3780 pnode->cmd_qdepth =
3781 atomic_read(&pnode->cmd_pending);
3782
3783 pnode->last_change_time = jiffies;
3784 }
a257bf90 3785 spin_unlock_irqrestore(shost->host_lock, flags);
109f6ed0 3786 } else if (pnode && NLP_CHK_NODE_ACT(pnode)) {
7dc517df 3787 if ((pnode->cmd_qdepth < vport->cfg_tgt_queue_depth) &&
977b5a0a 3788 time_after(jiffies, pnode->last_change_time +
109f6ed0 3789 msecs_to_jiffies(LPFC_TGTQ_INTERVAL))) {
a257bf90 3790 spin_lock_irqsave(shost->host_lock, flags);
7dc517df
JS
3791 depth = pnode->cmd_qdepth * LPFC_TGTQ_RAMPUP_PCENT
3792 / 100;
3793 depth = depth ? depth : 1;
3794 pnode->cmd_qdepth += depth;
3795 if (pnode->cmd_qdepth > vport->cfg_tgt_queue_depth)
3796 pnode->cmd_qdepth = vport->cfg_tgt_queue_depth;
109f6ed0 3797 pnode->last_change_time = jiffies;
a257bf90 3798 spin_unlock_irqrestore(shost->host_lock, flags);
109f6ed0 3799 }
977b5a0a
JS
3800 }
3801
1dcb58e5 3802 lpfc_scsi_unprep_dma_buf(phba, lpfc_cmd);
a257bf90
JS
3803
3804 /* The sdev is not guaranteed to be valid post scsi_done upcall. */
3805 queue_depth = cmd->device->queue_depth;
3806 scsi_id = cmd->device->id;
0bd4ca25
JSEC
3807 cmd->scsi_done(cmd);
3808
b808608b 3809 if (phba->cfg_poll & ENABLE_FCP_RING_POLLING) {
fa61a54e
JS
3810 /*
3811 * If there is a thread waiting for command completion
3812 * wake up the thread.
3813 */
a257bf90 3814 spin_lock_irqsave(shost->host_lock, flags);
495a714c 3815 lpfc_cmd->pCmd = NULL;
fa61a54e
JS
3816 if (lpfc_cmd->waitq)
3817 wake_up(lpfc_cmd->waitq);
a257bf90 3818 spin_unlock_irqrestore(shost->host_lock, flags);
b808608b
JW
3819 lpfc_release_scsi_buf(phba, lpfc_cmd);
3820 return;
3821 }
3822
92d7f7b0 3823 if (!result)
a257bf90 3824 lpfc_rampup_queue_depth(vport, queue_depth);
92d7f7b0 3825
445cf4f4
JSEC
3826 /*
3827 * Check for queue full. If the lun is reporting queue full, then
3828 * back off the lun queue depth to prevent target overloads.
3829 */
58da1ffb
JS
3830 if (result == SAM_STAT_TASK_SET_FULL && pnode &&
3831 NLP_CHK_NODE_ACT(pnode)) {
a257bf90
JS
3832 shost_for_each_device(tmp_sdev, shost) {
3833 if (tmp_sdev->id != scsi_id)
445cf4f4
JSEC
3834 continue;
3835 depth = scsi_track_queue_full(tmp_sdev,
5ffc266e
JS
3836 tmp_sdev->queue_depth-1);
3837 if (depth <= 0)
3838 continue;
e8b62011
JS
3839 lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP,
3840 "0711 detected queue full - lun queue "
3841 "depth adjusted to %d.\n", depth);
ea2151b4 3842 lpfc_send_sdev_queuedepth_change_event(phba, vport,
5ffc266e
JS
3843 pnode,
3844 tmp_sdev->lun,
3845 depth+1, depth);
445cf4f4
JSEC
3846 }
3847 }
3848
fa61a54e
JS
3849 /*
3850 * If there is a thread waiting for command completion
3851 * wake up the thread.
3852 */
a257bf90 3853 spin_lock_irqsave(shost->host_lock, flags);
495a714c 3854 lpfc_cmd->pCmd = NULL;
fa61a54e
JS
3855 if (lpfc_cmd->waitq)
3856 wake_up(lpfc_cmd->waitq);
a257bf90 3857 spin_unlock_irqrestore(shost->host_lock, flags);
fa61a54e 3858
0bd4ca25 3859 lpfc_release_scsi_buf(phba, lpfc_cmd);
dea3101e
JB
3860}
3861
34b02dcd 3862/**
3621a710 3863 * lpfc_fcpcmd_to_iocb - copy the fcp_cmd data into the IOCB
34b02dcd
JS
3864 * @data: A pointer to the immediate command data portion of the IOCB.
3865 * @fcp_cmnd: The FCP Command that is provided by the SCSI layer.
3866 *
3867 * The routine copies the entire FCP command from @fcp_cmnd to @data while
3868 * byte swapping the data to big endian format for transmission on the wire.
3869 **/
3870static void
3871lpfc_fcpcmd_to_iocb(uint8_t *data, struct fcp_cmnd *fcp_cmnd)
3872{
3873 int i, j;
3874 for (i = 0, j = 0; i < sizeof(struct fcp_cmnd);
3875 i += sizeof(uint32_t), j++) {
3876 ((uint32_t *)data)[j] = cpu_to_be32(((uint32_t *)fcp_cmnd)[j]);
3877 }
3878}
3879
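/*
 * Concretely, cpu_to_be32() in the loop above is a 32-bit byte swap on
 * little-endian hosts (e.g. 0x10000028 becomes 0x28000010) and the
 * identity on big-endian hosts, so the FCP_CMND payload is emitted
 * into the IOCB immediate-data area one big-endian word at a time,
 * sizeof(struct fcp_cmnd) / sizeof(uint32_t) words in total.
 */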
9bad7671 3880/**
f1126688 3881 * lpfc_scsi_prep_cmnd - Wrapper func to convert scsi cmnd to FCP info unit
9bad7671
JS
3882 * @vport: The virtual port for which this call is being executed.
3883 * @lpfc_cmd: The scsi command which needs to send.
3884 * @pnode: Pointer to lpfc_nodelist.
3885 *
3886 * This routine initializes the fcp_cmnd and iocb data structures from the
3772a991 3887 * scsi command, for transfer to a device with the SLI-3 interface spec.
9bad7671 3888 **/
dea3101e 3889static void
f1126688 3890lpfc_scsi_prep_cmnd(struct lpfc_vport *vport, struct lpfc_scsi_buf *lpfc_cmd,
2e0fef85 3891 struct lpfc_nodelist *pnode)
dea3101e 3892{
2e0fef85 3893 struct lpfc_hba *phba = vport->phba;
dea3101e
JB
3894 struct scsi_cmnd *scsi_cmnd = lpfc_cmd->pCmd;
3895 struct fcp_cmnd *fcp_cmnd = lpfc_cmd->fcp_cmnd;
3896 IOCB_t *iocb_cmd = &lpfc_cmd->cur_iocbq.iocb;
3897 struct lpfc_iocbq *piocbq = &(lpfc_cmd->cur_iocbq);
3898 int datadir = scsi_cmnd->sc_data_direction;
7e2b19fb 3899 char tag[2];
dea3101e 3900
58da1ffb
JS
3901 if (!pnode || !NLP_CHK_NODE_ACT(pnode))
3902 return;
3903
dea3101e 3904 lpfc_cmd->fcp_rsp->rspSnsLen = 0;
69859dc4
JSEC
3905 /* clear task management bits */
3906 lpfc_cmd->fcp_cmnd->fcpCntl2 = 0;
dea3101e 3907
91886523
JSEC
3908 int_to_scsilun(lpfc_cmd->pCmd->device->lun,
3909 &lpfc_cmd->fcp_cmnd->fcp_lun);
dea3101e 3910
df9e1b59
JS
3911 memset(&fcp_cmnd->fcpCdb[0], 0, LPFC_FCP_CDB_LEN);
3912 memcpy(&fcp_cmnd->fcpCdb[0], scsi_cmnd->cmnd, scsi_cmnd->cmd_len);
7e2b19fb
JS
3913 if (scsi_populate_tag_msg(scsi_cmnd, tag)) {
3914 switch (tag[0]) {
dea3101e
JB
3915 case HEAD_OF_QUEUE_TAG:
3916 fcp_cmnd->fcpCntl1 = HEAD_OF_Q;
3917 break;
3918 case ORDERED_QUEUE_TAG:
3919 fcp_cmnd->fcpCntl1 = ORDERED_Q;
3920 break;
3921 default:
3922 fcp_cmnd->fcpCntl1 = SIMPLE_Q;
3923 break;
3924 }
3925 } else
3926 fcp_cmnd->fcpCntl1 = 0;
3927
3928 /*
3929 * There are three possibilities here - use scatter-gather segment, use
3930 * the single mapping, or neither. Start the lpfc command prep by
3931 * bumping the bpl beyond the fcp_cmnd and fcp_rsp regions to the first
3932 * data bde entry.
3933 */
a0b4f78f 3934 if (scsi_sg_count(scsi_cmnd)) {
dea3101e
JB
3935 if (datadir == DMA_TO_DEVICE) {
3936 iocb_cmd->ulpCommand = CMD_FCP_IWRITE64_CR;
3772a991
JS
3937 if (phba->sli_rev < LPFC_SLI_REV4) {
3938 iocb_cmd->un.fcpi.fcpi_parm = 0;
3939 iocb_cmd->ulpPU = 0;
3940 } else
3941 iocb_cmd->ulpPU = PARM_READ_CHECK;
dea3101e
JB
3942 fcp_cmnd->fcpCntl3 = WRITE_DATA;
3943 phba->fc4OutputRequests++;
3944 } else {
3945 iocb_cmd->ulpCommand = CMD_FCP_IREAD64_CR;
3946 iocb_cmd->ulpPU = PARM_READ_CHECK;
dea3101e
JB
3947 fcp_cmnd->fcpCntl3 = READ_DATA;
3948 phba->fc4InputRequests++;
3949 }
3950 } else {
3951 iocb_cmd->ulpCommand = CMD_FCP_ICMND64_CR;
3952 iocb_cmd->un.fcpi.fcpi_parm = 0;
3953 iocb_cmd->ulpPU = 0;
3954 fcp_cmnd->fcpCntl3 = 0;
3955 phba->fc4ControlRequests++;
3956 }
e2a0a9d6
JS
3957 if (phba->sli_rev == 3 &&
3958 !(phba->sli3_options & LPFC_SLI3_BG_ENABLED))
34b02dcd 3959 lpfc_fcpcmd_to_iocb(iocb_cmd->unsli3.fcp_ext.icd, fcp_cmnd);
dea3101e
JB
3960 /*
3961 * Finish initializing those IOCB fields that are independent
3962 * of the scsi_cmnd request_buffer
3963 */
3964 piocbq->iocb.ulpContext = pnode->nlp_rpi;
6d368e53
JS
3965 if (phba->sli_rev == LPFC_SLI_REV4)
3966 piocbq->iocb.ulpContext =
3967 phba->sli4_hba.rpi_ids[pnode->nlp_rpi];
dea3101e
JB
3968 if (pnode->nlp_fcp_info & NLP_FCP_2_DEVICE)
3969 piocbq->iocb.ulpFCP2Rcvy = 1;
09372820
JS
3970 else
3971 piocbq->iocb.ulpFCP2Rcvy = 0;
dea3101e
JB
3972
3973 piocbq->iocb.ulpClass = (pnode->nlp_fcp_info & 0x0f);
3974 piocbq->context1 = lpfc_cmd;
3975 piocbq->iocb_cmpl = lpfc_scsi_cmd_iocb_cmpl;
3976 piocbq->iocb.ulpTimeout = lpfc_cmd->timeout;
2e0fef85 3977 piocbq->vport = vport;
dea3101e
JB
3978}
3979
da0436e9 3980/**
6d368e53 3981 * lpfc_scsi_prep_task_mgmt_cmd - Convert SLI3 scsi TM cmd to FCP info unit
9bad7671
JS
3982 * @vport: The virtual port for which this call is being executed.
3983 * @lpfc_cmd: Pointer to lpfc_scsi_buf data structure.
3984 * @lun: Logical unit number.
3985 * @task_mgmt_cmd: SCSI task management command.
3986 *
3772a991
JS
3987 * This routine creates FCP information unit corresponding to @task_mgmt_cmd
3988 * for device with SLI-3 interface spec.
9bad7671
JS
3989 *
3990 * Return codes:
3991 * 0 - Error
3992 * 1 - Success
3993 **/
dea3101e 3994static int
f1126688 3995lpfc_scsi_prep_task_mgmt_cmd(struct lpfc_vport *vport,
dea3101e 3996 struct lpfc_scsi_buf *lpfc_cmd,
420b630d 3997 unsigned int lun,
dea3101e
JB
3998 uint8_t task_mgmt_cmd)
3999{
dea3101e
JB
4000 struct lpfc_iocbq *piocbq;
4001 IOCB_t *piocb;
4002 struct fcp_cmnd *fcp_cmnd;
0b18ac42 4003 struct lpfc_rport_data *rdata = lpfc_cmd->rdata;
dea3101e
JB
4004 struct lpfc_nodelist *ndlp = rdata->pnode;
4005
58da1ffb
JS
4006 if (!ndlp || !NLP_CHK_NODE_ACT(ndlp) ||
4007 ndlp->nlp_state != NLP_STE_MAPPED_NODE)
dea3101e 4008 return 0;
dea3101e 4009
dea3101e 4010 piocbq = &(lpfc_cmd->cur_iocbq);
2e0fef85
JS
4011 piocbq->vport = vport;
4012
dea3101e
JB
4013 piocb = &piocbq->iocb;
4014
4015 fcp_cmnd = lpfc_cmd->fcp_cmnd;
34b02dcd
JS
4016 /* Clear out any old data in the FCP command area */
4017 memset(fcp_cmnd, 0, sizeof(struct fcp_cmnd));
4018 int_to_scsilun(lun, &fcp_cmnd->fcp_lun);
dea3101e 4019 fcp_cmnd->fcpCntl2 = task_mgmt_cmd;
e2a0a9d6
JS
4020 if (vport->phba->sli_rev == 3 &&
4021 !(vport->phba->sli3_options & LPFC_SLI3_BG_ENABLED))
34b02dcd 4022 lpfc_fcpcmd_to_iocb(piocb->unsli3.fcp_ext.icd, fcp_cmnd);
dea3101e 4023 piocb->ulpCommand = CMD_FCP_ICMND64_CR;
dea3101e 4024 piocb->ulpContext = ndlp->nlp_rpi;
6d368e53
JS
4025 if (vport->phba->sli_rev == LPFC_SLI_REV4) {
4026 piocb->ulpContext =
4027 vport->phba->sli4_hba.rpi_ids[ndlp->nlp_rpi];
4028 }
dea3101e
JB
4029 if (ndlp->nlp_fcp_info & NLP_FCP_2_DEVICE) {
4030 piocb->ulpFCP2Rcvy = 1;
4031 }
4032 piocb->ulpClass = (ndlp->nlp_fcp_info & 0x0f);
4033
4034 /* ulpTimeout is only one byte */
4035 if (lpfc_cmd->timeout > 0xff) {
4036 /*
4037 * Do not timeout the command at the firmware level.
4038 * The driver will provide the timeout mechanism.
4039 */
4040 piocb->ulpTimeout = 0;
f1126688 4041 } else
dea3101e 4042 piocb->ulpTimeout = lpfc_cmd->timeout;
da0436e9 4043
f1126688
JS
4044 if (vport->phba->sli_rev == LPFC_SLI_REV4)
4045 lpfc_sli4_set_rsp_sgl_last(vport->phba, lpfc_cmd);
3772a991 4046
f1126688 4047 return 1;
3772a991
JS
4048}
4049
4050/**
25985edc 4051 * lpfc_scsi_api_table_setup - Set up scsi api function jump table
3772a991
JS
4052 * @phba: The hba struct for which this call is being executed.
4053 * @dev_grp: The HBA PCI-Device group number.
4054 *
4055 * This routine sets up the SCSI interface API function jump table in @phba
4056 * struct.
4057 * Returns: 0 - success, -ENODEV - failure.
4058 **/
4059int
4060lpfc_scsi_api_table_setup(struct lpfc_hba *phba, uint8_t dev_grp)
4061{
4062
f1126688
JS
4063 phba->lpfc_scsi_unprep_dma_buf = lpfc_scsi_unprep_dma_buf;
4064 phba->lpfc_scsi_prep_cmnd = lpfc_scsi_prep_cmnd;
f1126688 4065
3772a991
JS
4066 switch (dev_grp) {
4067 case LPFC_PCI_DEV_LP:
4068 phba->lpfc_new_scsi_buf = lpfc_new_scsi_buf_s3;
4069 phba->lpfc_scsi_prep_dma_buf = lpfc_scsi_prep_dma_buf_s3;
acd6859b 4070 phba->lpfc_bg_scsi_prep_dma_buf = lpfc_bg_scsi_prep_dma_buf_s3;
3772a991 4071 phba->lpfc_release_scsi_buf = lpfc_release_scsi_buf_s3;
19ca7609 4072 phba->lpfc_get_scsi_buf = lpfc_get_scsi_buf_s3;
3772a991 4073 break;
da0436e9
JS
4074 case LPFC_PCI_DEV_OC:
4075 phba->lpfc_new_scsi_buf = lpfc_new_scsi_buf_s4;
4076 phba->lpfc_scsi_prep_dma_buf = lpfc_scsi_prep_dma_buf_s4;
acd6859b 4077 phba->lpfc_bg_scsi_prep_dma_buf = lpfc_bg_scsi_prep_dma_buf_s4;
da0436e9 4078 phba->lpfc_release_scsi_buf = lpfc_release_scsi_buf_s4;
19ca7609 4079 phba->lpfc_get_scsi_buf = lpfc_get_scsi_buf_s4;
da0436e9 4080 break;
3772a991
JS
4081 default:
4082 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
4083 "1418 Invalid HBA PCI-device group: 0x%x\n",
4084 dev_grp);
4085 return -ENODEV;
4086 break;
4087 }
3772a991 4088 phba->lpfc_rampdown_queue_depth = lpfc_rampdown_queue_depth;
84d1b006 4089 phba->lpfc_scsi_cmd_iocb_cmpl = lpfc_scsi_cmd_iocb_cmpl;
3772a991
JS
4090 return 0;
4091}
4092
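/*
 * Once populated, the table hides all SLI-3/SLI-4 differences behind
 * indirect calls; condensed from the wrappers and callers elsewhere in
 * this file, the I/O fast path reduces to:
 *
 *	lpfc_cmd = phba->lpfc_get_scsi_buf(phba, ndlp);
 *	err = phba->lpfc_scsi_prep_dma_buf(phba, lpfc_cmd);
 *	...
 *	phba->lpfc_release_scsi_buf(phba, lpfc_cmd);
 *
 * and never re-tests the PCI device group.
 */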
9bad7671 4093/**
3621a710 4094 * lpfc_tskmgmt_def_cmpl - IOCB completion routine for task management command
9bad7671
JS
4095 * @phba: The Hba for which this call is being executed.
4096 * @cmdiocbq: Pointer to lpfc_iocbq data structure.
4097 * @rspiocbq: Pointer to lpfc_iocbq data structure.
4098 *
4099 * This routine is IOCB completion routine for device reset and target reset
4100 * routine. This routine release scsi buffer associated with lpfc_cmd.
4101 **/
7054a606
JS
4102static void
4103lpfc_tskmgmt_def_cmpl(struct lpfc_hba *phba,
4104 struct lpfc_iocbq *cmdiocbq,
4105 struct lpfc_iocbq *rspiocbq)
4106{
4107 struct lpfc_scsi_buf *lpfc_cmd =
4108 (struct lpfc_scsi_buf *) cmdiocbq->context1;
4109 if (lpfc_cmd)
4110 lpfc_release_scsi_buf(phba, lpfc_cmd);
4111 return;
4112}
4113
9bad7671 4114/**
3621a710 4115 * lpfc_info - Info entry point of scsi_host_template data structure
9bad7671
JS
4116 * @host: The scsi host for which this call is being executed.
4117 *
4118 * This routine provides module information about hba.
4119 *
4120 * Return code:
4121 * Pointer to char - Success.
4122 **/
dea3101e
JB
4123const char *
4124lpfc_info(struct Scsi_Host *host)
4125{
2e0fef85
JS
4126 struct lpfc_vport *vport = (struct lpfc_vport *) host->hostdata;
4127 struct lpfc_hba *phba = vport->phba;
dea3101e
JB
4128 int len;
4129 static char lpfcinfobuf[384];
4130
4131 memset(lpfcinfobuf, 0, 384);
4132 if (phba && phba->pcidev) {
4133 strncpy(lpfcinfobuf, phba->ModelDesc, 256);
4134 len = strlen(lpfcinfobuf);
4135 snprintf(lpfcinfobuf + len,
4136 384-len,
4137 " on PCI bus %02x device %02x irq %d",
4138 phba->pcidev->bus->number,
4139 phba->pcidev->devfn,
4140 phba->pcidev->irq);
4141 len = strlen(lpfcinfobuf);
4142 if (phba->Port[0]) {
4143 snprintf(lpfcinfobuf + len,
4144 384-len,
4145 " port %s",
4146 phba->Port);
4147 }
65467b6b
JS
4148 len = strlen(lpfcinfobuf);
4149 if (phba->sli4_hba.link_state.logical_speed) {
4150 snprintf(lpfcinfobuf + len,
4151 384-len,
4152 " Logical Link Speed: %d Mbps",
4153 phba->sli4_hba.link_state.logical_speed * 10);
4154 }
dea3101e
JB
4155 }
4156 return lpfcinfobuf;
4157}
4158
9bad7671 4159/**
3621a710 4160 * lpfc_poll_rearm_time - Routine to modify fcp_poll timer of hba
9bad7671
JS
4161 * @phba: The Hba for which this call is being executed.
4162 *
4163 * This routine modifies fcp_poll_timer field of @phba by cfg_poll_tmo.
4164 * The default value of cfg_poll_tmo is 10 milliseconds.
4165 **/
875fbdfe
JSEC
4166static __inline__ void lpfc_poll_rearm_timer(struct lpfc_hba * phba)
4167{
4168 unsigned long poll_tmo_expires =
4169 (jiffies + msecs_to_jiffies(phba->cfg_poll_tmo));
4170
4171 if (phba->sli.ring[LPFC_FCP_RING].txcmplq_cnt)
4172 mod_timer(&phba->fcp_poll_timer,
4173 poll_tmo_expires);
4174}
4175
9bad7671 4176/**
3621a710 4177 * lpfc_poll_start_timer - Routine to start fcp_poll_timer of HBA
9bad7671
JS
4178 * @phba: The Hba for which this call is being executed.
4179 *
4180 * This routine starts the fcp_poll_timer of @phba.
4181 **/
875fbdfe
JSEC
4182void lpfc_poll_start_timer(struct lpfc_hba * phba)
4183{
4184 lpfc_poll_rearm_timer(phba);
4185}
4186
9bad7671 4187/**
3621a710 4188 * lpfc_poll_timeout - Restart polling timer
9bad7671
JS
4189 * @ptr: Map to lpfc_hba data structure pointer.
4190 *
4191 * This routine restarts the fcp_poll timer when FCP ring polling is
4192 * enabled and the FCP ring interrupt is disabled.
4193 **/
4194
875fbdfe
JSEC
4195void lpfc_poll_timeout(unsigned long ptr)
4196{
2e0fef85 4197 struct lpfc_hba *phba = (struct lpfc_hba *) ptr;
875fbdfe
JSEC
4198
4199 if (phba->cfg_poll & ENABLE_FCP_RING_POLLING) {
45ed1190
JS
4200 lpfc_sli_handle_fast_ring_event(phba,
4201 &phba->sli.ring[LPFC_FCP_RING], HA_R0RE_REQ);
4202
875fbdfe
JSEC
4203 if (phba->cfg_poll & DISABLE_FCP_RING_INT)
4204 lpfc_poll_rearm_timer(phba);
4205 }
875fbdfe
JSEC
4206}
4207
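/*
 * The timer driving the two routines above is presumably initialized
 * once at HBA setup time (outside this file) along the usual pattern;
 * a sketch, assuming the classic timer API of this kernel era:
 *
 *	init_timer(&phba->fcp_poll_timer);
 *	phba->fcp_poll_timer.function = lpfc_poll_timeout;
 *	phba->fcp_poll_timer.data = (unsigned long)phba;
 *
 * after which lpfc_poll_start_timer()/lpfc_poll_rearm_timer() only
 * ever need mod_timer().
 */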
9bad7671 4208/**
3621a710 4209 * lpfc_queuecommand - scsi_host_template queuecommand entry point
9bad7671
JS
4210 * @cmnd: Pointer to scsi_cmnd data structure.
4211 * @done: Pointer to done routine.
4212 *
4213 * Driver registers this routine to scsi midlayer to submit a @cmd to process.
4214 * This routine prepares an IOCB from scsi command and provides to firmware.
4215 * The @done callback is invoked after driver finished processing the command.
4216 *
4217 * Return value :
4218 * 0 - Success
4219 * SCSI_MLQUEUE_HOST_BUSY - Block all devices served by this host temporarily.
4220 **/
dea3101e 4221static int
f281233d 4222lpfc_queuecommand_lck(struct scsi_cmnd *cmnd, void (*done) (struct scsi_cmnd *))
dea3101e 4223{
2e0fef85
JS
4224 struct Scsi_Host *shost = cmnd->device->host;
4225 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
4226 struct lpfc_hba *phba = vport->phba;
dea3101e 4227 struct lpfc_rport_data *rdata = cmnd->device->hostdata;
1c6f4ef5 4228 struct lpfc_nodelist *ndlp;
0bd4ca25 4229 struct lpfc_scsi_buf *lpfc_cmd;
19a7b4ae 4230 struct fc_rport *rport = starget_to_rport(scsi_target(cmnd->device));
19a7b4ae 4231 int err;
dea3101e 4232
19a7b4ae
JSEC
4233 err = fc_remote_port_chkready(rport);
4234 if (err) {
4235 cmnd->result = err;
dea3101e
JB
4236 goto out_fail_command;
4237 }
1c6f4ef5 4238 ndlp = rdata->pnode;
dea3101e 4239
bf08611b 4240 if ((scsi_get_prot_op(cmnd) != SCSI_PROT_NORMAL) &&
acd6859b 4241 (!(phba->sli3_options & LPFC_SLI3_BG_ENABLED))) {
e2a0a9d6 4242
6a9c52cf
JS
4243 lpfc_printf_log(phba, KERN_ERR, LOG_BG,
4244 "9058 BLKGRD: ERROR: rcvd protected cmd:%02x"
4245 " op:%02x str=%s without registering for"
4246 " BlockGuard - Rejecting command\n",
e2a0a9d6
JS
4247 cmnd->cmnd[0], scsi_get_prot_op(cmnd),
4248 dif_op_str[scsi_get_prot_op(cmnd)]);
4249 goto out_fail_command;
4250 }
4251
dea3101e 4252 /*
19a7b4ae
JSEC
4253 * Catch race where our node has transitioned, but the
4254 * transport is still transitioning.
dea3101e 4255 */
b522d7d4 4256 if (!ndlp || !NLP_CHK_NODE_ACT(ndlp)) {
f55ca84d 4257 cmnd->result = ScsiResult(DID_IMM_RETRY, 0);
b522d7d4
JS
4258 goto out_fail_command;
4259 }
7dc517df 4260 if (atomic_read(&ndlp->cmd_pending) >= ndlp->cmd_qdepth)
3496343d 4261 goto out_tgt_busy;
a93ce024 4262
19ca7609 4263 lpfc_cmd = lpfc_get_scsi_buf(phba, ndlp);
dea3101e 4264 if (lpfc_cmd == NULL) {
eaf15d5b 4265 lpfc_rampdown_queue_depth(phba);
92d7f7b0 4266
e8b62011
JS
4267 lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
4268 "0707 driver's buffer pool is empty, "
4269 "IO busied\n");
dea3101e
JB
4270 goto out_host_busy;
4271 }
4272
4273 /*
4274 * Store the midlayer's command structure for the completion phase
4275 * and complete the command initialization.
4276 */
4277 lpfc_cmd->pCmd = cmnd;
4278 lpfc_cmd->rdata = rdata;
4279 lpfc_cmd->timeout = 0;
977b5a0a 4280 lpfc_cmd->start_time = jiffies;
dea3101e
JB
4281 cmnd->host_scribble = (unsigned char *)lpfc_cmd;
4282 cmnd->scsi_done = done;
4283
e2a0a9d6 4284 if (scsi_get_prot_op(cmnd) != SCSI_PROT_NORMAL) {
6a9c52cf
JS
4285 if (vport->phba->cfg_enable_bg) {
4286 lpfc_printf_vlog(vport, KERN_WARNING, LOG_BG,
9a6b09c0
JS
4287 "9033 BLKGRD: rcvd protected cmd:%02x op=%s "
4288 "guard=%s\n", cmnd->cmnd[0],
4289 dif_op_str[scsi_get_prot_op(cmnd)],
4290 dif_grd_str[scsi_host_get_guard(shost)]);
6a9c52cf
JS
4291 if (cmnd->cmnd[0] == READ_10)
4292 lpfc_printf_vlog(vport, KERN_WARNING, LOG_BG,
e2a0a9d6 4293 "9035 BLKGRD: READ @ sector %llu, "
9a6b09c0 4294 "cnt %u, rpt %d\n",
83096ebf 4295 (unsigned long long)scsi_get_lba(cmnd),
9a6b09c0
JS
4296 blk_rq_sectors(cmnd->request),
4297 (cmnd->cmnd[1]>>5));
6a9c52cf
JS
4298 else if (cmnd->cmnd[0] == WRITE_10)
4299 lpfc_printf_vlog(vport, KERN_WARNING, LOG_BG,
e2a0a9d6 4300 "9036 BLKGRD: WRITE @ sector %llu, "
9a6b09c0 4301 "cnt %u, wpt %d\n",
87b5c328 4302 (unsigned long long)scsi_get_lba(cmnd),
83096ebf 4303 blk_rq_sectors(cmnd->request),
9a6b09c0 4304 (cmnd->cmnd[1]>>5));
6a9c52cf 4305 }
e2a0a9d6
JS
4306
4307 err = lpfc_bg_scsi_prep_dma_buf(phba, lpfc_cmd);
4308 } else {
6a9c52cf 4309 if (vport->phba->cfg_enable_bg) {
e2a0a9d6 4310 lpfc_printf_vlog(vport, KERN_WARNING, LOG_BG,
9a6b09c0
JS
4311 "9038 BLKGRD: rcvd unprotected cmd:"
4312 "%02x op=%s guard=%s\n", cmnd->cmnd[0],
4313 dif_op_str[scsi_get_prot_op(cmnd)],
4314 dif_grd_str[scsi_host_get_guard(shost)]);
6a9c52cf
JS
4315 if (cmnd->cmnd[0] == READ_10)
4316 lpfc_printf_vlog(vport, KERN_WARNING, LOG_BG,
4317 "9040 dbg: READ @ sector %llu, "
9a6b09c0 4318 "cnt %u, rpt %d\n",
6a9c52cf 4319 (unsigned long long)scsi_get_lba(cmnd),
9a6b09c0
JS
4320 blk_rq_sectors(cmnd->request),
4321 (cmnd->cmnd[1]>>5));
6a9c52cf
JS
4322 else if (cmnd->cmnd[0] == WRITE_10)
4323 lpfc_printf_vlog(vport, KERN_WARNING, LOG_BG,
9a6b09c0
JS
4324 "9041 dbg: WRITE @ sector %llu, "
4325 "cnt %u, wpt %d\n",
4326 (unsigned long long)scsi_get_lba(cmnd),
4327 blk_rq_sectors(cmnd->request),
4328 (cmnd->cmnd[1]>>5));
6a9c52cf 4329 }
e2a0a9d6
JS
4330 err = lpfc_scsi_prep_dma_buf(phba, lpfc_cmd);
4331 }
4332
dea3101e
JB
4333 if (err)
4334 goto out_host_busy_free_buf;
4335
2e0fef85 4336 lpfc_scsi_prep_cmnd(vport, lpfc_cmd, ndlp);
dea3101e 4337
977b5a0a 4338 atomic_inc(&ndlp->cmd_pending);
3772a991 4339 err = lpfc_sli_issue_iocb(phba, LPFC_FCP_RING,
92d7f7b0 4340 &lpfc_cmd->cur_iocbq, SLI_IOCB_RET_IOCB);
eaf15d5b
JS
4341 if (err) {
4342 atomic_dec(&ndlp->cmd_pending);
dea3101e 4343 goto out_host_busy_free_buf;
eaf15d5b 4344 }
875fbdfe 4345 if (phba->cfg_poll & ENABLE_FCP_RING_POLLING) {
45ed1190
JS
4346 spin_unlock(shost->host_lock);
4347 lpfc_sli_handle_fast_ring_event(phba,
4348 &phba->sli.ring[LPFC_FCP_RING], HA_R0RE_REQ);
4349
4350 spin_lock(shost->host_lock);
875fbdfe
JSEC
4351 if (phba->cfg_poll & DISABLE_FCP_RING_INT)
4352 lpfc_poll_rearm_timer(phba);
4353 }
4354
dea3101e
JB
4355 return 0;
4356
4357 out_host_busy_free_buf:
bcf4dbfa 4358 lpfc_scsi_unprep_dma_buf(phba, lpfc_cmd);
0bd4ca25 4359 lpfc_release_scsi_buf(phba, lpfc_cmd);
dea3101e
JB
4360 out_host_busy:
4361 return SCSI_MLQUEUE_HOST_BUSY;
4362
3496343d
MC
4363 out_tgt_busy:
4364 return SCSI_MLQUEUE_TARGET_BUSY;
4365
dea3101e
JB
4366 out_fail_command:
4367 done(cmnd);
4368 return 0;
4369}
4370
f281233d
JG
4371static DEF_SCSI_QCMD(lpfc_queuecommand)
4372
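/*
 * DEF_SCSI_QCMD() (from <scsi/scsi_host.h>) generates the locked entry
 * point the midlayer actually calls; roughly equivalent to:
 *
 *	static int lpfc_queuecommand(struct Scsi_Host *shost,
 *				     struct scsi_cmnd *cmd)
 *	{
 *		unsigned long irq_flags;
 *		int rc;
 *
 *		spin_lock_irqsave(shost->host_lock, irq_flags);
 *		rc = lpfc_queuecommand_lck(cmd, cmd->scsi_done);
 *		spin_unlock_irqrestore(shost->host_lock, irq_flags);
 *		return rc;
 *	}
 */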
9bad7671 4373/**
3621a710 4374 * lpfc_abort_handler - scsi_host_template eh_abort_handler entry point
9bad7671
JS
4375 * @cmnd: Pointer to scsi_cmnd data structure.
4376 *
4377 * This routine aborts @cmnd pending in base driver.
4378 *
4379 * Return code :
4380 * 0x2003 - Error
4381 * 0x2002 - Success
4382 **/
dea3101e 4383static int
63c59c3b 4384lpfc_abort_handler(struct scsi_cmnd *cmnd)
dea3101e 4385{
2e0fef85
JS
4386 struct Scsi_Host *shost = cmnd->device->host;
4387 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
4388 struct lpfc_hba *phba = vport->phba;
0bd4ca25
JSEC
4389 struct lpfc_iocbq *iocb;
4390 struct lpfc_iocbq *abtsiocb;
dea3101e 4391 struct lpfc_scsi_buf *lpfc_cmd;
dea3101e 4392 IOCB_t *cmd, *icmd;
0bd4ca25 4393 int ret = SUCCESS;
fa61a54e 4394 DECLARE_WAIT_QUEUE_HEAD_ONSTACK(waitq);
dea3101e 4395
589a52d6
JS
4396 ret = fc_block_scsi_eh(cmnd);
4397 if (ret)
4398 return ret;
0bd4ca25 4399 lpfc_cmd = (struct lpfc_scsi_buf *)cmnd->host_scribble;
eee8877e
JS
4400 if (!lpfc_cmd) {
4401 lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP,
4402 "2873 SCSI Layer I/O Abort Request IO CMPL Status "
5cd049a5
CH
4403 "x%x ID %d LUN %d\n",
4404 ret, cmnd->device->id, cmnd->device->lun);
eee8877e
JS
4405 return SUCCESS;
4406 }
dea3101e 4407
0bd4ca25
JSEC
4408 /*
4409 * If pCmd field of the corresponding lpfc_scsi_buf structure
4410 * points to a different SCSI command, then the driver has
4411 * already completed this command, but the midlayer did not
4412 * see the completion before the eh fired. Just return
4413 * SUCCESS.
4414 */
4415 iocb = &lpfc_cmd->cur_iocbq;
4416 if (lpfc_cmd->pCmd != cmnd)
4417 goto out;
dea3101e 4418
0bd4ca25 4419 BUG_ON(iocb->context1 != lpfc_cmd);
dea3101e 4420
0bd4ca25
JSEC
4421 abtsiocb = lpfc_sli_get_iocbq(phba);
4422 if (abtsiocb == NULL) {
4423 ret = FAILED;
dea3101e
JB
4424 goto out;
4425 }
4426
dea3101e 4427 /*
0bd4ca25
JSEC
4428 * The scsi command cannot be in the txq; it is in flight because
4429 * pCmd is still pointing at the SCSI command we have to abort. There
4430 * is no need to search the txcmplq. Just send an abort to the FW.
dea3101e 4431 */
dea3101e 4432
0bd4ca25
JSEC
4433 cmd = &iocb->iocb;
4434 icmd = &abtsiocb->iocb;
4435 icmd->un.acxri.abortType = ABORT_TYPE_ABTS;
4436 icmd->un.acxri.abortContextTag = cmd->ulpContext;
3772a991
JS
4437 if (phba->sli_rev == LPFC_SLI_REV4)
4438 icmd->un.acxri.abortIoTag = iocb->sli4_xritag;
4439 else
4440 icmd->un.acxri.abortIoTag = cmd->ulpIoTag;
dea3101e 4441
0bd4ca25
JSEC
4442 icmd->ulpLe = 1;
4443 icmd->ulpClass = cmd->ulpClass;
5ffc266e
JS
4444
4445 /* ABTS WQE must go to the same WQ as the WQE to be aborted */
4446 abtsiocb->fcp_wqidx = iocb->fcp_wqidx;
341af102 4447 abtsiocb->iocb_flag |= LPFC_USE_FCPWQIDX;
5ffc266e 4448
2e0fef85 4449 if (lpfc_is_link_up(phba))
0bd4ca25
JSEC
4450 icmd->ulpCommand = CMD_ABORT_XRI_CN;
4451 else
4452 icmd->ulpCommand = CMD_CLOSE_XRI_CN;
dea3101e 4453
0bd4ca25 4454 abtsiocb->iocb_cmpl = lpfc_sli_abort_fcp_cmpl;
2e0fef85 4455 abtsiocb->vport = vport;
3772a991
JS
4456 if (lpfc_sli_issue_iocb(phba, LPFC_FCP_RING, abtsiocb, 0) ==
4457 IOCB_ERROR) {
0bd4ca25
JSEC
4458 lpfc_sli_release_iocbq(phba, abtsiocb);
4459 ret = FAILED;
4460 goto out;
4461 }
dea3101e 4462
875fbdfe 4463 if (phba->cfg_poll & DISABLE_FCP_RING_INT)
45ed1190
JS
4464 lpfc_sli_handle_fast_ring_event(phba,
4465 &phba->sli.ring[LPFC_FCP_RING], HA_R0RE_REQ);
875fbdfe 4466
fa61a54e 4467 lpfc_cmd->waitq = &waitq;
0bd4ca25 4468 /* Wait for abort to complete */
fa61a54e
JS
4469 wait_event_timeout(waitq,
4470 (lpfc_cmd->pCmd != cmnd),
4471 (2*vport->cfg_devloss_tmo*HZ));
875fbdfe 4472
fa61a54e
JS
4473 spin_lock_irq(shost->host_lock);
4474 lpfc_cmd->waitq = NULL;
4475 spin_unlock_irq(shost->host_lock);
dea3101e 4476
0bd4ca25
JSEC
4477 if (lpfc_cmd->pCmd == cmnd) {
4478 ret = FAILED;
e8b62011
JS
4479 lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
4480 "0748 abort handler timed out waiting "
4481 "for abort to complete: ret %#x, ID %d, "
5cd049a5
CH
4482 "LUN %d\n",
4483 ret, cmnd->device->id, cmnd->device->lun);
dea3101e
JB
4484 }
4485
4486 out:
e8b62011
JS
4487 lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP,
4488 "0749 SCSI Layer I/O Abort Request Status x%x ID %d "
5cd049a5
CH
4489 "LUN %d\n", ret, cmnd->device->id,
4490 cmnd->device->lun);
63c59c3b 4491 return ret;
8fa728a2
JG
4492}
4493
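/*
 * The wait in lpfc_abort_handler() above relies on the handshake in
 * lpfc_scsi_cmd_iocb_cmpl() earlier in this file: the completion path
 * clears pCmd and wakes any waiter under the host lock,
 *
 *	spin_lock_irqsave(shost->host_lock, flags);
 *	lpfc_cmd->pCmd = NULL;
 *	if (lpfc_cmd->waitq)
 *		wake_up(lpfc_cmd->waitq);
 *	spin_unlock_irqrestore(shost->host_lock, flags);
 *
 * so (lpfc_cmd->pCmd != cmnd) becoming true means the aborted command
 * has completed back to the midlayer.
 */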
bbb9d180
JS
4494static char *
4495lpfc_taskmgmt_name(uint8_t task_mgmt_cmd)
4496{
4497 switch (task_mgmt_cmd) {
4498 case FCP_ABORT_TASK_SET:
4499 return "ABORT_TASK_SET";
4500 case FCP_CLEAR_TASK_SET:
4501 return "FCP_CLEAR_TASK_SET";
4502 case FCP_BUS_RESET:
4503 return "FCP_BUS_RESET";
4504 case FCP_LUN_RESET:
4505 return "FCP_LUN_RESET";
4506 case FCP_TARGET_RESET:
4507 return "FCP_TARGET_RESET";
4508 case FCP_CLEAR_ACA:
4509 return "FCP_CLEAR_ACA";
4510 case FCP_TERMINATE_TASK:
4511 return "FCP_TERMINATE_TASK";
4512 default:
4513 return "unknown";
4514 }
4515}
4516
9bad7671 4517/**
bbb9d180
JS
4518 * lpfc_send_taskmgmt - Generic SCSI Task Mgmt Handler
4519 * @vport: The virtual port for which this call is being executed.
4520 * @rdata: Pointer to remote port local data
4521 * @tgt_id: Target ID of remote device.
4522 * @lun_id: Lun number for the TMF
4523 * @task_mgmt_cmd: type of TMF to send
9bad7671 4524 *
bbb9d180
JS
4525 * This routine builds and sends a TMF (SCSI Task Mgmt Function) to
4526 * a remote port.
9bad7671 4527 *
bbb9d180
JS
4528 * Return Code:
4529 * 0x2003 - Error
4530 * 0x2002 - Success.
9bad7671 4531 **/
dea3101e 4532static int
bbb9d180
JS
4533lpfc_send_taskmgmt(struct lpfc_vport *vport, struct lpfc_rport_data *rdata,
4534 unsigned tgt_id, unsigned int lun_id,
4535 uint8_t task_mgmt_cmd)
dea3101e 4536{
2e0fef85 4537 struct lpfc_hba *phba = vport->phba;
0bd4ca25 4538 struct lpfc_scsi_buf *lpfc_cmd;
bbb9d180
JS
4539 struct lpfc_iocbq *iocbq;
4540 struct lpfc_iocbq *iocbqrsp;
5989b8d4 4541 struct lpfc_nodelist *pnode = rdata->pnode;
bbb9d180 4542 int ret;
915caaaf 4543 int status;
dea3101e 4544
5989b8d4 4545 if (!pnode || !NLP_CHK_NODE_ACT(pnode))
915caaaf 4546 return FAILED;
bbb9d180 4547
19ca7609 4548 lpfc_cmd = lpfc_get_scsi_buf(phba, rdata->pnode);
dea3101e 4549 if (lpfc_cmd == NULL)
915caaaf 4550 return FAILED;
dea3101e 4551 lpfc_cmd->timeout = 60;
0b18ac42 4552 lpfc_cmd->rdata = rdata;
dea3101e 4553
bbb9d180
JS
4554 status = lpfc_scsi_prep_task_mgmt_cmd(vport, lpfc_cmd, lun_id,
4555 task_mgmt_cmd);
915caaaf
JS
4556 if (!status) {
4557 lpfc_release_scsi_buf(phba, lpfc_cmd);
4558 return FAILED;
4559 }
dea3101e 4560
bbb9d180 4561 iocbq = &lpfc_cmd->cur_iocbq;
0bd4ca25 4562 iocbqrsp = lpfc_sli_get_iocbq(phba);
915caaaf
JS
4563 if (iocbqrsp == NULL) {
4564 lpfc_release_scsi_buf(phba, lpfc_cmd);
4565 return FAILED;
4566 }
bbb9d180 4567
e8b62011 4568 lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
bbb9d180 4569 "0702 Issue %s to TGT %d LUN %d "
6d368e53 4570 "rpi x%x nlp_flag x%x Data: x%x x%x\n",
bbb9d180 4571 lpfc_taskmgmt_name(task_mgmt_cmd), tgt_id, lun_id,
6d368e53
JS
4572 pnode->nlp_rpi, pnode->nlp_flag, iocbq->sli4_xritag,
4573 iocbq->iocb_flag);
bbb9d180 4574
3772a991 4575 status = lpfc_sli_issue_iocb_wait(phba, LPFC_FCP_RING,
915caaaf 4576 iocbq, iocbqrsp, lpfc_cmd->timeout);
bbb9d180
JS
4577 if (status != IOCB_SUCCESS) {
4578 if (status == IOCB_TIMEDOUT) {
4579 iocbq->iocb_cmpl = lpfc_tskmgmt_def_cmpl;
4580 ret = TIMEOUT_ERROR;
4581 } else
915caaaf 4582 ret = FAILED;
bbb9d180
JS
4583 lpfc_cmd->status = IOSTAT_DRIVER_REJECT;
4584 lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
6d368e53
JS
4585 "0727 TMF %s to TGT %d LUN %d failed (%d, %d) "
4586 "iocb_flag x%x\n",
bbb9d180
JS
4587 lpfc_taskmgmt_name(task_mgmt_cmd),
4588 tgt_id, lun_id, iocbqrsp->iocb.ulpStatus,
6d368e53
JS
4589 iocbqrsp->iocb.un.ulpWord[4],
4590 iocbq->iocb_flag);
2a9bf3d0
JS
4591 } else if (status == IOCB_BUSY)
4592 ret = FAILED;
4593 else
bbb9d180
JS
4594 ret = SUCCESS;
4595
6175c02a 4596 lpfc_sli_release_iocbq(phba, iocbqrsp);
bbb9d180
JS
4597
4598 if (ret != TIMEOUT_ERROR)
4599 lpfc_release_scsi_buf(phba, lpfc_cmd);
4600
4601 return ret;
4602}
4603
4604/**
4605 * lpfc_chk_tgt_mapped - Delay until the scsi target is mapped
4606 * @vport: The virtual port to check on
4607 * @cmnd: Pointer to scsi_cmnd data structure.
4608 *
4609 * This routine delays until the scsi target (aka rport) for the
4610 * command exists (is present and logged in) or we declare it non-existent.
4611 *
4612 * Return code :
4613 * 0x2003 - Error
4614 * 0x2002 - Success
4615 **/
4616static int
4617lpfc_chk_tgt_mapped(struct lpfc_vport *vport, struct scsi_cmnd *cmnd)
4618{
4619 struct lpfc_rport_data *rdata = cmnd->device->hostdata;
1c6f4ef5 4620 struct lpfc_nodelist *pnode;
bbb9d180
JS
4621 unsigned long later;
4622
1c6f4ef5
JS
4623 if (!rdata) {
4624 lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
4625 "0797 Tgt Map rport failure: rdata x%p\n", rdata);
4626 return FAILED;
4627 }
4628 pnode = rdata->pnode;
bbb9d180
JS
4629 /*
4630 * If target is not in a MAPPED state, delay until
4631 * target is rediscovered or devloss timeout expires.
4632 */
4633 later = msecs_to_jiffies(2 * vport->cfg_devloss_tmo * 1000) + jiffies;
4634 while (time_after(later, jiffies)) {
4635 if (!pnode || !NLP_CHK_NODE_ACT(pnode))
4636 return FAILED;
4637 if (pnode->nlp_state == NLP_STE_MAPPED_NODE)
4638 return SUCCESS;
4639 schedule_timeout_uninterruptible(msecs_to_jiffies(500));
4640 rdata = cmnd->device->hostdata;
4641 if (!rdata)
4642 return FAILED;
4643 pnode = rdata->pnode;
4644 }
4645 if (!pnode || !NLP_CHK_NODE_ACT(pnode) ||
4646 (pnode->nlp_state != NLP_STE_MAPPED_NODE))
4647 return FAILED;
4648 return SUCCESS;
4649}
4650
4651/**
4652 * lpfc_reset_flush_io_context - Flush I/O contexts left over after a reset TMF
4653 * @vport: The virtual port (scsi_host) for the flush context
4654 * @tgt_id: If aborting by Target context - specifies the target id
4655 * @lun_id: If aborting by Lun context - specifies the lun id
4656 * @context: specifies the context level to flush at.
4657 *
4658 * After a reset condition via TMF, we need to flush orphaned i/o
4659 * contexts from the adapter. This routine aborts any contexts
4660 * outstanding, then waits for their completions. The wait is
4661 * bounded by devloss_tmo though.
4662 *
4663 * Return code :
4664 * 0x2003 - Error
4665 * 0x2002 - Success
4666 **/
4667static int
4668lpfc_reset_flush_io_context(struct lpfc_vport *vport, uint16_t tgt_id,
4669 uint64_t lun_id, lpfc_ctx_cmd context)
4670{
4671 struct lpfc_hba *phba = vport->phba;
4672 unsigned long later;
4673 int cnt;
4674
4675 cnt = lpfc_sli_sum_iocb(vport, tgt_id, lun_id, context);
6175c02a 4676 if (cnt)
51ef4c26 4677 lpfc_sli_abort_iocb(vport, &phba->sli.ring[phba->sli.fcp_ring],
bbb9d180 4678 tgt_id, lun_id, context);
915caaaf
JS
4679 later = msecs_to_jiffies(2 * vport->cfg_devloss_tmo * 1000) + jiffies;
4680 while (time_after(later, jiffies) && cnt) {
4681 schedule_timeout_uninterruptible(msecs_to_jiffies(20));
bbb9d180 4682 cnt = lpfc_sli_sum_iocb(vport, tgt_id, lun_id, context);
dea3101e 4683 }
dea3101e 4684 if (cnt) {
e8b62011 4685 lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
bbb9d180
JS
4686 "0724 I/O flush failure for context %s : cnt x%x\n",
4687 ((context == LPFC_CTX_LUN) ? "LUN" :
4688 ((context == LPFC_CTX_TGT) ? "TGT" :
4689 ((context == LPFC_CTX_HOST) ? "HOST" : "Unknown"))),
4690 cnt);
4691 return FAILED;
dea3101e 4692 }
bbb9d180
JS
4693 return SUCCESS;
4694}
4695
4696/**
4697 * lpfc_device_reset_handler - scsi_host_template eh_device_reset entry point
4698 * @cmnd: Pointer to scsi_cmnd data structure.
4699 *
4700 * This routine does a device reset by sending a LUN_RESET task management
4701 * command.
4702 *
4703 * Return code :
4704 * 0x2003 - Error
4705 * 0x2002 - Success
4706 **/
4707static int
4708lpfc_device_reset_handler(struct scsi_cmnd *cmnd)
4709{
4710 struct Scsi_Host *shost = cmnd->device->host;
4711 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
4712 struct lpfc_rport_data *rdata = cmnd->device->hostdata;
1c6f4ef5 4713 struct lpfc_nodelist *pnode;
bbb9d180
JS
4714 unsigned tgt_id = cmnd->device->id;
4715 unsigned int lun_id = cmnd->device->lun;
4716 struct lpfc_scsi_event_header scsi_event;
4717 int status;
4718
1c6f4ef5
JS
4719 if (!rdata) {
4720 lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
4721 "0798 Device Reset rport failure: rdata x%p\n", rdata);
4722 return FAILED;
4723 }
4724 pnode = rdata->pnode;
589a52d6
JS
4725 status = fc_block_scsi_eh(cmnd);
4726 if (status)
4727 return status;
bbb9d180
JS
4728
4729 status = lpfc_chk_tgt_mapped(vport, cmnd);
4730 if (status == FAILED) {
4731 lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
4732 "0721 Device Reset rport failure: rdata x%p\n", rdata);
4733 return FAILED;
4734 }
4735
4736 scsi_event.event_type = FC_REG_SCSI_EVENT;
4737 scsi_event.subcategory = LPFC_EVENT_LUNRESET;
4738 scsi_event.lun = lun_id;
4739 memcpy(scsi_event.wwpn, &pnode->nlp_portname, sizeof(struct lpfc_name));
4740 memcpy(scsi_event.wwnn, &pnode->nlp_nodename, sizeof(struct lpfc_name));
4741
4742 fc_host_post_vendor_event(shost, fc_get_event_number(),
4743 sizeof(scsi_event), (char *)&scsi_event, LPFC_NL_VENDOR_ID);
4744
4745 status = lpfc_send_taskmgmt(vport, rdata, tgt_id, lun_id,
4746 FCP_LUN_RESET);
4747
4748 lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
4749 "0713 SCSI layer issued Device Reset (%d, %d) "
4750 "return x%x\n", tgt_id, lun_id, status);
4751
4752 /*
4753 * We have to clean up the i/o: it may have been orphaned by the TMF,
4754 * or if the TMF failed, it may be in an indeterminate state.
4755 * So, continue on.
4756 * We will report success only if all the i/o aborts successfully.
4757 */
4758 status = lpfc_reset_flush_io_context(vport, tgt_id, lun_id,
4759 LPFC_CTX_LUN);
4760 return status;
4761}
4762
4763/**
4764 * lpfc_target_reset_handler - scsi_host_template eh_target_reset entry point
4765 * @cmnd: Pointer to scsi_cmnd data structure.
4766 *
4767 * This routine does a target reset by sending a TARGET_RESET task management
4768 * command.
4769 *
4770 * Return code :
4771 * 0x2003 - Error
4772 * 0x2002 - Success
4773 **/
4774static int
4775lpfc_target_reset_handler(struct scsi_cmnd *cmnd)
4776{
4777 struct Scsi_Host *shost = cmnd->device->host;
4778 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
4779 struct lpfc_rport_data *rdata = cmnd->device->hostdata;
1c6f4ef5 4780 struct lpfc_nodelist *pnode;
bbb9d180
JS
4781 unsigned tgt_id = cmnd->device->id;
4782 unsigned int lun_id = cmnd->device->lun;
4783 struct lpfc_scsi_event_header scsi_event;
4784 int status;
4785
1c6f4ef5
JS
4786 if (!rdata) {
4787 lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
4788 "0799 Target Reset rport failure: rdata x%p\n", rdata);
4789 return FAILED;
4790 }
4791 pnode = rdata->pnode;
589a52d6
JS
4792 status = fc_block_scsi_eh(cmnd);
4793 if (status)
4794 return status;
bbb9d180
JS
4795
4796 status = lpfc_chk_tgt_mapped(vport, cmnd);
4797 if (status == FAILED) {
4798 lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
4799 "0722 Target Reset rport failure: rdata x%p\n", rdata);
4800 return FAILED;
4801 }
4802
4803 scsi_event.event_type = FC_REG_SCSI_EVENT;
4804 scsi_event.subcategory = LPFC_EVENT_TGTRESET;
4805 scsi_event.lun = 0;
	memcpy(scsi_event.wwpn, &pnode->nlp_portname, sizeof(struct lpfc_name));
	memcpy(scsi_event.wwnn, &pnode->nlp_nodename, sizeof(struct lpfc_name));

	fc_host_post_vendor_event(shost, fc_get_event_number(),
		sizeof(scsi_event), (char *)&scsi_event, LPFC_NL_VENDOR_ID);

	status = lpfc_send_taskmgmt(vport, rdata, tgt_id, lun_id,
					FCP_TARGET_RESET);

	lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
			 "0723 SCSI layer issued Target Reset (%d, %d) "
			 "return x%x\n", tgt_id, lun_id, status);

	/*
	 * We have to clean up any i/o in flight: it may have been
	 * orphaned by the TMF, or left in an indeterminate state if
	 * the TMF failed.  So, continue on regardless, and report
	 * success only if all the i/o aborts cleanly.
	 */
	status = lpfc_reset_flush_io_context(vport, tgt_id, lun_id,
						LPFC_CTX_TGT);
	return status;
}

/**
 * lpfc_bus_reset_handler - scsi_host_template eh_bus_reset_handler entry point
 * @cmnd: Pointer to scsi_cmnd data structure.
 *
 * This routine does a target reset on every target attached to
 * @cmnd->device->host, emulating Parallel SCSI bus-reset semantics.
 *
 * Return code :
 *  0x2003 - Error
 *  0x2002 - Success
 **/
static int
lpfc_bus_reset_handler(struct scsi_cmnd *cmnd)
{
	struct Scsi_Host *shost = cmnd->device->host;
	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
	struct lpfc_nodelist *ndlp = NULL;
	struct lpfc_scsi_event_header scsi_event;
	int match;
	int ret = SUCCESS, status, i;

	scsi_event.event_type = FC_REG_SCSI_EVENT;
	scsi_event.subcategory = LPFC_EVENT_BUSRESET;
	scsi_event.lun = 0;
	memcpy(scsi_event.wwpn, &vport->fc_portname, sizeof(struct lpfc_name));
	memcpy(scsi_event.wwnn, &vport->fc_nodename, sizeof(struct lpfc_name));

	fc_host_post_vendor_event(shost, fc_get_event_number(),
		sizeof(scsi_event), (char *)&scsi_event, LPFC_NL_VENDOR_ID);

	status = fc_block_scsi_eh(cmnd);
	if (status)
		return status;

	/*
	 * Since the driver manages a single bus device, reset all
	 * targets known to the driver.  Should any target reset
	 * fail, this routine returns failure to the midlayer.
	 */
	for (i = 0; i < LPFC_MAX_TARGET; i++) {
		/* Search for mapped node by target ID */
		match = 0;
		spin_lock_irq(shost->host_lock);
		list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) {
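			/* Skip nodes that are no longer active */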
			if (!NLP_CHK_NODE_ACT(ndlp))
				continue;
			if (ndlp->nlp_state == NLP_STE_MAPPED_NODE &&
			    ndlp->nlp_sid == i &&
			    ndlp->rport) {
				match = 1;
				break;
			}
		}
		spin_unlock_irq(shost->host_lock);
		if (!match)
			continue;
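
		/* lun 0 is passed here, but FCP_TARGET_RESET acts on
		 * every LUN the target exposes.
		 */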
		status = lpfc_send_taskmgmt(vport, ndlp->rport->dd_data,
					i, 0, FCP_TARGET_RESET);

		if (status != SUCCESS) {
			lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
					 "0700 Bus Reset on target %d failed\n",
					 i);
			ret = FAILED;
		}
	}
	/*
	 * We have to clean up any i/o in flight: it may have been
	 * orphaned by the TMFs above, or left in an indeterminate
	 * state if any of them failed.  Report success only if all
	 * the i/o aborts cleanly.
	 */
	status = lpfc_reset_flush_io_context(vport, 0, 0, LPFC_CTX_HOST);
	if (status != SUCCESS)
		ret = FAILED;

	lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
			 "0714 SCSI layer issued Bus Reset Data: x%x\n", ret);
	return ret;
}

/**
 * lpfc_slave_alloc - scsi_host_template slave_alloc entry point
 * @sdev: Pointer to scsi_device.
 *
 * This routine adds lun_queue_depth + 2 scsi buffers to the host's globally
 * available list of scsi buffers, while ensuring that no more buffers are
 * allocated than the HBA limit conveyed to the midlayer.  This list of scsi
 * buffers exists for the lifetime of the driver.
 *
 * Return codes:
 *   non-0 - Error
 *   0 - Success
 **/
static int
lpfc_slave_alloc(struct scsi_device *sdev)
{
	struct lpfc_vport *vport = (struct lpfc_vport *) sdev->host->hostdata;
	struct lpfc_hba *phba = vport->phba;
	struct fc_rport *rport = starget_to_rport(scsi_target(sdev));
	uint32_t total = 0;
	uint32_t num_to_alloc = 0;
	int num_allocated = 0;
	uint32_t sdev_cnt;

	if (!rport || fc_remote_port_chkready(rport))
		return -ENXIO;

	sdev->hostdata = rport->dd_data;
	sdev_cnt = atomic_inc_return(&phba->sdev_cnt);

	/*
	 * Populate the cmds_per_lun count scsi_bufs into this host's globally
	 * available list of scsi buffers.  Don't allocate more than the
	 * HBA limit conveyed to the midlayer via the host structure.  The
	 * formula accounts for the lun_queue_depth + error handlers + 1
	 * extra.  This list of scsi bufs exists for the lifetime of the driver.
	 */
	total = phba->total_scsi_bufs;
	num_to_alloc = vport->cfg_lun_queue_depth + 2;
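	/* e.g. with the driver's default lun_queue_depth of 30, each
	 * new scsi device asks for 32 buffers (30 + eh + 1 extra).
	 */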

	/* If enough buffers are already allocated, do nothing */
	if ((sdev_cnt * (vport->cfg_lun_queue_depth + 2)) < total)
		return 0;

	/* Always keep some exchanges free so discovery can complete */
	if (total >= phba->cfg_hba_queue_depth - LPFC_DISC_IOCB_BUFF_COUNT) {
		lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP,
				 "0704 At limitation of %d preallocated "
				 "command buffers\n", total);
		return 0;
	} else if (total + num_to_alloc >
		phba->cfg_hba_queue_depth - LPFC_DISC_IOCB_BUFF_COUNT) {
		lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP,
				 "0705 Allocation request of %d "
				 "command buffers will exceed max of %d. "
				 "Reducing allocation request to %d.\n",
				 num_to_alloc, phba->cfg_hba_queue_depth,
				 (phba->cfg_hba_queue_depth - total));
		num_to_alloc = phba->cfg_hba_queue_depth - total;
	}
	num_allocated = lpfc_new_scsi_buf(vport, num_to_alloc);
	if (num_to_alloc != num_allocated) {
		lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP,
				 "0708 Allocation request of %d "
				 "command buffers did not succeed.  "
				 "Allocated %d buffers.\n",
				 num_to_alloc, num_allocated);
	}
	if (num_allocated > 0)
		phba->total_scsi_bufs += num_allocated;
	return 0;
}

/**
 * lpfc_slave_configure - scsi_host_template slave_configure entry point
 * @sdev: Pointer to scsi_device.
 *
 * This routine configures the following items:
 * - Tag command queuing support for @sdev, if supported.
 * - SLI polling of the fcp ring, if the ENABLE_FCP_RING_POLLING flag is set.
 *
 * Return codes:
 *   0 - Success
 **/
static int
lpfc_slave_configure(struct scsi_device *sdev)
{
	struct lpfc_vport *vport = (struct lpfc_vport *) sdev->host->hostdata;
	struct lpfc_hba *phba = vport->phba;

	if (sdev->tagged_supported)
		scsi_activate_tcq(sdev, vport->cfg_lun_queue_depth);
	else
		scsi_deactivate_tcq(sdev, vport->cfg_lun_queue_depth);
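	/* Either way the queue depth ends up at cfg_lun_queue_depth;
	 * tagging only determines whether TCQ is used to get there.
	 */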
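
	/* In polled mode, service any pending FCP ring completions now
	 * and, if ring interrupts are disabled, re-arm the poll timer.
	 */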
	if (phba->cfg_poll & ENABLE_FCP_RING_POLLING) {
		lpfc_sli_handle_fast_ring_event(phba,
			&phba->sli.ring[LPFC_FCP_RING], HA_R0RE_REQ);
		if (phba->cfg_poll & DISABLE_FCP_RING_INT)
			lpfc_poll_rearm_timer(phba);
	}

	return 0;
}

/**
 * lpfc_slave_destroy - slave_destroy entry point of SHT data structure
 * @sdev: Pointer to scsi_device.
 *
 * This routine sets the @sdev hostdata field to null.
 **/
static void
lpfc_slave_destroy(struct scsi_device *sdev)
{
	struct lpfc_vport *vport = (struct lpfc_vport *) sdev->host->hostdata;
	struct lpfc_hba *phba = vport->phba;
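	/* Balance the atomic_inc_return() done in lpfc_slave_alloc() */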
	atomic_dec(&phba->sdev_cnt);
	sdev->hostdata = NULL;
	return;
}

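/*
 * Host templates for physical and NPIV ports.  These are handed to the
 * midlayer when a port's Scsi_Host is allocated; roughly (the actual
 * call lives in lpfc_create_port() in lpfc_init.c):
 *
 *	shost = scsi_host_alloc(&lpfc_template, sizeof(struct lpfc_vport));
 */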
5036struct scsi_host_template lpfc_template = {
5037 .module = THIS_MODULE,
5038 .name = LPFC_DRIVER_NAME,
5039 .info = lpfc_info,
5040 .queuecommand = lpfc_queuecommand,
5041 .eh_abort_handler = lpfc_abort_handler,
bbb9d180
JS
5042 .eh_device_reset_handler = lpfc_device_reset_handler,
5043 .eh_target_reset_handler = lpfc_target_reset_handler,
7054a606 5044 .eh_bus_reset_handler = lpfc_bus_reset_handler,
dea3101e
JB
5045 .slave_alloc = lpfc_slave_alloc,
5046 .slave_configure = lpfc_slave_configure,
5047 .slave_destroy = lpfc_slave_destroy,
47a8617c 5048 .scan_finished = lpfc_scan_finished,
dea3101e 5049 .this_id = -1,
83108bd3 5050 .sg_tablesize = LPFC_DEFAULT_SG_SEG_CNT,
dea3101e
JB
5051 .cmd_per_lun = LPFC_CMD_PER_LUN,
5052 .use_clustering = ENABLE_CLUSTERING,
2e0fef85 5053 .shost_attrs = lpfc_hba_attrs,
564b2960 5054 .max_sectors = 0xFFFF,
f1c3b0fc 5055 .vendor_id = LPFC_NL_VENDOR_ID,
5ffc266e 5056 .change_queue_depth = lpfc_change_queue_depth,
dea3101e 5057};
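
/*
 * The vport template differs only in its sysfs attribute set
 * (lpfc_vport_attrs) and in not exporting a netlink vendor_id.
 */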
struct scsi_host_template lpfc_vport_template = {
	.module			= THIS_MODULE,
	.name			= LPFC_DRIVER_NAME,
	.info			= lpfc_info,
	.queuecommand		= lpfc_queuecommand,
	.eh_abort_handler	= lpfc_abort_handler,
	.eh_device_reset_handler = lpfc_device_reset_handler,
	.eh_target_reset_handler = lpfc_target_reset_handler,
	.eh_bus_reset_handler	= lpfc_bus_reset_handler,
	.slave_alloc		= lpfc_slave_alloc,
	.slave_configure	= lpfc_slave_configure,
	.slave_destroy		= lpfc_slave_destroy,
	.scan_finished		= lpfc_scan_finished,
	.this_id		= -1,
	.sg_tablesize		= LPFC_DEFAULT_SG_SEG_CNT,
	.cmd_per_lun		= LPFC_CMD_PER_LUN,
	.use_clustering		= ENABLE_CLUSTERING,
	.shost_attrs		= lpfc_vport_attrs,
	.max_sectors		= 0xFFFF,
	.change_queue_depth	= lpfc_change_queue_depth,
};