git.proxmox.com Git - mirror_ubuntu-artful-kernel.git/blob - drivers/scsi/lpfc/lpfc_scsi.c
[SCSI] lpfc 8.3.1 : Fix up kernel-doc function comments
1 /*******************************************************************
2 * This file is part of the Emulex Linux Device Driver for *
3 * Fibre Channel Host Bus Adapters. *
4 * Copyright (C) 2004-2008 Emulex. All rights reserved. *
5 * EMULEX and SLI are trademarks of Emulex. *
6 * www.emulex.com *
7 * Portions Copyright (C) 2004-2005 Christoph Hellwig *
8 * *
9 * This program is free software; you can redistribute it and/or *
10 * modify it under the terms of version 2 of the GNU General *
11 * Public License as published by the Free Software Foundation. *
12 * This program is distributed in the hope that it will be useful. *
13 * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND *
14 * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, *
15 * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE *
16 * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
17 * TO BE LEGALLY INVALID. See the GNU General Public License for *
18 * more details, a copy of which can be found in the file COPYING *
19 * included with this package. *
20 *******************************************************************/
21 #include <linux/pci.h>
22 #include <linux/interrupt.h>
23 #include <linux/delay.h>
24 #include <asm/unaligned.h>
25
26 #include <scsi/scsi.h>
27 #include <scsi/scsi_device.h>
28 #include <scsi/scsi_eh.h>
29 #include <scsi/scsi_host.h>
30 #include <scsi/scsi_tcq.h>
31 #include <scsi/scsi_transport_fc.h>
32
33 #include "lpfc_version.h"
34 #include "lpfc_hw.h"
35 #include "lpfc_sli.h"
36 #include "lpfc_nl.h"
37 #include "lpfc_disc.h"
38 #include "lpfc_scsi.h"
39 #include "lpfc.h"
40 #include "lpfc_logmsg.h"
41 #include "lpfc_crtn.h"
42 #include "lpfc_vport.h"
43
44 #define LPFC_RESET_WAIT 2
45 #define LPFC_ABORT_WAIT 2
46
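/* Set once the data/DIF payload of the first BlockGuard error has been
 * saved to the debugfs dump buffers by lpfc_parse_bg_err(). */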
47 int _dump_buf_done;
48
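/* Printable names for the SCSI protection operations, in the same order as
 * the SCSI_PROT_* values returned by scsi_get_prot_op(). */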
49 static char *dif_op_str[] = {
50 "SCSI_PROT_NORMAL",
51 "SCSI_PROT_READ_INSERT",
52 "SCSI_PROT_WRITE_STRIP",
53 "SCSI_PROT_READ_STRIP",
54 "SCSI_PROT_WRITE_INSERT",
55 "SCSI_PROT_READ_PASS",
56 "SCSI_PROT_WRITE_PASS",
57 "SCSI_PROT_READ_CONVERT",
58 "SCSI_PROT_WRITE_CONVERT"
59 };
60
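/**
 * lpfc_debug_save_data - Copy a command's data scatterlist to _dump_buf_data
 * @cmnd: SCSI command whose data segments are to be saved.
 *
 * Walks the data scatter-gather list of @cmnd and copies each segment into
 * the global _dump_buf_data buffer so the payload of a failing BlockGuard
 * command can later be dumped via debugfs.
 **/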
61 static void
62 lpfc_debug_save_data(struct scsi_cmnd *cmnd)
63 {
64 void *src, *dst;
65 struct scatterlist *sgde = scsi_sglist(cmnd);
66
67 if (!_dump_buf_data) {
68 printk(KERN_ERR "BLKGRD ERROR %s _dump_buf_data is NULL\n",
69 __func__);
70 return;
71 }
72
73
74 if (!sgde) {
75 printk(KERN_ERR "BLKGRD ERROR: data scatterlist is null\n");
76 return;
77 }
78
79 dst = (void *) _dump_buf_data;
80 while (sgde) {
81 src = sg_virt(sgde);
82 memcpy(dst, src, sgde->length);
83 dst += sgde->length;
84 sgde = sg_next(sgde);
85 }
86 }
87
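/**
 * lpfc_debug_save_dif - Copy a command's protection scatterlist to _dump_buf_dif
 * @cmnd: SCSI command whose DIF (protection) segments are to be saved.
 *
 * Walks the protection scatter-gather list of @cmnd and copies each segment
 * into the global _dump_buf_dif buffer so the DIF data of a failing
 * BlockGuard command can later be dumped via debugfs.
 **/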
88 static void
89 lpfc_debug_save_dif(struct scsi_cmnd *cmnd)
90 {
91 void *src, *dst;
92 struct scatterlist *sgde = scsi_prot_sglist(cmnd);
93
94 if (!_dump_buf_dif) {
95 printk(KERN_ERR "BLKGRD ERROR %s _dump_buf_dif is NULL\n",
96 __func__);
97 return;
98 }
99
100 if (!sgde) {
101 printk(KERN_ERR "BLKGRD ERROR: prot scatterlist is null\n");
102 return;
103 }
104
105 dst = _dump_buf_dif;
106 while (sgde) {
107 src = sg_virt(sgde);
108 memcpy(dst, src, sgde->length);
109 dst += sgde->length;
110 sgde = sg_next(sgde);
111 }
112 }
113
114 /**
115 * lpfc_update_stats - Update statistical data for the command completion
116 * @phba: Pointer to HBA object.
117 * @lpfc_cmd: lpfc scsi command object pointer.
118 *
119 * This function is called when a command completes and it updates the
120 * statistical data for that command completion.
121 **/
122 static void
123 lpfc_update_stats(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd)
124 {
125 struct lpfc_rport_data *rdata = lpfc_cmd->rdata;
126 struct lpfc_nodelist *pnode = rdata->pnode;
127 struct scsi_cmnd *cmd = lpfc_cmd->pCmd;
128 unsigned long flags;
129 struct Scsi_Host *shost = cmd->device->host;
130 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
131 unsigned long latency;
132 int i;
133
134 if (cmd->result)
135 return;
136
137 latency = jiffies_to_msecs((long)jiffies - (long)lpfc_cmd->start_time);
138
139 spin_lock_irqsave(shost->host_lock, flags);
140 if (!vport->stat_data_enabled ||
141 vport->stat_data_blocked ||
142 !pnode->lat_data ||
143 (phba->bucket_type == LPFC_NO_BUCKET)) {
144 spin_unlock_irqrestore(shost->host_lock, flags);
145 return;
146 }
147
148 if (phba->bucket_type == LPFC_LINEAR_BUCKET) {
149 i = (latency + phba->bucket_step - 1 - phba->bucket_base)/
150 phba->bucket_step;
151 /* check array subscript bounds */
152 if (i < 0)
153 i = 0;
154 else if (i >= LPFC_MAX_BUCKET_COUNT)
155 i = LPFC_MAX_BUCKET_COUNT - 1;
156 } else {
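/* exponential buckets: use the first bucket whose upper bound
 * (bucket_base + (1 << i) * bucket_step) covers the measured latency */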
157 for (i = 0; i < LPFC_MAX_BUCKET_COUNT-1; i++)
158 if (latency <= (phba->bucket_base +
159 ((1<<i)*phba->bucket_step)))
160 break;
161 }
162
163 pnode->lat_data[i].cmd_count++;
164 spin_unlock_irqrestore(shost->host_lock, flags);
165 }
166
167 /**
168 * lpfc_send_sdev_queuedepth_change_event - Posts a queuedepth change event
169 * @phba: Pointer to HBA context object.
170 * @vport: Pointer to vport object.
171 * @ndlp: Pointer to FC node associated with the target.
172 * @lun: Lun number of the scsi device.
173 * @old_val: Old value of the queue depth.
174 * @new_val: New value of the queue depth.
175 *
176 * This function sends an event to the mgmt application indicating
177 * there is a change in the scsi device queue depth.
178 **/
179 static void
180 lpfc_send_sdev_queuedepth_change_event(struct lpfc_hba *phba,
181 struct lpfc_vport *vport,
182 struct lpfc_nodelist *ndlp,
183 uint32_t lun,
184 uint32_t old_val,
185 uint32_t new_val)
186 {
187 struct lpfc_fast_path_event *fast_path_evt;
188 unsigned long flags;
189
190 fast_path_evt = lpfc_alloc_fast_evt(phba);
191 if (!fast_path_evt)
192 return;
193
194 fast_path_evt->un.queue_depth_evt.scsi_event.event_type =
195 FC_REG_SCSI_EVENT;
196 fast_path_evt->un.queue_depth_evt.scsi_event.subcategory =
197 LPFC_EVENT_VARQUEDEPTH;
198
199 /* Report all luns with change in queue depth */
200 fast_path_evt->un.queue_depth_evt.scsi_event.lun = lun;
201 if (ndlp && NLP_CHK_NODE_ACT(ndlp)) {
202 memcpy(&fast_path_evt->un.queue_depth_evt.scsi_event.wwpn,
203 &ndlp->nlp_portname, sizeof(struct lpfc_name));
204 memcpy(&fast_path_evt->un.queue_depth_evt.scsi_event.wwnn,
205 &ndlp->nlp_nodename, sizeof(struct lpfc_name));
206 }
207
208 fast_path_evt->un.queue_depth_evt.oldval = old_val;
209 fast_path_evt->un.queue_depth_evt.newval = new_val;
210 fast_path_evt->vport = vport;
211
212 fast_path_evt->work_evt.evt = LPFC_EVT_FASTPATH_MGMT_EVT;
213 spin_lock_irqsave(&phba->hbalock, flags);
214 list_add_tail(&fast_path_evt->work_evt.evt_listp, &phba->work_list);
215 spin_unlock_irqrestore(&phba->hbalock, flags);
216 lpfc_worker_wake_up(phba);
217
218 return;
219 }
220
221 /**
222 * lpfc_rampdown_queue_depth - Post RAMP_DOWN_QUEUE event to worker thread
223 * @phba: The Hba for which this call is being executed.
224 *
225 * This routine is called when there is a resource error in the driver or
226 * firmware. This routine posts a WORKER_RAMP_DOWN_QUEUE event for @phba and
227 * posts at most one event per second. This routine wakes up the worker
228 * thread of @phba to process the WORKER_RAMP_DOWN_QUEUE event.
229 *
230 * This routine should be called with no lock held.
231 **/
232 void
233 lpfc_rampdown_queue_depth(struct lpfc_hba *phba)
234 {
235 unsigned long flags;
236 uint32_t evt_posted;
237
238 spin_lock_irqsave(&phba->hbalock, flags);
239 atomic_inc(&phba->num_rsrc_err);
240 phba->last_rsrc_error_time = jiffies;
241
242 if ((phba->last_ramp_down_time + QUEUE_RAMP_DOWN_INTERVAL) > jiffies) {
243 spin_unlock_irqrestore(&phba->hbalock, flags);
244 return;
245 }
246
247 phba->last_ramp_down_time = jiffies;
248
249 spin_unlock_irqrestore(&phba->hbalock, flags);
250
251 spin_lock_irqsave(&phba->pport->work_port_lock, flags);
252 evt_posted = phba->pport->work_port_events & WORKER_RAMP_DOWN_QUEUE;
253 if (!evt_posted)
254 phba->pport->work_port_events |= WORKER_RAMP_DOWN_QUEUE;
255 spin_unlock_irqrestore(&phba->pport->work_port_lock, flags);
256
257 if (!evt_posted)
258 lpfc_worker_wake_up(phba);
259 return;
260 }
261
262 /**
263 * lpfc_rampup_queue_depth - Post RAMP_UP_QUEUE event for worker thread
264 * @vport: The virtual port for which this call is being executed.
265 * @sdev: The scsi device whose queue depth may be ramped up.
266 * This routine posts a WORKER_RAMP_UP_QUEUE event for @vport. This routine
267 * posts at most one event every 5 minutes after last_ramp_up_time or
268 * last_rsrc_error_time. This routine wakes up the worker thread of the HBA
269 * to process the WORKER_RAMP_UP_QUEUE event.
270 *
271 * This routine should be called with no lock held.
272 **/
273 static inline void
274 lpfc_rampup_queue_depth(struct lpfc_vport *vport,
275 struct scsi_device *sdev)
276 {
277 unsigned long flags;
278 struct lpfc_hba *phba = vport->phba;
279 uint32_t evt_posted;
280 atomic_inc(&phba->num_cmd_success);
281
282 if (vport->cfg_lun_queue_depth <= sdev->queue_depth)
283 return;
284 spin_lock_irqsave(&phba->hbalock, flags);
285 if (((phba->last_ramp_up_time + QUEUE_RAMP_UP_INTERVAL) > jiffies) ||
286 ((phba->last_rsrc_error_time + QUEUE_RAMP_UP_INTERVAL ) > jiffies)) {
287 spin_unlock_irqrestore(&phba->hbalock, flags);
288 return;
289 }
290 phba->last_ramp_up_time = jiffies;
291 spin_unlock_irqrestore(&phba->hbalock, flags);
292
293 spin_lock_irqsave(&phba->pport->work_port_lock, flags);
294 evt_posted = phba->pport->work_port_events & WORKER_RAMP_UP_QUEUE;
295 if (!evt_posted)
296 phba->pport->work_port_events |= WORKER_RAMP_UP_QUEUE;
297 spin_unlock_irqrestore(&phba->pport->work_port_lock, flags);
298
299 if (!evt_posted)
300 lpfc_worker_wake_up(phba);
301 return;
302 }
303
304 /**
305 * lpfc_ramp_down_queue_handler - WORKER_RAMP_DOWN_QUEUE event handler
306 * @phba: The Hba for which this call is being executed.
307 *
308 * This routine is called to process the WORKER_RAMP_DOWN_QUEUE event for the
309 * worker thread. This routine reduces the queue depth for all scsi devices on
310 * each vport associated with @phba.
311 **/
312 void
313 lpfc_ramp_down_queue_handler(struct lpfc_hba *phba)
314 {
315 struct lpfc_vport **vports;
316 struct Scsi_Host *shost;
317 struct scsi_device *sdev;
318 unsigned long new_queue_depth, old_queue_depth;
319 unsigned long num_rsrc_err, num_cmd_success;
320 int i;
321 struct lpfc_rport_data *rdata;
322
323 num_rsrc_err = atomic_read(&phba->num_rsrc_err);
324 num_cmd_success = atomic_read(&phba->num_cmd_success);
325
326 vports = lpfc_create_vport_work_array(phba);
327 if (vports != NULL)
328 for(i = 0; i <= phba->max_vpi && vports[i] != NULL; i++) {
329 shost = lpfc_shost_from_vport(vports[i]);
330 shost_for_each_device(sdev, shost) {
331 new_queue_depth =
332 sdev->queue_depth * num_rsrc_err /
333 (num_rsrc_err + num_cmd_success);
334 if (!new_queue_depth)
335 new_queue_depth = sdev->queue_depth - 1;
336 else
337 new_queue_depth = sdev->queue_depth -
338 new_queue_depth;
339 old_queue_depth = sdev->queue_depth;
340 if (sdev->ordered_tags)
341 scsi_adjust_queue_depth(sdev,
342 MSG_ORDERED_TAG,
343 new_queue_depth);
344 else
345 scsi_adjust_queue_depth(sdev,
346 MSG_SIMPLE_TAG,
347 new_queue_depth);
348 rdata = sdev->hostdata;
349 if (rdata)
350 lpfc_send_sdev_queuedepth_change_event(
351 phba, vports[i],
352 rdata->pnode,
353 sdev->lun, old_queue_depth,
354 new_queue_depth);
355 }
356 }
357 lpfc_destroy_vport_work_array(phba, vports);
358 atomic_set(&phba->num_rsrc_err, 0);
359 atomic_set(&phba->num_cmd_success, 0);
360 }
361
362 /**
363 * lpfc_ramp_up_queue_handler - WORKER_RAMP_UP_QUEUE event handler
364 * @phba: The Hba for which this call is being executed.
365 *
366 * This routine is called to process the WORKER_RAMP_UP_QUEUE event for the
367 * worker thread. This routine increases the queue depth for all scsi devices
368 * on each vport associated with @phba by 1. This routine also resets @phba
369 * num_rsrc_err and num_cmd_success to zero.
370 **/
371 void
372 lpfc_ramp_up_queue_handler(struct lpfc_hba *phba)
373 {
374 struct lpfc_vport **vports;
375 struct Scsi_Host *shost;
376 struct scsi_device *sdev;
377 int i;
378 struct lpfc_rport_data *rdata;
379
380 vports = lpfc_create_vport_work_array(phba);
381 if (vports != NULL)
382 for(i = 0; i <= phba->max_vpi && vports[i] != NULL; i++) {
383 shost = lpfc_shost_from_vport(vports[i]);
384 shost_for_each_device(sdev, shost) {
385 if (vports[i]->cfg_lun_queue_depth <=
386 sdev->queue_depth)
387 continue;
388 if (sdev->ordered_tags)
389 scsi_adjust_queue_depth(sdev,
390 MSG_ORDERED_TAG,
391 sdev->queue_depth+1);
392 else
393 scsi_adjust_queue_depth(sdev,
394 MSG_SIMPLE_TAG,
395 sdev->queue_depth+1);
396 rdata = sdev->hostdata;
397 if (rdata)
398 lpfc_send_sdev_queuedepth_change_event(
399 phba, vports[i],
400 rdata->pnode,
401 sdev->lun,
402 sdev->queue_depth - 1,
403 sdev->queue_depth);
404 }
405 }
406 lpfc_destroy_vport_work_array(phba, vports);
407 atomic_set(&phba->num_rsrc_err, 0);
408 atomic_set(&phba->num_cmd_success, 0);
409 }
410
411 /**
412 * lpfc_scsi_dev_block - set all scsi hosts to block state
413 * @phba: Pointer to HBA context object.
414 *
415 * This function walks the vport list and sets each SCSI host to block state
416 * by invoking the fc_remote_port_delete() routine. This function is invoked
417 * with EEH when the device's PCI slot has been permanently disabled.
418 **/
419 void
420 lpfc_scsi_dev_block(struct lpfc_hba *phba)
421 {
422 struct lpfc_vport **vports;
423 struct Scsi_Host *shost;
424 struct scsi_device *sdev;
425 struct fc_rport *rport;
426 int i;
427
428 vports = lpfc_create_vport_work_array(phba);
429 if (vports != NULL)
430 for (i = 0; i <= phba->max_vpi && vports[i] != NULL; i++) {
431 shost = lpfc_shost_from_vport(vports[i]);
432 shost_for_each_device(sdev, shost) {
433 rport = starget_to_rport(scsi_target(sdev));
434 fc_remote_port_delete(rport);
435 }
436 }
437 lpfc_destroy_vport_work_array(phba, vports);
438 }
439
440 /**
441 * lpfc_new_scsi_buf - Scsi buffer allocator
442 * @vport: The virtual port for which this call is being executed.
443 *
444 * This routine allocates a scsi buffer, which contains all the information
445 * needed to initiate a SCSI I/O. The non-DMAable buffer region
446 * contains information to build the IOCB. The DMAable region contains
447 * memory for the FCP CMND, FCP RSP, and the initial BPL. In addition to
448 * allocating memory, the FCP CMND and FCP RSP BDEs are setup in the BPL
449 * and the BPL BDE is setup in the IOCB.
450 *
451 * Return codes:
452 * NULL - Error
453 * Pointer to lpfc_scsi_buf data structure - Success
454 **/
455 static struct lpfc_scsi_buf *
456 lpfc_new_scsi_buf(struct lpfc_vport *vport)
457 {
458 struct lpfc_hba *phba = vport->phba;
459 struct lpfc_scsi_buf *psb;
460 struct ulp_bde64 *bpl;
461 IOCB_t *iocb;
462 dma_addr_t pdma_phys_fcp_cmd;
463 dma_addr_t pdma_phys_fcp_rsp;
464 dma_addr_t pdma_phys_bpl;
465 uint16_t iotag;
466
467 psb = kzalloc(sizeof(struct lpfc_scsi_buf), GFP_KERNEL);
468 if (!psb)
469 return NULL;
470
471 /*
472 * Get memory from the pci pool to map the virt space to pci bus space
473 * for an I/O. The DMA buffer includes space for the struct fcp_cmnd,
474 * struct fcp_rsp and the number of bde's necessary to support the
475 * sg_tablesize.
476 */
477 psb->data = pci_pool_alloc(phba->lpfc_scsi_dma_buf_pool, GFP_KERNEL,
478 &psb->dma_handle);
479 if (!psb->data) {
480 kfree(psb);
481 return NULL;
482 }
483
484 /* Initialize virtual ptrs to dma_buf region. */
485 memset(psb->data, 0, phba->cfg_sg_dma_buf_size);
486
487 /* Allocate iotag for psb->cur_iocbq. */
488 iotag = lpfc_sli_next_iotag(phba, &psb->cur_iocbq);
489 if (iotag == 0) {
490 pci_pool_free(phba->lpfc_scsi_dma_buf_pool,
491 psb->data, psb->dma_handle);
492 kfree (psb);
493 return NULL;
494 }
495 psb->cur_iocbq.iocb_flag |= LPFC_IO_FCP;
496
497 psb->fcp_cmnd = psb->data;
498 psb->fcp_rsp = psb->data + sizeof(struct fcp_cmnd);
499 psb->fcp_bpl = psb->data + sizeof(struct fcp_cmnd) +
500 sizeof(struct fcp_rsp);
501
502 /* Initialize local short-hand pointers. */
503 bpl = psb->fcp_bpl;
504 pdma_phys_fcp_cmd = psb->dma_handle;
505 pdma_phys_fcp_rsp = psb->dma_handle + sizeof(struct fcp_cmnd);
506 pdma_phys_bpl = psb->dma_handle + sizeof(struct fcp_cmnd) +
507 sizeof(struct fcp_rsp);
508
509 /*
510 * The first two bdes are the FCP_CMD and FCP_RSP. The balance are sg
511 * list bdes. Initialize the first two and leave the rest for
512 * queuecommand.
513 */
514 bpl[0].addrHigh = le32_to_cpu(putPaddrHigh(pdma_phys_fcp_cmd));
515 bpl[0].addrLow = le32_to_cpu(putPaddrLow(pdma_phys_fcp_cmd));
516 bpl[0].tus.f.bdeSize = sizeof(struct fcp_cmnd);
517 bpl[0].tus.f.bdeFlags = BUFF_TYPE_BDE_64;
518 bpl[0].tus.w = le32_to_cpu(bpl[0].tus.w);
519
520 /* Setup the physical region for the FCP RSP */
521 bpl[1].addrHigh = le32_to_cpu(putPaddrHigh(pdma_phys_fcp_rsp));
522 bpl[1].addrLow = le32_to_cpu(putPaddrLow(pdma_phys_fcp_rsp));
523 bpl[1].tus.f.bdeSize = sizeof(struct fcp_rsp);
524 bpl[1].tus.f.bdeFlags = BUFF_TYPE_BDE_64;
525 bpl[1].tus.w = le32_to_cpu(bpl[1].tus.w);
526
527 /*
528 * Since the IOCB for the FCP I/O is built into this lpfc_scsi_buf,
529 * initialize it with all known data now.
530 */
531 iocb = &psb->cur_iocbq.iocb;
532 iocb->un.fcpi64.bdl.ulpIoTag32 = 0;
533 if ((phba->sli_rev == 3) &&
534 !(phba->sli3_options & LPFC_SLI3_BG_ENABLED)) {
535 /* fill in immediate fcp command BDE */
536 iocb->un.fcpi64.bdl.bdeFlags = BUFF_TYPE_BDE_IMMED;
537 iocb->un.fcpi64.bdl.bdeSize = sizeof(struct fcp_cmnd);
538 iocb->un.fcpi64.bdl.addrLow = offsetof(IOCB_t,
539 unsli3.fcp_ext.icd);
540 iocb->un.fcpi64.bdl.addrHigh = 0;
541 iocb->ulpBdeCount = 0;
542 iocb->ulpLe = 0;
543 /* fill in response BDE */
544 iocb->unsli3.fcp_ext.rbde.tus.f.bdeFlags = BUFF_TYPE_BDE_64;
545 iocb->unsli3.fcp_ext.rbde.tus.f.bdeSize =
546 sizeof(struct fcp_rsp);
547 iocb->unsli3.fcp_ext.rbde.addrLow =
548 putPaddrLow(pdma_phys_fcp_rsp);
549 iocb->unsli3.fcp_ext.rbde.addrHigh =
550 putPaddrHigh(pdma_phys_fcp_rsp);
551 } else {
552 iocb->un.fcpi64.bdl.bdeFlags = BUFF_TYPE_BLP_64;
553 iocb->un.fcpi64.bdl.bdeSize = (2 * sizeof(struct ulp_bde64));
554 iocb->un.fcpi64.bdl.addrLow = putPaddrLow(pdma_phys_bpl);
555 iocb->un.fcpi64.bdl.addrHigh = putPaddrHigh(pdma_phys_bpl);
556 iocb->ulpBdeCount = 1;
557 iocb->ulpLe = 1;
558 }
559 iocb->ulpClass = CLASS3;
560
561 return psb;
562 }
563
564 /**
565 * lpfc_get_scsi_buf - Get a scsi buffer from lpfc_scsi_buf_list list of Hba
566 * @phba: The Hba for which this call is being executed.
567 *
568 * This routine removes a scsi buffer from head of @phba lpfc_scsi_buf_list list
569 * and returns it to the caller.
570 *
571 * Return codes:
572 * NULL - Error
573 * Pointer to lpfc_scsi_buf - Success
574 **/
575 static struct lpfc_scsi_buf*
576 lpfc_get_scsi_buf(struct lpfc_hba * phba)
577 {
578 struct lpfc_scsi_buf * lpfc_cmd = NULL;
579 struct list_head *scsi_buf_list = &phba->lpfc_scsi_buf_list;
580 unsigned long iflag = 0;
581
582 spin_lock_irqsave(&phba->scsi_buf_list_lock, iflag);
583 list_remove_head(scsi_buf_list, lpfc_cmd, struct lpfc_scsi_buf, list);
584 if (lpfc_cmd) {
585 lpfc_cmd->seg_cnt = 0;
586 lpfc_cmd->nonsg_phys = 0;
587 lpfc_cmd->prot_seg_cnt = 0;
588 }
589 spin_unlock_irqrestore(&phba->scsi_buf_list_lock, iflag);
590 return lpfc_cmd;
591 }
592
593 /**
594 * lpfc_release_scsi_buf - Return a scsi buffer back to hba's lpfc_scsi_buf_list
595 * @phba: The Hba for which this call is being executed.
596 * @psb: The scsi buffer which is being released.
597 *
598 * This routine releases @psb scsi buffer by adding it to tail of @phba
599 * lpfc_scsi_buf_list list.
600 **/
601 static void
602 lpfc_release_scsi_buf(struct lpfc_hba *phba, struct lpfc_scsi_buf *psb)
603 {
604 unsigned long iflag = 0;
605
606 spin_lock_irqsave(&phba->scsi_buf_list_lock, iflag);
607 psb->pCmd = NULL;
608 list_add_tail(&psb->list, &phba->lpfc_scsi_buf_list);
609 spin_unlock_irqrestore(&phba->scsi_buf_list_lock, iflag);
610 }
611
612 /**
613 * lpfc_scsi_prep_dma_buf - Routine to do DMA mapping for scsi buffer
614 * @phba: The Hba for which this call is being executed.
615 * @lpfc_cmd: The scsi buffer which is going to be mapped.
616 *
617 * This routine does the pci dma mapping for scatter-gather list of scsi cmnd
618 * field of @lpfc_cmd. This routine scans through the sg elements and formats
619 * the bde's. This routine also initializes all IOCB fields which are dependent
620 * on the scsi command request buffer.
621 *
622 * Return codes:
623 * 1 - Error
624 * 0 - Success
625 **/
626 static int
627 lpfc_scsi_prep_dma_buf(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd)
628 {
629 struct scsi_cmnd *scsi_cmnd = lpfc_cmd->pCmd;
630 struct scatterlist *sgel = NULL;
631 struct fcp_cmnd *fcp_cmnd = lpfc_cmd->fcp_cmnd;
632 struct ulp_bde64 *bpl = lpfc_cmd->fcp_bpl;
633 IOCB_t *iocb_cmd = &lpfc_cmd->cur_iocbq.iocb;
634 struct ulp_bde64 *data_bde = iocb_cmd->unsli3.fcp_ext.dbde;
635 dma_addr_t physaddr;
636 uint32_t num_bde = 0;
637 int nseg, datadir = scsi_cmnd->sc_data_direction;
638
639 /*
640 * There are three possibilities here - use scatter-gather segment, use
641 * the single mapping, or neither. Start the lpfc command prep by
642 * bumping the bpl beyond the fcp_cmnd and fcp_rsp regions to the first
643 * data bde entry.
644 */
645 bpl += 2;
646 if (scsi_sg_count(scsi_cmnd)) {
647 /*
648 * The driver stores the segment count returned from dma_map_sg
649 * because this is a count of dma-mappings used to map the use_sg
650 * pages. They are not guaranteed to be the same for those
651 * architectures that implement an IOMMU.
652 */
653
654 nseg = dma_map_sg(&phba->pcidev->dev, scsi_sglist(scsi_cmnd),
655 scsi_sg_count(scsi_cmnd), datadir);
656 if (unlikely(!nseg))
657 return 1;
658
659 lpfc_cmd->seg_cnt = nseg;
660 if (lpfc_cmd->seg_cnt > phba->cfg_sg_seg_cnt) {
661 printk(KERN_ERR "%s: Too many sg segments from "
662 "dma_map_sg. Config %d, seg_cnt %d\n",
663 __func__, phba->cfg_sg_seg_cnt,
664 lpfc_cmd->seg_cnt);
665 scsi_dma_unmap(scsi_cmnd);
666 return 1;
667 }
668
669 /*
670 * The driver established a maximum scatter-gather segment count
671 * during probe that limits the number of sg elements in any
672 * single scsi command. Just run through the seg_cnt and format
673 * the bde's.
674 * When using SLI-3 the driver will try to fit all the BDEs into
675 * the IOCB. If it can't then the BDEs get added to a BPL as it
676 * does for SLI-2 mode.
677 */
678 scsi_for_each_sg(scsi_cmnd, sgel, nseg, num_bde) {
679 physaddr = sg_dma_address(sgel);
680 if (phba->sli_rev == 3 &&
681 !(phba->sli3_options & LPFC_SLI3_BG_ENABLED) &&
682 nseg <= LPFC_EXT_DATA_BDE_COUNT) {
683 data_bde->tus.f.bdeFlags = BUFF_TYPE_BDE_64;
684 data_bde->tus.f.bdeSize = sg_dma_len(sgel);
685 data_bde->addrLow = putPaddrLow(physaddr);
686 data_bde->addrHigh = putPaddrHigh(physaddr);
687 data_bde++;
688 } else {
689 bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64;
690 bpl->tus.f.bdeSize = sg_dma_len(sgel);
691 bpl->tus.w = le32_to_cpu(bpl->tus.w);
692 bpl->addrLow =
693 le32_to_cpu(putPaddrLow(physaddr));
694 bpl->addrHigh =
695 le32_to_cpu(putPaddrHigh(physaddr));
696 bpl++;
697 }
698 }
699 }
700
701 /*
702 * Finish initializing those IOCB fields that are dependent on the
703 * scsi_cmnd request_buffer. Note that for SLI-2 the bdeSize is
704 * explicitly reinitialized and for SLI-3 the extended bde count is
705 * explicitly reinitialized since all iocb memory resources are reused.
706 */
707 if (phba->sli_rev == 3 &&
708 !(phba->sli3_options & LPFC_SLI3_BG_ENABLED)) {
709 if (num_bde > LPFC_EXT_DATA_BDE_COUNT) {
710 /*
711 * The extended IOCB format can only fit 3 BDE or a BPL.
712 * This I/O has more than 3 BDE so the 1st data bde will
713 * be a BPL that is filled in here.
714 */
715 physaddr = lpfc_cmd->dma_handle;
716 data_bde->tus.f.bdeFlags = BUFF_TYPE_BLP_64;
717 data_bde->tus.f.bdeSize = (num_bde *
718 sizeof(struct ulp_bde64));
719 physaddr += (sizeof(struct fcp_cmnd) +
720 sizeof(struct fcp_rsp) +
721 (2 * sizeof(struct ulp_bde64)));
722 data_bde->addrHigh = putPaddrHigh(physaddr);
723 data_bde->addrLow = putPaddrLow(physaddr);
724 /* ebde count includes the response bde and data bpl */
725 iocb_cmd->unsli3.fcp_ext.ebde_count = 2;
726 } else {
727 /* ebde count includes the response bde and data bdes */
728 iocb_cmd->unsli3.fcp_ext.ebde_count = (num_bde + 1);
729 }
730 } else {
731 iocb_cmd->un.fcpi64.bdl.bdeSize =
732 ((num_bde + 2) * sizeof(struct ulp_bde64));
733 }
734 fcp_cmnd->fcpDl = cpu_to_be32(scsi_bufflen(scsi_cmnd));
735
736 /*
737 * Due to difference in data length between DIF/non-DIF paths,
738 * we need to set word 4 of IOCB here
739 */
740 iocb_cmd->un.fcpi.fcpi_parm = le32_to_cpu(scsi_bufflen(scsi_cmnd));
741 return 0;
742 }
743
744 /*
745 * Given a scsi cmnd, determine the BlockGuard profile to be used
746 * with the cmd
747 */
748 static int
749 lpfc_sc_to_sli_prof(struct scsi_cmnd *sc)
750 {
751 uint8_t guard_type = scsi_host_get_guard(sc->device->host);
752 uint8_t ret_prof = LPFC_PROF_INVALID;
753
754 if (guard_type == SHOST_DIX_GUARD_IP) {
755 switch (scsi_get_prot_op(sc)) {
756 case SCSI_PROT_READ_INSERT:
757 case SCSI_PROT_WRITE_STRIP:
758 ret_prof = LPFC_PROF_AST2;
759 break;
760
761 case SCSI_PROT_READ_STRIP:
762 case SCSI_PROT_WRITE_INSERT:
763 ret_prof = LPFC_PROF_A1;
764 break;
765
766 case SCSI_PROT_READ_CONVERT:
767 case SCSI_PROT_WRITE_CONVERT:
768 ret_prof = LPFC_PROF_AST1;
769 break;
770
771 case SCSI_PROT_READ_PASS:
772 case SCSI_PROT_WRITE_PASS:
773 case SCSI_PROT_NORMAL:
774 default:
775 printk(KERN_ERR "Bad op/guard:%d/%d combination\n",
776 scsi_get_prot_op(sc), guard_type);
777 break;
778
779 }
780 } else if (guard_type == SHOST_DIX_GUARD_CRC) {
781 switch (scsi_get_prot_op(sc)) {
782 case SCSI_PROT_READ_STRIP:
783 case SCSI_PROT_WRITE_INSERT:
784 ret_prof = LPFC_PROF_A1;
785 break;
786
787 case SCSI_PROT_READ_PASS:
788 case SCSI_PROT_WRITE_PASS:
789 ret_prof = LPFC_PROF_C1;
790 break;
791
792 case SCSI_PROT_READ_CONVERT:
793 case SCSI_PROT_WRITE_CONVERT:
794 case SCSI_PROT_READ_INSERT:
795 case SCSI_PROT_WRITE_STRIP:
796 case SCSI_PROT_NORMAL:
797 default:
798 printk(KERN_ERR "Bad op/guard:%d/%d combination\n",
799 scsi_get_prot_op(sc), guard_type);
800 break;
801 }
802 } else {
803 /* unsupported format */
804 BUG();
805 }
806
807 return ret_prof;
808 }
809
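/*
 * Layout of one 8-byte T10 DIF tuple as it appears in the protection
 * scatterlist: guard (checksum), application tag, and reference tag.
 */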
810 struct scsi_dif_tuple {
811 __be16 guard_tag; /* Checksum */
812 __be16 app_tag; /* Opaque storage */
813 __be32 ref_tag; /* Target LBA or indirect LBA */
814 };
815
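/**
 * lpfc_cmd_blksize - Get the logical block size for a SCSI command's device
 * @sc: SCSI command being processed.
 *
 * Returns the sector size, in bytes, of the device @sc is addressed to;
 * used as the data block size when building BlockGuard/DIF parameters.
 **/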
816 static inline unsigned
817 lpfc_cmd_blksize(struct scsi_cmnd *sc)
818 {
819 return sc->device->sector_size;
820 }
821
822 /**
823 * lpfc_get_cmd_dif_parms - Extract DIF parameters from SCSI command
824 * @sc: in: SCSI command
825 * @apptagmask: out: app tag mask
826 * @apptagval: out: app tag value
827 * @reftag: out: ref tag (reference tag)
828 *
829 * Description:
830 * Extract DIF parameters from the command if possible. Otherwise,
831 * use default parameters.
832 *
833 **/
834 static inline void
835 lpfc_get_cmd_dif_parms(struct scsi_cmnd *sc, uint16_t *apptagmask,
836 uint16_t *apptagval, uint32_t *reftag)
837 {
838 struct scsi_dif_tuple *spt;
839 unsigned char op = scsi_get_prot_op(sc);
840 unsigned int protcnt = scsi_prot_sg_count(sc);
841 static int cnt;
842
843 if (protcnt && (op == SCSI_PROT_WRITE_STRIP ||
844 op == SCSI_PROT_WRITE_PASS ||
845 op == SCSI_PROT_WRITE_CONVERT)) {
846
847 cnt++;
848 spt = page_address(sg_page(scsi_prot_sglist(sc))) +
849 scsi_prot_sglist(sc)[0].offset;
850 *apptagmask = 0;
851 *apptagval = 0;
852 *reftag = cpu_to_be32(spt->ref_tag);
853
854 } else {
855 /* SBC defines ref tag to be lower 32bits of LBA */
856 *reftag = (uint32_t) (0xffffffff & scsi_get_lba(sc));
857 *apptagmask = 0;
858 *apptagval = 0;
859 }
860 }
861
862 /*
863 * This function sets up buffer list for protection groups of
864 * type LPFC_PG_TYPE_NO_DIF
865 *
866 * This is usually used when the HBA is instructed to generate
867 * DIFs and insert them into data stream (or strip DIF from
868 * incoming data stream)
869 *
870 * The buffer list consists of just one protection group described
871 * below:
872 * +-------------------------+
873 * start of prot group --> | PDE_1 |
874 * +-------------------------+
875 * | Data BDE |
876 * +-------------------------+
877 * |more Data BDE's ... (opt)|
878 * +-------------------------+
879 *
880 * @sc: pointer to scsi command we're working on
881 * @bpl: pointer to buffer list for protection groups
882 * @datasegcnt: number of segments of data that have been dma mapped
883 *
884 * Note: Data s/g buffers have been dma mapped
885 */
886 static int
887 lpfc_bg_setup_bpl(struct lpfc_hba *phba, struct scsi_cmnd *sc,
888 struct ulp_bde64 *bpl, int datasegcnt)
889 {
890 struct scatterlist *sgde = NULL; /* s/g data entry */
891 struct lpfc_pde *pde1 = NULL;
892 dma_addr_t physaddr;
893 int i = 0, num_bde = 0;
894 int datadir = sc->sc_data_direction;
895 int prof = LPFC_PROF_INVALID;
896 unsigned blksize;
897 uint32_t reftag;
898 uint16_t apptagmask, apptagval;
899
900 pde1 = (struct lpfc_pde *) bpl;
901 prof = lpfc_sc_to_sli_prof(sc);
902
903 if (prof == LPFC_PROF_INVALID)
904 goto out;
905
906 /* extract some info from the scsi command for PDE1*/
907 blksize = lpfc_cmd_blksize(sc);
908 lpfc_get_cmd_dif_parms(sc, &apptagmask, &apptagval, &reftag);
909
910 /* setup PDE1 with what we have */
911 lpfc_pde_set_bg_parms(pde1, LPFC_PDE1_DESCRIPTOR, prof, blksize,
912 BG_EC_STOP_ERR);
913 lpfc_pde_set_dif_parms(pde1, apptagmask, apptagval, reftag);
914
915 num_bde++;
916 bpl++;
917
918 /* assumption: caller has already run dma_map_sg on command data */
919 scsi_for_each_sg(sc, sgde, datasegcnt, i) {
920 physaddr = sg_dma_address(sgde);
921 bpl->addrLow = le32_to_cpu(putPaddrLow(physaddr));
922 bpl->addrHigh = le32_to_cpu(putPaddrHigh(physaddr));
923 bpl->tus.f.bdeSize = sg_dma_len(sgde);
924 if (datadir == DMA_TO_DEVICE)
925 bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64;
926 else
927 bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64I;
928 bpl->tus.w = le32_to_cpu(bpl->tus.w);
929 bpl++;
930 num_bde++;
931 }
932
933 out:
934 return num_bde;
935 }
936
937 /*
938 * This function sets up buffer list for protection groups of
939 * type LPFC_PG_TYPE_DIF_BUF
940 *
941 * This is usually used when DIFs are in their own buffers,
942 * separate from the data. The HBA can then be instructed
943 * to place the DIFs in the outgoing stream. For read operations,
944 * the HBA could extract the DIFs and place them in DIF buffers.
945 *
946 * The buffer list for this type consists of one or more of the
947 * protection groups described below:
948 * +-------------------------+
949 * start of first prot group --> | PDE_1 |
950 * +-------------------------+
951 * | PDE_3 (Prot BDE) |
952 * +-------------------------+
953 * | Data BDE |
954 * +-------------------------+
955 * |more Data BDE's ... (opt)|
956 * +-------------------------+
957 * start of new prot group --> | PDE_1 |
958 * +-------------------------+
959 * | ... |
960 * +-------------------------+
961 *
962 * @sc: pointer to scsi command we're working on
963 * @bpl: pointer to buffer list for protection groups
964 * @datacnt: number of segments of data that have been dma mapped
965 * @protcnt: number of segments of protection data that have been dma mapped
966 *
967 * Note: It is assumed that both data and protection s/g buffers have been
968 * mapped for DMA
969 */
970 static int
971 lpfc_bg_setup_bpl_prot(struct lpfc_hba *phba, struct scsi_cmnd *sc,
972 struct ulp_bde64 *bpl, int datacnt, int protcnt)
973 {
974 struct scatterlist *sgde = NULL; /* s/g data entry */
975 struct scatterlist *sgpe = NULL; /* s/g prot entry */
976 struct lpfc_pde *pde1 = NULL;
977 struct ulp_bde64 *prot_bde = NULL;
978 dma_addr_t dataphysaddr, protphysaddr;
979 unsigned short curr_data = 0, curr_prot = 0;
980 unsigned int split_offset, protgroup_len;
981 unsigned int protgrp_blks, protgrp_bytes;
982 unsigned int remainder, subtotal;
983 int prof = LPFC_PROF_INVALID;
984 int datadir = sc->sc_data_direction;
985 unsigned char pgdone = 0, alldone = 0;
986 unsigned blksize;
987 uint32_t reftag;
988 uint16_t apptagmask, apptagval;
989 int num_bde = 0;
990
991 sgpe = scsi_prot_sglist(sc);
992 sgde = scsi_sglist(sc);
993
994 if (!sgpe || !sgde) {
995 lpfc_printf_log(phba, KERN_ERR, LOG_FCP,
996 "9020 Invalid s/g entry: data=0x%p prot=0x%p\n",
997 sgpe, sgde);
998 return 0;
999 }
1000
1001 prof = lpfc_sc_to_sli_prof(sc);
1002 if (prof == LPFC_PROF_INVALID)
1003 goto out;
1004
1005 /* extract some info from the scsi command for PDE1*/
1006 blksize = lpfc_cmd_blksize(sc);
1007 lpfc_get_cmd_dif_parms(sc, &apptagmask, &apptagval, &reftag);
1008
1009 split_offset = 0;
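/* Build one protection group per protection s/g entry: a PDE_1, the
 * protection BDE, then as many data BDEs as are needed to cover
 * protgrp_bytes of data, splitting a data s/g entry across groups
 * when necessary. */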
1010 do {
1011 /* setup the first PDE_1 */
1012 pde1 = (struct lpfc_pde *) bpl;
1013
1014 lpfc_pde_set_bg_parms(pde1, LPFC_PDE1_DESCRIPTOR, prof, blksize,
1015 BG_EC_STOP_ERR);
1016 lpfc_pde_set_dif_parms(pde1, apptagmask, apptagval, reftag);
1017
1018 num_bde++;
1019 bpl++;
1020
1021 /* setup the first BDE that points to protection buffer */
1022 prot_bde = (struct ulp_bde64 *) bpl;
1023 protphysaddr = sg_dma_address(sgpe);
1024 prot_bde->addrLow = le32_to_cpu(putPaddrLow(protphysaddr));
1025 prot_bde->addrHigh = le32_to_cpu(putPaddrHigh(protphysaddr));
1026 protgroup_len = sg_dma_len(sgpe);
1027
1028
1029 /* must be integer multiple of the DIF block length */
1030 BUG_ON(protgroup_len % 8);
1031
1032 protgrp_blks = protgroup_len / 8;
1033 protgrp_bytes = protgrp_blks * blksize;
1034
1035 prot_bde->tus.f.bdeSize = protgroup_len;
1036 if (datadir == DMA_TO_DEVICE)
1037 prot_bde->tus.f.bdeFlags = BUFF_TYPE_BDE_64;
1038 else
1039 prot_bde->tus.f.bdeFlags = BUFF_TYPE_BDE_64I;
1040 prot_bde->tus.w = le32_to_cpu(bpl->tus.w);
1041
1042 curr_prot++;
1043 num_bde++;
1044
1045 /* setup BDE's for data blocks associated with DIF data */
1046 pgdone = 0;
1047 subtotal = 0; /* total bytes processed for current prot grp */
1048 while (!pgdone) {
1049 if (!sgde) {
1050 printk(KERN_ERR "%s Invalid data segment\n",
1051 __func__);
1052 return 0;
1053 }
1054 bpl++;
1055 dataphysaddr = sg_dma_address(sgde) + split_offset;
1056 bpl->addrLow = le32_to_cpu(putPaddrLow(dataphysaddr));
1057 bpl->addrHigh = le32_to_cpu(putPaddrHigh(dataphysaddr));
1058
1059 remainder = sg_dma_len(sgde) - split_offset;
1060
1061 if ((subtotal + remainder) <= protgrp_bytes) {
1062 /* we can use this whole buffer */
1063 bpl->tus.f.bdeSize = remainder;
1064 split_offset = 0;
1065
1066 if ((subtotal + remainder) == protgrp_bytes)
1067 pgdone = 1;
1068 } else {
1069 /* must split this buffer with next prot grp */
1070 bpl->tus.f.bdeSize = protgrp_bytes - subtotal;
1071 split_offset += bpl->tus.f.bdeSize;
1072 }
1073
1074 subtotal += bpl->tus.f.bdeSize;
1075
1076 if (datadir == DMA_TO_DEVICE)
1077 bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64;
1078 else
1079 bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64I;
1080 bpl->tus.w = le32_to_cpu(bpl->tus.w);
1081
1082 num_bde++;
1083 curr_data++;
1084
1085 if (split_offset)
1086 break;
1087
1088 /* Move to the next s/g segment if possible */
1089 sgde = sg_next(sgde);
1090 }
1091
1092 /* are we done ? */
1093 if (curr_prot == protcnt) {
1094 alldone = 1;
1095 } else if (curr_prot < protcnt) {
1096 /* advance to next prot buffer */
1097 sgpe = sg_next(sgpe);
1098 bpl++;
1099
1100 /* update the reference tag */
1101 reftag += protgrp_blks;
1102 } else {
1103 /* if we're here, we have a bug */
1104 printk(KERN_ERR "BLKGRD: bug in %s\n", __func__);
1105 }
1106
1107 } while (!alldone);
1108
1109 out:
1110
1111
1112 return num_bde;
1113 }
1114 /*
1115 * Given a SCSI command that supports DIF, determine composition of protection
1116 * groups involved in setting up buffer lists
1117 *
1118 * Returns: LPFC_PG_TYPE_NO_DIF when the HBA generates/strips DIF itself (no
1119 * protection buffers), LPFC_PG_TYPE_DIF_BUF when protection data is passed
1120 * in separate buffers (for both read and write), or LPFC_PG_TYPE_INVALID. */
1121 static int
1122 lpfc_prot_group_type(struct lpfc_hba *phba, struct scsi_cmnd *sc)
1123 {
1124 int ret = LPFC_PG_TYPE_INVALID;
1125 unsigned char op = scsi_get_prot_op(sc);
1126
1127 switch (op) {
1128 case SCSI_PROT_READ_STRIP:
1129 case SCSI_PROT_WRITE_INSERT:
1130 ret = LPFC_PG_TYPE_NO_DIF;
1131 break;
1132 case SCSI_PROT_READ_INSERT:
1133 case SCSI_PROT_WRITE_STRIP:
1134 case SCSI_PROT_READ_PASS:
1135 case SCSI_PROT_WRITE_PASS:
1136 case SCSI_PROT_WRITE_CONVERT:
1137 case SCSI_PROT_READ_CONVERT:
1138 ret = LPFC_PG_TYPE_DIF_BUF;
1139 break;
1140 default:
1141 lpfc_printf_log(phba, KERN_ERR, LOG_FCP,
1142 "9021 Unsupported protection op:%d\n", op);
1143 break;
1144 }
1145
1146 return ret;
1147 }
1148
1149 /*
1150 * This is the protection/DIF aware version of
1151 * lpfc_scsi_prep_dma_buf(). It may be a good idea to combine the
1152 * two functions eventually, but for now, it's here
1153 */
1154 static int
1155 lpfc_bg_scsi_prep_dma_buf(struct lpfc_hba *phba,
1156 struct lpfc_scsi_buf *lpfc_cmd)
1157 {
1158 struct scsi_cmnd *scsi_cmnd = lpfc_cmd->pCmd;
1159 struct fcp_cmnd *fcp_cmnd = lpfc_cmd->fcp_cmnd;
1160 struct ulp_bde64 *bpl = lpfc_cmd->fcp_bpl;
1161 IOCB_t *iocb_cmd = &lpfc_cmd->cur_iocbq.iocb;
1162 uint32_t num_bde = 0;
1163 int datasegcnt, protsegcnt, datadir = scsi_cmnd->sc_data_direction;
1164 int prot_group_type = 0;
1165 int diflen, fcpdl;
1166 unsigned blksize;
1167
1168 /*
1169 * Start the lpfc command prep by bumping the bpl beyond fcp_cmnd
1170 * fcp_rsp regions to the first data bde entry
1171 */
1172 bpl += 2;
1173 if (scsi_sg_count(scsi_cmnd)) {
1174 /*
1175 * The driver stores the segment count returned from pci_map_sg
1176 * because this a count of dma-mappings used to map the use_sg
1177 * pages. They are not guaranteed to be the same for those
1178 * architectures that implement an IOMMU.
1179 */
1180 datasegcnt = dma_map_sg(&phba->pcidev->dev,
1181 scsi_sglist(scsi_cmnd),
1182 scsi_sg_count(scsi_cmnd), datadir);
1183 if (unlikely(!datasegcnt))
1184 return 1;
1185
1186 lpfc_cmd->seg_cnt = datasegcnt;
1187 if (lpfc_cmd->seg_cnt > phba->cfg_sg_seg_cnt) {
1188 printk(KERN_ERR "%s: Too many sg segments from "
1189 "dma_map_sg. Config %d, seg_cnt %d\n",
1190 __func__, phba->cfg_sg_seg_cnt,
1191 lpfc_cmd->seg_cnt);
1192 scsi_dma_unmap(scsi_cmnd);
1193 return 1;
1194 }
1195
1196 prot_group_type = lpfc_prot_group_type(phba, scsi_cmnd);
1197
1198 switch (prot_group_type) {
1199 case LPFC_PG_TYPE_NO_DIF:
1200 num_bde = lpfc_bg_setup_bpl(phba, scsi_cmnd, bpl,
1201 datasegcnt);
1202 /* we should have 2 or more entries in buffer list */
1203 if (num_bde < 2)
1204 goto err;
1205 break;
1206 case LPFC_PG_TYPE_DIF_BUF:{
1207 /*
1208 * This type indicates that protection buffers are
1209 * passed to the driver, so they need to be prepared
1210 * for DMA
1211 */
1212 protsegcnt = dma_map_sg(&phba->pcidev->dev,
1213 scsi_prot_sglist(scsi_cmnd),
1214 scsi_prot_sg_count(scsi_cmnd), datadir);
1215 if (unlikely(!protsegcnt)) {
1216 scsi_dma_unmap(scsi_cmnd);
1217 return 1;
1218 }
1219
1220 lpfc_cmd->prot_seg_cnt = protsegcnt;
1221 if (lpfc_cmd->prot_seg_cnt
1222 > phba->cfg_prot_sg_seg_cnt) {
1223 printk(KERN_ERR "%s: Too many prot sg segments "
1224 "from dma_map_sg. Config %d,"
1225 "prot_seg_cnt %d\n", __func__,
1226 phba->cfg_prot_sg_seg_cnt,
1227 lpfc_cmd->prot_seg_cnt);
1228 dma_unmap_sg(&phba->pcidev->dev,
1229 scsi_prot_sglist(scsi_cmnd),
1230 scsi_prot_sg_count(scsi_cmnd),
1231 datadir);
1232 scsi_dma_unmap(scsi_cmnd);
1233 return 1;
1234 }
1235
1236 num_bde = lpfc_bg_setup_bpl_prot(phba, scsi_cmnd, bpl,
1237 datasegcnt, protsegcnt);
1238 /* we should have 3 or more entries in buffer list */
1239 if (num_bde < 3)
1240 goto err;
1241 break;
1242 }
1243 case LPFC_PG_TYPE_INVALID:
1244 default:
1245 lpfc_printf_log(phba, KERN_ERR, LOG_FCP,
1246 "9022 Unexpected protection group %i\n",
1247 prot_group_type);
1248 return 1;
1249 }
1250 }
1251
1252 /*
1253 * Finish initializing those IOCB fields that are dependent on the
1254 * scsi_cmnd request_buffer. Note that the bdeSize is explicitly
1255 * reinitialized since all iocb memory resources are used many times
1256 * for transmit, receive, and continuation bpl's.
1257 */
1258 iocb_cmd->un.fcpi64.bdl.bdeSize = (2 * sizeof(struct ulp_bde64));
1259 iocb_cmd->un.fcpi64.bdl.bdeSize += (num_bde * sizeof(struct ulp_bde64));
1260 iocb_cmd->ulpBdeCount = 1;
1261 iocb_cmd->ulpLe = 1;
1262
1263 fcpdl = scsi_bufflen(scsi_cmnd);
1264
1265 if (scsi_get_prot_type(scsi_cmnd) == SCSI_PROT_DIF_TYPE1) {
1266 /*
1267 * We are in DIF Type 1 mode
1268 * Every data block has an 8 byte DIF (trailer)
1269 * attached to it. Must adjust the FCP data length.
1270 */
1271 blksize = lpfc_cmd_blksize(scsi_cmnd);
1272 diflen = (fcpdl / blksize) * 8;
1273 fcpdl += diflen;
1274 }
1275 fcp_cmnd->fcpDl = cpu_to_be32(fcpdl);
1276
1277 /*
1278 * Due to difference in data length between DIF/non-DIF paths,
1279 * we need to set word 4 of IOCB here
1280 */
1281 iocb_cmd->un.fcpi.fcpi_parm = fcpdl;
1282
1283 return 0;
1284 err:
1285 lpfc_printf_log(phba, KERN_ERR, LOG_FCP,
1286 "9023 Could not setup all needed BDE's"
1287 "prot_group_type=%d, num_bde=%d\n",
1288 prot_group_type, num_bde);
1289 return 1;
1290 }
1291
1292 /*
1293 * This function checks for BlockGuard errors detected by
1294 * the HBA. In case of errors, the ASC/ASCQ fields in the
1295 * sense buffer will be set accordingly, paired with
1296 * ILLEGAL_REQUEST to signal to the kernel that the HBA
1297 * detected corruption.
1298 *
1299 * Returns:
1300 * 0 - No error found
1301 * 1 - BlockGuard error found
1302 * -1 - Internal error (bad profile, ...etc)
1303 */
1304 static int
1305 lpfc_parse_bg_err(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd,
1306 struct lpfc_iocbq *pIocbOut)
1307 {
1308 struct scsi_cmnd *cmd = lpfc_cmd->pCmd;
1309 struct sli3_bg_fields *bgf = &pIocbOut->iocb.unsli3.sli3_bg;
1310 int ret = 0;
1311 uint32_t bghm = bgf->bghm;
1312 uint32_t bgstat = bgf->bgstat;
1313 uint64_t failing_sector = 0;
1314
1315 printk(KERN_ERR "BG ERROR in cmd 0x%x lba 0x%llx blk cnt 0x%lx "
1316 "bgstat=0x%x bghm=0x%x\n",
1317 cmd->cmnd[0], (unsigned long long)scsi_get_lba(cmd),
1318 cmd->request->nr_sectors, bgstat, bghm);
1319
1320 spin_lock(&_dump_buf_lock);
1321 if (!_dump_buf_done) {
1322 printk(KERN_ERR "Saving Data for %u blocks to debugfs\n",
1323 (cmd->cmnd[7] << 8 | cmd->cmnd[8]));
1324 lpfc_debug_save_data(cmd);
1325
1326 /* If we have a prot sgl, save the DIF buffer */
1327 if (lpfc_prot_group_type(phba, cmd) ==
1328 LPFC_PG_TYPE_DIF_BUF) {
1329 printk(KERN_ERR "Saving DIF for %u blocks to debugfs\n",
1330 (cmd->cmnd[7] << 8 | cmd->cmnd[8]));
1331 lpfc_debug_save_dif(cmd);
1332 }
1333
1334 _dump_buf_done = 1;
1335 }
1336 spin_unlock(&_dump_buf_lock);
1337
1338 if (lpfc_bgs_get_invalid_prof(bgstat)) {
1339 cmd->result = ScsiResult(DID_ERROR, 0);
1340 printk(KERN_ERR "Invalid BlockGuard profile. bgstat:0x%x\n",
1341 bgstat);
1342 ret = (-1);
1343 goto out;
1344 }
1345
1346 if (lpfc_bgs_get_uninit_dif_block(bgstat)) {
1347 cmd->result = ScsiResult(DID_ERROR, 0);
1348 printk(KERN_ERR "Invalid BlockGuard DIF Block. bgstat:0x%x\n",
1349 bgstat);
1350 ret = (-1);
1351 goto out;
1352 }
1353
1354 if (lpfc_bgs_get_guard_err(bgstat)) {
1355 ret = 1;
1356
1357 scsi_build_sense_buffer(1, cmd->sense_buffer, ILLEGAL_REQUEST,
1358 0x10, 0x1);
1359 cmd->result = DRIVER_SENSE << 24
1360 | ScsiResult(DID_ABORT, SAM_STAT_CHECK_CONDITION);
1361 phba->bg_guard_err_cnt++;
1362 printk(KERN_ERR "BLKGRD: guard_tag error\n");
1363 }
1364
1365 if (lpfc_bgs_get_reftag_err(bgstat)) {
1366 ret = 1;
1367
1368 scsi_build_sense_buffer(1, cmd->sense_buffer, ILLEGAL_REQUEST,
1369 0x10, 0x3);
1370 cmd->result = DRIVER_SENSE << 24
1371 | ScsiResult(DID_ABORT, SAM_STAT_CHECK_CONDITION);
1372
1373 phba->bg_reftag_err_cnt++;
1374 printk(KERN_ERR "BLKGRD: ref_tag error\n");
1375 }
1376
1377 if (lpfc_bgs_get_apptag_err(bgstat)) {
1378 ret = 1;
1379
1380 scsi_build_sense_buffer(1, cmd->sense_buffer, ILLEGAL_REQUEST,
1381 0x10, 0x2);
1382 cmd->result = DRIVER_SENSE << 24
1383 | ScsiResult(DID_ABORT, SAM_STAT_CHECK_CONDITION);
1384
1385 phba->bg_apptag_err_cnt++;
1386 printk(KERN_ERR "BLKGRD: app_tag error\n");
1387 }
1388
1389 if (lpfc_bgs_get_hi_water_mark_present(bgstat)) {
1390 /*
1391 * setup sense data descriptor 0 per SPC-4 as an information
1392 * field, and put the failing LBA in it
1393 */
1394 cmd->sense_buffer[8] = 0; /* Information */
1395 cmd->sense_buffer[9] = 0xa; /* Add. length */
1396 bghm /= cmd->device->sector_size;
1397
1398 failing_sector = scsi_get_lba(cmd);
1399 failing_sector += bghm;
1400
1401 put_unaligned_be64(failing_sector, &cmd->sense_buffer[10]);
1402 }
1403
1404 if (!ret) {
1405 /* No error was reported - problem in FW? */
1406 cmd->result = ScsiResult(DID_ERROR, 0);
1407 printk(KERN_ERR "BLKGRD: no errors reported!\n");
1408 }
1409
1410 out:
1411 return ret;
1412 }
1413
1414 /**
1415 * lpfc_send_scsi_error_event - Posts an event when there is SCSI error
1416 * @phba: Pointer to hba context object.
1417 * @vport: Pointer to vport object.
1418 * @lpfc_cmd: Pointer to lpfc scsi command which reported the error.
1419 * @rsp_iocb: Pointer to response iocb object which reported error.
1420 *
1421 * This function posts an event when there is a SCSI command reporting
1422 * error from the scsi device.
1423 **/
1424 static void
1425 lpfc_send_scsi_error_event(struct lpfc_hba *phba, struct lpfc_vport *vport,
1426 struct lpfc_scsi_buf *lpfc_cmd, struct lpfc_iocbq *rsp_iocb) {
1427 struct scsi_cmnd *cmnd = lpfc_cmd->pCmd;
1428 struct fcp_rsp *fcprsp = lpfc_cmd->fcp_rsp;
1429 uint32_t resp_info = fcprsp->rspStatus2;
1430 uint32_t scsi_status = fcprsp->rspStatus3;
1431 uint32_t fcpi_parm = rsp_iocb->iocb.un.fcpi.fcpi_parm;
1432 struct lpfc_fast_path_event *fast_path_evt = NULL;
1433 struct lpfc_nodelist *pnode = lpfc_cmd->rdata->pnode;
1434 unsigned long flags;
1435
1436 /* If there is queuefull or busy condition send a scsi event */
1437 if ((cmnd->result == SAM_STAT_TASK_SET_FULL) ||
1438 (cmnd->result == SAM_STAT_BUSY)) {
1439 fast_path_evt = lpfc_alloc_fast_evt(phba);
1440 if (!fast_path_evt)
1441 return;
1442 fast_path_evt->un.scsi_evt.event_type =
1443 FC_REG_SCSI_EVENT;
1444 fast_path_evt->un.scsi_evt.subcategory =
1445 (cmnd->result == SAM_STAT_TASK_SET_FULL) ?
1446 LPFC_EVENT_QFULL : LPFC_EVENT_DEVBSY;
1447 fast_path_evt->un.scsi_evt.lun = cmnd->device->lun;
1448 memcpy(&fast_path_evt->un.scsi_evt.wwpn,
1449 &pnode->nlp_portname, sizeof(struct lpfc_name));
1450 memcpy(&fast_path_evt->un.scsi_evt.wwnn,
1451 &pnode->nlp_nodename, sizeof(struct lpfc_name));
1452 } else if ((resp_info & SNS_LEN_VALID) && fcprsp->rspSnsLen &&
1453 ((cmnd->cmnd[0] == READ_10) || (cmnd->cmnd[0] == WRITE_10))) {
1454 fast_path_evt = lpfc_alloc_fast_evt(phba);
1455 if (!fast_path_evt)
1456 return;
1457 fast_path_evt->un.check_cond_evt.scsi_event.event_type =
1458 FC_REG_SCSI_EVENT;
1459 fast_path_evt->un.check_cond_evt.scsi_event.subcategory =
1460 LPFC_EVENT_CHECK_COND;
1461 fast_path_evt->un.check_cond_evt.scsi_event.lun =
1462 cmnd->device->lun;
1463 memcpy(&fast_path_evt->un.check_cond_evt.scsi_event.wwpn,
1464 &pnode->nlp_portname, sizeof(struct lpfc_name));
1465 memcpy(&fast_path_evt->un.check_cond_evt.scsi_event.wwnn,
1466 &pnode->nlp_nodename, sizeof(struct lpfc_name));
1467 fast_path_evt->un.check_cond_evt.sense_key =
1468 cmnd->sense_buffer[2] & 0xf;
1469 fast_path_evt->un.check_cond_evt.asc = cmnd->sense_buffer[12];
1470 fast_path_evt->un.check_cond_evt.ascq = cmnd->sense_buffer[13];
1471 } else if ((cmnd->sc_data_direction == DMA_FROM_DEVICE) &&
1472 fcpi_parm &&
1473 ((be32_to_cpu(fcprsp->rspResId) != fcpi_parm) ||
1474 ((scsi_status == SAM_STAT_GOOD) &&
1475 !(resp_info & (RESID_UNDER | RESID_OVER))))) {
1476 /*
1477 * If status is good or resid does not match with fcp_param and
1478 * there is valid fcpi_parm, then there is a read_check error
1479 */
1480 fast_path_evt = lpfc_alloc_fast_evt(phba);
1481 if (!fast_path_evt)
1482 return;
1483 fast_path_evt->un.read_check_error.header.event_type =
1484 FC_REG_FABRIC_EVENT;
1485 fast_path_evt->un.read_check_error.header.subcategory =
1486 LPFC_EVENT_FCPRDCHKERR;
1487 memcpy(&fast_path_evt->un.read_check_error.header.wwpn,
1488 &pnode->nlp_portname, sizeof(struct lpfc_name));
1489 memcpy(&fast_path_evt->un.read_check_error.header.wwnn,
1490 &pnode->nlp_nodename, sizeof(struct lpfc_name));
1491 fast_path_evt->un.read_check_error.lun = cmnd->device->lun;
1492 fast_path_evt->un.read_check_error.opcode = cmnd->cmnd[0];
1493 fast_path_evt->un.read_check_error.fcpiparam =
1494 fcpi_parm;
1495 } else
1496 return;
1497
1498 fast_path_evt->vport = vport;
1499 spin_lock_irqsave(&phba->hbalock, flags);
1500 list_add_tail(&fast_path_evt->work_evt.evt_listp, &phba->work_list);
1501 spin_unlock_irqrestore(&phba->hbalock, flags);
1502 lpfc_worker_wake_up(phba);
1503 return;
1504 }
1505
1506 /**
1507 * lpfc_scsi_unprep_dma_buf - Routine to un-map DMA mapping of scatter gather
1508 * @phba: The Hba for which this call is being executed.
1509 * @psb: The scsi buffer which is going to be un-mapped.
1510 *
1511 * This routine does DMA un-mapping of the scatter gather list of the scsi
1512 * command field of @psb.
1513 **/
1514 static void
1515 lpfc_scsi_unprep_dma_buf(struct lpfc_hba * phba, struct lpfc_scsi_buf * psb)
1516 {
1517 /*
1518 * There are only two special cases to consider. (1) the scsi command
1519 * requested scatter-gather usage or (2) the scsi command allocated
1520 * a request buffer, but did not request use_sg. There is a third
1521 * case, but it does not require resource deallocation.
1522 */
1523 if (psb->seg_cnt > 0)
1524 scsi_dma_unmap(psb->pCmd);
1525 if (psb->prot_seg_cnt > 0)
1526 dma_unmap_sg(&phba->pcidev->dev, scsi_prot_sglist(psb->pCmd),
1527 scsi_prot_sg_count(psb->pCmd),
1528 psb->pCmd->sc_data_direction);
1529 }
1530
1531 /**
1532 * lpfc_handle_fcp_err - FCP response handler
1533 * @vport: The virtual port for which this call is being executed.
1534 * @lpfc_cmd: Pointer to lpfc_scsi_buf data structure.
1535 * @rsp_iocb: The response IOCB which contains FCP error.
1536 *
1537 * This routine is called to process response IOCB with status field
1538 * IOSTAT_FCP_RSP_ERROR. This routine sets result field of scsi command
1539 * based upon SCSI and FCP error.
1540 **/
1541 static void
1542 lpfc_handle_fcp_err(struct lpfc_vport *vport, struct lpfc_scsi_buf *lpfc_cmd,
1543 struct lpfc_iocbq *rsp_iocb)
1544 {
1545 struct scsi_cmnd *cmnd = lpfc_cmd->pCmd;
1546 struct fcp_cmnd *fcpcmd = lpfc_cmd->fcp_cmnd;
1547 struct fcp_rsp *fcprsp = lpfc_cmd->fcp_rsp;
1548 uint32_t fcpi_parm = rsp_iocb->iocb.un.fcpi.fcpi_parm;
1549 uint32_t resp_info = fcprsp->rspStatus2;
1550 uint32_t scsi_status = fcprsp->rspStatus3;
1551 uint32_t *lp;
1552 uint32_t host_status = DID_OK;
1553 uint32_t rsplen = 0;
1554 uint32_t logit = LOG_FCP | LOG_FCP_ERROR;
1555
1556
1557 /*
1558 * If this is a task management command, there is no
1559 * scsi packet associated with this lpfc_cmd. The driver
1560 * consumes it.
1561 */
1562 if (fcpcmd->fcpCntl2) {
1563 scsi_status = 0;
1564 goto out;
1565 }
1566
1567 if ((resp_info & SNS_LEN_VALID) && fcprsp->rspSnsLen) {
1568 uint32_t snslen = be32_to_cpu(fcprsp->rspSnsLen);
1569 if (snslen > SCSI_SENSE_BUFFERSIZE)
1570 snslen = SCSI_SENSE_BUFFERSIZE;
1571
1572 if (resp_info & RSP_LEN_VALID)
1573 rsplen = be32_to_cpu(fcprsp->rspRspLen);
1574 memcpy(cmnd->sense_buffer, &fcprsp->rspInfo0 + rsplen, snslen);
1575 }
1576 lp = (uint32_t *)cmnd->sense_buffer;
1577
1578 if (!scsi_status && (resp_info & RESID_UNDER))
1579 logit = LOG_FCP;
1580
1581 lpfc_printf_vlog(vport, KERN_WARNING, logit,
1582 "9024 FCP command x%x failed: x%x SNS x%x x%x "
1583 "Data: x%x x%x x%x x%x x%x\n",
1584 cmnd->cmnd[0], scsi_status,
1585 be32_to_cpu(*lp), be32_to_cpu(*(lp + 3)), resp_info,
1586 be32_to_cpu(fcprsp->rspResId),
1587 be32_to_cpu(fcprsp->rspSnsLen),
1588 be32_to_cpu(fcprsp->rspRspLen),
1589 fcprsp->rspInfo3);
1590
1591 if (resp_info & RSP_LEN_VALID) {
1592 rsplen = be32_to_cpu(fcprsp->rspRspLen);
1593 if ((rsplen != 0 && rsplen != 4 && rsplen != 8) ||
1594 (fcprsp->rspInfo3 != RSP_NO_FAILURE)) {
1595 host_status = DID_ERROR;
1596 goto out;
1597 }
1598 }
1599
1600 scsi_set_resid(cmnd, 0);
1601 if (resp_info & RESID_UNDER) {
1602 scsi_set_resid(cmnd, be32_to_cpu(fcprsp->rspResId));
1603
1604 lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
1605 "9025 FCP Read Underrun, expected %d, "
1606 "residual %d Data: x%x x%x x%x\n",
1607 be32_to_cpu(fcpcmd->fcpDl),
1608 scsi_get_resid(cmnd), fcpi_parm, cmnd->cmnd[0],
1609 cmnd->underflow);
1610
1611 /*
1612 * If there is an under run, check if the under run reported by the
1613 * storage array is the same as the under run reported by the HBA.
1614 * If they are not the same, there is a dropped frame.
1615 */
1616 if ((cmnd->sc_data_direction == DMA_FROM_DEVICE) &&
1617 fcpi_parm &&
1618 (scsi_get_resid(cmnd) != fcpi_parm)) {
1619 lpfc_printf_vlog(vport, KERN_WARNING,
1620 LOG_FCP | LOG_FCP_ERROR,
1621 "9026 FCP Read Check Error "
1622 "and Underrun Data: x%x x%x x%x x%x\n",
1623 be32_to_cpu(fcpcmd->fcpDl),
1624 scsi_get_resid(cmnd), fcpi_parm,
1625 cmnd->cmnd[0]);
1626 scsi_set_resid(cmnd, scsi_bufflen(cmnd));
1627 host_status = DID_ERROR;
1628 }
1629 /*
1630 * The cmnd->underflow is the minimum number of bytes that must
1631 * be transferred for this command. Provided a sense condition
1632 * is not present, make sure the actual amount transferred is at
1633 * least the underflow value or fail.
1634 */
1635 if (!(resp_info & SNS_LEN_VALID) &&
1636 (scsi_status == SAM_STAT_GOOD) &&
1637 (scsi_bufflen(cmnd) - scsi_get_resid(cmnd)
1638 < cmnd->underflow)) {
1639 lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
1640 "9027 FCP command x%x residual "
1641 "underrun converted to error "
1642 "Data: x%x x%x x%x\n",
1643 cmnd->cmnd[0], scsi_bufflen(cmnd),
1644 scsi_get_resid(cmnd), cmnd->underflow);
1645 host_status = DID_ERROR;
1646 }
1647 } else if (resp_info & RESID_OVER) {
1648 lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP,
1649 "9028 FCP command x%x residual overrun error. "
1650 "Data: x%x x%x \n", cmnd->cmnd[0],
1651 scsi_bufflen(cmnd), scsi_get_resid(cmnd));
1652 host_status = DID_ERROR;
1653
1654 /*
1655 * Check SLI validation that all the transfer was actually done
1656 * (fcpi_parm should be zero). Apply check only to reads.
1657 */
1658 } else if ((scsi_status == SAM_STAT_GOOD) && fcpi_parm &&
1659 (cmnd->sc_data_direction == DMA_FROM_DEVICE)) {
1660 lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP | LOG_FCP_ERROR,
1661 "9029 FCP Read Check Error Data: "
1662 "x%x x%x x%x x%x\n",
1663 be32_to_cpu(fcpcmd->fcpDl),
1664 be32_to_cpu(fcprsp->rspResId),
1665 fcpi_parm, cmnd->cmnd[0]);
1666 host_status = DID_ERROR;
1667 scsi_set_resid(cmnd, scsi_bufflen(cmnd));
1668 }
1669
1670 out:
1671 cmnd->result = ScsiResult(host_status, scsi_status);
1672 lpfc_send_scsi_error_event(vport->phba, vport, lpfc_cmd, rsp_iocb);
1673 }
1674
1675 /**
1676 * lpfc_scsi_cmd_iocb_cmpl - Scsi cmnd IOCB completion routine
1677 * @phba: The Hba for which this call is being executed.
1678 * @pIocbIn: The command IOCBQ for the scsi cmnd.
1679 * @pIocbOut: The response IOCBQ for the scsi cmnd.
1680 *
1681 * This routine assigns scsi command result by looking into response IOCB
1682 * status field appropriately. This routine handles QUEUE FULL condition as
1683 * well by ramping down device queue depth.
1684 **/
1685 static void
1686 lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn,
1687 struct lpfc_iocbq *pIocbOut)
1688 {
1689 struct lpfc_scsi_buf *lpfc_cmd =
1690 (struct lpfc_scsi_buf *) pIocbIn->context1;
1691 struct lpfc_vport *vport = pIocbIn->vport;
1692 struct lpfc_rport_data *rdata = lpfc_cmd->rdata;
1693 struct lpfc_nodelist *pnode = rdata->pnode;
1694 struct scsi_cmnd *cmd = lpfc_cmd->pCmd;
1695 int result;
1696 struct scsi_device *sdev, *tmp_sdev;
1697 int depth = 0;
1698 unsigned long flags;
1699 struct lpfc_fast_path_event *fast_path_evt;
1700
1701 lpfc_cmd->result = pIocbOut->iocb.un.ulpWord[4];
1702 lpfc_cmd->status = pIocbOut->iocb.ulpStatus;
1703 if (pnode && NLP_CHK_NODE_ACT(pnode))
1704 atomic_dec(&pnode->cmd_pending);
1705
1706 if (lpfc_cmd->status) {
1707 if (lpfc_cmd->status == IOSTAT_LOCAL_REJECT &&
1708 (lpfc_cmd->result & IOERR_DRVR_MASK))
1709 lpfc_cmd->status = IOSTAT_DRIVER_REJECT;
1710 else if (lpfc_cmd->status >= IOSTAT_CNT)
1711 lpfc_cmd->status = IOSTAT_DEFAULT;
1712
1713 lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP,
1714 "9030 FCP cmd x%x failed <%d/%d> "
1715 "status: x%x result: x%x Data: x%x x%x\n",
1716 cmd->cmnd[0],
1717 cmd->device ? cmd->device->id : 0xffff,
1718 cmd->device ? cmd->device->lun : 0xffff,
1719 lpfc_cmd->status, lpfc_cmd->result,
1720 pIocbOut->iocb.ulpContext,
1721 lpfc_cmd->cur_iocbq.iocb.ulpIoTag);
1722
1723 switch (lpfc_cmd->status) {
1724 case IOSTAT_FCP_RSP_ERROR:
1725 /* Call FCP RSP handler to determine result */
1726 lpfc_handle_fcp_err(vport, lpfc_cmd, pIocbOut);
1727 break;
1728 case IOSTAT_NPORT_BSY:
1729 case IOSTAT_FABRIC_BSY:
1730 cmd->result = ScsiResult(DID_TRANSPORT_DISRUPTED, 0);
1731 fast_path_evt = lpfc_alloc_fast_evt(phba);
1732 if (!fast_path_evt)
1733 break;
1734 fast_path_evt->un.fabric_evt.event_type =
1735 FC_REG_FABRIC_EVENT;
1736 fast_path_evt->un.fabric_evt.subcategory =
1737 (lpfc_cmd->status == IOSTAT_NPORT_BSY) ?
1738 LPFC_EVENT_PORT_BUSY : LPFC_EVENT_FABRIC_BUSY;
1739 if (pnode && NLP_CHK_NODE_ACT(pnode)) {
1740 memcpy(&fast_path_evt->un.fabric_evt.wwpn,
1741 &pnode->nlp_portname,
1742 sizeof(struct lpfc_name));
1743 memcpy(&fast_path_evt->un.fabric_evt.wwnn,
1744 &pnode->nlp_nodename,
1745 sizeof(struct lpfc_name));
1746 }
1747 fast_path_evt->vport = vport;
1748 fast_path_evt->work_evt.evt =
1749 LPFC_EVT_FASTPATH_MGMT_EVT;
1750 spin_lock_irqsave(&phba->hbalock, flags);
1751 list_add_tail(&fast_path_evt->work_evt.evt_listp,
1752 &phba->work_list);
1753 spin_unlock_irqrestore(&phba->hbalock, flags);
1754 lpfc_worker_wake_up(phba);
1755 break;
1756 case IOSTAT_LOCAL_REJECT:
1757 if (lpfc_cmd->result == IOERR_INVALID_RPI ||
1758 lpfc_cmd->result == IOERR_NO_RESOURCES ||
1759 lpfc_cmd->result == IOERR_ABORT_REQUESTED) {
1760 cmd->result = ScsiResult(DID_REQUEUE, 0);
1761 break;
1762 }
1763
1764 if ((lpfc_cmd->result == IOERR_RX_DMA_FAILED ||
1765 lpfc_cmd->result == IOERR_TX_DMA_FAILED) &&
1766 pIocbOut->iocb.unsli3.sli3_bg.bgstat) {
1767 if (scsi_get_prot_op(cmd) != SCSI_PROT_NORMAL) {
1768 /*
1769 * This is a response for a BG enabled
1770 * cmd. Parse BG error
1771 */
1772 lpfc_parse_bg_err(phba, lpfc_cmd,
1773 pIocbOut);
1774 break;
1775 } else {
1776 lpfc_printf_vlog(vport, KERN_WARNING,
1777 LOG_BG,
1778 "9031 non-zero BGSTAT "
1779 "on unprotected cmd");
1780 }
1781 }
1782
1783 /* else: fall through */
1784 default:
1785 cmd->result = ScsiResult(DID_ERROR, 0);
1786 break;
1787 }
1788
1789 if (!pnode || !NLP_CHK_NODE_ACT(pnode)
1790 || (pnode->nlp_state != NLP_STE_MAPPED_NODE))
1791 cmd->result = ScsiResult(DID_TRANSPORT_DISRUPTED,
1792 SAM_STAT_BUSY);
1793 } else {
1794 cmd->result = ScsiResult(DID_OK, 0);
1795 }
1796
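	/*
	 * On error, or when the target returned sense data, log the result
	 * and the leading words of the sense buffer.
	 */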
1797 if (cmd->result || lpfc_cmd->fcp_rsp->rspSnsLen) {
1798 uint32_t *lp = (uint32_t *)cmd->sense_buffer;
1799
1800 lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
1801 "0710 Iodone <%d/%d> cmd %p, error "
1802 "x%x SNS x%x x%x Data: x%x x%x\n",
1803 cmd->device->id, cmd->device->lun, cmd,
1804 cmd->result, *lp, *(lp + 3), cmd->retries,
1805 scsi_get_resid(cmd));
1806 }
1807
1808 lpfc_update_stats(phba, lpfc_cmd);
1809 result = cmd->result;
1810 sdev = cmd->device;
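	/*
	 * If this command took longer than cfg_max_scsicmpl_time, clamp the
	 * target's queue depth down to the number of commands still pending;
	 * otherwise allow it to ramp back up at LPFC_TGTQ_INTERVAL steps.
	 */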
1811 if (vport->cfg_max_scsicmpl_time &&
1812 time_after(jiffies, lpfc_cmd->start_time +
1813 msecs_to_jiffies(vport->cfg_max_scsicmpl_time))) {
1814 spin_lock_irqsave(sdev->host->host_lock, flags);
1815 if (pnode && NLP_CHK_NODE_ACT(pnode)) {
1816 if (pnode->cmd_qdepth >
1817 atomic_read(&pnode->cmd_pending) &&
1818 (atomic_read(&pnode->cmd_pending) >
1819 LPFC_MIN_TGT_QDEPTH) &&
1820 ((cmd->cmnd[0] == READ_10) ||
1821 (cmd->cmnd[0] == WRITE_10)))
1822 pnode->cmd_qdepth =
1823 atomic_read(&pnode->cmd_pending);
1824
1825 pnode->last_change_time = jiffies;
1826 }
1827 spin_unlock_irqrestore(sdev->host->host_lock, flags);
1828 } else if (pnode && NLP_CHK_NODE_ACT(pnode)) {
1829 if ((pnode->cmd_qdepth < LPFC_MAX_TGT_QDEPTH) &&
1830 time_after(jiffies, pnode->last_change_time +
1831 msecs_to_jiffies(LPFC_TGTQ_INTERVAL))) {
1832 spin_lock_irqsave(sdev->host->host_lock, flags);
1833 pnode->cmd_qdepth += pnode->cmd_qdepth *
1834 LPFC_TGTQ_RAMPUP_PCENT / 100;
1835 if (pnode->cmd_qdepth > LPFC_MAX_TGT_QDEPTH)
1836 pnode->cmd_qdepth = LPFC_MAX_TGT_QDEPTH;
1837 pnode->last_change_time = jiffies;
1838 spin_unlock_irqrestore(sdev->host->host_lock, flags);
1839 }
1840 }
1841
1842 lpfc_scsi_unprep_dma_buf(phba, lpfc_cmd);
1843 cmd->scsi_done(cmd);
1844
1845 if (phba->cfg_poll & ENABLE_FCP_RING_POLLING) {
1846 /*
1847 * If there is a thread waiting for command completion
1848 * wake up the thread.
1849 */
1850 spin_lock_irqsave(sdev->host->host_lock, flags);
1851 lpfc_cmd->pCmd = NULL;
1852 if (lpfc_cmd->waitq)
1853 wake_up(lpfc_cmd->waitq);
1854 spin_unlock_irqrestore(sdev->host->host_lock, flags);
1855 lpfc_release_scsi_buf(phba, lpfc_cmd);
1856 return;
1857 }
1858
1859
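	/*
	 * On a clean completion, consider ramping the lun queue depth back up,
	 * but only if both the last ramp-up and the last queue-full event are
	 * older than LPFC_Q_RAMP_UP_INTERVAL and the configured depth has not
	 * been reached yet.
	 */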
1860 if (!result)
1861 lpfc_rampup_queue_depth(vport, sdev);
1862
1863 if (!result && pnode && NLP_CHK_NODE_ACT(pnode) &&
1864 ((jiffies - pnode->last_ramp_up_time) >
1865 LPFC_Q_RAMP_UP_INTERVAL * HZ) &&
1866 ((jiffies - pnode->last_q_full_time) >
1867 LPFC_Q_RAMP_UP_INTERVAL * HZ) &&
1868 (vport->cfg_lun_queue_depth > sdev->queue_depth)) {
1869 shost_for_each_device(tmp_sdev, sdev->host) {
1870 if (vport->cfg_lun_queue_depth > tmp_sdev->queue_depth){
1871 if (tmp_sdev->id != sdev->id)
1872 continue;
1873 if (tmp_sdev->ordered_tags)
1874 scsi_adjust_queue_depth(tmp_sdev,
1875 MSG_ORDERED_TAG,
1876 tmp_sdev->queue_depth+1);
1877 else
1878 scsi_adjust_queue_depth(tmp_sdev,
1879 MSG_SIMPLE_TAG,
1880 tmp_sdev->queue_depth+1);
1881
1882 pnode->last_ramp_up_time = jiffies;
1883 }
1884 }
1885 lpfc_send_sdev_queuedepth_change_event(phba, vport, pnode,
1886 0xFFFFFFFF,
1887 sdev->queue_depth - 1, sdev->queue_depth);
1888 }
1889
1890 /*
1891 * Check for queue full. If the lun is reporting queue full, then
1892 * back off the lun queue depth to prevent target overloads.
1893 */
1894 if (result == SAM_STAT_TASK_SET_FULL && pnode &&
1895 NLP_CHK_NODE_ACT(pnode)) {
1896 pnode->last_q_full_time = jiffies;
1897
1898 shost_for_each_device(tmp_sdev, sdev->host) {
1899 if (tmp_sdev->id != sdev->id)
1900 continue;
1901 depth = scsi_track_queue_full(tmp_sdev,
1902 tmp_sdev->queue_depth - 1);
1903 }
1904 			/*
1905 			 * The queue depth cannot be lowered any further.
1906 			 * scsi_track_queue_full returned -1, so report the
1907 			 * host's cmd_per_lun as the resulting depth value
1908 			 * instead of the error code.
1909 			 */
1910 if (depth == -1)
1911 depth = sdev->host->cmd_per_lun;
1912
1913 if (depth) {
1914 lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP,
1915 "0711 detected queue full - lun queue "
1916 "depth adjusted to %d.\n", depth);
1917 lpfc_send_sdev_queuedepth_change_event(phba, vport,
1918 pnode, 0xFFFFFFFF,
1919 depth+1, depth);
1920 }
1921 }
1922
1923 /*
1924 * If there is a thread waiting for command completion
1925 * wake up the thread.
1926 */
1927 spin_lock_irqsave(sdev->host->host_lock, flags);
1928 lpfc_cmd->pCmd = NULL;
1929 if (lpfc_cmd->waitq)
1930 wake_up(lpfc_cmd->waitq);
1931 spin_unlock_irqrestore(sdev->host->host_lock, flags);
1932
1933 lpfc_release_scsi_buf(phba, lpfc_cmd);
1934 }
1935
1936 /**
1937 * lpfc_fcpcmd_to_iocb - copy the fcp_cmd data into the IOCB
1938 * @data: A pointer to the immediate command data portion of the IOCB.
1939 * @fcp_cmnd: The FCP Command that is provided by the SCSI layer.
1940 *
1941 * The routine copies the entire FCP command from @fcp_cmnd to @data while
1942 * byte swapping the data to big endian format for transmission on the wire.
1943 **/
1944 static void
1945 lpfc_fcpcmd_to_iocb(uint8_t *data, struct fcp_cmnd *fcp_cmnd)
1946 {
1947 int i, j;
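	/* Copy the FCP_CMND a 32-bit word at a time, converting each word to big-endian wire order. */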
1948 for (i = 0, j = 0; i < sizeof(struct fcp_cmnd);
1949 i += sizeof(uint32_t), j++) {
1950 ((uint32_t *)data)[j] = cpu_to_be32(((uint32_t *)fcp_cmnd)[j]);
1951 }
1952 }
1953
1954 /**
1955 * lpfc_scsi_prep_cmnd - Routine to convert scsi cmnd to FCP information unit
1956 * @vport: The virtual port for which this call is being executed.
1957 * @lpfc_cmd: The scsi command which needs to be sent.
1958 * @pnode: Pointer to lpfc_nodelist.
1959 *
1960 * This routine initializes the fcp_cmnd and iocb data structures from the
1961 * scsi command to be transferred.
1962 **/
1963 static void
1964 lpfc_scsi_prep_cmnd(struct lpfc_vport *vport, struct lpfc_scsi_buf *lpfc_cmd,
1965 struct lpfc_nodelist *pnode)
1966 {
1967 struct lpfc_hba *phba = vport->phba;
1968 struct scsi_cmnd *scsi_cmnd = lpfc_cmd->pCmd;
1969 struct fcp_cmnd *fcp_cmnd = lpfc_cmd->fcp_cmnd;
1970 IOCB_t *iocb_cmd = &lpfc_cmd->cur_iocbq.iocb;
1971 struct lpfc_iocbq *piocbq = &(lpfc_cmd->cur_iocbq);
1972 int datadir = scsi_cmnd->sc_data_direction;
1973 char tag[2];
1974
1975 if (!pnode || !NLP_CHK_NODE_ACT(pnode))
1976 return;
1977
1978 lpfc_cmd->fcp_rsp->rspSnsLen = 0;
1979 /* clear task management bits */
1980 lpfc_cmd->fcp_cmnd->fcpCntl2 = 0;
1981
1982 int_to_scsilun(lpfc_cmd->pCmd->device->lun,
1983 &lpfc_cmd->fcp_cmnd->fcp_lun);
1984
1985 memcpy(&fcp_cmnd->fcpCdb[0], scsi_cmnd->cmnd, 16);
1986
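	/* Map the midlayer queue tag message onto the FCP task attribute in fcpCntl1. */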
1987 if (scsi_populate_tag_msg(scsi_cmnd, tag)) {
1988 switch (tag[0]) {
1989 case HEAD_OF_QUEUE_TAG:
1990 fcp_cmnd->fcpCntl1 = HEAD_OF_Q;
1991 break;
1992 case ORDERED_QUEUE_TAG:
1993 fcp_cmnd->fcpCntl1 = ORDERED_Q;
1994 break;
1995 default:
1996 fcp_cmnd->fcpCntl1 = SIMPLE_Q;
1997 break;
1998 }
1999 } else
2000 fcp_cmnd->fcpCntl1 = 0;
2001
2002 /*
2003 * There are three possibilities here - use scatter-gather segment, use
2004 * the single mapping, or neither. Start the lpfc command prep by
2005 * bumping the bpl beyond the fcp_cmnd and fcp_rsp regions to the first
2006 * data bde entry.
2007 */
2008 if (scsi_sg_count(scsi_cmnd)) {
2009 if (datadir == DMA_TO_DEVICE) {
2010 iocb_cmd->ulpCommand = CMD_FCP_IWRITE64_CR;
2011 iocb_cmd->un.fcpi.fcpi_parm = 0;
2012 iocb_cmd->ulpPU = 0;
2013 fcp_cmnd->fcpCntl3 = WRITE_DATA;
2014 phba->fc4OutputRequests++;
2015 } else {
2016 iocb_cmd->ulpCommand = CMD_FCP_IREAD64_CR;
2017 iocb_cmd->ulpPU = PARM_READ_CHECK;
2018 fcp_cmnd->fcpCntl3 = READ_DATA;
2019 phba->fc4InputRequests++;
2020 }
2021 } else {
2022 iocb_cmd->ulpCommand = CMD_FCP_ICMND64_CR;
2023 iocb_cmd->un.fcpi.fcpi_parm = 0;
2024 iocb_cmd->ulpPU = 0;
2025 fcp_cmnd->fcpCntl3 = 0;
2026 phba->fc4ControlRequests++;
2027 }
2028 if (phba->sli_rev == 3 &&
2029 !(phba->sli3_options & LPFC_SLI3_BG_ENABLED))
2030 lpfc_fcpcmd_to_iocb(iocb_cmd->unsli3.fcp_ext.icd, fcp_cmnd);
2031 /*
2032 * Finish initializing those IOCB fields that are independent
2033 * of the scsi_cmnd request_buffer
2034 */
2035 piocbq->iocb.ulpContext = pnode->nlp_rpi;
2036 if (pnode->nlp_fcp_info & NLP_FCP_2_DEVICE)
2037 piocbq->iocb.ulpFCP2Rcvy = 1;
2038 else
2039 piocbq->iocb.ulpFCP2Rcvy = 0;
2040
2041 piocbq->iocb.ulpClass = (pnode->nlp_fcp_info & 0x0f);
2042 piocbq->context1 = lpfc_cmd;
2043 piocbq->iocb_cmpl = lpfc_scsi_cmd_iocb_cmpl;
2044 piocbq->iocb.ulpTimeout = lpfc_cmd->timeout;
2045 piocbq->vport = vport;
2046 }
2047
2048 /**
2049 * lpfc_scsi_prep_task_mgmt_cmd - Convert scsi TM cmnd to FCP information unit
2050 * @vport: The virtual port for which this call is being executed.
2051 * @lpfc_cmd: Pointer to lpfc_scsi_buf data structure.
2052 * @lun: Logical unit number.
2053 * @task_mgmt_cmd: SCSI task management command.
2054 *
2055 * This routine creates an FCP information unit corresponding to @task_mgmt_cmd.
2056 *
2057 * Return codes:
2058 * 0 - Error
2059 * 1 - Success
2060 **/
2061 static int
2062 lpfc_scsi_prep_task_mgmt_cmd(struct lpfc_vport *vport,
2063 struct lpfc_scsi_buf *lpfc_cmd,
2064 unsigned int lun,
2065 uint8_t task_mgmt_cmd)
2066 {
2067 struct lpfc_iocbq *piocbq;
2068 IOCB_t *piocb;
2069 struct fcp_cmnd *fcp_cmnd;
2070 struct lpfc_rport_data *rdata = lpfc_cmd->rdata;
2071 struct lpfc_nodelist *ndlp = rdata->pnode;
2072
2073 if (!ndlp || !NLP_CHK_NODE_ACT(ndlp) ||
2074 ndlp->nlp_state != NLP_STE_MAPPED_NODE)
2075 return 0;
2076
2077 piocbq = &(lpfc_cmd->cur_iocbq);
2078 piocbq->vport = vport;
2079
2080 piocb = &piocbq->iocb;
2081
2082 fcp_cmnd = lpfc_cmd->fcp_cmnd;
2083 /* Clear out any old data in the FCP command area */
2084 memset(fcp_cmnd, 0, sizeof(struct fcp_cmnd));
2085 int_to_scsilun(lun, &fcp_cmnd->fcp_lun);
2086 fcp_cmnd->fcpCntl2 = task_mgmt_cmd;
2087 if (vport->phba->sli_rev == 3 &&
2088 !(vport->phba->sli3_options & LPFC_SLI3_BG_ENABLED))
2089 lpfc_fcpcmd_to_iocb(piocb->unsli3.fcp_ext.icd, fcp_cmnd);
2090 piocb->ulpCommand = CMD_FCP_ICMND64_CR;
2091 piocb->ulpContext = ndlp->nlp_rpi;
2092 if (ndlp->nlp_fcp_info & NLP_FCP_2_DEVICE) {
2093 piocb->ulpFCP2Rcvy = 1;
2094 }
2095 piocb->ulpClass = (ndlp->nlp_fcp_info & 0x0f);
2096
2097 /* ulpTimeout is only one byte */
2098 if (lpfc_cmd->timeout > 0xff) {
2099 /*
2100 * Do not timeout the command at the firmware level.
2101 * The driver will provide the timeout mechanism.
2102 */
2103 piocb->ulpTimeout = 0;
2104 } else {
2105 piocb->ulpTimeout = lpfc_cmd->timeout;
2106 }
2107
2108 return 1;
2109 }
2110
2111 /**
2112 * lpfc_tskmgmt_def_cmpl - IOCB completion routine for task management command
2113 * @phba: The Hba for which this call is being executed.
2114 * @cmdiocbq: Pointer to lpfc_iocbq data structure.
2115 * @rspiocbq: Pointer to lpfc_iocbq data structure.
2116 *
2117 * This routine is the IOCB completion routine for the device reset and target
2118 * reset routines. It releases the scsi buffer associated with lpfc_cmd.
2119 **/
2120 static void
2121 lpfc_tskmgmt_def_cmpl(struct lpfc_hba *phba,
2122 struct lpfc_iocbq *cmdiocbq,
2123 struct lpfc_iocbq *rspiocbq)
2124 {
2125 struct lpfc_scsi_buf *lpfc_cmd =
2126 (struct lpfc_scsi_buf *) cmdiocbq->context1;
2127 if (lpfc_cmd)
2128 lpfc_release_scsi_buf(phba, lpfc_cmd);
2129 return;
2130 }
2131
2132 /**
2133 * lpfc_scsi_tgt_reset - Target reset handler
2134 * @lpfc_cmd: Pointer to lpfc_scsi_buf data structure
2135 * @vport: The virtual port for which this call is being executed.
2136 * @tgt_id: Target ID.
2137 * @lun: Lun number.
2138 * @rdata: Pointer to lpfc_rport_data.
2139 *
2140 * This routine issues a TARGET RESET iocb to reset a target with @tgt_id ID.
2141 *
2142 * Return Code:
2143 * 0x2003 - Error
2144 * 0x2002 - Success.
2145 **/
2146 static int
2147 lpfc_scsi_tgt_reset(struct lpfc_scsi_buf *lpfc_cmd, struct lpfc_vport *vport,
2148 unsigned tgt_id, unsigned int lun,
2149 struct lpfc_rport_data *rdata)
2150 {
2151 struct lpfc_hba *phba = vport->phba;
2152 struct lpfc_iocbq *iocbq;
2153 struct lpfc_iocbq *iocbqrsp;
2154 int ret;
2155 int status;
2156
2157 if (!rdata->pnode || !NLP_CHK_NODE_ACT(rdata->pnode))
2158 return FAILED;
2159
2160 lpfc_cmd->rdata = rdata;
2161 status = lpfc_scsi_prep_task_mgmt_cmd(vport, lpfc_cmd, lun,
2162 FCP_TARGET_RESET);
2163 if (!status)
2164 return FAILED;
2165
2166 iocbq = &lpfc_cmd->cur_iocbq;
2167 iocbqrsp = lpfc_sli_get_iocbq(phba);
2168
2169 if (!iocbqrsp)
2170 return FAILED;
2171
2172 /* Issue Target Reset to TGT <num> */
2173 lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
2174 "0702 Issue Target Reset to TGT %d Data: x%x x%x\n",
2175 tgt_id, rdata->pnode->nlp_rpi, rdata->pnode->nlp_flag);
2176 status = lpfc_sli_issue_iocb_wait(phba,
2177 &phba->sli.ring[phba->sli.fcp_ring],
2178 iocbq, iocbqrsp, lpfc_cmd->timeout);
2179 if (status != IOCB_SUCCESS) {
2180 if (status == IOCB_TIMEDOUT) {
2181 iocbq->iocb_cmpl = lpfc_tskmgmt_def_cmpl;
2182 ret = TIMEOUT_ERROR;
2183 } else
2184 ret = FAILED;
2185 lpfc_cmd->status = IOSTAT_DRIVER_REJECT;
2186 } else {
2187 ret = SUCCESS;
2188 lpfc_cmd->result = iocbqrsp->iocb.un.ulpWord[4];
2189 lpfc_cmd->status = iocbqrsp->iocb.ulpStatus;
2190 if (lpfc_cmd->status == IOSTAT_LOCAL_REJECT &&
2191 (lpfc_cmd->result & IOERR_DRVR_MASK))
2192 lpfc_cmd->status = IOSTAT_DRIVER_REJECT;
2193 }
2194
2195 lpfc_sli_release_iocbq(phba, iocbqrsp);
2196 return ret;
2197 }
2198
2199 /**
2200 * lpfc_info - Info entry point of scsi_host_template data structure
2201 * @host: The scsi host for which this call is being executed.
2202 *
2203 * This routine provides module information about the hba.
2204 *
2205 * Return code:
2206 * Pointer to char - Success.
2207 **/
2208 const char *
2209 lpfc_info(struct Scsi_Host *host)
2210 {
2211 struct lpfc_vport *vport = (struct lpfc_vport *) host->hostdata;
2212 struct lpfc_hba *phba = vport->phba;
2213 int len;
2214 static char lpfcinfobuf[384];
2215
2216 	memset(lpfcinfobuf, 0, 384);
2217 	if (phba && phba->pcidev) {
2218 strncpy(lpfcinfobuf, phba->ModelDesc, 256);
2219 len = strlen(lpfcinfobuf);
2220 snprintf(lpfcinfobuf + len,
2221 384-len,
2222 " on PCI bus %02x device %02x irq %d",
2223 phba->pcidev->bus->number,
2224 phba->pcidev->devfn,
2225 phba->pcidev->irq);
2226 len = strlen(lpfcinfobuf);
2227 if (phba->Port[0]) {
2228 snprintf(lpfcinfobuf + len,
2229 384-len,
2230 " port %s",
2231 phba->Port);
2232 }
2233 }
2234 return lpfcinfobuf;
2235 }
2236
2237 /**
2238 * lpfc_poll_rearm_timer - Routine to modify fcp_poll timer of hba
2239 * @phba: The Hba for which this call is being executed.
2240 *
2241 * This routine modifies fcp_poll_timer field of @phba by cfg_poll_tmo.
2242 * The default value of cfg_poll_tmo is 10 milliseconds.
2243 **/
2244 static __inline__ void lpfc_poll_rearm_timer(struct lpfc_hba * phba)
2245 {
2246 unsigned long poll_tmo_expires =
2247 (jiffies + msecs_to_jiffies(phba->cfg_poll_tmo));
2248
2249 if (phba->sli.ring[LPFC_FCP_RING].txcmplq_cnt)
2250 mod_timer(&phba->fcp_poll_timer,
2251 poll_tmo_expires);
2252 }
2253
2254 /**
2255 * lpfc_poll_start_timer - Routine to start fcp_poll_timer of HBA
2256 * @phba: The Hba for which this call is being executed.
2257 *
2258 * This routine starts the fcp_poll_timer of @phba.
2259 **/
2260 void lpfc_poll_start_timer(struct lpfc_hba * phba)
2261 {
2262 lpfc_poll_rearm_timer(phba);
2263 }
2264
2265 /**
2266 * lpfc_poll_timeout - Restart polling timer
2267 * @ptr: Pointer to the lpfc_hba data structure, passed as an unsigned long.
2268 *
2269 * This routine restarts the fcp_poll timer when FCP ring polling is enabled
2270 * and the FCP ring interrupt is disabled.
2271 **/
2272
2273 void lpfc_poll_timeout(unsigned long ptr)
2274 {
2275 struct lpfc_hba *phba = (struct lpfc_hba *) ptr;
2276
2277 if (phba->cfg_poll & ENABLE_FCP_RING_POLLING) {
2278 lpfc_sli_poll_fcp_ring (phba);
2279 if (phba->cfg_poll & DISABLE_FCP_RING_INT)
2280 lpfc_poll_rearm_timer(phba);
2281 }
2282 }
2283
2284 /**
2285 * lpfc_queuecommand - scsi_host_template queuecommand entry point
2286 * @cmnd: Pointer to scsi_cmnd data structure.
2287 * @done: Pointer to done routine.
2288 *
2289 * The driver registers this routine with the scsi midlayer to submit @cmnd.
2290 * This routine prepares an IOCB from the scsi command and provides it to the
2291 * firmware. The @done callback is invoked when the driver finishes processing.
2292 *
2293 * Return value :
2294 * 0 - Success
2295 * SCSI_MLQUEUE_HOST_BUSY - Block all devices served by this host temporarily.
2296 **/
2297 static int
2298 lpfc_queuecommand(struct scsi_cmnd *cmnd, void (*done) (struct scsi_cmnd *))
2299 {
2300 struct Scsi_Host *shost = cmnd->device->host;
2301 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
2302 struct lpfc_hba *phba = vport->phba;
2303 struct lpfc_sli *psli = &phba->sli;
2304 struct lpfc_rport_data *rdata = cmnd->device->hostdata;
2305 struct lpfc_nodelist *ndlp = rdata->pnode;
2306 struct lpfc_scsi_buf *lpfc_cmd;
2307 struct fc_rport *rport = starget_to_rport(scsi_target(cmnd->device));
2308 int err;
2309
2310 err = fc_remote_port_chkready(rport);
2311 if (err) {
2312 cmnd->result = err;
2313 goto out_fail_command;
2314 }
2315
2316 if (!(phba->sli3_options & LPFC_SLI3_BG_ENABLED) &&
2317 scsi_get_prot_op(cmnd) != SCSI_PROT_NORMAL) {
2318
2319 printk(KERN_ERR "BLKGRD ERROR: rcvd protected cmd:%02x op:%02x "
2320 "str=%s without registering for BlockGuard - "
2321 "Rejecting command\n",
2322 cmnd->cmnd[0], scsi_get_prot_op(cmnd),
2323 dif_op_str[scsi_get_prot_op(cmnd)]);
2324 goto out_fail_command;
2325 }
2326
2327 /*
2328 * Catch race where our node has transitioned, but the
2329 * transport is still transitioning.
2330 */
2331 if (!ndlp || !NLP_CHK_NODE_ACT(ndlp)) {
2332 cmnd->result = ScsiResult(DID_TRANSPORT_DISRUPTED, 0);
2333 goto out_fail_command;
2334 }
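	/*
	 * If completion-time throttling is enabled and this node already has
	 * cmd_qdepth commands outstanding, push back on the midlayer.
	 */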
2335 if (vport->cfg_max_scsicmpl_time &&
2336 (atomic_read(&ndlp->cmd_pending) >= ndlp->cmd_qdepth))
2337 goto out_host_busy;
2338
2339 lpfc_cmd = lpfc_get_scsi_buf(phba);
2340 if (lpfc_cmd == NULL) {
2341 lpfc_rampdown_queue_depth(phba);
2342
2343 lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
2344 "0707 driver's buffer pool is empty, "
2345 "IO busied\n");
2346 goto out_host_busy;
2347 }
2348
2349 /*
2350 * Store the midlayer's command structure for the completion phase
2351 * and complete the command initialization.
2352 */
2353 lpfc_cmd->pCmd = cmnd;
2354 lpfc_cmd->rdata = rdata;
2355 lpfc_cmd->timeout = 0;
2356 lpfc_cmd->start_time = jiffies;
2357 cmnd->host_scribble = (unsigned char *)lpfc_cmd;
2358 cmnd->scsi_done = done;
2359
2360 if (scsi_get_prot_op(cmnd) != SCSI_PROT_NORMAL) {
2361 lpfc_printf_vlog(vport, KERN_WARNING, LOG_BG,
2362 "9033 BLKGRD: rcvd protected cmd:%02x op:%02x "
2363 "str=%s\n",
2364 cmnd->cmnd[0], scsi_get_prot_op(cmnd),
2365 dif_op_str[scsi_get_prot_op(cmnd)]);
2366 lpfc_printf_vlog(vport, KERN_WARNING, LOG_BG,
2367 "9034 BLKGRD: CDB: %02x %02x %02x %02x %02x "
2368 "%02x %02x %02x %02x %02x \n",
2369 cmnd->cmnd[0], cmnd->cmnd[1], cmnd->cmnd[2],
2370 cmnd->cmnd[3], cmnd->cmnd[4], cmnd->cmnd[5],
2371 cmnd->cmnd[6], cmnd->cmnd[7], cmnd->cmnd[8],
2372 cmnd->cmnd[9]);
2373 if (cmnd->cmnd[0] == READ_10)
2374 lpfc_printf_vlog(vport, KERN_WARNING, LOG_BG,
2375 "9035 BLKGRD: READ @ sector %llu, "
2376 "count %lu\n",
2377 (unsigned long long)scsi_get_lba(cmnd),
2378 cmnd->request->nr_sectors);
2379 else if (cmnd->cmnd[0] == WRITE_10)
2380 lpfc_printf_vlog(vport, KERN_WARNING, LOG_BG,
2381 "9036 BLKGRD: WRITE @ sector %llu, "
2382 "count %lu cmd=%p\n",
2383 (unsigned long long)scsi_get_lba(cmnd),
2384 cmnd->request->nr_sectors,
2385 cmnd);
2386
2387 err = lpfc_bg_scsi_prep_dma_buf(phba, lpfc_cmd);
2388 } else {
2389 lpfc_printf_vlog(vport, KERN_WARNING, LOG_BG,
2390 "9038 BLKGRD: rcvd unprotected cmd:%02x op:%02x"
2391 " str=%s\n",
2392 cmnd->cmnd[0], scsi_get_prot_op(cmnd),
2393 dif_op_str[scsi_get_prot_op(cmnd)]);
2394 lpfc_printf_vlog(vport, KERN_WARNING, LOG_BG,
2395 "9039 BLKGRD: CDB: %02x %02x %02x %02x %02x "
2396 "%02x %02x %02x %02x %02x \n",
2397 cmnd->cmnd[0], cmnd->cmnd[1], cmnd->cmnd[2],
2398 cmnd->cmnd[3], cmnd->cmnd[4], cmnd->cmnd[5],
2399 cmnd->cmnd[6], cmnd->cmnd[7], cmnd->cmnd[8],
2400 cmnd->cmnd[9]);
2401 if (cmnd->cmnd[0] == READ_10)
2402 lpfc_printf_vlog(vport, KERN_WARNING, LOG_BG,
2403 "9040 dbg: READ @ sector %llu, "
2404 "count %lu\n",
2405 (unsigned long long)scsi_get_lba(cmnd),
2406 cmnd->request->nr_sectors);
2407 else if (cmnd->cmnd[0] == WRITE_10)
2408 lpfc_printf_vlog(vport, KERN_WARNING, LOG_BG,
2409 "9041 dbg: WRITE @ sector %llu, "
2410 "count %lu cmd=%p\n",
2411 (unsigned long long)scsi_get_lba(cmnd),
2412 cmnd->request->nr_sectors, cmnd);
2413 else
2414 lpfc_printf_vlog(vport, KERN_WARNING, LOG_BG,
2415 "9042 dbg: parser not implemented\n");
2416 err = lpfc_scsi_prep_dma_buf(phba, lpfc_cmd);
2417 }
2418
2419 if (err)
2420 goto out_host_busy_free_buf;
2421
2422 lpfc_scsi_prep_cmnd(vport, lpfc_cmd, ndlp);
2423
2424 atomic_inc(&ndlp->cmd_pending);
2425 err = lpfc_sli_issue_iocb(phba, &phba->sli.ring[psli->fcp_ring],
2426 &lpfc_cmd->cur_iocbq, SLI_IOCB_RET_IOCB);
2427 if (err) {
2428 atomic_dec(&ndlp->cmd_pending);
2429 goto out_host_busy_free_buf;
2430 }
2431 if (phba->cfg_poll & ENABLE_FCP_RING_POLLING) {
2432 lpfc_sli_poll_fcp_ring(phba);
2433 if (phba->cfg_poll & DISABLE_FCP_RING_INT)
2434 lpfc_poll_rearm_timer(phba);
2435 }
2436
2437 return 0;
2438
2439 out_host_busy_free_buf:
2440 lpfc_scsi_unprep_dma_buf(phba, lpfc_cmd);
2441 lpfc_release_scsi_buf(phba, lpfc_cmd);
2442 out_host_busy:
2443 return SCSI_MLQUEUE_HOST_BUSY;
2444
2445 out_fail_command:
2446 done(cmnd);
2447 return 0;
2448 }
2449
2450 /**
2451 * lpfc_block_error_handler - Routine to block error handler
2452 * @cmnd: Pointer to scsi_cmnd data structure.
2453 *
2454 * This routine blocks execution while the fc_rport state is FC_PORTSTATE_BLOCKED.
2455 **/
2456 static void
2457 lpfc_block_error_handler(struct scsi_cmnd *cmnd)
2458 {
2459 struct Scsi_Host *shost = cmnd->device->host;
2460 struct fc_rport *rport = starget_to_rport(scsi_target(cmnd->device));
2461
2462 spin_lock_irq(shost->host_lock);
2463 while (rport->port_state == FC_PORTSTATE_BLOCKED) {
2464 spin_unlock_irq(shost->host_lock);
2465 msleep(1000);
2466 spin_lock_irq(shost->host_lock);
2467 }
2468 spin_unlock_irq(shost->host_lock);
2469 return;
2470 }
2471
2472 /**
2473 * lpfc_abort_handler - scsi_host_template eh_abort_handler entry point
2474 * @cmnd: Pointer to scsi_cmnd data structure.
2475 *
2476 * This routine aborts a @cmnd that is pending in the base driver.
2477 *
2478 * Return code :
2479 * 0x2003 - Error
2480 * 0x2002 - Success
2481 **/
2482 static int
2483 lpfc_abort_handler(struct scsi_cmnd *cmnd)
2484 {
2485 struct Scsi_Host *shost = cmnd->device->host;
2486 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
2487 struct lpfc_hba *phba = vport->phba;
2488 struct lpfc_sli_ring *pring = &phba->sli.ring[phba->sli.fcp_ring];
2489 struct lpfc_iocbq *iocb;
2490 struct lpfc_iocbq *abtsiocb;
2491 struct lpfc_scsi_buf *lpfc_cmd;
2492 IOCB_t *cmd, *icmd;
2493 int ret = SUCCESS;
2494 DECLARE_WAIT_QUEUE_HEAD_ONSTACK(waitq);
2495
2496 lpfc_block_error_handler(cmnd);
2497 lpfc_cmd = (struct lpfc_scsi_buf *)cmnd->host_scribble;
2498 BUG_ON(!lpfc_cmd);
2499
2500 /*
2501 * If pCmd field of the corresponding lpfc_scsi_buf structure
2502 * points to a different SCSI command, then the driver has
2503 * already completed this command, but the midlayer did not
2504 * see the completion before the eh fired. Just return
2505 * SUCCESS.
2506 */
2507 iocb = &lpfc_cmd->cur_iocbq;
2508 if (lpfc_cmd->pCmd != cmnd)
2509 goto out;
2510
2511 BUG_ON(iocb->context1 != lpfc_cmd);
2512
2513 abtsiocb = lpfc_sli_get_iocbq(phba);
2514 if (abtsiocb == NULL) {
2515 ret = FAILED;
2516 goto out;
2517 }
2518
2519 	/*
2520 	 * The scsi command cannot be in the txq; it must be in flight because
2521 	 * pCmd is still pointing at the SCSI command we have to abort. There
2522 	 * is no need to search the txcmplq. Just send an abort to the FW.
2523 	 */
2524
2525 cmd = &iocb->iocb;
2526 icmd = &abtsiocb->iocb;
2527 icmd->un.acxri.abortType = ABORT_TYPE_ABTS;
2528 icmd->un.acxri.abortContextTag = cmd->ulpContext;
2529 icmd->un.acxri.abortIoTag = cmd->ulpIoTag;
2530
2531 icmd->ulpLe = 1;
2532 icmd->ulpClass = cmd->ulpClass;
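	/* If the link is down an ABTS cannot be sent, so just close the exchange instead. */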
2533 if (lpfc_is_link_up(phba))
2534 icmd->ulpCommand = CMD_ABORT_XRI_CN;
2535 else
2536 icmd->ulpCommand = CMD_CLOSE_XRI_CN;
2537
2538 abtsiocb->iocb_cmpl = lpfc_sli_abort_fcp_cmpl;
2539 abtsiocb->vport = vport;
2540 if (lpfc_sli_issue_iocb(phba, pring, abtsiocb, 0) == IOCB_ERROR) {
2541 lpfc_sli_release_iocbq(phba, abtsiocb);
2542 ret = FAILED;
2543 goto out;
2544 }
2545
2546 if (phba->cfg_poll & DISABLE_FCP_RING_INT)
2547 lpfc_sli_poll_fcp_ring (phba);
2548
2549 lpfc_cmd->waitq = &waitq;
2550 /* Wait for abort to complete */
2551 wait_event_timeout(waitq,
2552 (lpfc_cmd->pCmd != cmnd),
2553 (2*vport->cfg_devloss_tmo*HZ));
2554
2555 spin_lock_irq(shost->host_lock);
2556 lpfc_cmd->waitq = NULL;
2557 spin_unlock_irq(shost->host_lock);
2558
2559 if (lpfc_cmd->pCmd == cmnd) {
2560 ret = FAILED;
2561 lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
2562 "0748 abort handler timed out waiting "
2563 "for abort to complete: ret %#x, ID %d, "
2564 "LUN %d, snum %#lx\n",
2565 ret, cmnd->device->id, cmnd->device->lun,
2566 cmnd->serial_number);
2567 }
2568
2569 out:
2570 lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP,
2571 "0749 SCSI Layer I/O Abort Request Status x%x ID %d "
2572 "LUN %d snum %#lx\n", ret, cmnd->device->id,
2573 cmnd->device->lun, cmnd->serial_number);
2574 return ret;
2575 }
2576
2577 /**
2578 * lpfc_device_reset_handler - scsi_host_template eh_device_reset entry point
2579 * @cmnd: Pointer to scsi_cmnd data structure.
2580 *
2581 * This routine does a device reset by sending a TARGET_RESET task management
2582 * command.
2583 *
2584 * Return code :
2585 * 0x2003 - Error
2586 * 0x2002 - Success
2587 **/
2588 static int
2589 lpfc_device_reset_handler(struct scsi_cmnd *cmnd)
2590 {
2591 struct Scsi_Host *shost = cmnd->device->host;
2592 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
2593 struct lpfc_hba *phba = vport->phba;
2594 struct lpfc_scsi_buf *lpfc_cmd;
2595 struct lpfc_iocbq *iocbq, *iocbqrsp;
2596 struct lpfc_rport_data *rdata = cmnd->device->hostdata;
2597 struct lpfc_nodelist *pnode = rdata->pnode;
2598 unsigned long later;
2599 int ret = SUCCESS;
2600 int status;
2601 int cnt;
2602 struct lpfc_scsi_event_header scsi_event;
2603
2604 lpfc_block_error_handler(cmnd);
2605 /*
2606 * If target is not in a MAPPED state, delay the reset until
2607 * target is rediscovered or devloss timeout expires.
2608 */
2609 later = msecs_to_jiffies(2 * vport->cfg_devloss_tmo * 1000) + jiffies;
2610 while (time_after(later, jiffies)) {
2611 if (!pnode || !NLP_CHK_NODE_ACT(pnode))
2612 return FAILED;
2613 if (pnode->nlp_state == NLP_STE_MAPPED_NODE)
2614 break;
2615 schedule_timeout_uninterruptible(msecs_to_jiffies(500));
2616 rdata = cmnd->device->hostdata;
2617 if (!rdata)
2618 break;
2619 pnode = rdata->pnode;
2620 }
2621
2622 scsi_event.event_type = FC_REG_SCSI_EVENT;
2623 scsi_event.subcategory = LPFC_EVENT_TGTRESET;
2624 scsi_event.lun = 0;
2625 memcpy(scsi_event.wwpn, &pnode->nlp_portname, sizeof(struct lpfc_name));
2626 memcpy(scsi_event.wwnn, &pnode->nlp_nodename, sizeof(struct lpfc_name));
2627
2628 fc_host_post_vendor_event(shost,
2629 fc_get_event_number(),
2630 sizeof(scsi_event),
2631 (char *)&scsi_event,
2632 LPFC_NL_VENDOR_ID);
2633
2634 if (!rdata || pnode->nlp_state != NLP_STE_MAPPED_NODE) {
2635 lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
2636 "0721 LUN Reset rport "
2637 "failure: msec x%x rdata x%p\n",
2638 jiffies_to_msecs(jiffies - later), rdata);
2639 return FAILED;
2640 }
2641 lpfc_cmd = lpfc_get_scsi_buf(phba);
2642 if (lpfc_cmd == NULL)
2643 return FAILED;
2644 lpfc_cmd->timeout = 60;
2645 lpfc_cmd->rdata = rdata;
2646
2647 status = lpfc_scsi_prep_task_mgmt_cmd(vport, lpfc_cmd,
2648 cmnd->device->lun,
2649 FCP_TARGET_RESET);
2650 if (!status) {
2651 lpfc_release_scsi_buf(phba, lpfc_cmd);
2652 return FAILED;
2653 }
2654 iocbq = &lpfc_cmd->cur_iocbq;
2655
2656 /* get a buffer for this IOCB command response */
2657 iocbqrsp = lpfc_sli_get_iocbq(phba);
2658 if (iocbqrsp == NULL) {
2659 lpfc_release_scsi_buf(phba, lpfc_cmd);
2660 return FAILED;
2661 }
2662 lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
2663 "0703 Issue target reset to TGT %d LUN %d "
2664 "rpi x%x nlp_flag x%x\n", cmnd->device->id,
2665 cmnd->device->lun, pnode->nlp_rpi, pnode->nlp_flag);
2666 status = lpfc_sli_issue_iocb_wait(phba,
2667 &phba->sli.ring[phba->sli.fcp_ring],
2668 iocbq, iocbqrsp, lpfc_cmd->timeout);
2669 if (status == IOCB_TIMEDOUT) {
2670 iocbq->iocb_cmpl = lpfc_tskmgmt_def_cmpl;
2671 ret = TIMEOUT_ERROR;
2672 } else {
2673 if (status != IOCB_SUCCESS)
2674 ret = FAILED;
2675 lpfc_release_scsi_buf(phba, lpfc_cmd);
2676 }
2677 lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
2678 "0713 SCSI layer issued device reset (%d, %d) "
2679 "return x%x status x%x result x%x\n",
2680 cmnd->device->id, cmnd->device->lun, ret,
2681 iocbqrsp->iocb.ulpStatus,
2682 iocbqrsp->iocb.un.ulpWord[4]);
2683 lpfc_sli_release_iocbq(phba, iocbqrsp);
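	/*
	 * Flush any I/O still outstanding to this target and wait up to
	 * 2 * devloss_tmo for it to drain before declaring the reset done.
	 */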
2684 cnt = lpfc_sli_sum_iocb(vport, cmnd->device->id, cmnd->device->lun,
2685 LPFC_CTX_TGT);
2686 if (cnt)
2687 lpfc_sli_abort_iocb(vport, &phba->sli.ring[phba->sli.fcp_ring],
2688 cmnd->device->id, cmnd->device->lun,
2689 LPFC_CTX_TGT);
2690 later = msecs_to_jiffies(2 * vport->cfg_devloss_tmo * 1000) + jiffies;
2691 while (time_after(later, jiffies) && cnt) {
2692 schedule_timeout_uninterruptible(msecs_to_jiffies(20));
2693 cnt = lpfc_sli_sum_iocb(vport, cmnd->device->id,
2694 cmnd->device->lun, LPFC_CTX_TGT);
2695 }
2696 if (cnt) {
2697 lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
2698 "0719 device reset I/O flush failure: "
2699 "cnt x%x\n", cnt);
2700 ret = FAILED;
2701 }
2702 return ret;
2703 }
2704
2705 /**
2706 * lpfc_bus_reset_handler - scsi_host_template eh_bus_reset_handler entry point
2707 * @cmnd: Pointer to scsi_cmnd data structure.
2708 *
2709 * This routine issues a target reset to every target on @cmnd->device->host.
2710 *
2711 * Return Code:
2712 * 0x2003 - Error
2713 * 0x2002 - Success
2714 **/
2715 static int
2716 lpfc_bus_reset_handler(struct scsi_cmnd *cmnd)
2717 {
2718 struct Scsi_Host *shost = cmnd->device->host;
2719 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
2720 struct lpfc_hba *phba = vport->phba;
2721 struct lpfc_nodelist *ndlp = NULL;
2722 int match;
2723 int ret = SUCCESS, status = SUCCESS, i;
2724 int cnt;
2725 struct lpfc_scsi_buf * lpfc_cmd;
2726 unsigned long later;
2727 struct lpfc_scsi_event_header scsi_event;
2728
2729 scsi_event.event_type = FC_REG_SCSI_EVENT;
2730 scsi_event.subcategory = LPFC_EVENT_BUSRESET;
2731 scsi_event.lun = 0;
2732 memcpy(scsi_event.wwpn, &vport->fc_portname, sizeof(struct lpfc_name));
2733 memcpy(scsi_event.wwnn, &vport->fc_nodename, sizeof(struct lpfc_name));
2734
2735 fc_host_post_vendor_event(shost,
2736 fc_get_event_number(),
2737 sizeof(scsi_event),
2738 (char *)&scsi_event,
2739 LPFC_NL_VENDOR_ID);
2740
2741 lpfc_block_error_handler(cmnd);
2742 /*
2743 * Since the driver manages a single bus device, reset all
2744 * targets known to the driver. Should any target reset
2745 * fail, this routine returns failure to the midlayer.
2746 */
2747 for (i = 0; i < LPFC_MAX_TARGET; i++) {
2748 /* Search for mapped node by target ID */
2749 match = 0;
2750 spin_lock_irq(shost->host_lock);
2751 list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) {
2752 if (!NLP_CHK_NODE_ACT(ndlp))
2753 continue;
2754 if (ndlp->nlp_state == NLP_STE_MAPPED_NODE &&
2755 ndlp->nlp_sid == i &&
2756 ndlp->rport) {
2757 match = 1;
2758 break;
2759 }
2760 }
2761 spin_unlock_irq(shost->host_lock);
2762 if (!match)
2763 continue;
2764 lpfc_cmd = lpfc_get_scsi_buf(phba);
2765 if (lpfc_cmd) {
2766 lpfc_cmd->timeout = 60;
2767 status = lpfc_scsi_tgt_reset(lpfc_cmd, vport, i,
2768 cmnd->device->lun,
2769 ndlp->rport->dd_data);
2770 if (status != TIMEOUT_ERROR)
2771 lpfc_release_scsi_buf(phba, lpfc_cmd);
2772 }
2773 if (!lpfc_cmd || status != SUCCESS) {
2774 lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
2775 "0700 Bus Reset on target %d failed\n",
2776 i);
2777 ret = FAILED;
2778 }
2779 }
2780 /*
2781 * All outstanding txcmplq I/Os should have been aborted by
2782 * the targets. Unfortunately, some targets do not abide by
2783 * this forcing the driver to double check.
2784 */
2785 cnt = lpfc_sli_sum_iocb(vport, 0, 0, LPFC_CTX_HOST);
2786 if (cnt)
2787 lpfc_sli_abort_iocb(vport, &phba->sli.ring[phba->sli.fcp_ring],
2788 0, 0, LPFC_CTX_HOST);
2789 later = msecs_to_jiffies(2 * vport->cfg_devloss_tmo * 1000) + jiffies;
2790 while (time_after(later, jiffies) && cnt) {
2791 schedule_timeout_uninterruptible(msecs_to_jiffies(20));
2792 cnt = lpfc_sli_sum_iocb(vport, 0, 0, LPFC_CTX_HOST);
2793 }
2794 if (cnt) {
2795 lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
2796 "0715 Bus Reset I/O flush failure: "
2797 "cnt x%x left x%x\n", cnt, i);
2798 ret = FAILED;
2799 }
2800 lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
2801 "0714 SCSI layer issued Bus Reset Data: x%x\n", ret);
2802 return ret;
2803 }
2804
2805 /**
2806 * lpfc_slave_alloc - scsi_host_template slave_alloc entry point
2807 * @sdev: Pointer to scsi_device.
2808 *
2809 * This routine populates lun_queue_depth + 2 scsi_bufs into this host's
2810 * globally available list of scsi buffers. It also makes sure no more scsi
2811 * buffers are allocated than the HBA limit conveyed to the midlayer. This
2812 * list of scsi buffers exists for the lifetime of the driver.
2813 *
2814 * Return codes:
2815 * non-0 - Error
2816 * 0 - Success
2817 **/
2818 static int
2819 lpfc_slave_alloc(struct scsi_device *sdev)
2820 {
2821 struct lpfc_vport *vport = (struct lpfc_vport *) sdev->host->hostdata;
2822 struct lpfc_hba *phba = vport->phba;
2823 struct lpfc_scsi_buf *scsi_buf = NULL;
2824 struct fc_rport *rport = starget_to_rport(scsi_target(sdev));
2825 uint32_t total = 0, i;
2826 uint32_t num_to_alloc = 0;
2827 unsigned long flags;
2828
2829 if (!rport || fc_remote_port_chkready(rport))
2830 return -ENXIO;
2831
2832 sdev->hostdata = rport->dd_data;
2833
2834 /*
2835 * Populate the cmds_per_lun count scsi_bufs into this host's globally
2836 * available list of scsi buffers. Don't allocate more than the
2837 * HBA limit conveyed to the midlayer via the host structure. The
2838 * formula accounts for the lun_queue_depth + error handlers + 1
2839 * extra. This list of scsi bufs exists for the lifetime of the driver.
2840 */
2841 total = phba->total_scsi_bufs;
2842 num_to_alloc = vport->cfg_lun_queue_depth + 2;
2843
2844 /* Allow some exchanges to be available always to complete discovery */
2845 if (total >= phba->cfg_hba_queue_depth - LPFC_DISC_IOCB_BUFF_COUNT ) {
2846 lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP,
2847 "0704 At limitation of %d preallocated "
2848 "command buffers\n", total);
2849 return 0;
2850 /* Allow some exchanges to be available always to complete discovery */
2851 } else if (total + num_to_alloc >
2852 phba->cfg_hba_queue_depth - LPFC_DISC_IOCB_BUFF_COUNT ) {
2853 lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP,
2854 "0705 Allocation request of %d "
2855 "command buffers will exceed max of %d. "
2856 "Reducing allocation request to %d.\n",
2857 num_to_alloc, phba->cfg_hba_queue_depth,
2858 (phba->cfg_hba_queue_depth - total));
2859 num_to_alloc = phba->cfg_hba_queue_depth - total;
2860 }
2861
2862 for (i = 0; i < num_to_alloc; i++) {
2863 scsi_buf = lpfc_new_scsi_buf(vport);
2864 if (!scsi_buf) {
2865 lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
2866 "0706 Failed to allocate "
2867 "command buffer\n");
2868 break;
2869 }
2870
2871 spin_lock_irqsave(&phba->scsi_buf_list_lock, flags);
2872 phba->total_scsi_bufs++;
2873 list_add_tail(&scsi_buf->list, &phba->lpfc_scsi_buf_list);
2874 spin_unlock_irqrestore(&phba->scsi_buf_list_lock, flags);
2875 }
2876 return 0;
2877 }
2878
2879 /**
2880 * lpfc_slave_configure - scsi_host_template slave_configure entry point
2881 * @sdev: Pointer to scsi_device.
2882 *
2883 * This routine configures the following items
2884 * - Tag command queuing support for @sdev if supported.
2885 * - Dev loss time out value of fc_rport.
2886 * - Enable SLI polling for fcp ring if ENABLE_FCP_RING_POLLING flag is set.
2887 *
2888 * Return codes:
2889 * 0 - Success
2890 **/
2891 static int
2892 lpfc_slave_configure(struct scsi_device *sdev)
2893 {
2894 struct lpfc_vport *vport = (struct lpfc_vport *) sdev->host->hostdata;
2895 struct lpfc_hba *phba = vport->phba;
2896 struct fc_rport *rport = starget_to_rport(sdev->sdev_target);
2897
2898 if (sdev->tagged_supported)
2899 scsi_activate_tcq(sdev, vport->cfg_lun_queue_depth);
2900 else
2901 scsi_deactivate_tcq(sdev, vport->cfg_lun_queue_depth);
2902
2903 /*
2904 * Initialize the fc transport attributes for the target
2905 * containing this scsi device. Also note that the driver's
2906 * target pointer is stored in the starget_data for the
2907 * driver's sysfs entry point functions.
2908 */
2909 rport->dev_loss_tmo = vport->cfg_devloss_tmo;
2910
2911 if (phba->cfg_poll & ENABLE_FCP_RING_POLLING) {
2912 lpfc_sli_poll_fcp_ring(phba);
2913 if (phba->cfg_poll & DISABLE_FCP_RING_INT)
2914 lpfc_poll_rearm_timer(phba);
2915 }
2916
2917 return 0;
2918 }
2919
2920 /**
2921 * lpfc_slave_destroy - slave_destroy entry point of SHT data structure
2922 * @sdev: Pointer to scsi_device.
2923 *
2924 * This routine sets the @sdev hostdata field to NULL.
2925 **/
2926 static void
2927 lpfc_slave_destroy(struct scsi_device *sdev)
2928 {
2929 sdev->hostdata = NULL;
2930 return;
2931 }
2932
2933
2934 struct scsi_host_template lpfc_template = {
2935 .module = THIS_MODULE,
2936 .name = LPFC_DRIVER_NAME,
2937 .info = lpfc_info,
2938 .queuecommand = lpfc_queuecommand,
2939 .eh_abort_handler = lpfc_abort_handler,
2940 .eh_device_reset_handler= lpfc_device_reset_handler,
2941 .eh_bus_reset_handler = lpfc_bus_reset_handler,
2942 .slave_alloc = lpfc_slave_alloc,
2943 .slave_configure = lpfc_slave_configure,
2944 .slave_destroy = lpfc_slave_destroy,
2945 .scan_finished = lpfc_scan_finished,
2946 .this_id = -1,
2947 .sg_tablesize = LPFC_DEFAULT_SG_SEG_CNT,
2948 .cmd_per_lun = LPFC_CMD_PER_LUN,
2949 .use_clustering = ENABLE_CLUSTERING,
2950 .shost_attrs = lpfc_hba_attrs,
2951 .max_sectors = 0xFFFF,
2952 };
2953
2954 struct scsi_host_template lpfc_vport_template = {
2955 .module = THIS_MODULE,
2956 .name = LPFC_DRIVER_NAME,
2957 .info = lpfc_info,
2958 .queuecommand = lpfc_queuecommand,
2959 .eh_abort_handler = lpfc_abort_handler,
2960 .eh_device_reset_handler= lpfc_device_reset_handler,
2961 .eh_bus_reset_handler = lpfc_bus_reset_handler,
2962 .slave_alloc = lpfc_slave_alloc,
2963 .slave_configure = lpfc_slave_configure,
2964 .slave_destroy = lpfc_slave_destroy,
2965 .scan_finished = lpfc_scan_finished,
2966 .this_id = -1,
2967 .sg_tablesize = LPFC_DEFAULT_SG_SEG_CNT,
2968 .cmd_per_lun = LPFC_CMD_PER_LUN,
2969 .use_clustering = ENABLE_CLUSTERING,
2970 .shost_attrs = lpfc_vport_attrs,
2971 .max_sectors = 0xFFFF,
2972 };