/*******************************************************************
 * This file is part of the Emulex Linux Device Driver for         *
 * Fibre Channel Host Bus Adapters.                                *
 * Copyright (C) 2017 Broadcom. All Rights Reserved. The term      *
 * "Broadcom" refers to Broadcom Limited and/or its subsidiaries.  *
 * Copyright (C) 2004-2016 Emulex.  All rights reserved.           *
 * EMULEX and SLI are trademarks of Emulex.                        *
 * www.broadcom.com                                                *
 * Portions Copyright (C) 2004-2005 Christoph Hellwig              *
 *                                                                 *
 * This program is free software; you can redistribute it and/or   *
 * modify it under the terms of version 2 of the GNU General       *
 * Public License as published by the Free Software Foundation.    *
 * This program is distributed in the hope that it will be useful. *
 * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND          *
 * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY,  *
 * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE      *
 * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
 * TO BE LEGALLY INVALID.  See the GNU General Public License for  *
 * more details, a copy of which can be found in the file COPYING  *
 * included with this package.                                     *
 *******************************************************************/
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/export.h>
#include <linux/delay.h>
#include <asm/unaligned.h>
#include <linux/crc-t10dif.h>
#include <net/checksum.h>

#include <scsi/scsi.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_eh.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_tcq.h>
#include <scsi/scsi_transport_fc.h>
#include "lpfc_version.h"
#include "lpfc_hw4.h"
#include "lpfc_hw.h"
#include "lpfc_sli.h"
#include "lpfc_sli4.h"
#include "lpfc_nl.h"
#include "lpfc_disc.h"
#include "lpfc.h"
#include "lpfc_scsi.h"
#include "lpfc_logmsg.h"
#include "lpfc_crtn.h"
#include "lpfc_vport.h"
#define LPFC_RESET_WAIT  2
#define LPFC_ABORT_WAIT  2

int _dump_buf_done = 1;
static char *dif_op_str[] = {
	"PROT_NORMAL",
	"PROT_READ_INSERT",
	"PROT_WRITE_STRIP",
	"PROT_READ_STRIP",
	"PROT_WRITE_INSERT",
	"PROT_READ_PASS",
	"PROT_WRITE_PASS",
};
struct scsi_dif_tuple {
	__be16 guard_tag;	/* Checksum */
	__be16 app_tag;		/* Opaque storage */
	__be32 ref_tag;		/* Target LBA or indirect LBA */
};
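/*
 * Editor's illustration (not part of the original source): one 8-byte
 * tuple rides with each logical block, so a 4 KB transfer of 512-byte
 * sectors carries 8 tuples (64 bytes) of protection data. All fields
 * are big-endian on the wire, hence the __be16/__be32 types and the
 * be32_to_cpu()/cpu_to_be32() conversions used by the error-injection
 * code further down in this file.
 */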
static struct lpfc_rport_data *
lpfc_rport_data_from_scsi_device(struct scsi_device *sdev)
{
	struct lpfc_vport *vport = (struct lpfc_vport *)sdev->host->hostdata;

	if (vport->phba->cfg_fof)
		return ((struct lpfc_device_data *)sdev->hostdata)->rport_data;
	else
		return (struct lpfc_rport_data *)sdev->hostdata;
}
static void
lpfc_release_scsi_buf_s4(struct lpfc_hba *phba, struct lpfc_scsi_buf *psb);
static void
lpfc_release_scsi_buf_s3(struct lpfc_hba *phba, struct lpfc_scsi_buf *psb);
static int
lpfc_prot_group_type(struct lpfc_hba *phba, struct scsi_cmnd *sc);
static void
lpfc_debug_save_data(struct lpfc_hba *phba, struct scsi_cmnd *cmnd)
{
	void *src, *dst;
	struct scatterlist *sgde = scsi_sglist(cmnd);

	if (!_dump_buf_data) {
		lpfc_printf_log(phba, KERN_ERR, LOG_BG,
			"9050 BLKGRD: ERROR %s _dump_buf_data is NULL\n",
				__func__);
		return;
	}

	if (!sgde) {
		lpfc_printf_log(phba, KERN_ERR, LOG_BG,
			"9051 BLKGRD: ERROR: data scatterlist is null\n");
		return;
	}

	dst = (void *) _dump_buf_data;
	while (sgde) {
		src = sg_virt(sgde);
		memcpy(dst, src, sgde->length);
		dst += sgde->length;
		sgde = sg_next(sgde);
	}
}
static void
lpfc_debug_save_dif(struct lpfc_hba *phba, struct scsi_cmnd *cmnd)
{
	void *src, *dst;
	struct scatterlist *sgde = scsi_prot_sglist(cmnd);

	if (!_dump_buf_dif) {
		lpfc_printf_log(phba, KERN_ERR, LOG_BG,
			"9052 BLKGRD: ERROR %s _dump_buf_dif is NULL\n",
				__func__);
		return;
	}

	if (!sgde) {
		lpfc_printf_log(phba, KERN_ERR, LOG_BG,
			"9053 BLKGRD: ERROR: prot scatterlist is null\n");
		return;
	}

	dst = _dump_buf_dif;
	while (sgde) {
		src = sg_virt(sgde);
		memcpy(dst, src, sgde->length);
		dst += sgde->length;
		sgde = sg_next(sgde);
	}
}
static inline unsigned
lpfc_cmd_blksize(struct scsi_cmnd *sc)
{
	return sc->device->sector_size;
}
#define LPFC_CHECK_PROTECT_GUARD	1
#define LPFC_CHECK_PROTECT_REF		2
static inline unsigned
lpfc_cmd_protect(struct scsi_cmnd *sc, int flag)
{
	return 1;
}
static inline unsigned
lpfc_cmd_guard_csum(struct scsi_cmnd *sc)
{
	if (lpfc_prot_group_type(NULL, sc) == LPFC_PG_TYPE_NO_DIF)
		return 0;
	if (scsi_host_get_guard(sc->device->host) == SHOST_DIX_GUARD_IP)
		return 1;
	return 0;
}
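/*
 * Editor's note: taken together, these helpers decide how the guard tag
 * is computed. For example, a DIX-capable host registered with
 * SHOST_DIX_GUARD_IP makes lpfc_cmd_guard_csum() return 1, so the BG
 * opcode tables below convert between the host's IP checksum and the
 * T10 CRC carried on the wire; with SHOST_DIX_GUARD_CRC the CRC is used
 * end to end and no conversion is needed.
 */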
/**
 * lpfc_sli4_set_rsp_sgl_last - Set the last bit in the response sge.
 * @phba: Pointer to HBA object.
 * @lpfc_cmd: lpfc scsi command object pointer.
 *
 * This function is called from the lpfc_prep_task_mgmt_cmd function to
 * set the last bit in the response sge entry.
 **/
static void
lpfc_sli4_set_rsp_sgl_last(struct lpfc_hba *phba,
			struct lpfc_scsi_buf *lpfc_cmd)
{
	struct sli4_sge *sgl = (struct sli4_sge *)lpfc_cmd->fcp_bpl;
	if (sgl) {
		sgl += 1;
		sgl->word2 = le32_to_cpu(sgl->word2);
		bf_set(lpfc_sli4_sge_last, sgl, 1);
		sgl->word2 = cpu_to_le32(sgl->word2);
	}
}
/**
 * lpfc_update_stats - Update statistical data for the command completion
 * @phba: Pointer to HBA object.
 * @lpfc_cmd: lpfc scsi command object pointer.
 *
 * This function is called when there is a command completion and this
 * function updates the statistical data for the command completion.
 **/
static void
lpfc_update_stats(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd)
{
	struct lpfc_rport_data *rdata = lpfc_cmd->rdata;
	struct lpfc_nodelist *pnode = rdata->pnode;
	struct scsi_cmnd *cmd = lpfc_cmd->pCmd;
	unsigned long flags;
	struct Scsi_Host *shost = cmd->device->host;
	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
	unsigned long latency;
	int i;

	if (cmd->result)
		return;

	latency = jiffies_to_msecs((long)jiffies - (long)lpfc_cmd->start_time);

	spin_lock_irqsave(shost->host_lock, flags);
	if (!vport->stat_data_enabled ||
	    vport->stat_data_blocked ||
	    !pnode ||
	    !pnode->lat_data ||
	    (phba->bucket_type == LPFC_NO_BUCKET)) {
		spin_unlock_irqrestore(shost->host_lock, flags);
		return;
	}

	if (phba->bucket_type == LPFC_LINEAR_BUCKET) {
		i = (latency + phba->bucket_step - 1 - phba->bucket_base)/
			phba->bucket_step;
		/* check array subscript bounds */
		if (i < 0)
			i = 0;
		else if (i >= LPFC_MAX_BUCKET_COUNT)
			i = LPFC_MAX_BUCKET_COUNT - 1;
	} else {
		for (i = 0; i < LPFC_MAX_BUCKET_COUNT-1; i++)
			if (latency <= (phba->bucket_base +
				((1<<i)*phba->bucket_step)))
				break;
	}

	pnode->lat_data[i].cmd_count++;
	spin_unlock_irqrestore(shost->host_lock, flags);
}
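/*
 * Worked example (editor's illustration): with LPFC_LINEAR_BUCKET,
 * bucket_base = 0 and bucket_step = 50, a latency of 120 ms selects
 * bucket i = (120 + 50 - 1 - 0) / 50 = 3. With the power-of-2 bucket
 * type, the same latency selects the first i satisfying
 * 120 <= bucket_base + (1 << i) * bucket_step.
 */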
/**
 * lpfc_rampdown_queue_depth - Post RAMP_DOWN_QUEUE event to worker thread
 * @phba: The Hba for which this call is being executed.
 *
 * This routine is called when there is a resource error in driver or firmware.
 * This routine posts a WORKER_RAMP_DOWN_QUEUE event for @phba. This routine
 * posts at most one event each second, and wakes up the worker thread of
 * @phba to process the WORKER_RAMP_DOWN_QUEUE event.
 *
 * This routine should be called with no lock held.
 **/
void
lpfc_rampdown_queue_depth(struct lpfc_hba *phba)
{
	unsigned long flags;
	uint32_t evt_posted;
	unsigned long expires;

	spin_lock_irqsave(&phba->hbalock, flags);
	atomic_inc(&phba->num_rsrc_err);
	phba->last_rsrc_error_time = jiffies;

	expires = phba->last_ramp_down_time + QUEUE_RAMP_DOWN_INTERVAL;
	if (time_after(expires, jiffies)) {
		spin_unlock_irqrestore(&phba->hbalock, flags);
		return;
	}

	phba->last_ramp_down_time = jiffies;

	spin_unlock_irqrestore(&phba->hbalock, flags);

	spin_lock_irqsave(&phba->pport->work_port_lock, flags);
	evt_posted = phba->pport->work_port_events & WORKER_RAMP_DOWN_QUEUE;
	if (!evt_posted)
		phba->pport->work_port_events |= WORKER_RAMP_DOWN_QUEUE;
	spin_unlock_irqrestore(&phba->pport->work_port_lock, flags);

	if (!evt_posted)
		lpfc_worker_wake_up(phba);
	return;
}
/**
 * lpfc_ramp_down_queue_handler - WORKER_RAMP_DOWN_QUEUE event handler
 * @phba: The Hba for which this call is being executed.
 *
 * This routine is called to process the WORKER_RAMP_DOWN_QUEUE event for the
 * worker thread. This routine reduces the queue depth for every scsi device
 * on each vport associated with @phba.
 **/
void
lpfc_ramp_down_queue_handler(struct lpfc_hba *phba)
{
	struct lpfc_vport **vports;
	struct Scsi_Host  *shost;
	struct scsi_device *sdev;
	unsigned long new_queue_depth;
	unsigned long num_rsrc_err, num_cmd_success;
	int i;

	num_rsrc_err = atomic_read(&phba->num_rsrc_err);
	num_cmd_success = atomic_read(&phba->num_cmd_success);

	/*
	 * The error and success command counters are global per
	 * driver instance.  If another handler has already
	 * operated on this error event, just exit.
	 */
	if (num_rsrc_err == 0)
		return;

	vports = lpfc_create_vport_work_array(phba);
	if (vports != NULL)
		for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
			shost = lpfc_shost_from_vport(vports[i]);
			shost_for_each_device(sdev, shost) {
				new_queue_depth =
					sdev->queue_depth * num_rsrc_err /
					(num_rsrc_err + num_cmd_success);
				if (!new_queue_depth)
					new_queue_depth = sdev->queue_depth - 1;
				else
					new_queue_depth = sdev->queue_depth -
								new_queue_depth;
				scsi_change_queue_depth(sdev, new_queue_depth);
			}
		}
	lpfc_destroy_vport_work_array(phba, vports);
	atomic_set(&phba->num_rsrc_err, 0);
	atomic_set(&phba->num_cmd_success, 0);
}
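/*
 * Worked example (editor's illustration): with queue_depth = 32,
 * num_rsrc_err = 10 and num_cmd_success = 30, the computed reduction is
 * 32 * 10 / 40 = 8, so the device is ramped down to 32 - 8 = 24. When
 * the integer division yields 0, the depth still drops by one, so a
 * resource error always makes progress.
 */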
/**
 * lpfc_scsi_dev_block - set all scsi hosts to block state
 * @phba: Pointer to HBA context object.
 *
 * This function walks the vport list and sets each SCSI host to block state
 * by invoking the fc_remote_port_delete() routine. This function is invoked
 * with EEH when a device's PCI slot has been permanently disabled.
 **/
void
lpfc_scsi_dev_block(struct lpfc_hba *phba)
{
	struct lpfc_vport **vports;
	struct Scsi_Host  *shost;
	struct scsi_device *sdev;
	struct fc_rport *rport;
	int i;

	vports = lpfc_create_vport_work_array(phba);
	if (vports != NULL)
		for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
			shost = lpfc_shost_from_vport(vports[i]);
			shost_for_each_device(sdev, shost) {
				rport = starget_to_rport(scsi_target(sdev));
				fc_remote_port_delete(rport);
			}
		}
	lpfc_destroy_vport_work_array(phba, vports);
}
/**
 * lpfc_new_scsi_buf_s3 - Scsi buffer allocator for HBA with SLI3 IF spec
 * @vport: The virtual port for which this call being executed.
 * @num_to_alloc: The requested number of buffers to allocate.
 *
 * This routine allocates a scsi buffer for device with SLI-3 interface spec,
 * the scsi buffer contains all the necessary information needed to initiate
 * a SCSI I/O. The non-DMAable buffer region contains information to build
 * the IOCB. The DMAable region contains memory for the FCP CMND, FCP RSP,
 * and the initial BPL. In addition to allocating memory, the FCP CMND and
 * FCP RSP BDEs are setup in the BPL and the BPL BDE is setup in the IOCB.
 *
 * Return codes:
 *   int - number of scsi buffers that were allocated.
 *   0 = failure, less than num_to_alloc is a partial failure.
 **/
static int
lpfc_new_scsi_buf_s3(struct lpfc_vport *vport, int num_to_alloc)
{
	struct lpfc_hba *phba = vport->phba;
	struct lpfc_scsi_buf *psb;
	struct ulp_bde64 *bpl;
	IOCB_t *iocb;
	dma_addr_t pdma_phys_fcp_cmd;
	dma_addr_t pdma_phys_fcp_rsp;
	dma_addr_t pdma_phys_bpl;
	uint16_t iotag;
	int bcnt, bpl_size;

	bpl_size = phba->cfg_sg_dma_buf_size -
		(sizeof(struct fcp_cmnd) + sizeof(struct fcp_rsp));

	lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
			 "9067 ALLOC %d scsi_bufs: %d (%d + %d + %d)\n",
			 num_to_alloc, phba->cfg_sg_dma_buf_size,
			 (int)sizeof(struct fcp_cmnd),
			 (int)sizeof(struct fcp_rsp), bpl_size);

	for (bcnt = 0; bcnt < num_to_alloc; bcnt++) {
		psb = kzalloc(sizeof(struct lpfc_scsi_buf), GFP_KERNEL);
		if (!psb)
			break;

		/*
		 * Get memory from the pci pool to map the virt space to
		 * pci bus space for an I/O. The DMA buffer includes space
		 * for the struct fcp_cmnd, struct fcp_rsp and the number
		 * of bde's necessary to support the sg_tablesize.
		 */
		psb->data = pci_pool_zalloc(phba->lpfc_sg_dma_buf_pool,
					GFP_KERNEL, &psb->dma_handle);
		if (!psb->data) {
			kfree(psb);
			break;
		}

		/* Allocate iotag for psb->cur_iocbq. */
		iotag = lpfc_sli_next_iotag(phba, &psb->cur_iocbq);
		if (iotag == 0) {
			pci_pool_free(phba->lpfc_sg_dma_buf_pool,
				      psb->data, psb->dma_handle);
			kfree(psb);
			break;
		}
		psb->cur_iocbq.iocb_flag |= LPFC_IO_FCP;

		psb->fcp_cmnd = psb->data;
		psb->fcp_rsp = psb->data + sizeof(struct fcp_cmnd);
		psb->fcp_bpl = psb->data + sizeof(struct fcp_cmnd) +
			sizeof(struct fcp_rsp);

		/* Initialize local short-hand pointers. */
		bpl = psb->fcp_bpl;
		pdma_phys_fcp_cmd = psb->dma_handle;
		pdma_phys_fcp_rsp = psb->dma_handle + sizeof(struct fcp_cmnd);
		pdma_phys_bpl = psb->dma_handle + sizeof(struct fcp_cmnd) +
			sizeof(struct fcp_rsp);

		/*
		 * The first two bdes are the FCP_CMD and FCP_RSP. The balance
		 * are sg list bdes.  Initialize the first two and leave the
		 * rest for queuecommand.
		 */
		bpl[0].addrHigh = le32_to_cpu(putPaddrHigh(pdma_phys_fcp_cmd));
		bpl[0].addrLow = le32_to_cpu(putPaddrLow(pdma_phys_fcp_cmd));
		bpl[0].tus.f.bdeSize = sizeof(struct fcp_cmnd);
		bpl[0].tus.f.bdeFlags = BUFF_TYPE_BDE_64;
		bpl[0].tus.w = le32_to_cpu(bpl[0].tus.w);

		/* Setup the physical region for the FCP RSP */
		bpl[1].addrHigh = le32_to_cpu(putPaddrHigh(pdma_phys_fcp_rsp));
		bpl[1].addrLow = le32_to_cpu(putPaddrLow(pdma_phys_fcp_rsp));
		bpl[1].tus.f.bdeSize = sizeof(struct fcp_rsp);
		bpl[1].tus.f.bdeFlags = BUFF_TYPE_BDE_64;
		bpl[1].tus.w = le32_to_cpu(bpl[1].tus.w);

		/*
		 * Since the IOCB for the FCP I/O is built into this
		 * lpfc_scsi_buf, initialize it with all known data now.
		 */
		iocb = &psb->cur_iocbq.iocb;
		iocb->un.fcpi64.bdl.ulpIoTag32 = 0;
		if ((phba->sli_rev == 3) &&
				!(phba->sli3_options & LPFC_SLI3_BG_ENABLED)) {
			/* fill in immediate fcp command BDE */
			iocb->un.fcpi64.bdl.bdeFlags = BUFF_TYPE_BDE_IMMED;
			iocb->un.fcpi64.bdl.bdeSize = sizeof(struct fcp_cmnd);
			iocb->un.fcpi64.bdl.addrLow = offsetof(IOCB_t,
					unsli3.fcp_ext.icd);
			iocb->un.fcpi64.bdl.addrHigh = 0;
			iocb->ulpBdeCount = 0;
			iocb->ulpLe = 0;
			/* fill in response BDE */
			iocb->unsli3.fcp_ext.rbde.tus.f.bdeFlags =
							BUFF_TYPE_BDE_64;
			iocb->unsli3.fcp_ext.rbde.tus.f.bdeSize =
				sizeof(struct fcp_rsp);
			iocb->unsli3.fcp_ext.rbde.addrLow =
				putPaddrLow(pdma_phys_fcp_rsp);
			iocb->unsli3.fcp_ext.rbde.addrHigh =
				putPaddrHigh(pdma_phys_fcp_rsp);
		} else {
			iocb->un.fcpi64.bdl.bdeFlags = BUFF_TYPE_BLP_64;
			iocb->un.fcpi64.bdl.bdeSize =
					(2 * sizeof(struct ulp_bde64));
			iocb->un.fcpi64.bdl.addrLow =
					putPaddrLow(pdma_phys_bpl);
			iocb->un.fcpi64.bdl.addrHigh =
					putPaddrHigh(pdma_phys_bpl);
			iocb->ulpBdeCount = 1;
			iocb->ulpLe = 1;
		}
		iocb->ulpClass = CLASS3;
		psb->status = IOSTAT_SUCCESS;
		/* Put it back into the SCSI buffer list */
		psb->cur_iocbq.context1 = psb;
		lpfc_release_scsi_buf_s3(phba, psb);

	}

	return bcnt;
}
/**
 * lpfc_sli4_vport_delete_fcp_xri_aborted - Remove all ndlp references for vport
 * @vport: pointer to lpfc vport data structure.
 *
 * This routine is invoked by the vport cleanup for deletions and the cleanup
 * for an ndlp on removal.
 **/
void
lpfc_sli4_vport_delete_fcp_xri_aborted(struct lpfc_vport *vport)
{
	struct lpfc_hba *phba = vport->phba;
	struct lpfc_scsi_buf *psb, *next_psb;
	unsigned long iflag = 0;

	if (!(phba->cfg_enable_fc4_type & LPFC_ENABLE_FCP))
		return;
	spin_lock_irqsave(&phba->hbalock, iflag);
	spin_lock(&phba->sli4_hba.abts_scsi_buf_list_lock);
	list_for_each_entry_safe(psb, next_psb,
				&phba->sli4_hba.lpfc_abts_scsi_buf_list, list) {
		if (psb->rdata && psb->rdata->pnode
			&& psb->rdata->pnode->vport == vport)
			psb->rdata = NULL;
	}
	spin_unlock(&phba->sli4_hba.abts_scsi_buf_list_lock);
	spin_unlock_irqrestore(&phba->hbalock, iflag);
}
/**
 * lpfc_sli4_fcp_xri_aborted - Fast-path process of fcp xri abort
 * @phba: pointer to lpfc hba data structure.
 * @axri: pointer to the fcp xri abort wcqe structure.
 *
 * This routine is invoked by the worker thread to process a SLI4 fast-path
 * FCP aborted xri.
 **/
void
lpfc_sli4_fcp_xri_aborted(struct lpfc_hba *phba,
			  struct sli4_wcqe_xri_aborted *axri)
{
	uint16_t xri = bf_get(lpfc_wcqe_xa_xri, axri);
	uint16_t rxid = bf_get(lpfc_wcqe_xa_remote_xid, axri);
	struct lpfc_scsi_buf *psb, *next_psb;
	unsigned long iflag = 0;
	struct lpfc_iocbq *iocbq;
	int i;
	struct lpfc_nodelist *ndlp;
	int rrq_empty = 0;
	struct lpfc_sli_ring *pring = phba->sli4_hba.els_wq->pring;

	if (!(phba->cfg_enable_fc4_type & LPFC_ENABLE_FCP))
		return;
	spin_lock_irqsave(&phba->hbalock, iflag);
	spin_lock(&phba->sli4_hba.abts_scsi_buf_list_lock);
	list_for_each_entry_safe(psb, next_psb,
		&phba->sli4_hba.lpfc_abts_scsi_buf_list, list) {
		if (psb->cur_iocbq.sli4_xritag == xri) {
			list_del(&psb->list);
			psb->exch_busy = 0;
			psb->status = IOSTAT_SUCCESS;
			spin_unlock(
				&phba->sli4_hba.abts_scsi_buf_list_lock);
			if (psb->rdata && psb->rdata->pnode)
				ndlp = psb->rdata->pnode;
			else
				ndlp = NULL;

			rrq_empty = list_empty(&phba->active_rrq_list);
			spin_unlock_irqrestore(&phba->hbalock, iflag);
			if (ndlp) {
				lpfc_set_rrq_active(phba, ndlp,
					psb->cur_iocbq.sli4_lxritag, rxid, 1);
				lpfc_sli4_abts_err_handler(phba, ndlp, axri);
			}
			lpfc_release_scsi_buf_s4(phba, psb);
			if (rrq_empty)
				lpfc_worker_wake_up(phba);
			return;
		}
	}
	spin_unlock(&phba->sli4_hba.abts_scsi_buf_list_lock);
	for (i = 1; i <= phba->sli.last_iotag; i++) {
		iocbq = phba->sli.iocbq_lookup[i];

		if (!(iocbq->iocb_flag & LPFC_IO_FCP) ||
		    (iocbq->iocb_flag & LPFC_IO_LIBDFC))
			continue;
		if (iocbq->sli4_xritag != xri)
			continue;
		psb = container_of(iocbq, struct lpfc_scsi_buf, cur_iocbq);
		psb->exch_busy = 0;
		spin_unlock_irqrestore(&phba->hbalock, iflag);
		if (!list_empty(&pring->txq))
			lpfc_worker_wake_up(phba);
		return;

	}
	spin_unlock_irqrestore(&phba->hbalock, iflag);
}
/**
 * lpfc_sli4_post_scsi_sgl_list - Post blocks of scsi buffer sgls from a list
 * @phba: pointer to lpfc hba data structure.
 * @post_sblist: pointer to the scsi buffer list.
 *
 * This routine walks a list of scsi buffers that was passed in. It attempts
 * to construct blocks of scsi buffer sgls which contain contiguous xris and
 * uses the non-embedded SGL block post mailbox commands to post to the port.
 * For a single SCSI buffer sgl with a non-contiguous xri, if any, it shall use
 * the embedded SGL post mailbox command for posting. The @post_sblist passed
 * in must be a local list, thus no lock is needed when manipulating the list.
 *
 * Returns: 0 = failure, non-zero number of successfully posted buffers.
 **/
static int
lpfc_sli4_post_scsi_sgl_list(struct lpfc_hba *phba,
			     struct list_head *post_sblist, int sb_count)
{
	struct lpfc_scsi_buf *psb, *psb_next;
	int status, sgl_size;
	int post_cnt = 0, block_cnt = 0, num_posting = 0, num_posted = 0;
	dma_addr_t pdma_phys_bpl1;
	int last_xritag = NO_XRI;
	LIST_HEAD(prep_sblist);
	LIST_HEAD(blck_sblist);
	LIST_HEAD(scsi_sblist);

	/* sanity check */
	if (sb_count <= 0)
		return 0;

	sgl_size = phba->cfg_sg_dma_buf_size -
		(sizeof(struct fcp_cmnd) + sizeof(struct fcp_rsp));

	list_for_each_entry_safe(psb, psb_next, post_sblist, list) {
		list_del_init(&psb->list);
		block_cnt++;
		if ((last_xritag != NO_XRI) &&
		    (psb->cur_iocbq.sli4_xritag != last_xritag + 1)) {
			/* a hole in xri block, form a sgl posting block */
			list_splice_init(&prep_sblist, &blck_sblist);
			post_cnt = block_cnt - 1;
			/* prepare list for next posting block */
			list_add_tail(&psb->list, &prep_sblist);
			block_cnt = 1;
		} else {
			/* prepare list for next posting block */
			list_add_tail(&psb->list, &prep_sblist);
			/* enough sgls for non-embed sgl mbox command */
			if (block_cnt == LPFC_NEMBED_MBOX_SGL_CNT) {
				list_splice_init(&prep_sblist, &blck_sblist);
				post_cnt = block_cnt;
				block_cnt = 0;
			}
		}
		num_posting++;
		last_xritag = psb->cur_iocbq.sli4_xritag;

		/* end of repost sgl list condition for SCSI buffers */
		if (num_posting == sb_count) {
			if (post_cnt == 0) {
				/* last sgl posting block */
				list_splice_init(&prep_sblist, &blck_sblist);
				post_cnt = block_cnt;
			} else if (block_cnt == 1) {
				/* last single sgl with non-contiguous xri */
				if (sgl_size > SGL_PAGE_SIZE)
					pdma_phys_bpl1 = psb->dma_phys_bpl +
								SGL_PAGE_SIZE;
				else
					pdma_phys_bpl1 = 0;
				status = lpfc_sli4_post_sgl(phba,
						psb->dma_phys_bpl,
						pdma_phys_bpl1,
						psb->cur_iocbq.sli4_xritag);
				if (status) {
					/* failure, put on abort scsi list */
					psb->exch_busy = 1;
				} else {
					/* success, put on SCSI buffer list */
					psb->exch_busy = 0;
					psb->status = IOSTAT_SUCCESS;
					num_posted++;
				}
				/* success, put on SCSI buffer sgl list */
				list_add_tail(&psb->list, &scsi_sblist);
			}
		}

		/* continue until a nembed page worth of sgls */
		if (post_cnt == 0)
			continue;

		/* post block of SCSI buffer list sgls */
		status = lpfc_sli4_post_scsi_sgl_block(phba, &blck_sblist,
						       post_cnt);

		/* don't reset xritag due to hole in xri block */
		if (block_cnt == 0)
			last_xritag = NO_XRI;

		/* reset SCSI buffer post count for next round of posting */
		post_cnt = 0;

		/* put posted SCSI buffer-sgl posted on SCSI buffer sgl list */
		while (!list_empty(&blck_sblist)) {
			list_remove_head(&blck_sblist, psb,
					 struct lpfc_scsi_buf, list);
			if (status) {
				/* failure, put on abort scsi list */
				psb->exch_busy = 1;
			} else {
				/* success, put on SCSI buffer list */
				psb->exch_busy = 0;
				psb->status = IOSTAT_SUCCESS;
				num_posted++;
			}
			list_add_tail(&psb->list, &scsi_sblist);
		}
	}
	/* Push SCSI buffers with sgl posted to the available list */
	while (!list_empty(&scsi_sblist)) {
		list_remove_head(&scsi_sblist, psb,
				 struct lpfc_scsi_buf, list);
		lpfc_release_scsi_buf_s4(phba, psb);
	}
	return num_posted;
}
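/*
 * Example (editor's illustration): if the list carries XRIs 100, 101,
 * 102 and 200, the first three form one contiguous block for the
 * non-embedded SGL block post; the hole before 200 flushes that block,
 * and 200 is posted individually with the embedded SGL mailbox command.
 */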
/**
 * lpfc_sli4_repost_scsi_sgl_list - Repost all the allocated scsi buffer sgls
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine walks the list of scsi buffers that have been allocated and
 * reposts them to the port by using SGL block post. This is needed after a
 * pci_function_reset/warm_start or start. The lpfc_hba_down_post_s4 routine
 * is responsible for moving all scsi buffers on the lpfc_abts_scsi_sgl_list
 * to the lpfc_scsi_buf_list. If the repost fails, reject all scsi buffers.
 *
 * Returns: 0 = success, non-zero failure.
 **/
int
lpfc_sli4_repost_scsi_sgl_list(struct lpfc_hba *phba)
{
	LIST_HEAD(post_sblist);
	int num_posted, rc = 0;

	/* get all SCSI buffers need to repost to a local list */
	spin_lock_irq(&phba->scsi_buf_list_get_lock);
	spin_lock(&phba->scsi_buf_list_put_lock);
	list_splice_init(&phba->lpfc_scsi_buf_list_get, &post_sblist);
	list_splice(&phba->lpfc_scsi_buf_list_put, &post_sblist);
	spin_unlock(&phba->scsi_buf_list_put_lock);
	spin_unlock_irq(&phba->scsi_buf_list_get_lock);

	/* post the list of scsi buffer sgls to port if available */
	if (!list_empty(&post_sblist)) {
		num_posted = lpfc_sli4_post_scsi_sgl_list(phba, &post_sblist,
						phba->sli4_hba.scsi_xri_cnt);
		/* failed to post any scsi buffer, return error */
		if (num_posted == 0)
			rc = -EIO;
	}
	return rc;
}
/**
 * lpfc_new_scsi_buf_s4 - Scsi buffer allocator for HBA with SLI4 IF spec
 * @vport: The virtual port for which this call being executed.
 * @num_to_alloc: The requested number of buffers to allocate.
 *
 * This routine allocates scsi buffers for device with SLI-4 interface spec,
 * the scsi buffer contains all the necessary information needed to initiate
 * a SCSI I/O. After allocating up to @num_to_alloc SCSI buffers and putting
 * them on a list, it posts them to the port by using SGL block post.
 *
 * Return codes:
 *   int - number of scsi buffers that were allocated and posted.
 *   0 = failure, less than num_to_alloc is a partial failure.
 **/
static int
lpfc_new_scsi_buf_s4(struct lpfc_vport *vport, int num_to_alloc)
{
	struct lpfc_hba *phba = vport->phba;
	struct lpfc_scsi_buf *psb;
	struct sli4_sge *sgl;
	IOCB_t *iocb;
	dma_addr_t pdma_phys_fcp_cmd;
	dma_addr_t pdma_phys_fcp_rsp;
	dma_addr_t pdma_phys_bpl;
	uint16_t iotag, lxri = 0;
	int bcnt, num_posted, sgl_size;
	LIST_HEAD(prep_sblist);
	LIST_HEAD(post_sblist);
	LIST_HEAD(scsi_sblist);

	sgl_size = phba->cfg_sg_dma_buf_size -
		(sizeof(struct fcp_cmnd) + sizeof(struct fcp_rsp));

	lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
			 "9068 ALLOC %d scsi_bufs: %d (%d + %d + %d)\n",
			 num_to_alloc, phba->cfg_sg_dma_buf_size, sgl_size,
			 (int)sizeof(struct fcp_cmnd),
			 (int)sizeof(struct fcp_rsp));

	for (bcnt = 0; bcnt < num_to_alloc; bcnt++) {
		psb = kzalloc(sizeof(struct lpfc_scsi_buf), GFP_KERNEL);
		if (!psb)
			break;
		/*
		 * Get memory from the pci pool to map the virt space to
		 * pci bus space for an I/O. The DMA buffer includes space
		 * for the struct fcp_cmnd, struct fcp_rsp and the number
		 * of bde's necessary to support the sg_tablesize.
		 */
		psb->data = pci_pool_zalloc(phba->lpfc_sg_dma_buf_pool,
						GFP_KERNEL, &psb->dma_handle);
		if (!psb->data) {
			kfree(psb);
			break;
		}

		/*
		 * 4K Page alignment is CRITICAL to BlockGuard, double check
		 * to be sure.
		 */
		if (phba->cfg_enable_bg && (((unsigned long)(psb->data) &
		    (unsigned long)(SLI4_PAGE_SIZE - 1)) != 0)) {
			pci_pool_free(phba->lpfc_sg_dma_buf_pool,
				      psb->data, psb->dma_handle);
			kfree(psb);
			break;
		}


		lxri = lpfc_sli4_next_xritag(phba);
		if (lxri == NO_XRI) {
			pci_pool_free(phba->lpfc_sg_dma_buf_pool,
				      psb->data, psb->dma_handle);
			kfree(psb);
			break;
		}

		/* Allocate iotag for psb->cur_iocbq. */
		iotag = lpfc_sli_next_iotag(phba, &psb->cur_iocbq);
		if (iotag == 0) {
			pci_pool_free(phba->lpfc_sg_dma_buf_pool,
				      psb->data, psb->dma_handle);
			kfree(psb);
			lpfc_printf_log(phba, KERN_ERR, LOG_FCP,
					"3368 Failed to allocate IOTAG for"
					" XRI:0x%x\n", lxri);
			lpfc_sli4_free_xri(phba, lxri);
			break;
		}
		psb->cur_iocbq.sli4_lxritag = lxri;
		psb->cur_iocbq.sli4_xritag = phba->sli4_hba.xri_ids[lxri];
		psb->cur_iocbq.iocb_flag |= LPFC_IO_FCP;
		psb->fcp_bpl = psb->data;
		psb->fcp_cmnd = (psb->data + sgl_size);
		psb->fcp_rsp = (struct fcp_rsp *)((uint8_t *)psb->fcp_cmnd +
					sizeof(struct fcp_cmnd));

		/* Initialize local short-hand pointers. */
		sgl = (struct sli4_sge *)psb->fcp_bpl;
		pdma_phys_bpl = psb->dma_handle;
		pdma_phys_fcp_cmd = (psb->dma_handle + sgl_size);
		pdma_phys_fcp_rsp = pdma_phys_fcp_cmd + sizeof(struct fcp_cmnd);

		/*
		 * The first two bdes are the FCP_CMD and FCP_RSP.
		 * The balance are sg list bdes. Initialize the
		 * first two and leave the rest for queuecommand.
		 */
		sgl->addr_hi = cpu_to_le32(putPaddrHigh(pdma_phys_fcp_cmd));
		sgl->addr_lo = cpu_to_le32(putPaddrLow(pdma_phys_fcp_cmd));
		sgl->word2 = le32_to_cpu(sgl->word2);
		bf_set(lpfc_sli4_sge_last, sgl, 0);
		sgl->word2 = cpu_to_le32(sgl->word2);
		sgl->sge_len = cpu_to_le32(sizeof(struct fcp_cmnd));
		sgl++;

		/* Setup the physical region for the FCP RSP */
		sgl->addr_hi = cpu_to_le32(putPaddrHigh(pdma_phys_fcp_rsp));
		sgl->addr_lo = cpu_to_le32(putPaddrLow(pdma_phys_fcp_rsp));
		sgl->word2 = le32_to_cpu(sgl->word2);
		bf_set(lpfc_sli4_sge_last, sgl, 1);
		sgl->word2 = cpu_to_le32(sgl->word2);
		sgl->sge_len = cpu_to_le32(sizeof(struct fcp_rsp));

		/*
		 * Since the IOCB for the FCP I/O is built into this
		 * lpfc_scsi_buf, initialize it with all known data now.
		 */
		iocb = &psb->cur_iocbq.iocb;
		iocb->un.fcpi64.bdl.ulpIoTag32 = 0;
		iocb->un.fcpi64.bdl.bdeFlags = BUFF_TYPE_BDE_64;
		/* setting the BLP size to 2 * sizeof BDE may not be correct.
		 * We are setting the bpl to point to our sgl. An sgl's
		 * entries are 16 bytes, a bpl's entries are 12 bytes.
		 */
		iocb->un.fcpi64.bdl.bdeSize = sizeof(struct fcp_cmnd);
		iocb->un.fcpi64.bdl.addrLow = putPaddrLow(pdma_phys_fcp_cmd);
		iocb->un.fcpi64.bdl.addrHigh = putPaddrHigh(pdma_phys_fcp_cmd);
		iocb->ulpBdeCount = 1;
		iocb->ulpLe = 1;
		iocb->ulpClass = CLASS3;
		psb->cur_iocbq.context1 = psb;
		psb->dma_phys_bpl = pdma_phys_bpl;

		/* add the scsi buffer to a post list */
		list_add_tail(&psb->list, &post_sblist);
		spin_lock_irq(&phba->scsi_buf_list_get_lock);
		phba->sli4_hba.scsi_xri_cnt++;
		spin_unlock_irq(&phba->scsi_buf_list_get_lock);
	}
	lpfc_printf_log(phba, KERN_INFO, LOG_BG | LOG_FCP,
			"3021 Allocate %d out of %d requested new SCSI "
			"buffers\n", bcnt, num_to_alloc);

	/* post the list of scsi buffer sgls to port if available */
	if (!list_empty(&post_sblist))
		num_posted = lpfc_sli4_post_scsi_sgl_list(phba,
							  &post_sblist, bcnt);
	else
		num_posted = 0;

	return num_posted;
}
/**
 * lpfc_new_scsi_buf - Wrapper function for scsi buffer allocator
 * @vport: The virtual port for which this call being executed.
 * @num_to_alloc: The requested number of buffers to allocate.
 *
 * This routine wraps the actual SCSI buffer allocator function pointer from
 * the lpfc_hba struct.
 *
 * Return codes:
 *   int - number of scsi buffers that were allocated.
 *   0 = failure, less than num_to_alloc is a partial failure.
 **/
static inline int
lpfc_new_scsi_buf(struct lpfc_vport *vport, int num_to_alloc)
{
	return vport->phba->lpfc_new_scsi_buf(vport, num_to_alloc);
}
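/*
 * Editor's note: this is one of several per-SLI-revision indirections
 * in this file (buffer alloc, get, release, DMA prep). The lpfc_hba
 * function pointers are assumed to be bound at attach time to the _s3
 * or _s4 variant, so the I/O path never has to test sli_rev directly.
 */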
/**
 * lpfc_get_scsi_buf_s3 - Get a scsi buffer from lpfc_scsi_buf_list of the HBA
 * @phba: The HBA for which this call is being executed.
 *
 * This routine removes a scsi buffer from the head of the @phba
 * lpfc_scsi_buf_list list and returns it to the caller.
 *
 * Return codes:
 *   NULL - Error
 *   Pointer to lpfc_scsi_buf - Success
 **/
static struct lpfc_scsi_buf *
lpfc_get_scsi_buf_s3(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp)
{
	struct lpfc_scsi_buf *lpfc_cmd = NULL;
	struct list_head *scsi_buf_list_get = &phba->lpfc_scsi_buf_list_get;
	unsigned long iflag = 0;

	spin_lock_irqsave(&phba->scsi_buf_list_get_lock, iflag);
	list_remove_head(scsi_buf_list_get, lpfc_cmd, struct lpfc_scsi_buf,
			 list);
	if (!lpfc_cmd) {
		spin_lock(&phba->scsi_buf_list_put_lock);
		list_splice(&phba->lpfc_scsi_buf_list_put,
			    &phba->lpfc_scsi_buf_list_get);
		INIT_LIST_HEAD(&phba->lpfc_scsi_buf_list_put);
		list_remove_head(scsi_buf_list_get, lpfc_cmd,
				 struct lpfc_scsi_buf, list);
		spin_unlock(&phba->scsi_buf_list_put_lock);
	}
	spin_unlock_irqrestore(&phba->scsi_buf_list_get_lock, iflag);
	return lpfc_cmd;
}
/**
 * lpfc_get_scsi_buf_s4 - Get a scsi buffer from lpfc_scsi_buf_list of the HBA
 * @phba: The HBA for which this call is being executed.
 *
 * This routine removes a scsi buffer from the head of the @phba
 * lpfc_scsi_buf_list list and returns it to the caller.
 *
 * Return codes:
 *   NULL - Error
 *   Pointer to lpfc_scsi_buf - Success
 **/
static struct lpfc_scsi_buf *
lpfc_get_scsi_buf_s4(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp)
{
	struct lpfc_scsi_buf *lpfc_cmd, *lpfc_cmd_next;
	unsigned long iflag = 0;
	int found = 0;

	spin_lock_irqsave(&phba->scsi_buf_list_get_lock, iflag);
	list_for_each_entry_safe(lpfc_cmd, lpfc_cmd_next,
				 &phba->lpfc_scsi_buf_list_get, list) {
		if (lpfc_test_rrq_active(phba, ndlp,
					 lpfc_cmd->cur_iocbq.sli4_lxritag))
			continue;
		list_del(&lpfc_cmd->list);
		found = 1;
		break;
	}
	if (!found) {
		spin_lock(&phba->scsi_buf_list_put_lock);
		list_splice(&phba->lpfc_scsi_buf_list_put,
			    &phba->lpfc_scsi_buf_list_get);
		INIT_LIST_HEAD(&phba->lpfc_scsi_buf_list_put);
		spin_unlock(&phba->scsi_buf_list_put_lock);
		list_for_each_entry_safe(lpfc_cmd, lpfc_cmd_next,
					 &phba->lpfc_scsi_buf_list_get, list) {
			if (lpfc_test_rrq_active(
				phba, ndlp, lpfc_cmd->cur_iocbq.sli4_lxritag))
				continue;
			list_del(&lpfc_cmd->list);
			found = 1;
			break;
		}
	}
	spin_unlock_irqrestore(&phba->scsi_buf_list_get_lock, iflag);
	if (!found)
		return NULL;
	return lpfc_cmd;
}
/**
 * lpfc_get_scsi_buf - Get a scsi buffer from lpfc_scsi_buf_list of the HBA
 * @phba: The HBA for which this call is being executed.
 *
 * This routine removes a scsi buffer from the head of the @phba
 * lpfc_scsi_buf_list list and returns it to the caller.
 *
 * Return codes:
 *   NULL - Error
 *   Pointer to lpfc_scsi_buf - Success
 **/
static struct lpfc_scsi_buf *
lpfc_get_scsi_buf(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp)
{
	return phba->lpfc_get_scsi_buf(phba, ndlp);
}
/**
 * lpfc_release_scsi_buf_s3 - Return a scsi buffer back to hba scsi buf list
 * @phba: The Hba for which this call is being executed.
 * @psb: The scsi buffer which is being released.
 *
 * This routine releases @psb scsi buffer by adding it to the tail of the
 * @phba lpfc_scsi_buf_list list.
 **/
static void
lpfc_release_scsi_buf_s3(struct lpfc_hba *phba, struct lpfc_scsi_buf *psb)
{
	unsigned long iflag = 0;

	psb->seg_cnt = 0;
	psb->nonsg_phys = 0;
	psb->prot_seg_cnt = 0;

	spin_lock_irqsave(&phba->scsi_buf_list_put_lock, iflag);
	psb->pCmd = NULL;
	psb->cur_iocbq.iocb_flag = LPFC_IO_FCP;
	list_add_tail(&psb->list, &phba->lpfc_scsi_buf_list_put);
	spin_unlock_irqrestore(&phba->scsi_buf_list_put_lock, iflag);
}
/**
 * lpfc_release_scsi_buf_s4: Return a scsi buffer back to hba scsi buf list.
 * @phba: The Hba for which this call is being executed.
 * @psb: The scsi buffer which is being released.
 *
 * This routine releases @psb scsi buffer by adding it to the tail of the
 * @phba lpfc_scsi_buf_list list. For SLI4, XRI's are tied to the scsi buffer
 * and cannot be reused for at least RA_TOV amount of time if the buffer
 * was aborted.
 **/
static void
lpfc_release_scsi_buf_s4(struct lpfc_hba *phba, struct lpfc_scsi_buf *psb)
{
	unsigned long iflag = 0;

	psb->seg_cnt = 0;
	psb->nonsg_phys = 0;
	psb->prot_seg_cnt = 0;

	if (psb->exch_busy) {
		spin_lock_irqsave(&phba->sli4_hba.abts_scsi_buf_list_lock,
					iflag);
		psb->pCmd = NULL;
		list_add_tail(&psb->list,
			      &phba->sli4_hba.lpfc_abts_scsi_buf_list);
		spin_unlock_irqrestore(&phba->sli4_hba.abts_scsi_buf_list_lock,
					iflag);
	} else {
		psb->pCmd = NULL;
		psb->cur_iocbq.iocb_flag = LPFC_IO_FCP;
		spin_lock_irqsave(&phba->scsi_buf_list_put_lock, iflag);
		list_add_tail(&psb->list, &phba->lpfc_scsi_buf_list_put);
		spin_unlock_irqrestore(&phba->scsi_buf_list_put_lock, iflag);
	}
}
/**
 * lpfc_release_scsi_buf: Return a scsi buffer back to hba scsi buf list.
 * @phba: The Hba for which this call is being executed.
 * @psb: The scsi buffer which is being released.
 *
 * This routine releases @psb scsi buffer by adding it to the tail of the
 * @phba lpfc_scsi_buf_list list.
 **/
static void
lpfc_release_scsi_buf(struct lpfc_hba *phba, struct lpfc_scsi_buf *psb)
{
	phba->lpfc_release_scsi_buf(phba, psb);
}
/**
 * lpfc_scsi_prep_dma_buf_s3 - DMA mapping for scsi buffer to SLI3 IF spec
 * @phba: The Hba for which this call is being executed.
 * @lpfc_cmd: The scsi buffer which is going to be mapped.
 *
 * This routine does the pci dma mapping for the scatter-gather list of the
 * scsi cmnd field of @lpfc_cmd for device with SLI-3 interface spec. This
 * routine scans through the sg elements and formats the bde. This routine
 * also initializes all IOCB fields which are dependent on the scsi command
 * request buffer.
 *
 * Return codes:
 *   1 - Error
 *   0 - Success
 **/
static int
lpfc_scsi_prep_dma_buf_s3(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd)
{
	struct scsi_cmnd *scsi_cmnd = lpfc_cmd->pCmd;
	struct scatterlist *sgel = NULL;
	struct fcp_cmnd *fcp_cmnd = lpfc_cmd->fcp_cmnd;
	struct ulp_bde64 *bpl = lpfc_cmd->fcp_bpl;
	struct lpfc_iocbq *iocbq = &lpfc_cmd->cur_iocbq;
	IOCB_t *iocb_cmd = &lpfc_cmd->cur_iocbq.iocb;
	struct ulp_bde64 *data_bde = iocb_cmd->unsli3.fcp_ext.dbde;
	dma_addr_t physaddr;
	uint32_t num_bde = 0;
	int nseg, datadir = scsi_cmnd->sc_data_direction;

	/*
	 * There are three possibilities here - use scatter-gather segment, use
	 * the single mapping, or neither.  Start the lpfc command prep by
	 * bumping the bpl beyond the fcp_cmnd and fcp_rsp regions to the first
	 * data bde entry.
	 */
	bpl += 2;
	if (scsi_sg_count(scsi_cmnd)) {
		/*
		 * The driver stores the segment count returned from pci_map_sg
		 * because this a count of dma-mappings used to map the use_sg
		 * pages.  They are not guaranteed to be the same for those
		 * architectures that implement an IOMMU.
		 */

		nseg = dma_map_sg(&phba->pcidev->dev, scsi_sglist(scsi_cmnd),
				  scsi_sg_count(scsi_cmnd), datadir);
		if (unlikely(!nseg))
			return 1;

		lpfc_cmd->seg_cnt = nseg;
		if (lpfc_cmd->seg_cnt > phba->cfg_sg_seg_cnt) {
			lpfc_printf_log(phba, KERN_ERR, LOG_BG,
				"9064 BLKGRD: %s: Too many sg segments from "
				"dma_map_sg.  Config %d, seg_cnt %d\n",
				__func__, phba->cfg_sg_seg_cnt,
				lpfc_cmd->seg_cnt);
			lpfc_cmd->seg_cnt = 0;
			scsi_dma_unmap(scsi_cmnd);
			return 1;
		}

		/*
		 * The driver established a maximum scatter-gather segment count
		 * during probe that limits the number of sg elements in any
		 * single scsi command.  Just run through the seg_cnt and format
		 * the bde's.
		 * When using SLI-3 the driver will try to fit all the BDEs into
		 * the IOCB. If it can't then the BDEs get added to a BPL as it
		 * does for SLI-2 mode.
		 */
		scsi_for_each_sg(scsi_cmnd, sgel, nseg, num_bde) {
			physaddr = sg_dma_address(sgel);
			if (phba->sli_rev == 3 &&
			    !(phba->sli3_options & LPFC_SLI3_BG_ENABLED) &&
			    !(iocbq->iocb_flag & DSS_SECURITY_OP) &&
			    nseg <= LPFC_EXT_DATA_BDE_COUNT) {
				data_bde->tus.f.bdeFlags = BUFF_TYPE_BDE_64;
				data_bde->tus.f.bdeSize = sg_dma_len(sgel);
				data_bde->addrLow = putPaddrLow(physaddr);
				data_bde->addrHigh = putPaddrHigh(physaddr);
				data_bde++;
			} else {
				bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64;
				bpl->tus.f.bdeSize = sg_dma_len(sgel);
				bpl->tus.w = le32_to_cpu(bpl->tus.w);
				bpl->addrLow =
					le32_to_cpu(putPaddrLow(physaddr));
				bpl->addrHigh =
					le32_to_cpu(putPaddrHigh(physaddr));
				bpl++;
			}
		}
	}

	/*
	 * Finish initializing those IOCB fields that are dependent on the
	 * scsi_cmnd request_buffer.  Note that for SLI-2 the bdeSize is
	 * explicitly reinitialized and for SLI-3 the extended bde count is
	 * explicitly reinitialized since all iocb memory resources are reused.
	 */
	if (phba->sli_rev == 3 &&
	    !(phba->sli3_options & LPFC_SLI3_BG_ENABLED) &&
	    !(iocbq->iocb_flag & DSS_SECURITY_OP)) {
		if (num_bde > LPFC_EXT_DATA_BDE_COUNT) {
			/*
			 * The extended IOCB format can only fit 3 BDE or a BPL.
			 * This I/O has more than 3 BDE so the 1st data bde will
			 * be a BPL that is filled in here.
			 */
			physaddr = lpfc_cmd->dma_handle;
			data_bde->tus.f.bdeFlags = BUFF_TYPE_BLP_64;
			data_bde->tus.f.bdeSize = (num_bde *
						   sizeof(struct ulp_bde64));
			physaddr += (sizeof(struct fcp_cmnd) +
				     sizeof(struct fcp_rsp) +
				     (2 * sizeof(struct ulp_bde64)));
			data_bde->addrHigh = putPaddrHigh(physaddr);
			data_bde->addrLow = putPaddrLow(physaddr);
			/* ebde count includes the response bde and data bpl */
			iocb_cmd->unsli3.fcp_ext.ebde_count = 2;
		} else {
			/* ebde count includes the response bde and data bdes */
			iocb_cmd->unsli3.fcp_ext.ebde_count = (num_bde + 1);
		}
	} else {
		iocb_cmd->un.fcpi64.bdl.bdeSize =
			((num_bde + 2) * sizeof(struct ulp_bde64));
		iocb_cmd->unsli3.fcp_ext.ebde_count = (num_bde + 1);
	}
	fcp_cmnd->fcpDl = cpu_to_be32(scsi_bufflen(scsi_cmnd));

	/*
	 * Due to difference in data length between DIF/non-DIF paths,
	 * we need to set word 4 of IOCB here
	 */
	iocb_cmd->un.fcpi.fcpi_parm = scsi_bufflen(scsi_cmnd);
	return 0;
}
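/*
 * Worked example (editor's illustration): on SLI-3 without BlockGuard,
 * a 2-segment I/O keeps both data BDEs inside the extended IOCB, giving
 * ebde_count = 2 + 1 = 3 (response BDE plus data BDEs) with no BPL;
 * a 4-segment I/O exceeds LPFC_EXT_DATA_BDE_COUNT, so the first data
 * BDE becomes a BPL pointer and ebde_count drops to 2.
 */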
#ifdef CONFIG_SCSI_LPFC_DEBUG_FS

/* Return BG_ERR_INIT if error injection is detected by Initiator */
#define BG_ERR_INIT	0x1
/* Return BG_ERR_TGT if error injection is detected by Target */
#define BG_ERR_TGT	0x2
/* Return BG_ERR_SWAP if swapping CSUM<-->CRC is required for error injection */
#define BG_ERR_SWAP	0x10
/*
 * Return BG_ERR_CHECK if disabling Guard/Ref/App checking is required for
 * error injection
 */
#define BG_ERR_CHECK	0x20
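/*
 * Example (editor's note): the masks combine; an injected WRITE_INSERT
 * reftag error returns BG_ERR_TGT | BG_ERR_CHECK, meaning the corrupt
 * tag travels on the wire for the target to catch, and the caller must
 * also disable local checking so the port does not reject the I/O first.
 */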
/**
 * lpfc_bg_err_inject - Determine if we should inject an error
 * @phba: The Hba for which this call is being executed.
 * @sc: The SCSI command to examine
 * @reftag: (out) BlockGuard reference tag for transmitted data
 * @apptag: (out) BlockGuard application tag for transmitted data
 * @new_guard: (in) Value to replace CRC with if needed
 *
 * Returns BG_ERR_* bit mask or 0 if request ignored
 **/
static int
lpfc_bg_err_inject(struct lpfc_hba *phba, struct scsi_cmnd *sc,
		uint32_t *reftag, uint16_t *apptag, uint32_t new_guard)
{
	struct scatterlist *sgpe; /* s/g prot entry */
	struct lpfc_scsi_buf *lpfc_cmd = NULL;
	struct scsi_dif_tuple *src = NULL;
	struct lpfc_nodelist *ndlp;
	struct lpfc_rport_data *rdata;
	uint32_t op = scsi_get_prot_op(sc);
	uint32_t blksize;
	uint32_t numblks;
	sector_t lba;
	int rc = 0;
	int blockoff = 0;

	if (op == SCSI_PROT_NORMAL)
		return 0;

	sgpe = scsi_prot_sglist(sc);
	lba = scsi_get_lba(sc);

	/* First check if we need to match the LBA */
	if (phba->lpfc_injerr_lba != LPFC_INJERR_LBA_OFF) {
		blksize = lpfc_cmd_blksize(sc);
		numblks = (scsi_bufflen(sc) + blksize - 1) / blksize;

		/* Make sure we have the right LBA if one is specified */
		if ((phba->lpfc_injerr_lba < lba) ||
		    (phba->lpfc_injerr_lba >= (lba + numblks)))
			return 0;
		if (sgpe) {
			blockoff = phba->lpfc_injerr_lba - lba;
			numblks = sg_dma_len(sgpe) /
				sizeof(struct scsi_dif_tuple);
			if (numblks < blockoff)
				blockoff = numblks;
		}
	}

	/* Next check if we need to match the remote NPortID or WWPN */
	rdata = lpfc_rport_data_from_scsi_device(sc->device);
	if (rdata && rdata->pnode) {
		ndlp = rdata->pnode;

		/* Make sure we have the right NPortID if one is specified */
		if (phba->lpfc_injerr_nportid &&
		    (phba->lpfc_injerr_nportid != ndlp->nlp_DID))
			return 0;

		/*
		 * Make sure we have the right WWPN if one is specified.
		 * wwn[0] should be a non-zero NAA in a good WWPN.
		 */
		if (phba->lpfc_injerr_wwpn.u.wwn[0] &&
		    (memcmp(&ndlp->nlp_portname, &phba->lpfc_injerr_wwpn,
			    sizeof(struct lpfc_name)) != 0))
			return 0;
	}

	/* Setup a ptr to the protection data if the SCSI host provides it */
	if (sgpe) {
		src = (struct scsi_dif_tuple *)sg_virt(sgpe);
		src += blockoff;
		lpfc_cmd = (struct lpfc_scsi_buf *)sc->host_scribble;
	}

	/* Should we change the Reference Tag */
	if (reftag) {
		if (phba->lpfc_injerr_wref_cnt) {
			switch (op) {
			case SCSI_PROT_WRITE_PASS:
				if (src) {
					/*
					 * For WRITE_PASS, force the error
					 * to be sent on the wire. It should
					 * be detected by the Target.
					 * If blockoff != 0 error will be
					 * inserted in middle of the IO.
					 */
					lpfc_printf_log(phba, KERN_ERR, LOG_BG,
					"9076 BLKGRD: Injecting reftag error: "
					"write lba x%lx + x%x oldrefTag x%x\n",
					(unsigned long)lba, blockoff,
					be32_to_cpu(src->ref_tag));

					/*
					 * Save the old ref_tag so we can
					 * restore it on completion.
					 */
					if (lpfc_cmd) {
						lpfc_cmd->prot_data_type =
							LPFC_INJERR_REFTAG;
						lpfc_cmd->prot_data_segment =
							src;
						lpfc_cmd->prot_data =
							src->ref_tag;
					}
					src->ref_tag = cpu_to_be32(0xDEADBEEF);
					phba->lpfc_injerr_wref_cnt--;
					if (phba->lpfc_injerr_wref_cnt == 0) {
						phba->lpfc_injerr_nportid = 0;
						phba->lpfc_injerr_lba =
							LPFC_INJERR_LBA_OFF;
						memset(&phba->lpfc_injerr_wwpn,
						  0, sizeof(struct lpfc_name));
					}
					rc = BG_ERR_TGT | BG_ERR_CHECK;

					break;
				}
				/* Drop thru */
			case SCSI_PROT_WRITE_INSERT:
				/*
				 * For WRITE_INSERT, force the error
				 * to be sent on the wire. It should be
				 * detected by the Target.
				 */
				/* DEADBEEF will be the reftag on the wire */
				*reftag = 0xDEADBEEF;
				phba->lpfc_injerr_wref_cnt--;
				if (phba->lpfc_injerr_wref_cnt == 0) {
					phba->lpfc_injerr_nportid = 0;
					phba->lpfc_injerr_lba =
						LPFC_INJERR_LBA_OFF;
					memset(&phba->lpfc_injerr_wwpn,
						0, sizeof(struct lpfc_name));
				}
				rc = BG_ERR_TGT | BG_ERR_CHECK;

				lpfc_printf_log(phba, KERN_ERR, LOG_BG,
					"9078 BLKGRD: Injecting reftag error: "
					"write lba x%lx\n", (unsigned long)lba);
				break;
			case SCSI_PROT_WRITE_STRIP:
				/*
				 * For WRITE_STRIP and WRITE_PASS,
				 * force the error on data
				 * being copied from SLI-Host to SLI-Port.
				 */
				*reftag = 0xDEADBEEF;
				phba->lpfc_injerr_wref_cnt--;
				if (phba->lpfc_injerr_wref_cnt == 0) {
					phba->lpfc_injerr_nportid = 0;
					phba->lpfc_injerr_lba =
						LPFC_INJERR_LBA_OFF;
					memset(&phba->lpfc_injerr_wwpn,
						0, sizeof(struct lpfc_name));
				}
				rc = BG_ERR_INIT;

				lpfc_printf_log(phba, KERN_ERR, LOG_BG,
					"9077 BLKGRD: Injecting reftag error: "
					"write lba x%lx\n", (unsigned long)lba);
				break;
			}
		}
		if (phba->lpfc_injerr_rref_cnt) {
			switch (op) {
			case SCSI_PROT_READ_INSERT:
			case SCSI_PROT_READ_STRIP:
			case SCSI_PROT_READ_PASS:
				/*
				 * For READ_STRIP and READ_PASS, force the
				 * error on data being read off the wire. It
				 * should force an IO error to the driver.
				 */
				*reftag = 0xDEADBEEF;
				phba->lpfc_injerr_rref_cnt--;
				if (phba->lpfc_injerr_rref_cnt == 0) {
					phba->lpfc_injerr_nportid = 0;
					phba->lpfc_injerr_lba =
						LPFC_INJERR_LBA_OFF;
					memset(&phba->lpfc_injerr_wwpn,
						0, sizeof(struct lpfc_name));
				}
				rc = BG_ERR_INIT;

				lpfc_printf_log(phba, KERN_ERR, LOG_BG,
					"9079 BLKGRD: Injecting reftag error: "
					"read lba x%lx\n", (unsigned long)lba);
				break;
			}
		}
	}

	/* Should we change the Application Tag */
	if (apptag) {
		if (phba->lpfc_injerr_wapp_cnt) {
			switch (op) {
			case SCSI_PROT_WRITE_PASS:
				if (src) {
					/*
					 * For WRITE_PASS, force the error
					 * to be sent on the wire. It should
					 * be detected by the Target.
					 * If blockoff != 0 error will be
					 * inserted in middle of the IO.
					 */
					lpfc_printf_log(phba, KERN_ERR, LOG_BG,
					"9080 BLKGRD: Injecting apptag error: "
					"write lba x%lx + x%x oldappTag x%x\n",
					(unsigned long)lba, blockoff,
					be16_to_cpu(src->app_tag));

					/*
					 * Save the old app_tag so we can
					 * restore it on completion.
					 */
					if (lpfc_cmd) {
						lpfc_cmd->prot_data_type =
							LPFC_INJERR_APPTAG;
						lpfc_cmd->prot_data_segment =
							src;
						lpfc_cmd->prot_data =
							src->app_tag;
					}
					src->app_tag = cpu_to_be16(0xDEAD);
					phba->lpfc_injerr_wapp_cnt--;
					if (phba->lpfc_injerr_wapp_cnt == 0) {
						phba->lpfc_injerr_nportid = 0;
						phba->lpfc_injerr_lba =
							LPFC_INJERR_LBA_OFF;
						memset(&phba->lpfc_injerr_wwpn,
						  0, sizeof(struct lpfc_name));
					}
					rc = BG_ERR_TGT | BG_ERR_CHECK;
					break;
				}
				/* Drop thru */
			case SCSI_PROT_WRITE_INSERT:
				/*
				 * For WRITE_INSERT, force the
				 * error to be sent on the wire. It should be
				 * detected by the Target.
				 */
				/* DEAD will be the apptag on the wire */
				*apptag = 0xDEAD;
				phba->lpfc_injerr_wapp_cnt--;
				if (phba->lpfc_injerr_wapp_cnt == 0) {
					phba->lpfc_injerr_nportid = 0;
					phba->lpfc_injerr_lba =
						LPFC_INJERR_LBA_OFF;
					memset(&phba->lpfc_injerr_wwpn,
						0, sizeof(struct lpfc_name));
				}
				rc = BG_ERR_TGT | BG_ERR_CHECK;

				lpfc_printf_log(phba, KERN_ERR, LOG_BG,
					"0813 BLKGRD: Injecting apptag error: "
					"write lba x%lx\n", (unsigned long)lba);
				break;
			case SCSI_PROT_WRITE_STRIP:
				/*
				 * For WRITE_STRIP and WRITE_PASS,
				 * force the error on data
				 * being copied from SLI-Host to SLI-Port.
				 */
				*apptag = 0xDEAD;
				phba->lpfc_injerr_wapp_cnt--;
				if (phba->lpfc_injerr_wapp_cnt == 0) {
					phba->lpfc_injerr_nportid = 0;
					phba->lpfc_injerr_lba =
						LPFC_INJERR_LBA_OFF;
					memset(&phba->lpfc_injerr_wwpn,
						0, sizeof(struct lpfc_name));
				}
				rc = BG_ERR_INIT;

				lpfc_printf_log(phba, KERN_ERR, LOG_BG,
					"0812 BLKGRD: Injecting apptag error: "
					"write lba x%lx\n", (unsigned long)lba);
				break;
			}
		}
		if (phba->lpfc_injerr_rapp_cnt) {
			switch (op) {
			case SCSI_PROT_READ_INSERT:
			case SCSI_PROT_READ_STRIP:
			case SCSI_PROT_READ_PASS:
				/*
				 * For READ_STRIP and READ_PASS, force the
				 * error on data being read off the wire. It
				 * should force an IO error to the driver.
				 */
				*apptag = 0xDEAD;
				phba->lpfc_injerr_rapp_cnt--;
				if (phba->lpfc_injerr_rapp_cnt == 0) {
					phba->lpfc_injerr_nportid = 0;
					phba->lpfc_injerr_lba =
						LPFC_INJERR_LBA_OFF;
					memset(&phba->lpfc_injerr_wwpn,
						0, sizeof(struct lpfc_name));
				}
				rc = BG_ERR_INIT;

				lpfc_printf_log(phba, KERN_ERR, LOG_BG,
					"0814 BLKGRD: Injecting apptag error: "
					"read lba x%lx\n", (unsigned long)lba);
				break;
			}
		}
	}


	/* Should we change the Guard Tag */
	if (new_guard) {
		if (phba->lpfc_injerr_wgrd_cnt) {
			switch (op) {
			case SCSI_PROT_WRITE_PASS:
				rc = BG_ERR_CHECK;
				/* Drop thru */

			case SCSI_PROT_WRITE_INSERT:
				/*
				 * For WRITE_INSERT, force the
				 * error to be sent on the wire. It should be
				 * detected by the Target.
				 */
				phba->lpfc_injerr_wgrd_cnt--;
				if (phba->lpfc_injerr_wgrd_cnt == 0) {
					phba->lpfc_injerr_nportid = 0;
					phba->lpfc_injerr_lba =
						LPFC_INJERR_LBA_OFF;
					memset(&phba->lpfc_injerr_wwpn,
						0, sizeof(struct lpfc_name));
				}

				rc |= BG_ERR_TGT | BG_ERR_SWAP;
				/* Signals the caller to swap CRC->CSUM */

				lpfc_printf_log(phba, KERN_ERR, LOG_BG,
					"0817 BLKGRD: Injecting guard error: "
					"write lba x%lx\n", (unsigned long)lba);
				break;
			case SCSI_PROT_WRITE_STRIP:
				/*
				 * For WRITE_STRIP and WRITE_PASS,
				 * force the error on data
				 * being copied from SLI-Host to SLI-Port.
				 */
				phba->lpfc_injerr_wgrd_cnt--;
				if (phba->lpfc_injerr_wgrd_cnt == 0) {
					phba->lpfc_injerr_nportid = 0;
					phba->lpfc_injerr_lba =
						LPFC_INJERR_LBA_OFF;
					memset(&phba->lpfc_injerr_wwpn,
						0, sizeof(struct lpfc_name));
				}

				rc = BG_ERR_INIT | BG_ERR_SWAP;
				/* Signals the caller to swap CRC->CSUM */

				lpfc_printf_log(phba, KERN_ERR, LOG_BG,
					"0816 BLKGRD: Injecting guard error: "
					"write lba x%lx\n", (unsigned long)lba);
				break;
			}
		}
		if (phba->lpfc_injerr_rgrd_cnt) {
			switch (op) {
			case SCSI_PROT_READ_INSERT:
			case SCSI_PROT_READ_STRIP:
			case SCSI_PROT_READ_PASS:
				/*
				 * For READ_STRIP and READ_PASS, force the
				 * error on data being read off the wire. It
				 * should force an IO error to the driver.
				 */
				phba->lpfc_injerr_rgrd_cnt--;
				if (phba->lpfc_injerr_rgrd_cnt == 0) {
					phba->lpfc_injerr_nportid = 0;
					phba->lpfc_injerr_lba =
						LPFC_INJERR_LBA_OFF;
					memset(&phba->lpfc_injerr_wwpn,
						0, sizeof(struct lpfc_name));
				}

				rc = BG_ERR_INIT | BG_ERR_SWAP;
				/* Signals the caller to swap CRC->CSUM */

				lpfc_printf_log(phba, KERN_ERR, LOG_BG,
					"0818 BLKGRD: Injecting guard error: "
					"read lba x%lx\n", (unsigned long)lba);
				break;
			}
		}
	}

	return rc;
}
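/*
 * Worked example (editor's illustration): with lpfc_injerr_lba = 0x1008
 * and a WRITE_PASS of 16 blocks starting at lba 0x1000, blockoff = 8;
 * the ninth protection tuple's ref_tag is overwritten with 0xDEADBEEF
 * after the original value is saved in the lpfc_scsi_buf so it can be
 * restored when the command completes.
 */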
/**
 * lpfc_sc_to_bg_opcodes - Determine the BlockGuard opcodes to be used with
 * the specified SCSI command.
 * @phba: The Hba for which this call is being executed.
 * @sc: The SCSI command to examine
 * @txop: (out) BlockGuard operation for transmitted data
 * @rxop: (out) BlockGuard operation for received data
 *
 * Returns: zero on success; non-zero if tx and/or rx op cannot be determined
 *
 **/
static int
lpfc_sc_to_bg_opcodes(struct lpfc_hba *phba, struct scsi_cmnd *sc,
		uint8_t *txop, uint8_t *rxop)
{
	uint8_t ret = 0;

	if (lpfc_cmd_guard_csum(sc)) {
		switch (scsi_get_prot_op(sc)) {
		case SCSI_PROT_READ_INSERT:
		case SCSI_PROT_WRITE_STRIP:
			*rxop = BG_OP_IN_NODIF_OUT_CSUM;
			*txop = BG_OP_IN_CSUM_OUT_NODIF;
			break;

		case SCSI_PROT_READ_STRIP:
		case SCSI_PROT_WRITE_INSERT:
			*rxop = BG_OP_IN_CRC_OUT_NODIF;
			*txop = BG_OP_IN_NODIF_OUT_CRC;
			break;

		case SCSI_PROT_READ_PASS:
		case SCSI_PROT_WRITE_PASS:
			*rxop = BG_OP_IN_CRC_OUT_CSUM;
			*txop = BG_OP_IN_CSUM_OUT_CRC;
			break;

		case SCSI_PROT_NORMAL:
		default:
			lpfc_printf_log(phba, KERN_ERR, LOG_BG,
				"9063 BLKGRD: Bad op/guard:%d/IP combination\n",
					scsi_get_prot_op(sc));
			ret = 1;
			break;

		}
	} else {
		switch (scsi_get_prot_op(sc)) {
		case SCSI_PROT_READ_STRIP:
		case SCSI_PROT_WRITE_INSERT:
			*rxop = BG_OP_IN_CRC_OUT_NODIF;
			*txop = BG_OP_IN_NODIF_OUT_CRC;
			break;

		case SCSI_PROT_READ_PASS:
		case SCSI_PROT_WRITE_PASS:
			*rxop = BG_OP_IN_CRC_OUT_CRC;
			*txop = BG_OP_IN_CRC_OUT_CRC;
			break;

		case SCSI_PROT_READ_INSERT:
		case SCSI_PROT_WRITE_STRIP:
			*rxop = BG_OP_IN_NODIF_OUT_CRC;
			*txop = BG_OP_IN_CRC_OUT_NODIF;
			break;

		case SCSI_PROT_NORMAL:
		default:
			lpfc_printf_log(phba, KERN_ERR, LOG_BG,
				"9075 BLKGRD: Bad op/guard:%d/CRC combination\n",
					scsi_get_prot_op(sc));
			ret = 1;
			break;
		}
	}

	return ret;
}
#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
/**
 * lpfc_bg_err_opcodes - Re-determine the BlockGuard opcodes to be used with
 * the specified SCSI command in order to force a guard tag error.
 * @phba: The Hba for which this call is being executed.
 * @sc: The SCSI command to examine
 * @txop: (out) BlockGuard operation for transmitted data
 * @rxop: (out) BlockGuard operation for received data
 *
 * Returns: zero on success; non-zero if tx and/or rx op cannot be determined
 *
 **/
static int
lpfc_bg_err_opcodes(struct lpfc_hba *phba, struct scsi_cmnd *sc,
		uint8_t *txop, uint8_t *rxop)
{
	uint8_t ret = 0;

	if (lpfc_cmd_guard_csum(sc)) {
		switch (scsi_get_prot_op(sc)) {
		case SCSI_PROT_READ_INSERT:
		case SCSI_PROT_WRITE_STRIP:
			*rxop = BG_OP_IN_NODIF_OUT_CRC;
			*txop = BG_OP_IN_CRC_OUT_NODIF;
			break;

		case SCSI_PROT_READ_STRIP:
		case SCSI_PROT_WRITE_INSERT:
			*rxop = BG_OP_IN_CSUM_OUT_NODIF;
			*txop = BG_OP_IN_NODIF_OUT_CSUM;
			break;

		case SCSI_PROT_READ_PASS:
		case SCSI_PROT_WRITE_PASS:
			*rxop = BG_OP_IN_CSUM_OUT_CRC;
			*txop = BG_OP_IN_CRC_OUT_CSUM;
			break;

		case SCSI_PROT_NORMAL:
		default:
			break;
		}
	} else {
		switch (scsi_get_prot_op(sc)) {
		case SCSI_PROT_READ_STRIP:
		case SCSI_PROT_WRITE_INSERT:
			*rxop = BG_OP_IN_CSUM_OUT_NODIF;
			*txop = BG_OP_IN_NODIF_OUT_CSUM;
			break;

		case SCSI_PROT_READ_PASS:
		case SCSI_PROT_WRITE_PASS:
			*rxop = BG_OP_IN_CSUM_OUT_CSUM;
			*txop = BG_OP_IN_CSUM_OUT_CSUM;
			break;

		case SCSI_PROT_READ_INSERT:
		case SCSI_PROT_WRITE_STRIP:
			*rxop = BG_OP_IN_NODIF_OUT_CSUM;
			*txop = BG_OP_IN_CSUM_OUT_NODIF;
			break;

		case SCSI_PROT_NORMAL:
		default:
			break;
		}
	}

	return ret;
}
#endif
/**
 * lpfc_bg_setup_bpl - Setup BlockGuard BPL with no protection data
 * @phba: The Hba for which this call is being executed.
 * @sc: pointer to scsi command we're working on
 * @bpl: pointer to buffer list for protection groups
 * @datasegcnt: number of segments of data that have been dma mapped
 *
 * This function sets up BPL buffer list for protection groups of
 * type LPFC_PG_TYPE_NO_DIF
 *
 * This is usually used when the HBA is instructed to generate
 * DIFs and insert them into data stream (or strip DIF from
 * incoming data stream)
 *
 * The buffer list consists of just one protection group described
 * below:
 *                                +-------------------------+
 *   start of prot group  -->     |          PDE_5          |
 *                                +-------------------------+
 *                                |          PDE_6          |
 *                                +-------------------------+
 *                                |         Data BDE        |
 *                                +-------------------------+
 *                                |more Data BDE's ... (opt)|
 *                                +-------------------------+
 *
 *
 * Note: Data s/g buffers have been dma mapped
 *
 * Returns the number of BDEs added to the BPL.
 **/
static int
lpfc_bg_setup_bpl(struct lpfc_hba *phba, struct scsi_cmnd *sc,
		struct ulp_bde64 *bpl, int datasegcnt)
{
	struct scatterlist *sgde = NULL; /* s/g data entry */
	struct lpfc_pde5 *pde5 = NULL;
	struct lpfc_pde6 *pde6 = NULL;
	dma_addr_t physaddr;
	int i = 0, num_bde = 0, status;
	int datadir = sc->sc_data_direction;
#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
	uint32_t rc;
#endif
	uint32_t checking = 1;
	uint32_t reftag;
	uint8_t txop, rxop;

	status = lpfc_sc_to_bg_opcodes(phba, sc, &txop, &rxop);
	if (status)
		goto out;

	/* extract some info from the scsi command for pde*/
	reftag = (uint32_t)scsi_get_lba(sc); /* Truncate LBA */

#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
	rc = lpfc_bg_err_inject(phba, sc, &reftag, NULL, 1);
	if (rc) {
		if (rc & BG_ERR_SWAP)
			lpfc_bg_err_opcodes(phba, sc, &txop, &rxop);
		if (rc & BG_ERR_CHECK)
			checking = 0;
	}
#endif

	/* setup PDE5 with what we have */
	pde5 = (struct lpfc_pde5 *) bpl;
	memset(pde5, 0, sizeof(struct lpfc_pde5));
	bf_set(pde5_type, pde5, LPFC_PDE5_DESCRIPTOR);

	/* Endianness conversion if necessary for PDE5 */
	pde5->word0 = cpu_to_le32(pde5->word0);
	pde5->reftag = cpu_to_le32(reftag);

	/* advance bpl and increment bde count */
	num_bde++;
	bpl++;
	pde6 = (struct lpfc_pde6 *) bpl;

	/* setup PDE6 with the rest of the info */
	memset(pde6, 0, sizeof(struct lpfc_pde6));
	bf_set(pde6_type, pde6, LPFC_PDE6_DESCRIPTOR);
	bf_set(pde6_optx, pde6, txop);
	bf_set(pde6_oprx, pde6, rxop);

	/*
	 * We only need to check the data on READs, for WRITEs
	 * protection data is automatically generated, not checked.
	 */
	if (datadir == DMA_FROM_DEVICE) {
		if (lpfc_cmd_protect(sc, LPFC_CHECK_PROTECT_GUARD))
			bf_set(pde6_ce, pde6, checking);
		else
			bf_set(pde6_ce, pde6, 0);

		if (lpfc_cmd_protect(sc, LPFC_CHECK_PROTECT_REF))
			bf_set(pde6_re, pde6, checking);
		else
			bf_set(pde6_re, pde6, 0);
	}
	bf_set(pde6_ai, pde6, 1);
	bf_set(pde6_ae, pde6, 0);
	bf_set(pde6_apptagval, pde6, 0);

	/* Endianness conversion if necessary for PDE6 */
	pde6->word0 = cpu_to_le32(pde6->word0);
	pde6->word1 = cpu_to_le32(pde6->word1);
	pde6->word2 = cpu_to_le32(pde6->word2);

	/* advance bpl and increment bde count */
	num_bde++;
	bpl++;

	/* assumption: caller has already run dma_map_sg on command data */
	scsi_for_each_sg(sc, sgde, datasegcnt, i) {
		physaddr = sg_dma_address(sgde);
		bpl->addrLow = le32_to_cpu(putPaddrLow(physaddr));
		bpl->addrHigh = le32_to_cpu(putPaddrHigh(physaddr));
		bpl->tus.f.bdeSize = sg_dma_len(sgde);
		if (datadir == DMA_TO_DEVICE)
			bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64;
		else
			bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64I;
		bpl->tus.w = le32_to_cpu(bpl->tus.w);
		bpl++;
		num_bde++;
	}

out:
	return num_bde;
}
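/*
 * Editor's note: for LPFC_PG_TYPE_NO_DIF the entire command forms a
 * single protection group, so the BPL built above is simply
 * PDE5 + PDE6 + datasegcnt data BDEs, i.e. num_bde = datasegcnt + 2.
 */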
/**
 * lpfc_bg_setup_bpl_prot - Setup BlockGuard BPL with protection data
 * @phba: The Hba for which this call is being executed.
 * @sc: pointer to scsi command we're working on
 * @bpl: pointer to buffer list for protection groups
 * @datacnt: number of segments of data that have been dma mapped
 * @protcnt: number of segments of protection data that have been dma mapped
 *
 * This function sets up BPL buffer list for protection groups of
 * type LPFC_PG_TYPE_DIF
 *
 * This is usually used when DIFs are in their own buffers,
 * separate from the data. The HBA can then be instructed
 * to place the DIFs in the outgoing stream.  For read operations,
 * the HBA could extract the DIFs and place them in DIF buffers.
 *
 * The buffer list for this type consists of one or more of the
 * protection groups described below:
 *                                    +-------------------------+
 *   start of first prot group  -->   |          PDE_5          |
 *                                    +-------------------------+
 *                                    |          PDE_6          |
 *                                    +-------------------------+
 *                                    |      PDE_7 (Prot BDE)   |
 *                                    +-------------------------+
 *                                    |        Data BDE         |
 *                                    +-------------------------+
 *                                    |more Data BDE's ... (opt)|
 *                                    +-------------------------+
 *   start of new  prot group  -->    |          PDE_5          |
 *                                    +-------------------------+
 *                                    |          ...            |
 *                                    +-------------------------+
 *
 * Note: It is assumed that both data and protection s/g buffers have been
 *       dma mapped
 *
 * Returns the number of BDEs added to the BPL.
 **/
static int
lpfc_bg_setup_bpl_prot(struct lpfc_hba *phba, struct scsi_cmnd *sc,
		struct ulp_bde64 *bpl, int datacnt, int protcnt)
{
	struct scatterlist *sgde = NULL; /* s/g data entry */
	struct scatterlist *sgpe = NULL; /* s/g prot entry */
	struct lpfc_pde5 *pde5 = NULL;
	struct lpfc_pde6 *pde6 = NULL;
	struct lpfc_pde7 *pde7 = NULL;
	dma_addr_t dataphysaddr, protphysaddr;
	unsigned short curr_data = 0, curr_prot = 0;
	unsigned int split_offset;
	unsigned int protgroup_len, protgroup_offset = 0, protgroup_remainder;
	unsigned int protgrp_blks, protgrp_bytes;
	unsigned int remainder, subtotal;
	int status;
	int datadir = sc->sc_data_direction;
	unsigned char pgdone = 0, alldone = 0;
	unsigned blksize;
#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
	uint32_t rc;
#endif
	uint32_t checking = 1;
	uint32_t reftag;
	uint8_t txop, rxop;
	int num_bde = 0;

	sgpe = scsi_prot_sglist(sc);
	sgde = scsi_sglist(sc);

	if (!sgpe || !sgde) {
		lpfc_printf_log(phba, KERN_ERR, LOG_FCP,
				"9020 Invalid s/g entry: data=0x%p prot=0x%p\n",
				sgpe, sgde);
		return 0;
	}

	status = lpfc_sc_to_bg_opcodes(phba, sc, &txop, &rxop);
	if (status)
		goto out;

	/* extract some info from the scsi command */
	blksize = lpfc_cmd_blksize(sc);
	reftag = (uint32_t)scsi_get_lba(sc); /* Truncate LBA */

#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
	rc = lpfc_bg_err_inject(phba, sc, &reftag, NULL, 1);
	if (rc) {
		if (rc & BG_ERR_SWAP)
			lpfc_bg_err_opcodes(phba, sc, &txop, &rxop);
		if (rc & BG_ERR_CHECK)
			checking = 0;
	}
#endif

	split_offset = 0;
	do {
		/* Check to see if we ran out of space */
		if (num_bde >= (phba->cfg_total_seg_cnt - 2))
			return num_bde + 3;

		/* setup PDE5 with what we have */
		pde5 = (struct lpfc_pde5 *) bpl;
		memset(pde5, 0, sizeof(struct lpfc_pde5));
		bf_set(pde5_type, pde5, LPFC_PDE5_DESCRIPTOR);

		/* Endianness conversion if necessary for PDE5 */
		pde5->word0 = cpu_to_le32(pde5->word0);
		pde5->reftag = cpu_to_le32(reftag);

		/* advance bpl and increment bde count */
		num_bde++;
		bpl++;
		pde6 = (struct lpfc_pde6 *) bpl;

		/* setup PDE6 with the rest of the info */
		memset(pde6, 0, sizeof(struct lpfc_pde6));
		bf_set(pde6_type, pde6, LPFC_PDE6_DESCRIPTOR);
		bf_set(pde6_optx, pde6, txop);
		bf_set(pde6_oprx, pde6, rxop);

		if (lpfc_cmd_protect(sc, LPFC_CHECK_PROTECT_GUARD))
			bf_set(pde6_ce, pde6, checking);
		else
			bf_set(pde6_ce, pde6, 0);

		if (lpfc_cmd_protect(sc, LPFC_CHECK_PROTECT_REF))
			bf_set(pde6_re, pde6, checking);
		else
			bf_set(pde6_re, pde6, 0);

		bf_set(pde6_ai, pde6, 1);
		bf_set(pde6_ae, pde6, 0);
		bf_set(pde6_apptagval, pde6, 0);

		/* Endianness conversion if necessary for PDE6 */
		pde6->word0 = cpu_to_le32(pde6->word0);
		pde6->word1 = cpu_to_le32(pde6->word1);
		pde6->word2 = cpu_to_le32(pde6->word2);

		/* advance bpl and increment bde count */
		num_bde++;
		bpl++;

		/* setup the first BDE that points to protection buffer */
		protphysaddr = sg_dma_address(sgpe) + protgroup_offset;
		protgroup_len = sg_dma_len(sgpe) - protgroup_offset;

		/* must be integer multiple of the DIF block length */
		BUG_ON(protgroup_len % 8);

		pde7 = (struct lpfc_pde7 *) bpl;
		memset(pde7, 0, sizeof(struct lpfc_pde7));
		bf_set(pde7_type, pde7, LPFC_PDE7_DESCRIPTOR);

		pde7->addrHigh = le32_to_cpu(putPaddrHigh(protphysaddr));
		pde7->addrLow = le32_to_cpu(putPaddrLow(protphysaddr));

		protgrp_blks = protgroup_len / 8;
		protgrp_bytes = protgrp_blks * blksize;

		/* check if this pde is crossing the 4K boundary; if so split */
		if ((pde7->addrLow & 0xfff) + protgroup_len > 0x1000) {
			protgroup_remainder = 0x1000 - (pde7->addrLow & 0xfff);
			protgroup_offset += protgroup_remainder;
			protgrp_blks = protgroup_remainder / 8;
			protgrp_bytes = protgrp_blks * blksize;
		} else {
			protgroup_offset = 0;
			curr_prot++;
		}

		num_bde++;

		/* setup BDE's for data blocks associated with DIF data */
		pgdone = 0;
		subtotal = 0; /* total bytes processed for current prot grp */
		while (!pgdone) {
			/* Check to see if we ran out of space */
			if (num_bde >= phba->cfg_total_seg_cnt)
				return num_bde + 1;

			if (!sgde) {
				lpfc_printf_log(phba, KERN_ERR, LOG_BG,
					"9065 BLKGRD:%s Invalid data segment\n",
						__func__);
				return 0;
			}
			bpl++;
			dataphysaddr = sg_dma_address(sgde) + split_offset;
			bpl->addrLow = le32_to_cpu(putPaddrLow(dataphysaddr));
			bpl->addrHigh = le32_to_cpu(putPaddrHigh(dataphysaddr));

			remainder = sg_dma_len(sgde) - split_offset;

			if ((subtotal + remainder) <= protgrp_bytes) {
				/* we can use this whole buffer */
				bpl->tus.f.bdeSize = remainder;
				split_offset = 0;

				if ((subtotal + remainder) == protgrp_bytes)
					pgdone = 1;
			} else {
				/* must split this buffer with next prot grp */
				bpl->tus.f.bdeSize = protgrp_bytes - subtotal;
				split_offset += bpl->tus.f.bdeSize;
			}

			subtotal += bpl->tus.f.bdeSize;

			if (datadir == DMA_TO_DEVICE)
				bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64;
			else
				bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64I;
			bpl->tus.w = le32_to_cpu(bpl->tus.w);

			num_bde++;
			curr_data++;

			if (split_offset)
				break;

			/* Move to the next s/g segment if possible */
			sgde = sg_next(sgde);
		}

		if (protgroup_offset) {
			/* update the reference tag */
			reftag += protgrp_blks;
			bpl++;
			continue;
		}

		/* are we done ? */
		if (curr_prot == protcnt) {
			alldone = 1;
		} else if (curr_prot < protcnt) {
			/* advance to next prot buffer */
			sgpe = sg_next(sgpe);
			bpl++;

			/* update the reference tag */
			reftag += protgrp_blks;
		} else {
			/* if we're here, we have a bug */
			lpfc_printf_log(phba, KERN_ERR, LOG_BG,
				"9054 BLKGRD: bug in %s\n", __func__);
		}

	} while (!alldone);
out:

	return num_bde;
}
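/*
 * Illustrative sketch, hypothetical helper: the 4K-boundary split in
 * lpfc_bg_setup_bpl_prot() above. Given the low address bits of a
 * protection buffer and its remaining length, return how many bytes
 * of DIF data the current PDE7 may carry before a new protection
 * group must be started. For example, a 0x100-byte protection buffer
 * at page offset 0xf80 yields a 0x80-byte first chunk (16 8-byte DIF
 * tuples); the remainder is carried by the next protection group.
 */
static inline unsigned int lpfc_bg_prot_chunk_len(uint32_t addr_low,
						  unsigned int protgroup_len)
{
	if ((addr_low & 0xfff) + protgroup_len > 0x1000)
		return 0x1000 - (addr_low & 0xfff);	/* split at 4K */
	return protgroup_len;				/* fits as is */
}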
/**
 * lpfc_bg_setup_sgl - Setup BlockGuard SGL with no protection data
 * @phba: The Hba for which this call is being executed.
 * @sc: pointer to scsi command we're working on
 * @sgl: pointer to buffer list for protection groups
 * @datacnt: number of segments of data that have been dma mapped
 *
 * This function sets up SGL buffer list for protection groups of
 * type LPFC_PG_TYPE_NO_DIF
 *
 * This is usually used when the HBA is instructed to generate
 * DIFs and insert them into data stream (or strip DIF from
 * incoming data stream)
 *
 * The buffer list consists of just one protection group described
 * below:
 *                                +-------------------------+
 *   start of prot group  -->     |         DI_SEED         |
 *                                +-------------------------+
 *                                |         Data SGE        |
 *                                +-------------------------+
 *                                |more Data SGE's ... (opt)|
 *                                +-------------------------+
 *
 * Note: Data s/g buffers have been dma mapped
 *
 * Returns the number of SGEs added to the SGL.
 **/
static int
lpfc_bg_setup_sgl(struct lpfc_hba *phba, struct scsi_cmnd *sc,
		struct sli4_sge *sgl, int datasegcnt)
{
	struct scatterlist *sgde = NULL; /* s/g data entry */
	struct sli4_sge_diseed *diseed = NULL;
	dma_addr_t physaddr;
	int i = 0, num_sge = 0, status;
	uint32_t reftag;
	uint8_t txop, rxop;
#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
	uint32_t rc;
#endif
	uint32_t checking = 1;
	uint32_t dma_len;
	uint32_t dma_offset = 0;

	status = lpfc_sc_to_bg_opcodes(phba, sc, &txop, &rxop);
	if (status)
		goto out;

	/* extract some info from the scsi command for pde */
	reftag = (uint32_t)scsi_get_lba(sc); /* Truncate LBA */

#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
	rc = lpfc_bg_err_inject(phba, sc, &reftag, NULL, 1);
	if (rc) {
		if (rc & BG_ERR_SWAP)
			lpfc_bg_err_opcodes(phba, sc, &txop, &rxop);
		if (rc & BG_ERR_CHECK)
			checking = 0;
	}
#endif

	/* setup DISEED with what we have */
	diseed = (struct sli4_sge_diseed *) sgl;
	memset(diseed, 0, sizeof(struct sli4_sge_diseed));
	bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_DISEED);

	/* Endianness conversion if necessary */
	diseed->ref_tag = cpu_to_le32(reftag);
	diseed->ref_tag_tran = diseed->ref_tag;

	/*
	 * We only need to check the data on READs, for WRITEs
	 * protection data is automatically generated, not checked.
	 */
	if (sc->sc_data_direction == DMA_FROM_DEVICE) {
		if (lpfc_cmd_protect(sc, LPFC_CHECK_PROTECT_GUARD))
			bf_set(lpfc_sli4_sge_dif_ce, diseed, checking);
		else
			bf_set(lpfc_sli4_sge_dif_ce, diseed, 0);

		if (lpfc_cmd_protect(sc, LPFC_CHECK_PROTECT_REF))
			bf_set(lpfc_sli4_sge_dif_re, diseed, checking);
		else
			bf_set(lpfc_sli4_sge_dif_re, diseed, 0);
	}

	/* setup DISEED with the rest of the info */
	bf_set(lpfc_sli4_sge_dif_optx, diseed, txop);
	bf_set(lpfc_sli4_sge_dif_oprx, diseed, rxop);

	bf_set(lpfc_sli4_sge_dif_ai, diseed, 1);
	bf_set(lpfc_sli4_sge_dif_me, diseed, 0);

	/* Endianness conversion if necessary for DISEED */
	diseed->word2 = cpu_to_le32(diseed->word2);
	diseed->word3 = cpu_to_le32(diseed->word3);

	/* advance bpl and increment sge count */
	num_sge++;
	sgl++;

	/* assumption: caller has already run dma_map_sg on command data */
	scsi_for_each_sg(sc, sgde, datasegcnt, i) {
		physaddr = sg_dma_address(sgde);
		dma_len = sg_dma_len(sgde);
		sgl->addr_lo = cpu_to_le32(putPaddrLow(physaddr));
		sgl->addr_hi = cpu_to_le32(putPaddrHigh(physaddr));
		if ((i + 1) == datasegcnt)
			bf_set(lpfc_sli4_sge_last, sgl, 1);
		else
			bf_set(lpfc_sli4_sge_last, sgl, 0);
		bf_set(lpfc_sli4_sge_offset, sgl, dma_offset);
		bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_DATA);
		sgl->word2 = cpu_to_le32(sgl->word2);

		sgl->sge_len = cpu_to_le32(dma_len);
		dma_offset += dma_len;

		sgl++;
		num_sge++;
	}

out:
	return num_sge;
}
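/*
 * Illustrative sketch, hypothetical helper: the no-DIF SGL built by
 * lpfc_bg_setup_sgl() above always holds one DISEED descriptor plus
 * one data SGE per mapped segment, which is why the SLI-4 prep code
 * later budgets (seg_cnt + 1) entries against cfg_total_seg_cnt.
 */
static inline int lpfc_bg_sgl_sge_count(int datasegcnt)
{
	return 1 + datasegcnt;	/* DISEED + one SGE per segment */
}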
/**
 * lpfc_bg_setup_sgl_prot - Setup BlockGuard SGL with protection data
 * @phba: The Hba for which this call is being executed.
 * @sc: pointer to scsi command we're working on
 * @sgl: pointer to buffer list for protection groups
 * @datacnt: number of segments of data that have been dma mapped
 * @protcnt: number of segments of protection data that have been dma mapped
 *
 * This function sets up SGL buffer list for protection groups of
 * type LPFC_PG_TYPE_DIF
 *
 * This is usually used when DIFs are in their own buffers,
 * separate from the data. The HBA can then be instructed
 * to place the DIFs in the outgoing stream.  For read operations,
 * the HBA could extract the DIFs and place them in DIF buffers.
 *
 * The buffer list for this type consists of one or more of the
 * protection groups described below:
 *                                    +-------------------------+
 *   start of first prot group  -->   |         DISEED          |
 *                                    +-------------------------+
 *                                    |      DIF (Prot SGE)     |
 *                                    +-------------------------+
 *                                    |        Data SGE         |
 *                                    +-------------------------+
 *                                    |more Data SGE's ... (opt)|
 *                                    +-------------------------+
 *   start of new prot group  -->     |         DISEED          |
 *                                    +-------------------------+
 *                                    |          ...            |
 *                                    +-------------------------+
 *
 * Note: It is assumed that both data and protection s/g buffers have been
 *       mapped for DMA
 *
 * Returns the number of SGEs added to the SGL.
 **/
static int
lpfc_bg_setup_sgl_prot(struct lpfc_hba *phba, struct scsi_cmnd *sc,
		struct sli4_sge *sgl, int datacnt, int protcnt)
{
	struct scatterlist *sgde = NULL; /* s/g data entry */
	struct scatterlist *sgpe = NULL; /* s/g prot entry */
	struct sli4_sge_diseed *diseed = NULL;
	dma_addr_t dataphysaddr, protphysaddr;
	unsigned short curr_data = 0, curr_prot = 0;
	unsigned int split_offset;
	unsigned int protgroup_len, protgroup_offset = 0, protgroup_remainder;
	unsigned int protgrp_blks, protgrp_bytes;
	unsigned int remainder, subtotal;
	int status;
	unsigned char pgdone = 0, alldone = 0;
	unsigned blksize;
	uint32_t reftag;
	uint8_t txop, rxop;
	uint32_t dma_len;
#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
	uint32_t rc;
#endif
	uint32_t checking = 1;
	uint32_t dma_offset = 0;
	int num_sge = 0;

	sgpe = scsi_prot_sglist(sc);
	sgde = scsi_sglist(sc);

	if (!sgpe || !sgde) {
		lpfc_printf_log(phba, KERN_ERR, LOG_FCP,
				"9082 Invalid s/g entry: data=0x%p prot=0x%p\n",
				sgpe, sgde);
		return 0;
	}

	status = lpfc_sc_to_bg_opcodes(phba, sc, &txop, &rxop);
	if (status)
		goto out;

	/* extract some info from the scsi command */
	blksize = lpfc_cmd_blksize(sc);
	reftag = (uint32_t)scsi_get_lba(sc); /* Truncate LBA */

#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
	rc = lpfc_bg_err_inject(phba, sc, &reftag, NULL, 1);
	if (rc) {
		if (rc & BG_ERR_SWAP)
			lpfc_bg_err_opcodes(phba, sc, &txop, &rxop);
		if (rc & BG_ERR_CHECK)
			checking = 0;
	}
#endif

	split_offset = 0;
	do {
		/* Check to see if we ran out of space */
		if (num_sge >= (phba->cfg_total_seg_cnt - 2))
			return num_sge + 3;

		/* setup DISEED with what we have */
		diseed = (struct sli4_sge_diseed *) sgl;
		memset(diseed, 0, sizeof(struct sli4_sge_diseed));
		bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_DISEED);

		/* Endianness conversion if necessary */
		diseed->ref_tag = cpu_to_le32(reftag);
		diseed->ref_tag_tran = diseed->ref_tag;

		if (lpfc_cmd_protect(sc, LPFC_CHECK_PROTECT_GUARD)) {
			bf_set(lpfc_sli4_sge_dif_ce, diseed, checking);
		} else {
			bf_set(lpfc_sli4_sge_dif_ce, diseed, 0);
			/*
			 * When in this mode, the hardware will replace
			 * the guard tag from the host with a
			 * newly generated good CRC for the wire.
			 * Switch to raw mode here to avoid this
			 * behavior. What the host sends gets put on the wire.
			 */
			if (txop == BG_OP_IN_CRC_OUT_CRC) {
				txop = BG_OP_RAW_MODE;
				rxop = BG_OP_RAW_MODE;
			}
		}

		if (lpfc_cmd_protect(sc, LPFC_CHECK_PROTECT_REF))
			bf_set(lpfc_sli4_sge_dif_re, diseed, checking);
		else
			bf_set(lpfc_sli4_sge_dif_re, diseed, 0);

		/* setup DISEED with the rest of the info */
		bf_set(lpfc_sli4_sge_dif_optx, diseed, txop);
		bf_set(lpfc_sli4_sge_dif_oprx, diseed, rxop);

		bf_set(lpfc_sli4_sge_dif_ai, diseed, 1);
		bf_set(lpfc_sli4_sge_dif_me, diseed, 0);

		/* Endianness conversion if necessary for DISEED */
		diseed->word2 = cpu_to_le32(diseed->word2);
		diseed->word3 = cpu_to_le32(diseed->word3);

		/* advance sgl and increment bde count */
		num_sge++;
		sgl++;

		/* setup the first BDE that points to protection buffer */
		protphysaddr = sg_dma_address(sgpe) + protgroup_offset;
		protgroup_len = sg_dma_len(sgpe) - protgroup_offset;

		/* must be integer multiple of the DIF block length */
		BUG_ON(protgroup_len % 8);

		/* Now setup DIF SGE */
		sgl->word2 = 0;
		bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_DIF);
		sgl->addr_hi = le32_to_cpu(putPaddrHigh(protphysaddr));
		sgl->addr_lo = le32_to_cpu(putPaddrLow(protphysaddr));
		sgl->word2 = cpu_to_le32(sgl->word2);

		protgrp_blks = protgroup_len / 8;
		protgrp_bytes = protgrp_blks * blksize;

		/* check if DIF SGE is crossing the 4K boundary; if so split */
		if ((sgl->addr_lo & 0xfff) + protgroup_len > 0x1000) {
			protgroup_remainder = 0x1000 - (sgl->addr_lo & 0xfff);
			protgroup_offset += protgroup_remainder;
			protgrp_blks = protgroup_remainder / 8;
			protgrp_bytes = protgrp_blks * blksize;
		} else {
			protgroup_offset = 0;
			curr_prot++;
		}

		num_sge++;

		/* setup SGE's for data blocks associated with DIF data */
		pgdone = 0;
		subtotal = 0; /* total bytes processed for current prot grp */
		while (!pgdone) {
			/* Check to see if we ran out of space */
			if (num_sge >= phba->cfg_total_seg_cnt)
				return num_sge + 1;

			if (!sgde) {
				lpfc_printf_log(phba, KERN_ERR, LOG_BG,
					"9086 BLKGRD:%s Invalid data segment\n",
						__func__);
				return 0;
			}
			sgl++;
			dataphysaddr = sg_dma_address(sgde) + split_offset;

			remainder = sg_dma_len(sgde) - split_offset;

			if ((subtotal + remainder) <= protgrp_bytes) {
				/* we can use this whole buffer */
				dma_len = remainder;
				split_offset = 0;

				if ((subtotal + remainder) == protgrp_bytes)
					pgdone = 1;
			} else {
				/* must split this buffer with next prot grp */
				dma_len = protgrp_bytes - subtotal;
				split_offset += dma_len;
			}

			subtotal += dma_len;

			sgl->addr_lo = cpu_to_le32(putPaddrLow(dataphysaddr));
			sgl->addr_hi = cpu_to_le32(putPaddrHigh(dataphysaddr));
			bf_set(lpfc_sli4_sge_last, sgl, 0);
			bf_set(lpfc_sli4_sge_offset, sgl, dma_offset);
			bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_DATA);

			sgl->sge_len = cpu_to_le32(dma_len);
			dma_offset += dma_len;

			num_sge++;
			curr_data++;

			if (split_offset)
				break;

			/* Move to the next s/g segment if possible */
			sgde = sg_next(sgde);
		}

		if (protgroup_offset) {
			/* update the reference tag */
			reftag += protgrp_blks;
			sgl++;
			continue;
		}

		/* are we done ? */
		if (curr_prot == protcnt) {
			bf_set(lpfc_sli4_sge_last, sgl, 1);
			alldone = 1;
		} else if (curr_prot < protcnt) {
			/* advance to next prot buffer */
			sgpe = sg_next(sgpe);
			sgl++;

			/* update the reference tag */
			reftag += protgrp_blks;
		} else {
			/* if we're here, we have a bug */
			lpfc_printf_log(phba, KERN_ERR, LOG_BG,
				"9085 BLKGRD: bug in %s\n", __func__);
		}

	} while (!alldone);

out:
	return num_sge;
}
/**
 * lpfc_prot_group_type - Get protection group type of SCSI command
 * @phba: The Hba for which this call is being executed.
 * @sc: pointer to scsi command we're working on
 *
 * Given a SCSI command that supports DIF, determine composition of protection
 * groups involved in setting up buffer lists
 *
 * Returns: Protection group type (with or without DIF)
 **/
static int
lpfc_prot_group_type(struct lpfc_hba *phba, struct scsi_cmnd *sc)
{
	int ret = LPFC_PG_TYPE_INVALID;
	unsigned char op = scsi_get_prot_op(sc);

	switch (op) {
	case SCSI_PROT_READ_STRIP:
	case SCSI_PROT_WRITE_INSERT:
		ret = LPFC_PG_TYPE_NO_DIF;
		break;
	case SCSI_PROT_READ_INSERT:
	case SCSI_PROT_WRITE_STRIP:
	case SCSI_PROT_READ_PASS:
	case SCSI_PROT_WRITE_PASS:
		ret = LPFC_PG_TYPE_DIF_BUF;
		break;
	default:
		if (phba)
			lpfc_printf_log(phba, KERN_ERR, LOG_FCP,
					"9021 Unsupported protection op:%d\n",
					op);
		break;
	}
	return ret;
}
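/*
 * Quick reference derived from the switch above (descriptive note
 * only, not driver code):
 *
 *	READ_STRIP / WRITE_INSERT   -> LPFC_PG_TYPE_NO_DIF
 *	    (DIF exists only on the wire; the HBA inserts or strips it)
 *	READ_INSERT / WRITE_STRIP,
 *	READ_PASS / WRITE_PASS      -> LPFC_PG_TYPE_DIF_BUF
 *	    (DIF travels in separate host protection buffers)
 */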
/**
 * lpfc_bg_scsi_adjust_dl - Adjust SCSI data length for BlockGuard
 * @phba: The Hba for which this call is being executed.
 * @lpfc_cmd: The scsi buffer which is going to be adjusted.
 *
 * Adjust the data length to account for how much data
 * is actually on the wire.
 *
 * returns the adjusted data length
 **/
static int
lpfc_bg_scsi_adjust_dl(struct lpfc_hba *phba,
		       struct lpfc_scsi_buf *lpfc_cmd)
{
	struct scsi_cmnd *sc = lpfc_cmd->pCmd;
	int fcpdl;

	fcpdl = scsi_bufflen(sc);

	/* Check if there is protection data on the wire */
	if (sc->sc_data_direction == DMA_FROM_DEVICE) {
		/* Read check for protection data */
		if (scsi_get_prot_op(sc) == SCSI_PROT_READ_INSERT)
			return fcpdl;

	} else {
		/* Write check for protection data */
		if (scsi_get_prot_op(sc) == SCSI_PROT_WRITE_STRIP)
			return fcpdl;
	}

	/*
	 * If we are in DIF Type 1 mode every data block has an 8 byte
	 * DIF (trailer) attached to it. Must adjust FCP data length
	 * to account for the protection data.
	 */
	fcpdl += (fcpdl / lpfc_cmd_blksize(sc)) * 8;

	return fcpdl;
}
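/*
 * Worked example for the adjustment above (illustrative only): a
 * 32768-byte transfer with 512-byte logical blocks carries 64 blocks
 * on the wire, each followed by an 8-byte DIF tuple, so the adjusted
 * FCP data length is 32768 + 64 * 8 = 33280 bytes.
 */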
/**
 * lpfc_bg_scsi_prep_dma_buf_s3 - DMA mapping for scsi buffer to SLI3 IF spec
 * @phba: The Hba for which this call is being executed.
 * @lpfc_cmd: The scsi buffer which is going to be prep'ed.
 *
 * This is the protection/DIF aware version of
 * lpfc_scsi_prep_dma_buf(). It may be a good idea to combine the
 * two functions eventually, but for now, it's here.
 **/
static int
lpfc_bg_scsi_prep_dma_buf_s3(struct lpfc_hba *phba,
		struct lpfc_scsi_buf *lpfc_cmd)
{
	struct scsi_cmnd *scsi_cmnd = lpfc_cmd->pCmd;
	struct fcp_cmnd *fcp_cmnd = lpfc_cmd->fcp_cmnd;
	struct ulp_bde64 *bpl = lpfc_cmd->fcp_bpl;
	IOCB_t *iocb_cmd = &lpfc_cmd->cur_iocbq.iocb;
	uint32_t num_bde = 0;
	int datasegcnt, protsegcnt, datadir = scsi_cmnd->sc_data_direction;
	int prot_group_type = 0;
	int fcpdl;

	/*
	 * Start the lpfc command prep by bumping the bpl beyond fcp_cmnd
	 *  fcp_rsp regions to the first data bde entry
	 */
	bpl += 2;
	if (scsi_sg_count(scsi_cmnd)) {
		/*
		 * The driver stores the segment count returned from pci_map_sg
		 * because this a count of dma-mappings used to map the use_sg
		 * pages.  They are not guaranteed to be the same for those
		 * architectures that implement an IOMMU.
		 */
		datasegcnt = dma_map_sg(&phba->pcidev->dev,
					scsi_sglist(scsi_cmnd),
					scsi_sg_count(scsi_cmnd), datadir);
		if (unlikely(!datasegcnt))
			return 1;

		lpfc_cmd->seg_cnt = datasegcnt;

		/* First check if data segment count from SCSI Layer is good */
		if (lpfc_cmd->seg_cnt > phba->cfg_sg_seg_cnt)
			goto err;

		prot_group_type = lpfc_prot_group_type(phba, scsi_cmnd);

		switch (prot_group_type) {
		case LPFC_PG_TYPE_NO_DIF:

			/* Here we need to add a PDE5 and PDE6 to the count */
			if ((lpfc_cmd->seg_cnt + 2) > phba->cfg_total_seg_cnt)
				goto err;

			num_bde = lpfc_bg_setup_bpl(phba, scsi_cmnd, bpl,
					datasegcnt);

			/* we should have 2 or more entries in buffer list */
			if (num_bde < 2)
				goto err;
			break;

		case LPFC_PG_TYPE_DIF_BUF:
			/*
			 * This type indicates that protection buffers are
			 * passed to the driver, so that needs to be prepared
			 * for DMA
			 */
			protsegcnt = dma_map_sg(&phba->pcidev->dev,
					scsi_prot_sglist(scsi_cmnd),
					scsi_prot_sg_count(scsi_cmnd), datadir);
			if (unlikely(!protsegcnt)) {
				scsi_dma_unmap(scsi_cmnd);
				return 1;
			}

			lpfc_cmd->prot_seg_cnt = protsegcnt;

			/*
			 * There is a minimum of 4 BPLs used for every
			 * protection data segment.
			 */
			if ((lpfc_cmd->prot_seg_cnt * 4) >
			    (phba->cfg_total_seg_cnt - 2))
				goto err;

			num_bde = lpfc_bg_setup_bpl_prot(phba, scsi_cmnd, bpl,
					datasegcnt, protsegcnt);

			/* we should have 3 or more entries in buffer list */
			if ((num_bde < 3) ||
			    (num_bde > phba->cfg_total_seg_cnt))
				goto err;
			break;

		case LPFC_PG_TYPE_INVALID:
		default:
			scsi_dma_unmap(scsi_cmnd);
			lpfc_cmd->seg_cnt = 0;

			lpfc_printf_log(phba, KERN_ERR, LOG_FCP,
					"9022 Unexpected protection group %i\n",
					prot_group_type);
			return 1;
		}
	}

	/*
	 * Finish initializing those IOCB fields that are dependent on the
	 * scsi_cmnd request_buffer.  Note that the bdeSize is explicitly
	 * reinitialized since all iocb memory resources are used many times
	 * for transmit, receive, and continuation bpl's.
	 */
	iocb_cmd->un.fcpi64.bdl.bdeSize = (2 * sizeof(struct ulp_bde64));
	iocb_cmd->un.fcpi64.bdl.bdeSize += (num_bde * sizeof(struct ulp_bde64));
	iocb_cmd->ulpBdeCount = 1;
	iocb_cmd->ulpLe = 1;

	fcpdl = lpfc_bg_scsi_adjust_dl(phba, lpfc_cmd);
	fcp_cmnd->fcpDl = be32_to_cpu(fcpdl);

	/*
	 * Due to difference in data length between DIF/non-DIF paths,
	 * we need to set word 4 of IOCB here
	 */
	iocb_cmd->un.fcpi.fcpi_parm = fcpdl;

	return 0;
err:
	if (lpfc_cmd->seg_cnt)
		scsi_dma_unmap(scsi_cmnd);
	if (lpfc_cmd->prot_seg_cnt)
		dma_unmap_sg(&phba->pcidev->dev, scsi_prot_sglist(scsi_cmnd),
			     scsi_prot_sg_count(scsi_cmnd),
			     scsi_cmnd->sc_data_direction);

	lpfc_printf_log(phba, KERN_ERR, LOG_FCP,
			"9023 Cannot setup S/G List for HBA "
			"IO segs %d/%d BPL %d SCSI %d: %d %d\n",
			lpfc_cmd->seg_cnt, lpfc_cmd->prot_seg_cnt,
			phba->cfg_total_seg_cnt, phba->cfg_sg_seg_cnt,
			prot_group_type, num_bde);

	lpfc_cmd->seg_cnt = 0;
	lpfc_cmd->prot_seg_cnt = 0;
	return 1;
}
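/*
 * Worked example for the IOCB sizing above (illustrative only): the
 * reported BPL size covers the fcp_cmnd/fcp_rsp entries plus the
 * BlockGuard descriptors, i.e. (2 + num_bde) * sizeof(struct
 * ulp_bde64). A no-DIF command with 4 data segments uses
 * num_bde = 2 + 4 = 6, giving (2 + 6) * 12 = 96 bytes.
 */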
/*
 * This function calculates the T10 DIF guard tag
 * on the specified data using a CRC algorithm
 * via crc_t10dif.
 */
static uint16_t
lpfc_bg_crc(uint8_t *data, int count)
{
	uint16_t crc = 0;
	uint16_t x;

	crc = crc_t10dif(data, count);
	x = cpu_to_be16(crc);
	return x;
}

/*
 * This function calculates the T10 DIF guard tag
 * on the specified data using a CSUM algorithm
 * via ip_compute_csum.
 */
static uint16_t
lpfc_bg_csum(uint8_t *data, int count)
{
	uint16_t ret;

	ret = ip_compute_csum(data, count);
	return ret;
}
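/*
 * Illustrative sketch, hypothetical helper: verifying one logical
 * block's guard tag against its DIF tuple the same way
 * lpfc_calc_bg_err() does below. Both lpfc_bg_crc() and
 * lpfc_bg_csum() already return the tag in wire (big-endian) order,
 * so the result can be compared directly with the tuple field.
 */
static inline bool lpfc_bg_guard_ok(uint8_t *data, int blksize,
				    struct scsi_dif_tuple *t, bool use_csum)
{
	uint16_t sum = use_csum ? lpfc_bg_csum(data, blksize) :
				  lpfc_bg_crc(data, blksize);

	return sum == (uint16_t)t->guard_tag;
}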
/*
 * This function examines the protection data to try to determine
 * what type of T10-DIF error occurred.
 */
static void
lpfc_calc_bg_err(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd)
{
	struct scatterlist *sgpe; /* s/g prot entry */
	struct scatterlist *sgde; /* s/g data entry */
	struct scsi_cmnd *cmd = lpfc_cmd->pCmd;
	struct scsi_dif_tuple *src = NULL;
	uint8_t *data_src = NULL;
	uint16_t guard_tag;
	uint16_t start_app_tag, app_tag;
	uint32_t start_ref_tag, ref_tag;
	int prot, protsegcnt;
	int err_type, len, data_len;
	int chk_ref, chk_app, chk_guard;
	uint16_t sum;
	unsigned blksize;

	err_type = BGS_GUARD_ERR_MASK;
	sum = 0;
	guard_tag = 0;

	/* First check to see if there is protection data to examine */
	prot = scsi_get_prot_op(cmd);
	if ((prot == SCSI_PROT_READ_STRIP) ||
	    (prot == SCSI_PROT_WRITE_INSERT) ||
	    (prot == SCSI_PROT_NORMAL))
		goto out;

	/* Currently the driver just supports ref_tag and guard_tag checking */
	chk_ref = 1;
	chk_app = 0;
	chk_guard = 0;

	/* Setup a ptr to the protection data provided by the SCSI host */
	sgpe = scsi_prot_sglist(cmd);
	protsegcnt = lpfc_cmd->prot_seg_cnt;

	if (sgpe && protsegcnt) {

		/*
		 * We will only try to verify guard tag if the segment
		 * data length is a multiple of the blksize.
		 */
		sgde = scsi_sglist(cmd);
		blksize = lpfc_cmd_blksize(cmd);
		data_src = (uint8_t *)sg_virt(sgde);
		data_len = sgde->length;
		if ((data_len & (blksize - 1)) == 0)
			chk_guard = 1;

		src = (struct scsi_dif_tuple *)sg_virt(sgpe);
		start_ref_tag = (uint32_t)scsi_get_lba(cmd); /* Truncate LBA */
		start_app_tag = src->app_tag;
		len = sgpe->length;
		while (src && protsegcnt) {
			while (len) {

				/*
				 * First check to see if a protection data
				 * check is valid
				 */
				if ((src->ref_tag == 0xffffffff) ||
				    (src->app_tag == 0xffff)) {
					start_ref_tag++;
					goto skipit;
				}

				/* First Guard Tag checking */
				if (chk_guard) {
					guard_tag = src->guard_tag;
					if (lpfc_cmd_guard_csum(cmd))
						sum = lpfc_bg_csum(data_src,
								   blksize);
					else
						sum = lpfc_bg_crc(data_src,
								  blksize);
					if ((guard_tag != sum)) {
						err_type = BGS_GUARD_ERR_MASK;
						goto out;
					}
				}

				/* Reference Tag checking */
				ref_tag = be32_to_cpu(src->ref_tag);
				if (chk_ref && (ref_tag != start_ref_tag)) {
					err_type = BGS_REFTAG_ERR_MASK;
					goto out;
				}
				start_ref_tag++;

				/* App Tag checking */
				app_tag = src->app_tag;
				if (chk_app && (app_tag != start_app_tag)) {
					err_type = BGS_APPTAG_ERR_MASK;
					goto out;
				}
skipit:
				len -= sizeof(struct scsi_dif_tuple);
				if (len < 0)
					len = 0;
				src++;

				data_src += blksize;
				data_len -= blksize;

				/*
				 * Are we at the end of the Data segment?
				 * The data segment is only used for Guard
				 * tag checking.
				 */
				if (chk_guard && (data_len == 0)) {
					chk_guard = 0;
					sgde = sg_next(sgde);
					if (!sgde)
						goto out;

					data_src = (uint8_t *)sg_virt(sgde);
					data_len = sgde->length;
					if ((data_len & (blksize - 1)) == 0)
						chk_guard = 1;
				}
			}

			/* Goto the next Protection data segment */
			sgpe = sg_next(sgpe);
			if (sgpe) {
				src = (struct scsi_dif_tuple *)sg_virt(sgpe);
				len = sgpe->length;
			} else {
				src = NULL;
			}
			protsegcnt--;
		}
	}
out:
	if (err_type == BGS_GUARD_ERR_MASK) {
		scsi_build_sense_buffer(1, cmd->sense_buffer, ILLEGAL_REQUEST,
					0x10, 0x1);
		cmd->result = DRIVER_SENSE << 24
			| ScsiResult(DID_ABORT, SAM_STAT_CHECK_CONDITION);
		phba->bg_guard_err_cnt++;
		lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_BG,
				"9069 BLKGRD: LBA %lx grd_tag error %x != %x\n",
				(unsigned long)scsi_get_lba(cmd),
				sum, guard_tag);

	} else if (err_type == BGS_REFTAG_ERR_MASK) {
		scsi_build_sense_buffer(1, cmd->sense_buffer, ILLEGAL_REQUEST,
					0x10, 0x3);
		cmd->result = DRIVER_SENSE << 24
			| ScsiResult(DID_ABORT, SAM_STAT_CHECK_CONDITION);

		phba->bg_reftag_err_cnt++;
		lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_BG,
				"9066 BLKGRD: LBA %lx ref_tag error %x != %x\n",
				(unsigned long)scsi_get_lba(cmd),
				ref_tag, start_ref_tag);

	} else if (err_type == BGS_APPTAG_ERR_MASK) {
		scsi_build_sense_buffer(1, cmd->sense_buffer, ILLEGAL_REQUEST,
					0x10, 0x2);
		cmd->result = DRIVER_SENSE << 24
			| ScsiResult(DID_ABORT, SAM_STAT_CHECK_CONDITION);

		phba->bg_apptag_err_cnt++;
		lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_BG,
				"9041 BLKGRD: LBA %lx app_tag error %x != %x\n",
				(unsigned long)scsi_get_lba(cmd),
				app_tag, start_app_tag);
	}
}
/*
 * This function checks for BlockGuard errors detected by
 * the HBA.  In case of errors, the ASC/ASCQ fields in the
 * sense buffer will be set accordingly, paired with
 * ILLEGAL_REQUEST to signal to the kernel that the HBA
 * detected corruption.
 *
 * Returns:
 *  0 - No error found
 *  1 - BlockGuard error found
 * -1 - Internal error (bad profile, ...etc)
 */
static int
lpfc_parse_bg_err(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd,
			struct lpfc_iocbq *pIocbOut)
{
	struct scsi_cmnd *cmd = lpfc_cmd->pCmd;
	struct sli3_bg_fields *bgf = &pIocbOut->iocb.unsli3.sli3_bg;
	int ret = 0;
	uint32_t bghm = bgf->bghm;
	uint32_t bgstat = bgf->bgstat;
	uint64_t failing_sector = 0;

	spin_lock(&_dump_buf_lock);
	if (!_dump_buf_done) {
		lpfc_printf_log(phba, KERN_ERR, LOG_BG,  "9070 BLKGRD: Saving"
			" Data for %u blocks to debugfs\n",
				(cmd->cmnd[7] << 8 | cmd->cmnd[8]));
		lpfc_debug_save_data(phba, cmd);

		/* If we have a prot sgl, save the DIF buffer */
		if (lpfc_prot_group_type(phba, cmd) ==
				LPFC_PG_TYPE_DIF_BUF) {
			lpfc_printf_log(phba, KERN_ERR, LOG_BG, "9071 BLKGRD: "
				"Saving DIF for %u blocks to debugfs\n",
				(cmd->cmnd[7] << 8 | cmd->cmnd[8]));
			lpfc_debug_save_dif(phba, cmd);
		}

		_dump_buf_done = 1;
	}
	spin_unlock(&_dump_buf_lock);

	if (lpfc_bgs_get_invalid_prof(bgstat)) {
		cmd->result = ScsiResult(DID_ERROR, 0);
		lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_BG,
				"9072 BLKGRD: Invalid BG Profile in cmd"
				" 0x%x lba 0x%llx blk cnt 0x%x "
				"bgstat=x%x bghm=x%x\n", cmd->cmnd[0],
				(unsigned long long)scsi_get_lba(cmd),
				blk_rq_sectors(cmd->request), bgstat, bghm);
		ret = (-1);
		goto out;
	}

	if (lpfc_bgs_get_uninit_dif_block(bgstat)) {
		cmd->result = ScsiResult(DID_ERROR, 0);
		lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_BG,
				"9073 BLKGRD: Invalid BG PDIF Block in cmd"
				" 0x%x lba 0x%llx blk cnt 0x%x "
				"bgstat=x%x bghm=x%x\n", cmd->cmnd[0],
				(unsigned long long)scsi_get_lba(cmd),
				blk_rq_sectors(cmd->request), bgstat, bghm);
		ret = (-1);
		goto out;
	}

	if (lpfc_bgs_get_guard_err(bgstat)) {
		ret = 1;

		scsi_build_sense_buffer(1, cmd->sense_buffer, ILLEGAL_REQUEST,
				0x10, 0x1);
		cmd->result = DRIVER_SENSE << 24
			| ScsiResult(DID_ABORT, SAM_STAT_CHECK_CONDITION);
		phba->bg_guard_err_cnt++;
		lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_BG,
				"9055 BLKGRD: Guard Tag error in cmd"
				" 0x%x lba 0x%llx blk cnt 0x%x "
				"bgstat=x%x bghm=x%x\n", cmd->cmnd[0],
				(unsigned long long)scsi_get_lba(cmd),
				blk_rq_sectors(cmd->request), bgstat, bghm);
	}

	if (lpfc_bgs_get_reftag_err(bgstat)) {
		ret = 1;

		scsi_build_sense_buffer(1, cmd->sense_buffer, ILLEGAL_REQUEST,
				0x10, 0x3);
		cmd->result = DRIVER_SENSE << 24
			| ScsiResult(DID_ABORT, SAM_STAT_CHECK_CONDITION);

		phba->bg_reftag_err_cnt++;
		lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_BG,
				"9056 BLKGRD: Ref Tag error in cmd"
				" 0x%x lba 0x%llx blk cnt 0x%x "
				"bgstat=x%x bghm=x%x\n", cmd->cmnd[0],
				(unsigned long long)scsi_get_lba(cmd),
				blk_rq_sectors(cmd->request), bgstat, bghm);
	}

	if (lpfc_bgs_get_apptag_err(bgstat)) {
		ret = 1;

		scsi_build_sense_buffer(1, cmd->sense_buffer, ILLEGAL_REQUEST,
				0x10, 0x2);
		cmd->result = DRIVER_SENSE << 24
			| ScsiResult(DID_ABORT, SAM_STAT_CHECK_CONDITION);

		phba->bg_apptag_err_cnt++;
		lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_BG,
				"9061 BLKGRD: App Tag error in cmd"
				" 0x%x lba 0x%llx blk cnt 0x%x "
				"bgstat=x%x bghm=x%x\n", cmd->cmnd[0],
				(unsigned long long)scsi_get_lba(cmd),
				blk_rq_sectors(cmd->request), bgstat, bghm);
	}

	if (lpfc_bgs_get_hi_water_mark_present(bgstat)) {
		/*
		 * setup sense data descriptor 0 per SPC-4 as an information
		 * field, and put the failing LBA in it.
		 * This code assumes there was also a guard/app/ref tag error
		 * indication.
		 */
		cmd->sense_buffer[7] = 0xc;   /* Additional sense length */
		cmd->sense_buffer[8] = 0;     /* Information descriptor type */
		cmd->sense_buffer[9] = 0xa;   /* Additional descriptor length */
		cmd->sense_buffer[10] = 0x80; /* Validity bit */

		/* bghm is a "on the wire" FC frame based count */
		switch (scsi_get_prot_op(cmd)) {
		case SCSI_PROT_READ_INSERT:
		case SCSI_PROT_WRITE_STRIP:
			bghm /= cmd->device->sector_size;
			break;
		case SCSI_PROT_READ_STRIP:
		case SCSI_PROT_WRITE_INSERT:
		case SCSI_PROT_READ_PASS:
		case SCSI_PROT_WRITE_PASS:
			bghm /= (cmd->device->sector_size +
				sizeof(struct scsi_dif_tuple));
			break;
		}

		failing_sector = scsi_get_lba(cmd);
		failing_sector += bghm;

		/* Descriptor Information */
		put_unaligned_be64(failing_sector, &cmd->sense_buffer[12]);
	}

	if (!ret) {
		/* No error was reported - problem in FW? */
		lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_BG,
				"9057 BLKGRD: Unknown error in cmd"
				" 0x%x lba 0x%llx blk cnt 0x%x "
				"bgstat=x%x bghm=x%x\n", cmd->cmnd[0],
				(unsigned long long)scsi_get_lba(cmd),
				blk_rq_sectors(cmd->request), bgstat, bghm);

		/* Calculate what type of error it was */
		lpfc_calc_bg_err(phba, lpfc_cmd);
	}
out:
	return ret;
}
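/*
 * Illustrative sketch, hypothetical helper: converting the "high
 * water mark" byte count (bghm) into a failing LBA, mirroring the
 * switch above. With DIF on the wire each block occupies
 * sector_size + 8 bytes; e.g. bghm = 1040 on a PASS op with 512-byte
 * sectors points at the third block of the request.
 */
static inline uint64_t lpfc_bg_failing_lba(struct scsi_cmnd *cmd,
					   uint32_t bghm, bool dif_on_wire)
{
	unsigned int stride = cmd->device->sector_size +
		(dif_on_wire ? sizeof(struct scsi_dif_tuple) : 0);

	return (uint64_t)scsi_get_lba(cmd) + bghm / stride;
}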
/**
 * lpfc_scsi_prep_dma_buf_s4 - DMA mapping for scsi buffer to SLI4 IF spec
 * @phba: The Hba for which this call is being executed.
 * @lpfc_cmd: The scsi buffer which is going to be mapped.
 *
 * This routine does the pci dma mapping for scatter-gather list of scsi cmnd
 * field of @lpfc_cmd for device with SLI-4 interface spec.
 *
 * Return codes:
 *	1 - Error
 *	0 - Success
 **/
static int
lpfc_scsi_prep_dma_buf_s4(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd)
{
	struct scsi_cmnd *scsi_cmnd = lpfc_cmd->pCmd;
	struct scatterlist *sgel = NULL;
	struct fcp_cmnd *fcp_cmnd = lpfc_cmd->fcp_cmnd;
	struct sli4_sge *sgl = (struct sli4_sge *)lpfc_cmd->fcp_bpl;
	struct sli4_sge *first_data_sgl;
	IOCB_t *iocb_cmd = &lpfc_cmd->cur_iocbq.iocb;
	dma_addr_t physaddr;
	uint32_t num_bde = 0;
	uint32_t dma_len;
	uint32_t dma_offset = 0;
	int nseg;
	struct ulp_bde64 *bde;

	/*
	 * There are three possibilities here - use scatter-gather segment, use
	 * the single mapping, or neither.  Start the lpfc command prep by
	 * bumping the bpl beyond the fcp_cmnd and fcp_rsp regions to the first
	 * data bde entry.
	 */
	if (scsi_sg_count(scsi_cmnd)) {
		/*
		 * The driver stores the segment count returned from pci_map_sg
		 * because this a count of dma-mappings used to map the use_sg
		 * pages.  They are not guaranteed to be the same for those
		 * architectures that implement an IOMMU.
		 */

		nseg = scsi_dma_map(scsi_cmnd);
		if (unlikely(nseg <= 0))
			return 1;
		sgl += 1;
		/* clear the last flag in the fcp_rsp map entry */
		sgl->word2 = le32_to_cpu(sgl->word2);
		bf_set(lpfc_sli4_sge_last, sgl, 0);
		sgl->word2 = cpu_to_le32(sgl->word2);
		sgl += 1;

		first_data_sgl = sgl;
		lpfc_cmd->seg_cnt = nseg;
		if (lpfc_cmd->seg_cnt > phba->cfg_sg_seg_cnt) {
			lpfc_printf_log(phba, KERN_ERR, LOG_BG, "9074 BLKGRD:"
				" %s: Too many sg segments from "
				"dma_map_sg.  Config %d, seg_cnt %d\n",
				__func__, phba->cfg_sg_seg_cnt,
				lpfc_cmd->seg_cnt);
			lpfc_cmd->seg_cnt = 0;
			scsi_dma_unmap(scsi_cmnd);
			return 1;
		}

		/*
		 * The driver established a maximum scatter-gather segment count
		 * during probe that limits the number of sg elements in any
		 * single scsi command.  Just run through the seg_cnt and format
		 * the sge's.
		 * When using SLI-3 the driver will try to fit all the BDEs into
		 * the IOCB. If it can't then the BDEs get added to a BPL as it
		 * does for SLI-2 mode.
		 */
		scsi_for_each_sg(scsi_cmnd, sgel, nseg, num_bde) {
			physaddr = sg_dma_address(sgel);
			dma_len = sg_dma_len(sgel);
			sgl->addr_lo = cpu_to_le32(putPaddrLow(physaddr));
			sgl->addr_hi = cpu_to_le32(putPaddrHigh(physaddr));
			sgl->word2 = le32_to_cpu(sgl->word2);
			if ((num_bde + 1) == nseg)
				bf_set(lpfc_sli4_sge_last, sgl, 1);
			else
				bf_set(lpfc_sli4_sge_last, sgl, 0);
			bf_set(lpfc_sli4_sge_offset, sgl, dma_offset);
			bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_DATA);
			sgl->word2 = cpu_to_le32(sgl->word2);
			sgl->sge_len = cpu_to_le32(dma_len);
			dma_offset += dma_len;
			sgl++;
		}
		/* setup the performance hint (first data BDE) if enabled */
		if (phba->sli3_options & LPFC_SLI4_PERFH_ENABLED) {
			bde = (struct ulp_bde64 *)
					&(iocb_cmd->unsli3.sli3Words[5]);
			bde->addrLow = first_data_sgl->addr_lo;
			bde->addrHigh = first_data_sgl->addr_hi;
			bde->tus.f.bdeSize =
					le32_to_cpu(first_data_sgl->sge_len);
			bde->tus.f.bdeFlags = BUFF_TYPE_BDE_64;
			bde->tus.w = cpu_to_le32(bde->tus.w);
		}
	} else {
		sgl += 1;
		/* clear the last flag in the fcp_rsp map entry */
		sgl->word2 = le32_to_cpu(sgl->word2);
		bf_set(lpfc_sli4_sge_last, sgl, 1);
		sgl->word2 = cpu_to_le32(sgl->word2);
	}

	/*
	 * Finish initializing those IOCB fields that are dependent on the
	 * scsi_cmnd request_buffer.  Note that for SLI-2 the bdeSize is
	 * explicitly reinitialized.
	 * all iocb memory resources are reused.
	 */
	fcp_cmnd->fcpDl = cpu_to_be32(scsi_bufflen(scsi_cmnd));

	/*
	 * Due to difference in data length between DIF/non-DIF paths,
	 * we need to set word 4 of IOCB here
	 */
	iocb_cmd->un.fcpi.fcpi_parm = scsi_bufflen(scsi_cmnd);

	/*
	 * If the OAS driver feature is enabled and the lun is enabled for
	 * OAS, set the oas iocb related flags.
	 */
	if ((phba->cfg_fof) && ((struct lpfc_device_data *)
		scsi_cmnd->device->hostdata)->oas_enabled) {
		lpfc_cmd->cur_iocbq.iocb_flag |= (LPFC_IO_OAS | LPFC_IO_FOF);
		lpfc_cmd->cur_iocbq.priority = ((struct lpfc_device_data *)
			scsi_cmnd->device->hostdata)->priority;
	}
	return 0;
}
/**
 * lpfc_bg_scsi_prep_dma_buf_s4 - DMA mapping for scsi buffer to SLI4 IF spec
 * @phba: The Hba for which this call is being executed.
 * @lpfc_cmd: The scsi buffer which is going to be mapped.
 *
 * This is the protection/DIF aware version of
 * lpfc_scsi_prep_dma_buf(). It may be a good idea to combine the
 * two functions eventually, but for now, it's here.
 **/
static int
lpfc_bg_scsi_prep_dma_buf_s4(struct lpfc_hba *phba,
		struct lpfc_scsi_buf *lpfc_cmd)
{
	struct scsi_cmnd *scsi_cmnd = lpfc_cmd->pCmd;
	struct fcp_cmnd *fcp_cmnd = lpfc_cmd->fcp_cmnd;
	struct sli4_sge *sgl = (struct sli4_sge *)(lpfc_cmd->fcp_bpl);
	IOCB_t *iocb_cmd = &lpfc_cmd->cur_iocbq.iocb;
	uint32_t num_sge = 0;
	int datasegcnt, protsegcnt, datadir = scsi_cmnd->sc_data_direction;
	int prot_group_type = 0;
	int fcpdl;

	/*
	 * Start the lpfc command prep by bumping the sgl beyond fcp_cmnd
	 *  fcp_rsp regions to the first data sge entry
	 */
	if (scsi_sg_count(scsi_cmnd)) {
		/*
		 * The driver stores the segment count returned from pci_map_sg
		 * because this a count of dma-mappings used to map the use_sg
		 * pages.  They are not guaranteed to be the same for those
		 * architectures that implement an IOMMU.
		 */
		datasegcnt = dma_map_sg(&phba->pcidev->dev,
					scsi_sglist(scsi_cmnd),
					scsi_sg_count(scsi_cmnd), datadir);
		if (unlikely(!datasegcnt))
			return 1;

		sgl += 1;
		/* clear the last flag in the fcp_rsp map entry */
		sgl->word2 = le32_to_cpu(sgl->word2);
		bf_set(lpfc_sli4_sge_last, sgl, 0);
		sgl->word2 = cpu_to_le32(sgl->word2);

		sgl += 1;
		lpfc_cmd->seg_cnt = datasegcnt;

		/* First check if data segment count from SCSI Layer is good */
		if (lpfc_cmd->seg_cnt > phba->cfg_sg_seg_cnt)
			goto err;

		prot_group_type = lpfc_prot_group_type(phba, scsi_cmnd);

		switch (prot_group_type) {
		case LPFC_PG_TYPE_NO_DIF:
			/* Here we need to add a DISEED to the count */
			if ((lpfc_cmd->seg_cnt + 1) > phba->cfg_total_seg_cnt)
				goto err;

			num_sge = lpfc_bg_setup_sgl(phba, scsi_cmnd, sgl,
					datasegcnt);

			/* we should have 2 or more entries in buffer list */
			if (num_sge < 2)
				goto err;
			break;

		case LPFC_PG_TYPE_DIF_BUF:
			/*
			 * This type indicates that protection buffers are
			 * passed to the driver, so that needs to be prepared
			 * for DMA
			 */
			protsegcnt = dma_map_sg(&phba->pcidev->dev,
					scsi_prot_sglist(scsi_cmnd),
					scsi_prot_sg_count(scsi_cmnd), datadir);
			if (unlikely(!protsegcnt)) {
				scsi_dma_unmap(scsi_cmnd);
				return 1;
			}

			lpfc_cmd->prot_seg_cnt = protsegcnt;
			/*
			 * There is a minimum of 3 SGEs used for every
			 * protection data segment.
			 */
			if ((lpfc_cmd->prot_seg_cnt * 3) >
			    (phba->cfg_total_seg_cnt - 2))
				goto err;

			num_sge = lpfc_bg_setup_sgl_prot(phba, scsi_cmnd, sgl,
					datasegcnt, protsegcnt);

			/* we should have 3 or more entries in buffer list */
			if ((num_sge < 3) ||
			    (num_sge > phba->cfg_total_seg_cnt))
				goto err;
			break;

		case LPFC_PG_TYPE_INVALID:
		default:
			scsi_dma_unmap(scsi_cmnd);
			lpfc_cmd->seg_cnt = 0;

			lpfc_printf_log(phba, KERN_ERR, LOG_FCP,
					"9083 Unexpected protection group %i\n",
					prot_group_type);
			return 1;
		}
	}

	switch (scsi_get_prot_op(scsi_cmnd)) {
	case SCSI_PROT_WRITE_STRIP:
	case SCSI_PROT_READ_STRIP:
		lpfc_cmd->cur_iocbq.iocb_flag |= LPFC_IO_DIF_STRIP;
		break;
	case SCSI_PROT_WRITE_INSERT:
	case SCSI_PROT_READ_INSERT:
		lpfc_cmd->cur_iocbq.iocb_flag |= LPFC_IO_DIF_INSERT;
		break;
	case SCSI_PROT_WRITE_PASS:
	case SCSI_PROT_READ_PASS:
		lpfc_cmd->cur_iocbq.iocb_flag |= LPFC_IO_DIF_PASS;
		break;
	}

	fcpdl = lpfc_bg_scsi_adjust_dl(phba, lpfc_cmd);
	fcp_cmnd->fcpDl = be32_to_cpu(fcpdl);

	/*
	 * Due to difference in data length between DIF/non-DIF paths,
	 * we need to set word 4 of IOCB here
	 */
	iocb_cmd->un.fcpi.fcpi_parm = fcpdl;

	/*
	 * If the OAS driver feature is enabled and the lun is enabled for
	 * OAS, set the oas iocb related flags.
	 */
	if ((phba->cfg_fof) && ((struct lpfc_device_data *)
		scsi_cmnd->device->hostdata)->oas_enabled)
		lpfc_cmd->cur_iocbq.iocb_flag |= (LPFC_IO_OAS | LPFC_IO_FOF);

	return 0;
err:
	if (lpfc_cmd->seg_cnt)
		scsi_dma_unmap(scsi_cmnd);
	if (lpfc_cmd->prot_seg_cnt)
		dma_unmap_sg(&phba->pcidev->dev, scsi_prot_sglist(scsi_cmnd),
			     scsi_prot_sg_count(scsi_cmnd),
			     scsi_cmnd->sc_data_direction);

	lpfc_printf_log(phba, KERN_ERR, LOG_FCP,
			"9084 Cannot setup S/G List for HBA "
			"IO segs %d/%d SGL %d SCSI %d: %d %d\n",
			lpfc_cmd->seg_cnt, lpfc_cmd->prot_seg_cnt,
			phba->cfg_total_seg_cnt, phba->cfg_sg_seg_cnt,
			prot_group_type, num_sge);

	lpfc_cmd->seg_cnt = 0;
	lpfc_cmd->prot_seg_cnt = 0;
	return 1;
}
/**
 * lpfc_scsi_prep_dma_buf - Wrapper function for DMA mapping of scsi buffer
 * @phba: The Hba for which this call is being executed.
 * @lpfc_cmd: The scsi buffer which is going to be mapped.
 *
 * This routine wraps the actual DMA mapping function pointer from the
 * lpfc_hba struct.
 *
 * Return codes:
 *	1 - Error
 *	0 - Success
 **/
static inline int
lpfc_scsi_prep_dma_buf(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd)
{
	return phba->lpfc_scsi_prep_dma_buf(phba, lpfc_cmd);
}

/**
 * lpfc_bg_scsi_prep_dma_buf - Wrapper function for DMA mapping of scsi buffer
 * using BlockGuard.
 * @phba: The Hba for which this call is being executed.
 * @lpfc_cmd: The scsi buffer which is going to be mapped.
 *
 * This routine wraps the actual DMA mapping function pointer from the
 * lpfc_hba struct.
 *
 * Return codes:
 *	1 - Error
 *	0 - Success
 **/
static inline int
lpfc_bg_scsi_prep_dma_buf(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd)
{
	return phba->lpfc_bg_scsi_prep_dma_buf(phba, lpfc_cmd);
}
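/*
 * Sketch of how the wrappers above are expected to be bound; this
 * mirrors the per-SLI-rev API table setup done elsewhere in the
 * driver (assumption -- shown only for orientation):
 *
 *	case LPFC_PCI_DEV_LP:	(SLI-3)
 *		phba->lpfc_scsi_prep_dma_buf = lpfc_scsi_prep_dma_buf_s3;
 *		phba->lpfc_bg_scsi_prep_dma_buf = lpfc_bg_scsi_prep_dma_buf_s3;
 *		break;
 *	case LPFC_PCI_DEV_OC:	(SLI-4)
 *		phba->lpfc_scsi_prep_dma_buf = lpfc_scsi_prep_dma_buf_s4;
 *		phba->lpfc_bg_scsi_prep_dma_buf = lpfc_bg_scsi_prep_dma_buf_s4;
 *		break;
 */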
/**
 * lpfc_send_scsi_error_event - Posts an event when there is SCSI error
 * @phba: Pointer to hba context object.
 * @vport: Pointer to vport object.
 * @lpfc_cmd: Pointer to lpfc scsi command which reported the error.
 * @rsp_iocb: Pointer to response iocb object which reported error.
 *
 * This function posts an event when there is a SCSI command reporting
 * error from the scsi device.
 **/
static void
lpfc_send_scsi_error_event(struct lpfc_hba *phba, struct lpfc_vport *vport,
		struct lpfc_scsi_buf *lpfc_cmd, struct lpfc_iocbq *rsp_iocb)
{
	struct scsi_cmnd *cmnd = lpfc_cmd->pCmd;
	struct fcp_rsp *fcprsp = lpfc_cmd->fcp_rsp;
	uint32_t resp_info = fcprsp->rspStatus2;
	uint32_t scsi_status = fcprsp->rspStatus3;
	uint32_t fcpi_parm = rsp_iocb->iocb.un.fcpi.fcpi_parm;
	struct lpfc_fast_path_event *fast_path_evt = NULL;
	struct lpfc_nodelist *pnode = lpfc_cmd->rdata->pnode;
	unsigned long flags;

	if (!pnode || !NLP_CHK_NODE_ACT(pnode))
		return;

	/* If there is queuefull or busy condition send a scsi event */
	if ((cmnd->result == SAM_STAT_TASK_SET_FULL) ||
	    (cmnd->result == SAM_STAT_BUSY)) {
		fast_path_evt = lpfc_alloc_fast_evt(phba);
		if (!fast_path_evt)
			return;
		fast_path_evt->un.scsi_evt.event_type =
			FC_REG_SCSI_EVENT;
		fast_path_evt->un.scsi_evt.subcategory =
			(cmnd->result == SAM_STAT_TASK_SET_FULL) ?
			LPFC_EVENT_QFULL : LPFC_EVENT_DEVBSY;
		fast_path_evt->un.scsi_evt.lun = cmnd->device->lun;
		memcpy(&fast_path_evt->un.scsi_evt.wwpn,
			&pnode->nlp_portname, sizeof(struct lpfc_name));
		memcpy(&fast_path_evt->un.scsi_evt.wwnn,
			&pnode->nlp_nodename, sizeof(struct lpfc_name));
	} else if ((resp_info & SNS_LEN_VALID) && fcprsp->rspSnsLen &&
		((cmnd->cmnd[0] == READ_10) || (cmnd->cmnd[0] == WRITE_10))) {
		fast_path_evt = lpfc_alloc_fast_evt(phba);
		if (!fast_path_evt)
			return;
		fast_path_evt->un.check_cond_evt.scsi_event.event_type =
			FC_REG_SCSI_EVENT;
		fast_path_evt->un.check_cond_evt.scsi_event.subcategory =
			LPFC_EVENT_CHECK_COND;
		fast_path_evt->un.check_cond_evt.scsi_event.lun =
			cmnd->device->lun;
		memcpy(&fast_path_evt->un.check_cond_evt.scsi_event.wwpn,
			&pnode->nlp_portname, sizeof(struct lpfc_name));
		memcpy(&fast_path_evt->un.check_cond_evt.scsi_event.wwnn,
			&pnode->nlp_nodename, sizeof(struct lpfc_name));
		fast_path_evt->un.check_cond_evt.sense_key =
			cmnd->sense_buffer[2] & 0xf;
		fast_path_evt->un.check_cond_evt.asc = cmnd->sense_buffer[12];
		fast_path_evt->un.check_cond_evt.ascq = cmnd->sense_buffer[13];
	} else if ((cmnd->sc_data_direction == DMA_FROM_DEVICE) &&
		     fcpi_parm &&
		     ((be32_to_cpu(fcprsp->rspResId) != fcpi_parm) ||
			((scsi_status == SAM_STAT_GOOD) &&
			!(resp_info & (RESID_UNDER | RESID_OVER))))) {
		/*
		 * If status is good or resid does not match with fcp_param and
		 * there is valid fcpi_parm, then there is a read_check error
		 */
		fast_path_evt = lpfc_alloc_fast_evt(phba);
		if (!fast_path_evt)
			return;
		fast_path_evt->un.read_check_error.header.event_type =
			FC_REG_FABRIC_EVENT;
		fast_path_evt->un.read_check_error.header.subcategory =
			LPFC_EVENT_FCPRDCHKERR;
		memcpy(&fast_path_evt->un.read_check_error.header.wwpn,
			&pnode->nlp_portname, sizeof(struct lpfc_name));
		memcpy(&fast_path_evt->un.read_check_error.header.wwnn,
			&pnode->nlp_nodename, sizeof(struct lpfc_name));
		fast_path_evt->un.read_check_error.lun = cmnd->device->lun;
		fast_path_evt->un.read_check_error.opcode = cmnd->cmnd[0];
		fast_path_evt->un.read_check_error.fcpiparam =
			fcpi_parm;
	} else
		return;

	fast_path_evt->vport = vport;
	spin_lock_irqsave(&phba->hbalock, flags);
	list_add_tail(&fast_path_evt->work_evt.evt_listp, &phba->work_list);
	spin_unlock_irqrestore(&phba->hbalock, flags);
	lpfc_worker_wake_up(phba);
	return;
}
/**
 * lpfc_scsi_unprep_dma_buf - Un-map DMA mapping of SG-list for dev
 * @phba: The HBA for which this call is being executed.
 * @psb: The scsi buffer which is going to be un-mapped.
 *
 * This routine does DMA un-mapping of scatter gather list of scsi command
 * field of @lpfc_cmd for device with SLI-3 interface spec.
 **/
static void
lpfc_scsi_unprep_dma_buf(struct lpfc_hba *phba, struct lpfc_scsi_buf *psb)
{
	/*
	 * There are only two special cases to consider.  (1) the scsi command
	 * requested scatter-gather usage or (2) the scsi command allocated
	 * a request buffer, but did not request use_sg.  There is a third
	 * case, but it does not require resource deallocation.
	 */
	if (psb->seg_cnt > 0)
		scsi_dma_unmap(psb->pCmd);
	if (psb->prot_seg_cnt > 0)
		dma_unmap_sg(&phba->pcidev->dev, scsi_prot_sglist(psb->pCmd),
				scsi_prot_sg_count(psb->pCmd),
				psb->pCmd->sc_data_direction);
}
/**
 * lpfc_handle_fcp_err - FCP response handler
 * @vport: The virtual port for which this call is being executed.
 * @lpfc_cmd: Pointer to lpfc_scsi_buf data structure.
 * @rsp_iocb: The response IOCB which contains FCP error.
 *
 * This routine is called to process response IOCB with status field
 * IOSTAT_FCP_RSP_ERROR. This routine sets result field of scsi command
 * based upon SCSI and FCP error.
 **/
static void
lpfc_handle_fcp_err(struct lpfc_vport *vport, struct lpfc_scsi_buf *lpfc_cmd,
		    struct lpfc_iocbq *rsp_iocb)
{
	struct lpfc_hba *phba = vport->phba;
	struct scsi_cmnd *cmnd = lpfc_cmd->pCmd;
	struct fcp_cmnd *fcpcmd = lpfc_cmd->fcp_cmnd;
	struct fcp_rsp *fcprsp = lpfc_cmd->fcp_rsp;
	uint32_t fcpi_parm = rsp_iocb->iocb.un.fcpi.fcpi_parm;
	uint32_t resp_info = fcprsp->rspStatus2;
	uint32_t scsi_status = fcprsp->rspStatus3;
	uint32_t *lp;
	uint32_t host_status = DID_OK;
	uint32_t rsplen = 0;
	uint32_t fcpDl;
	uint32_t logit = LOG_FCP | LOG_FCP_ERROR;

	/*
	 *  If this is a task management command, there is no
	 *  scsi packet associated with this lpfc_cmd.  The driver
	 *  consumes it.
	 */
	if (fcpcmd->fcpCntl2) {
		scsi_status = 0;
		goto out;
	}

	if (resp_info & RSP_LEN_VALID) {
		rsplen = be32_to_cpu(fcprsp->rspRspLen);
		if (rsplen != 0 && rsplen != 4 && rsplen != 8) {
			lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
				 "2719 Invalid response length: "
				 "tgt x%x lun x%llx cmnd x%x rsplen x%x\n",
				 cmnd->device->id,
				 cmnd->device->lun, cmnd->cmnd[0],
				 rsplen);
			host_status = DID_ERROR;
			goto out;
		}
		if (fcprsp->rspInfo3 != RSP_NO_FAILURE) {
			lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
				 "2757 Protocol failure detected during "
				 "processing of FCP I/O op: "
				 "tgt x%x lun x%llx cmnd x%x rspInfo3 x%x\n",
				 cmnd->device->id,
				 cmnd->device->lun, cmnd->cmnd[0],
				 fcprsp->rspInfo3);
			host_status = DID_ERROR;
			goto out;
		}
	}

	if ((resp_info & SNS_LEN_VALID) && fcprsp->rspSnsLen) {
		uint32_t snslen = be32_to_cpu(fcprsp->rspSnsLen);
		if (snslen > SCSI_SENSE_BUFFERSIZE)
			snslen = SCSI_SENSE_BUFFERSIZE;

		if (resp_info & RSP_LEN_VALID)
			rsplen = be32_to_cpu(fcprsp->rspRspLen);
		memcpy(cmnd->sense_buffer, &fcprsp->rspInfo0 + rsplen, snslen);
	}
	lp = (uint32_t *)cmnd->sense_buffer;

	/* special handling for under run conditions */
	if (!scsi_status && (resp_info & RESID_UNDER)) {
		/* don't log under runs if fcp set... */
		if (vport->cfg_log_verbose & LOG_FCP)
			logit = LOG_FCP_ERROR;
		/* unless operator says so */
		if (vport->cfg_log_verbose & LOG_FCP_UNDER)
			logit = LOG_FCP_UNDER;
	}

	lpfc_printf_vlog(vport, KERN_WARNING, logit,
			 "9024 FCP command x%x failed: x%x SNS x%x x%x "
			 "Data: x%x x%x x%x x%x x%x\n",
			 cmnd->cmnd[0], scsi_status,
			 be32_to_cpu(*lp), be32_to_cpu(*(lp + 3)), resp_info,
			 be32_to_cpu(fcprsp->rspResId),
			 be32_to_cpu(fcprsp->rspSnsLen),
			 be32_to_cpu(fcprsp->rspRspLen),
			 fcprsp->rspInfo3);

	scsi_set_resid(cmnd, 0);
	fcpDl = be32_to_cpu(fcpcmd->fcpDl);
	if (resp_info & RESID_UNDER) {
		scsi_set_resid(cmnd, be32_to_cpu(fcprsp->rspResId));

		lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP_UNDER,
				 "9025 FCP Read Underrun, expected %d, "
				 "residual %d Data: x%x x%x x%x\n",
				 fcpDl,
				 scsi_get_resid(cmnd), fcpi_parm, cmnd->cmnd[0],
				 cmnd->underflow);

		/*
		 * If there is an under run check if under run reported by
		 * storage array is same as the under run reported by HBA.
		 * If this is not same, there is a dropped frame.
		 */
		if ((cmnd->sc_data_direction == DMA_FROM_DEVICE) &&
			fcpi_parm &&
			(scsi_get_resid(cmnd) != fcpi_parm)) {
			lpfc_printf_vlog(vport, KERN_WARNING,
					 LOG_FCP | LOG_FCP_ERROR,
					 "9026 FCP Read Check Error "
					 "and Underrun Data: x%x x%x x%x x%x\n",
					 fcpDl,
					 scsi_get_resid(cmnd), fcpi_parm,
					 cmnd->cmnd[0]);
			scsi_set_resid(cmnd, scsi_bufflen(cmnd));
			host_status = DID_ERROR;
		}
		/*
		 * The cmnd->underflow is the minimum number of bytes that must
		 * be transferred for this command.  Provided a sense condition
		 * is not present, make sure the actual amount transferred is at
		 * least the underflow value or fail.
		 */
		if (!(resp_info & SNS_LEN_VALID) &&
		    (scsi_status == SAM_STAT_GOOD) &&
		    (scsi_bufflen(cmnd) - scsi_get_resid(cmnd)
		     < cmnd->underflow)) {
			lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
					 "9027 FCP command x%x residual "
					 "underrun converted to error "
					 "Data: x%x x%x x%x\n",
					 cmnd->cmnd[0], scsi_bufflen(cmnd),
					 scsi_get_resid(cmnd), cmnd->underflow);
			host_status = DID_ERROR;
		}
	} else if (resp_info & RESID_OVER) {
		lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP,
				 "9028 FCP command x%x residual overrun error. "
				 "Data: x%x x%x\n", cmnd->cmnd[0],
				 scsi_bufflen(cmnd), scsi_get_resid(cmnd));
		host_status = DID_ERROR;

	/*
	 * Check SLI validation that all the transfer was actually done
	 * (fcpi_parm should be zero). Apply check only to reads.
	 */
	} else if (fcpi_parm) {
		lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP | LOG_FCP_ERROR,
				 "9029 FCP %s Check Error xri x%x  Data: "
				 "x%x x%x x%x x%x x%x\n",
				 ((cmnd->sc_data_direction == DMA_FROM_DEVICE) ?
				 "Read" : "Write"),
				 ((phba->sli_rev == LPFC_SLI_REV4) ?
				 lpfc_cmd->cur_iocbq.sli4_xritag :
				 rsp_iocb->iocb.ulpContext),
				 fcpDl, be32_to_cpu(fcprsp->rspResId),
				 fcpi_parm, cmnd->cmnd[0], scsi_status);

		/* There is some issue with the LPe12000 that causes it
		 * to miscalculate the fcpi_parm and falsely trip this
		 * recovery logic.  Detect this case and don't error when true.
		 */
		if (fcpi_parm > fcpDl)
			goto out;

		switch (scsi_status) {
		case SAM_STAT_GOOD:
		case SAM_STAT_CHECK_CONDITION:
			/* Fabric dropped a data frame. Fail any successful
			 * command in which we detected dropped frames.
			 * A status of good or some check conditions could
			 * be considered a successful command.
			 */
			host_status = DID_ERROR;
			break;
		}
		scsi_set_resid(cmnd, scsi_bufflen(cmnd));
	}

 out:
	cmnd->result = ScsiResult(host_status, scsi_status);
	lpfc_send_scsi_error_event(vport->phba, vport, lpfc_cmd, rsp_iocb);
}
/**
 * lpfc_sli4_scmd_to_wqidx_distr - scsi command to SLI4 WQ index distribution
 * @phba: Pointer to HBA context object.
 * @lpfc_cmd: The scsi buffer which is being dispatched.
 *
 * This routine performs a roundrobin SCSI command to SLI4 FCP WQ index
 * distribution.  This is called by __lpfc_sli_issue_iocb_s4() with the hbalock
 * held.
 * If scsi-mq is enabled, get the default block layer mapping of software
 * queues to hardware queues. This information is saved in the request tag.
 *
 * Return: index into SLI4 fast-path FCP queue index.
 **/
int lpfc_sli4_scmd_to_wqidx_distr(struct lpfc_hba *phba,
				  struct lpfc_scsi_buf *lpfc_cmd)
{
	struct scsi_cmnd *cmnd = lpfc_cmd->pCmd;
	struct lpfc_vector_map_info *cpup;
	int chann, cpu;
	uint32_t tag;
	uint16_t hwq;

	if (cmnd && shost_use_blk_mq(cmnd->device->host)) {
		tag = blk_mq_unique_tag(cmnd->request);
		hwq = blk_mq_unique_tag_to_hwq(tag);

		return hwq;
	}

	if (phba->cfg_fcp_io_sched == LPFC_FCP_SCHED_BY_CPU
	    && phba->cfg_fcp_io_channel > 1) {
		cpu = smp_processor_id();
		if (cpu < phba->sli4_hba.num_present_cpu) {
			cpup = phba->sli4_hba.cpu_map;
			cpup += cpu;
			return cpup->channel_id;
		}
	}
	chann = atomic_add_return(1, &phba->fcp_qidx);
	chann = chann % phba->cfg_fcp_io_channel;
	return chann;
}
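/*
 * Note on the blk-mq path above (assumption about the block layer
 * helpers): blk_mq_unique_tag() packs the hardware queue index into
 * the upper 16 bits of the returned tag, so
 * blk_mq_unique_tag_to_hwq() recovers it with a shift:
 *
 *	hwq = unique_tag >> BLK_MQ_UNIQUE_TAG_BITS;	(upper 16 bits)
 *	tag = unique_tag & BLK_MQ_UNIQUE_TAG_MASK;	(lower 16 bits)
 */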
/**
 * lpfc_scsi_cmd_iocb_cmpl - Scsi cmnd IOCB completion routine
 * @phba: The Hba for which this call is being executed.
 * @pIocbIn: The command IOCBQ for the scsi cmnd.
 * @pIocbOut: The response IOCBQ for the scsi cmnd.
 *
 * This routine assigns scsi command result by looking into response IOCB
 * status field appropriately. This routine handles QUEUE FULL condition as
 * well by ramping down device queue depth.
 **/
static void
lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn,
			struct lpfc_iocbq *pIocbOut)
{
	struct lpfc_scsi_buf *lpfc_cmd =
		(struct lpfc_scsi_buf *) pIocbIn->context1;
	struct lpfc_vport *vport = pIocbIn->vport;
	struct lpfc_rport_data *rdata = lpfc_cmd->rdata;
	struct lpfc_nodelist *pnode = rdata->pnode;
	struct scsi_cmnd *cmd;
	int depth;
	unsigned long flags;
	struct lpfc_fast_path_event *fast_path_evt;
	struct Scsi_Host *shost;
	uint32_t logit = LOG_FCP;

	phba->fc4ScsiIoCmpls++;

	/* Sanity check on return of outstanding command */
	cmd = lpfc_cmd->pCmd;
	if (!cmd)
		return;
	shost = cmd->device->host;

	lpfc_cmd->result = (pIocbOut->iocb.un.ulpWord[4] & IOERR_PARAM_MASK);
	lpfc_cmd->status = pIocbOut->iocb.ulpStatus;
	/* pick up SLI4 exchange busy status from HBA */
	lpfc_cmd->exch_busy = pIocbOut->iocb_flag & LPFC_EXCHANGE_BUSY;

#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
	if (lpfc_cmd->prot_data_type) {
		struct scsi_dif_tuple *src = NULL;

		src = (struct scsi_dif_tuple *)lpfc_cmd->prot_data_segment;
		/*
		 * Used to restore any changes to protection
		 * data for error injection.
		 */
		switch (lpfc_cmd->prot_data_type) {
		case LPFC_INJERR_REFTAG:
			src->ref_tag =
				lpfc_cmd->prot_data;
			break;
		case LPFC_INJERR_APPTAG:
			src->app_tag =
				(uint16_t)lpfc_cmd->prot_data;
			break;
		case LPFC_INJERR_GUARD:
			src->guard_tag =
				(uint16_t)lpfc_cmd->prot_data;
			break;
		default:
			break;
		}

		lpfc_cmd->prot_data = 0;
		lpfc_cmd->prot_data_type = 0;
		lpfc_cmd->prot_data_segment = NULL;
	}
#endif
	if (pnode && NLP_CHK_NODE_ACT(pnode))
		atomic_dec(&pnode->cmd_pending);

	if (lpfc_cmd->status) {
		if (lpfc_cmd->status == IOSTAT_LOCAL_REJECT &&
		    (lpfc_cmd->result & IOERR_DRVR_MASK))
			lpfc_cmd->status = IOSTAT_DRIVER_REJECT;
		else if (lpfc_cmd->status >= IOSTAT_CNT)
			lpfc_cmd->status = IOSTAT_DEFAULT;
		if (lpfc_cmd->status == IOSTAT_FCP_RSP_ERROR &&
		    !lpfc_cmd->fcp_rsp->rspStatus3 &&
		    (lpfc_cmd->fcp_rsp->rspStatus2 & RESID_UNDER) &&
		    !(vport->cfg_log_verbose & LOG_FCP_UNDER))
			logit = 0;
		else
			logit = LOG_FCP | LOG_FCP_UNDER;
		lpfc_printf_vlog(vport, KERN_WARNING, logit,
				 "9030 FCP cmd x%x failed <%d/%lld> "
				 "status: x%x result: x%x "
				 "sid: x%x did: x%x oxid: x%x "
				 "Data: x%x x%x\n",
				 cmd->cmnd[0],
				 cmd->device ? cmd->device->id : 0xffff,
				 cmd->device ? cmd->device->lun : 0xffff,
				 lpfc_cmd->status, lpfc_cmd->result,
				 vport->fc_myDID,
				 (pnode) ? pnode->nlp_DID : 0,
				 phba->sli_rev == LPFC_SLI_REV4 ?
				     lpfc_cmd->cur_iocbq.sli4_xritag : 0xffff,
				 pIocbOut->iocb.ulpContext,
				 lpfc_cmd->cur_iocbq.iocb.ulpIoTag);

		switch (lpfc_cmd->status) {
		case IOSTAT_FCP_RSP_ERROR:
			/* Call FCP RSP handler to determine result */
			lpfc_handle_fcp_err(vport, lpfc_cmd, pIocbOut);
			break;
		case IOSTAT_NPORT_BSY:
		case IOSTAT_FABRIC_BSY:
			cmd->result = ScsiResult(DID_TRANSPORT_DISRUPTED, 0);
			fast_path_evt = lpfc_alloc_fast_evt(phba);
			if (!fast_path_evt)
				break;
			fast_path_evt->un.fabric_evt.event_type =
				FC_REG_FABRIC_EVENT;
			fast_path_evt->un.fabric_evt.subcategory =
				(lpfc_cmd->status == IOSTAT_NPORT_BSY) ?
				LPFC_EVENT_PORT_BUSY : LPFC_EVENT_FABRIC_BUSY;
			if (pnode && NLP_CHK_NODE_ACT(pnode)) {
				memcpy(&fast_path_evt->un.fabric_evt.wwpn,
				       &pnode->nlp_portname,
				       sizeof(struct lpfc_name));
				memcpy(&fast_path_evt->un.fabric_evt.wwnn,
				       &pnode->nlp_nodename,
				       sizeof(struct lpfc_name));
			}
			fast_path_evt->vport = vport;
			fast_path_evt->work_evt.evt =
				LPFC_EVT_FASTPATH_MGMT_EVT;
			spin_lock_irqsave(&phba->hbalock, flags);
			list_add_tail(&fast_path_evt->work_evt.evt_listp,
				      &phba->work_list);
			spin_unlock_irqrestore(&phba->hbalock, flags);
			lpfc_worker_wake_up(phba);
			break;
		case IOSTAT_LOCAL_REJECT:
		case IOSTAT_REMOTE_STOP:
			if (lpfc_cmd->result == IOERR_ELXSEC_KEY_UNWRAP_ERROR ||
			    lpfc_cmd->result ==
					IOERR_ELXSEC_KEY_UNWRAP_COMPARE_ERROR ||
			    lpfc_cmd->result == IOERR_ELXSEC_CRYPTO_ERROR ||
			    lpfc_cmd->result ==
					IOERR_ELXSEC_CRYPTO_COMPARE_ERROR) {
				cmd->result = ScsiResult(DID_NO_CONNECT, 0);
				break;
			}
			if (lpfc_cmd->result == IOERR_INVALID_RPI ||
			    lpfc_cmd->result == IOERR_NO_RESOURCES ||
			    lpfc_cmd->result == IOERR_ABORT_REQUESTED ||
			    lpfc_cmd->result == IOERR_SLER_CMD_RCV_FAILURE) {
				cmd->result = ScsiResult(DID_REQUEUE, 0);
				break;
			}
			if ((lpfc_cmd->result == IOERR_RX_DMA_FAILED ||
			     lpfc_cmd->result == IOERR_TX_DMA_FAILED) &&
			    pIocbOut->iocb.unsli3.sli3_bg.bgstat) {
				if (scsi_get_prot_op(cmd) != SCSI_PROT_NORMAL) {
					/*
					 * This is a response for a BG enabled
					 * cmd. Parse BG error
					 */
					lpfc_parse_bg_err(phba, lpfc_cmd,
							  pIocbOut);
					break;
				} else {
					lpfc_printf_vlog(vport, KERN_WARNING,
							 LOG_BG,
							 "9031 non-zero BGSTAT "
							 "on unprotected cmd\n");
				}
			}
			if ((lpfc_cmd->status == IOSTAT_REMOTE_STOP)
			    && (phba->sli_rev == LPFC_SLI_REV4)
			    && (pnode && NLP_CHK_NODE_ACT(pnode))) {
				/* This IO was aborted by the target, we don't
				 * know the rxid and because we did not send the
				 * ABTS we cannot generate an RRQ.
				 */
				lpfc_set_rrq_active(phba, pnode,
					lpfc_cmd->cur_iocbq.sli4_lxritag,
					0, 0);
			}
			/* else: fall through */
		default:
			cmd->result = ScsiResult(DID_ERROR, 0);
			break;
		}

		if (!pnode || !NLP_CHK_NODE_ACT(pnode)
		    || (pnode->nlp_state != NLP_STE_MAPPED_NODE))
			cmd->result = ScsiResult(DID_TRANSPORT_DISRUPTED,
						 SAM_STAT_BUSY);
	} else
		cmd->result = ScsiResult(DID_OK, 0);

	if (cmd->result || lpfc_cmd->fcp_rsp->rspSnsLen) {
		uint32_t *lp = (uint32_t *)cmd->sense_buffer;

		lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
				 "0710 Iodone <%d/%llu> cmd %p, error "
				 "x%x SNS x%x x%x Data: x%x x%x\n",
				 cmd->device->id, cmd->device->lun, cmd,
				 cmd->result, *lp, *(lp + 3), cmd->retries,
				 scsi_get_resid(cmd));
	}

	lpfc_update_stats(phba, lpfc_cmd);
	if (vport->cfg_max_scsicmpl_time &&
	    time_after(jiffies, lpfc_cmd->start_time +
		       msecs_to_jiffies(vport->cfg_max_scsicmpl_time))) {
		spin_lock_irqsave(shost->host_lock, flags);
		if (pnode && NLP_CHK_NODE_ACT(pnode)) {
			if (pnode->cmd_qdepth >
			    atomic_read(&pnode->cmd_pending) &&
			    (atomic_read(&pnode->cmd_pending) >
			    LPFC_MIN_TGT_QDEPTH) &&
			    ((cmd->cmnd[0] == READ_10) ||
			    (cmd->cmnd[0] == WRITE_10)))
				pnode->cmd_qdepth =
					atomic_read(&pnode->cmd_pending);

			pnode->last_change_time = jiffies;
		}
		spin_unlock_irqrestore(shost->host_lock, flags);
	} else if (pnode && NLP_CHK_NODE_ACT(pnode)) {
		if ((pnode->cmd_qdepth < vport->cfg_tgt_queue_depth) &&
		    time_after(jiffies, pnode->last_change_time +
			       msecs_to_jiffies(LPFC_TGTQ_INTERVAL))) {
			spin_lock_irqsave(shost->host_lock, flags);
			depth = pnode->cmd_qdepth * LPFC_TGTQ_RAMPUP_PCENT
				/ 100;
			depth = depth ? depth : 1;
			pnode->cmd_qdepth += depth;
			if (pnode->cmd_qdepth > vport->cfg_tgt_queue_depth)
				pnode->cmd_qdepth = vport->cfg_tgt_queue_depth;
			pnode->last_change_time = jiffies;
			spin_unlock_irqrestore(shost->host_lock, flags);
		}
	}

	lpfc_scsi_unprep_dma_buf(phba, lpfc_cmd);

	spin_lock_irqsave(&phba->hbalock, flags);
	lpfc_cmd->pCmd = NULL;
	spin_unlock_irqrestore(&phba->hbalock, flags);

	/* The sdev is not guaranteed to be valid post scsi_done upcall. */
	cmd->scsi_done(cmd);

	/*
	 * If there is a thread waiting for command completion
	 * wake up the thread.
	 */
	spin_lock_irqsave(shost->host_lock, flags);
	if (lpfc_cmd->waitq)
		wake_up(lpfc_cmd->waitq);
	spin_unlock_irqrestore(shost->host_lock, flags);

	lpfc_release_scsi_buf(phba, lpfc_cmd);
}
/**
 * lpfc_fcpcmd_to_iocb - copy the fcp_cmd data into the IOCB
 * @data: A pointer to the immediate command data portion of the IOCB.
 * @fcp_cmnd: The FCP Command that is provided by the SCSI layer.
 *
 * The routine copies the entire FCP command from @fcp_cmnd to @data while
 * byte swapping the data to big endian format for transmission on the wire.
 **/
static void
lpfc_fcpcmd_to_iocb(uint8_t *data, struct fcp_cmnd *fcp_cmnd)
{
	int i, j;

	for (i = 0, j = 0; i < sizeof(struct fcp_cmnd);
	     i += sizeof(uint32_t), j++) {
		((uint32_t *)data)[j] = cpu_to_be32(((uint32_t *)fcp_cmnd)[j]);
	}
}
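/*
 * Illustrative sketch (not part of the driver): on a little-endian host the
 * loop above byte swaps each 32-bit word. A CPU value of 0x11223344 is
 * stored by cpu_to_be32() so its bytes read 0x11 0x22 0x33 0x44 in memory,
 * i.e. the big-endian wire order; on a big-endian host it is a no-op.
 */
#if 0
static void example_wordswap_copy(uint32_t *dst, const uint32_t *src,
				  size_t nwords)
{
	size_t j;

	for (j = 0; j < nwords; j++)
		dst[j] = cpu_to_be32(src[j]);	/* no-op on big-endian hosts */
}
#endif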
/**
 * lpfc_scsi_prep_cmnd - Wrapper func for convert scsi cmnd to FCP info unit
 * @vport: The virtual port for which this call is being executed.
 * @lpfc_cmd: The scsi command which needs to send.
 * @pnode: Pointer to lpfc_nodelist.
 *
 * This routine initializes fcp_cmnd and iocb data structure from scsi command
 * to transfer for device with SLI3 interface spec.
 **/
static void
lpfc_scsi_prep_cmnd(struct lpfc_vport *vport, struct lpfc_scsi_buf *lpfc_cmd,
		    struct lpfc_nodelist *pnode)
{
	struct lpfc_hba *phba = vport->phba;
	struct scsi_cmnd *scsi_cmnd = lpfc_cmd->pCmd;
	struct fcp_cmnd *fcp_cmnd = lpfc_cmd->fcp_cmnd;
	IOCB_t *iocb_cmd = &lpfc_cmd->cur_iocbq.iocb;
	struct lpfc_iocbq *piocbq = &(lpfc_cmd->cur_iocbq);
	int datadir = scsi_cmnd->sc_data_direction;
	uint8_t *ptr;
	bool sli4;
	uint32_t fcpdl;

	if (!pnode || !NLP_CHK_NODE_ACT(pnode))
		return;

	lpfc_cmd->fcp_rsp->rspSnsLen = 0;
	/* clear task management bits */
	lpfc_cmd->fcp_cmnd->fcpCntl2 = 0;

	int_to_scsilun(lpfc_cmd->pCmd->device->lun,
		       &lpfc_cmd->fcp_cmnd->fcp_lun);

	ptr = &fcp_cmnd->fcpCdb[0];
	memcpy(ptr, scsi_cmnd->cmnd, scsi_cmnd->cmd_len);
	if (scsi_cmnd->cmd_len < LPFC_FCP_CDB_LEN) {
		ptr += scsi_cmnd->cmd_len;
		memset(ptr, 0, (LPFC_FCP_CDB_LEN - scsi_cmnd->cmd_len));
	}

	fcp_cmnd->fcpCntl1 = SIMPLE_Q;

	sli4 = (phba->sli_rev == LPFC_SLI_REV4);
	piocbq->iocb.un.fcpi.fcpi_XRdy = 0;

	/*
	 * There are three possibilities here - use scatter-gather segment, use
	 * the single mapping, or neither. Start the lpfc command prep by
	 * bumping the bpl beyond the fcp_cmnd and fcp_rsp regions to the first
	 * data bde entry.
	 */
	if (scsi_sg_count(scsi_cmnd)) {
		if (datadir == DMA_TO_DEVICE) {
			iocb_cmd->ulpCommand = CMD_FCP_IWRITE64_CR;
			iocb_cmd->ulpPU = PARM_READ_CHECK;
			if (vport->cfg_first_burst_size &&
			    (pnode->nlp_flag & NLP_FIRSTBURST)) {
				fcpdl = scsi_bufflen(scsi_cmnd);
				if (fcpdl < vport->cfg_first_burst_size)
					piocbq->iocb.un.fcpi.fcpi_XRdy = fcpdl;
				else
					piocbq->iocb.un.fcpi.fcpi_XRdy =
						vport->cfg_first_burst_size;
			}
			fcp_cmnd->fcpCntl3 = WRITE_DATA;
			phba->fc4ScsiOutputRequests++;
		} else {
			iocb_cmd->ulpCommand = CMD_FCP_IREAD64_CR;
			iocb_cmd->ulpPU = PARM_READ_CHECK;
			fcp_cmnd->fcpCntl3 = READ_DATA;
			phba->fc4ScsiInputRequests++;
		}
	} else {
		iocb_cmd->ulpCommand = CMD_FCP_ICMND64_CR;
		iocb_cmd->un.fcpi.fcpi_parm = 0;
		iocb_cmd->ulpPU = 0;
		fcp_cmnd->fcpCntl3 = 0;
		phba->fc4ScsiControlRequests++;
	}
	if (phba->sli_rev == 3 &&
	    !(phba->sli3_options & LPFC_SLI3_BG_ENABLED))
		lpfc_fcpcmd_to_iocb(iocb_cmd->unsli3.fcp_ext.icd, fcp_cmnd);
	/*
	 * Finish initializing those IOCB fields that are independent
	 * of the scsi_cmnd request_buffer
	 */
	piocbq->iocb.ulpContext = pnode->nlp_rpi;
	if (sli4)
		piocbq->iocb.ulpContext =
			phba->sli4_hba.rpi_ids[pnode->nlp_rpi];
	if (pnode->nlp_fcp_info & NLP_FCP_2_DEVICE)
		piocbq->iocb.ulpFCP2Rcvy = 1;
	else
		piocbq->iocb.ulpFCP2Rcvy = 0;

	piocbq->iocb.ulpClass = (pnode->nlp_fcp_info & 0x0f);
	piocbq->context1 = lpfc_cmd;
	piocbq->iocb_cmpl = lpfc_scsi_cmd_iocb_cmpl;
	piocbq->iocb.ulpTimeout = lpfc_cmd->timeout;
	piocbq->vport = vport;
}
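/*
 * Illustrative sketch (not part of the driver): the write path above caps
 * the initial transfer-ready offset (fcpi_XRdy) at the configured first
 * burst size, so the target may accept at most min(transfer length, first
 * burst size) bytes before sending XFER_RDY. Assumes first burst was
 * negotiated (NLP_FIRSTBURST).
 */
#if 0
static uint32_t example_first_burst_xrdy(uint32_t fcpdl, uint32_t first_burst)
{
	return (fcpdl < first_burst) ? fcpdl : first_burst;
}
#endif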
/**
 * lpfc_scsi_prep_task_mgmt_cmd - Convert SLI3 scsi TM cmd to FCP info unit
 * @vport: The virtual port for which this call is being executed.
 * @lpfc_cmd: Pointer to lpfc_scsi_buf data structure.
 * @lun: Logical unit number.
 * @task_mgmt_cmd: SCSI task management command.
 *
 * This routine creates FCP information unit corresponding to @task_mgmt_cmd
 * for device with SLI-3 interface spec.
 *
 * Return codes:
 *   0 - Error
 *   1 - Success
 **/
static int
lpfc_scsi_prep_task_mgmt_cmd(struct lpfc_vport *vport,
			     struct lpfc_scsi_buf *lpfc_cmd,
			     uint64_t lun,
			     uint8_t task_mgmt_cmd)
{
	struct lpfc_iocbq *piocbq;
	IOCB_t *piocb;
	struct fcp_cmnd *fcp_cmnd;
	struct lpfc_rport_data *rdata = lpfc_cmd->rdata;
	struct lpfc_nodelist *ndlp = rdata->pnode;

	if (!ndlp || !NLP_CHK_NODE_ACT(ndlp) ||
	    ndlp->nlp_state != NLP_STE_MAPPED_NODE)
		return 0;

	piocbq = &(lpfc_cmd->cur_iocbq);
	piocbq->vport = vport;

	piocb = &piocbq->iocb;

	fcp_cmnd = lpfc_cmd->fcp_cmnd;
	/* Clear out any old data in the FCP command area */
	memset(fcp_cmnd, 0, sizeof(struct fcp_cmnd));
	int_to_scsilun(lun, &fcp_cmnd->fcp_lun);
	fcp_cmnd->fcpCntl2 = task_mgmt_cmd;
	if (vport->phba->sli_rev == 3 &&
	    !(vport->phba->sli3_options & LPFC_SLI3_BG_ENABLED))
		lpfc_fcpcmd_to_iocb(piocb->unsli3.fcp_ext.icd, fcp_cmnd);
	piocb->ulpCommand = CMD_FCP_ICMND64_CR;
	piocb->ulpContext = ndlp->nlp_rpi;
	if (vport->phba->sli_rev == LPFC_SLI_REV4) {
		piocb->ulpContext =
			vport->phba->sli4_hba.rpi_ids[ndlp->nlp_rpi];
	}
	piocb->ulpFCP2Rcvy = (ndlp->nlp_fcp_info & NLP_FCP_2_DEVICE) ? 1 : 0;
	piocb->ulpClass = (ndlp->nlp_fcp_info & 0x0f);
	piocb->ulpPU = 0;
	piocb->un.fcpi.fcpi_parm = 0;

	/* ulpTimeout is only one byte */
	if (lpfc_cmd->timeout > 0xff) {
		/*
		 * Do not timeout the command at the firmware level.
		 * The driver will provide the timeout mechanism.
		 */
		piocb->ulpTimeout = 0;
	} else
		piocb->ulpTimeout = lpfc_cmd->timeout;

	if (vport->phba->sli_rev == LPFC_SLI_REV4)
		lpfc_sli4_set_rsp_sgl_last(vport->phba, lpfc_cmd);

	return 1;
}
/**
 * lpfc_scsi_api_table_setup - Set up scsi api function jump table
 * @phba: The hba struct for which this call is being executed.
 * @dev_grp: The HBA PCI-Device group number.
 *
 * This routine sets up the SCSI interface API function jump table in @phba
 * struct.
 * Returns: 0 - success, -ENODEV - failure.
 **/
int
lpfc_scsi_api_table_setup(struct lpfc_hba *phba, uint8_t dev_grp)
{

	phba->lpfc_scsi_unprep_dma_buf = lpfc_scsi_unprep_dma_buf;
	phba->lpfc_scsi_prep_cmnd = lpfc_scsi_prep_cmnd;

	switch (dev_grp) {
	case LPFC_PCI_DEV_LP:
		phba->lpfc_new_scsi_buf = lpfc_new_scsi_buf_s3;
		phba->lpfc_scsi_prep_dma_buf = lpfc_scsi_prep_dma_buf_s3;
		phba->lpfc_bg_scsi_prep_dma_buf = lpfc_bg_scsi_prep_dma_buf_s3;
		phba->lpfc_release_scsi_buf = lpfc_release_scsi_buf_s3;
		phba->lpfc_get_scsi_buf = lpfc_get_scsi_buf_s3;
		break;
	case LPFC_PCI_DEV_OC:
		phba->lpfc_new_scsi_buf = lpfc_new_scsi_buf_s4;
		phba->lpfc_scsi_prep_dma_buf = lpfc_scsi_prep_dma_buf_s4;
		phba->lpfc_bg_scsi_prep_dma_buf = lpfc_bg_scsi_prep_dma_buf_s4;
		phba->lpfc_release_scsi_buf = lpfc_release_scsi_buf_s4;
		phba->lpfc_get_scsi_buf = lpfc_get_scsi_buf_s4;
		break;
	default:
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"1418 Invalid HBA PCI-device group: 0x%x\n",
				dev_grp);
		return -ENODEV;
	}
	phba->lpfc_rampdown_queue_depth = lpfc_rampdown_queue_depth;
	phba->lpfc_scsi_cmd_iocb_cmpl = lpfc_scsi_cmd_iocb_cmpl;
	return 0;
}
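/*
 * Illustrative sketch (not part of the driver): once the jump table is set
 * up, SLI-revision-neutral code dispatches through the phba pointers rather
 * than testing sli_rev at every call site, e.g.:
 */
#if 0
static int example_prep_dma(struct lpfc_hba *phba, struct lpfc_scsi_buf *psb)
{
	/* resolves to the _s3 or _s4 variant chosen in the table setup */
	return phba->lpfc_scsi_prep_dma_buf(phba, psb);
}
#endif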
/**
 * lpfc_tskmgmt_def_cmpl - IOCB completion routine for task management command
 * @phba: The Hba for which this call is being executed.
 * @cmdiocbq: Pointer to lpfc_iocbq data structure.
 * @rspiocbq: Pointer to lpfc_iocbq data structure.
 *
 * This routine is the IOCB completion routine for device reset and target
 * reset. It releases the scsi buffer associated with lpfc_cmd.
 **/
static void
lpfc_tskmgmt_def_cmpl(struct lpfc_hba *phba,
		      struct lpfc_iocbq *cmdiocbq,
		      struct lpfc_iocbq *rspiocbq)
{
	struct lpfc_scsi_buf *lpfc_cmd =
		(struct lpfc_scsi_buf *) cmdiocbq->context1;
	if (lpfc_cmd)
		lpfc_release_scsi_buf(phba, lpfc_cmd);
	return;
}
/**
 * lpfc_info - Info entry point of scsi_host_template data structure
 * @host: The scsi host for which this call is being executed.
 *
 * This routine provides module information about hba.
 *
 * Return code:
 *   Pointer to char - Success.
 **/
const char *
lpfc_info(struct Scsi_Host *host)
{
	struct lpfc_vport *vport = (struct lpfc_vport *) host->hostdata;
	struct lpfc_hba *phba = vport->phba;
	int len, link_speed = 0;
	static char lpfcinfobuf[384];

	memset(lpfcinfobuf, 0, 384);
	if (phba && phba->pcidev) {
		strncpy(lpfcinfobuf, phba->ModelDesc, 256);
		len = strlen(lpfcinfobuf);
		snprintf(lpfcinfobuf + len,
			 384-len,
			 " on PCI bus %02x device %02x irq %d",
			 phba->pcidev->bus->number,
			 phba->pcidev->devfn,
			 phba->pcidev->irq);
		len = strlen(lpfcinfobuf);
		if (phba->Port[0]) {
			snprintf(lpfcinfobuf + len,
				 384-len,
				 " port %s",
				 phba->Port);
		}
		len = strlen(lpfcinfobuf);
		link_speed = lpfc_sli_port_speed_get(phba);
		if (link_speed != 0)
			snprintf(lpfcinfobuf + len, 384-len,
				 " Logical Link Speed: %d Mbps", link_speed);
	}
	return lpfcinfobuf;
}
/**
 * lpfc_poll_rearm_timer - Routine to modify fcp_poll timer of hba
 * @phba: The Hba for which this call is being executed.
 *
 * This routine modifies the fcp_poll_timer field of @phba by cfg_poll_tmo.
 * The default value of cfg_poll_tmo is 10 milliseconds.
 **/
static __inline__ void lpfc_poll_rearm_timer(struct lpfc_hba * phba)
{
	unsigned long poll_tmo_expires =
		(jiffies + msecs_to_jiffies(phba->cfg_poll_tmo));

	if (!list_empty(&phba->sli.sli3_ring[LPFC_FCP_RING].txcmplq))
		mod_timer(&phba->fcp_poll_timer,
			  poll_tmo_expires);
}
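/*
 * Illustrative sketch (not part of the driver): mod_timer() both arms an
 * inactive timer and pushes out a pending one, so calling the rearm helper
 * from the poll path keeps exactly one shot outstanding while the txcmplq
 * still has work queued.
 */
#if 0
static void example_rearm(struct timer_list *t, unsigned int tmo_msecs)
{
	mod_timer(t, jiffies + msecs_to_jiffies(tmo_msecs));
}
#endif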
/**
 * lpfc_poll_start_timer - Routine to start fcp_poll_timer of HBA
 * @phba: The Hba for which this call is being executed.
 *
 * This routine starts the fcp_poll_timer of @phba.
 **/
void lpfc_poll_start_timer(struct lpfc_hba * phba)
{
	lpfc_poll_rearm_timer(phba);
}
/**
 * lpfc_poll_timeout - Restart polling timer
 * @ptr: Map to lpfc_hba data structure pointer.
 *
 * This routine restarts the fcp_poll timer when FCP ring polling is enabled
 * and the FCP Ring interrupt is disabled.
 **/
void lpfc_poll_timeout(unsigned long ptr)
{
	struct lpfc_hba *phba = (struct lpfc_hba *) ptr;

	if (phba->cfg_poll & ENABLE_FCP_RING_POLLING) {
		lpfc_sli_handle_fast_ring_event(phba,
			&phba->sli.sli3_ring[LPFC_FCP_RING], HA_R0RE_REQ);

		if (phba->cfg_poll & DISABLE_FCP_RING_INT)
			lpfc_poll_rearm_timer(phba);
	}
}
/**
 * lpfc_queuecommand - scsi_host_template queuecommand entry point
 * @shost: Pointer to Scsi_Host data structure.
 * @cmnd: Pointer to scsi_cmnd data structure.
 *
 * Driver registers this routine to scsi midlayer to submit a @cmnd to process.
 * This routine prepares an IOCB from the scsi command and provides it to
 * firmware. The midlayer's done callback is invoked after the driver has
 * finished processing the command.
 *
 * Return value :
 *   0 - Success
 *   SCSI_MLQUEUE_HOST_BUSY - Block all devices served by this host temporarily.
 **/
static int
lpfc_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *cmnd)
{
	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
	struct lpfc_hba *phba = vport->phba;
	struct lpfc_rport_data *rdata;
	struct lpfc_nodelist *ndlp;
	struct lpfc_scsi_buf *lpfc_cmd;
	struct fc_rport *rport = starget_to_rport(scsi_target(cmnd->device));
	int err;

	rdata = lpfc_rport_data_from_scsi_device(cmnd->device);
	err = fc_remote_port_chkready(rport);
	if (err) {
		cmnd->result = err;
		goto out_fail_command;
	}
	ndlp = rdata->pnode;

	if ((scsi_get_prot_op(cmnd) != SCSI_PROT_NORMAL) &&
	    (!(phba->sli3_options & LPFC_SLI3_BG_ENABLED))) {

		lpfc_printf_log(phba, KERN_ERR, LOG_BG,
				"9058 BLKGRD: ERROR: rcvd protected cmd:%02x"
				" op:%02x str=%s without registering for"
				" BlockGuard - Rejecting command\n",
				cmnd->cmnd[0], scsi_get_prot_op(cmnd),
				dif_op_str[scsi_get_prot_op(cmnd)]);
		goto out_fail_command;
	}

	/*
	 * Catch race where our node has transitioned, but the
	 * transport is still transitioning.
	 */
	if (!ndlp || !NLP_CHK_NODE_ACT(ndlp))
		goto out_tgt_busy;
	if (atomic_read(&ndlp->cmd_pending) >= ndlp->cmd_qdepth)
		goto out_tgt_busy;

	lpfc_cmd = lpfc_get_scsi_buf(phba, ndlp);
	if (lpfc_cmd == NULL) {
		lpfc_rampdown_queue_depth(phba);

		lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP_ERROR,
				 "0707 driver's buffer pool is empty, "
				 "IO busied\n");
		goto out_host_busy;
	}

	/*
	 * Store the midlayer's command structure for the completion phase
	 * and complete the command initialization.
	 */
	lpfc_cmd->pCmd = cmnd;
	lpfc_cmd->rdata = rdata;
	lpfc_cmd->timeout = 0;
	lpfc_cmd->start_time = jiffies;
	cmnd->host_scribble = (unsigned char *)lpfc_cmd;

	if (scsi_get_prot_op(cmnd) != SCSI_PROT_NORMAL) {
		if (vport->phba->cfg_enable_bg) {
			lpfc_printf_vlog(vport,
					 KERN_INFO, LOG_SCSI_CMD,
					 "9033 BLKGRD: rcvd %s cmd:x%x "
					 "sector x%llx cnt %u pt %x\n",
					 dif_op_str[scsi_get_prot_op(cmnd)],
					 cmnd->cmnd[0],
					 (unsigned long long)scsi_get_lba(cmnd),
					 blk_rq_sectors(cmnd->request),
					 (cmnd->cmnd[1]>>5));
		}
		err = lpfc_bg_scsi_prep_dma_buf(phba, lpfc_cmd);
	} else {
		if (vport->phba->cfg_enable_bg) {
			lpfc_printf_vlog(vport,
					 KERN_INFO, LOG_SCSI_CMD,
					 "9038 BLKGRD: rcvd PROT_NORMAL cmd: "
					 "x%x sector x%llx cnt %u pt %x\n",
					 cmnd->cmnd[0],
					 (unsigned long long)scsi_get_lba(cmnd),
					 blk_rq_sectors(cmnd->request),
					 (cmnd->cmnd[1]>>5));
		}
		err = lpfc_scsi_prep_dma_buf(phba, lpfc_cmd);
	}

	if (err)
		goto out_host_busy_free_buf;

	lpfc_scsi_prep_cmnd(vport, lpfc_cmd, ndlp);

	atomic_inc(&ndlp->cmd_pending);
	err = lpfc_sli_issue_iocb(phba, LPFC_FCP_RING,
				  &lpfc_cmd->cur_iocbq, SLI_IOCB_RET_IOCB);
	if (err) {
		atomic_dec(&ndlp->cmd_pending);
		lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
				 "3376 FCP could not issue IOCB err %x"
				 "FCP cmd x%x <%d/%llu> "
				 "sid: x%x did: x%x oxid: x%x "
				 "Data: x%x x%x x%x x%x\n",
				 err, cmnd->cmnd[0],
				 cmnd->device ? cmnd->device->id : 0xffff,
				 cmnd->device ? cmnd->device->lun : (u64) -1,
				 vport->fc_myDID, ndlp->nlp_DID,
				 phba->sli_rev == LPFC_SLI_REV4 ?
				 lpfc_cmd->cur_iocbq.sli4_xritag : 0xffff,
				 lpfc_cmd->cur_iocbq.iocb.ulpContext,
				 lpfc_cmd->cur_iocbq.iocb.ulpIoTag,
				 lpfc_cmd->cur_iocbq.iocb.ulpTimeout,
				 (uint32_t)
				 (cmnd->request->timeout / 1000));

		goto out_host_busy_free_buf;
	}
	if (phba->cfg_poll & ENABLE_FCP_RING_POLLING) {
		lpfc_sli_handle_fast_ring_event(phba,
			&phba->sli.sli3_ring[LPFC_FCP_RING], HA_R0RE_REQ);

		if (phba->cfg_poll & DISABLE_FCP_RING_INT)
			lpfc_poll_rearm_timer(phba);
	}

	return 0;

 out_host_busy_free_buf:
	lpfc_scsi_unprep_dma_buf(phba, lpfc_cmd);
	lpfc_release_scsi_buf(phba, lpfc_cmd);
 out_host_busy:
	return SCSI_MLQUEUE_HOST_BUSY;

 out_tgt_busy:
	return SCSI_MLQUEUE_TARGET_BUSY;

 out_fail_command:
	cmnd->scsi_done(cmnd);
	return 0;
}
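/*
 * Illustrative sketch (not part of the driver): the error labels above map
 * resource shortages to the midlayer's retry hints. HOST_BUSY throttles the
 * whole host, TARGET_BUSY throttles only the one target, while a failed
 * rport check completes the command immediately via scsi_done().
 */
#if 0
static int example_busy_policy(bool host_out_of_buffers, bool target_saturated)
{
	if (host_out_of_buffers)
		return SCSI_MLQUEUE_HOST_BUSY;
	if (target_saturated)
		return SCSI_MLQUEUE_TARGET_BUSY;
	return 0;	/* command was accepted */
}
#endif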
/**
 * lpfc_abort_handler - scsi_host_template eh_abort_handler entry point
 * @cmnd: Pointer to scsi_cmnd data structure.
 *
 * This routine aborts @cmnd pending in base driver.
 *
 * Return code :
 *   0x2003 - Error
 *   0x2002 - Success
 **/
static int
lpfc_abort_handler(struct scsi_cmnd *cmnd)
{
	struct Scsi_Host *shost = cmnd->device->host;
	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
	struct lpfc_hba *phba = vport->phba;
	struct lpfc_iocbq *iocb;
	struct lpfc_iocbq *abtsiocb;
	struct lpfc_scsi_buf *lpfc_cmd;
	IOCB_t *cmd, *icmd;
	int ret = SUCCESS, status = 0;
	struct lpfc_sli_ring *pring_s4;
	int ret_val;
	unsigned long flags, iflags;
	DECLARE_WAIT_QUEUE_HEAD_ONSTACK(waitq);

	status = fc_block_scsi_eh(cmnd);
	if (status != 0 && status != SUCCESS)
		return status;

	spin_lock_irqsave(&phba->hbalock, flags);
	/* driver queued commands are in process of being flushed */
	if (phba->hba_flag & HBA_FCP_IOQ_FLUSH) {
		spin_unlock_irqrestore(&phba->hbalock, flags);
		lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP,
			"3168 SCSI Layer abort requested I/O has been "
			"flushed by LLD.\n");
		return FAILED;
	}

	lpfc_cmd = (struct lpfc_scsi_buf *)cmnd->host_scribble;
	if (!lpfc_cmd || !lpfc_cmd->pCmd) {
		spin_unlock_irqrestore(&phba->hbalock, flags);
		lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP,
			 "2873 SCSI Layer I/O Abort Request IO CMPL Status "
			 "x%x ID %d LUN %llu\n",
			 SUCCESS, cmnd->device->id, cmnd->device->lun);
		return SUCCESS;
	}

	iocb = &lpfc_cmd->cur_iocbq;
	/* the command is in process of being cancelled */
	if (!(iocb->iocb_flag & LPFC_IO_ON_TXCMPLQ)) {
		spin_unlock_irqrestore(&phba->hbalock, flags);
		lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP,
			"3169 SCSI Layer abort requested I/O has been "
			"cancelled by LLD.\n");
		return FAILED;
	}
	/*
	 * If pCmd field of the corresponding lpfc_scsi_buf structure
	 * points to a different SCSI command, then the driver has
	 * already completed this command, but the midlayer did not
	 * see the completion before the eh fired. Just return SUCCESS.
	 */
	if (lpfc_cmd->pCmd != cmnd) {
		lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP,
			"3170 SCSI Layer abort requested I/O has been "
			"completed by LLD.\n");
		goto out_unlock;
	}

	BUG_ON(iocb->context1 != lpfc_cmd);

	/* abort issued in recovery is still in progress */
	if (iocb->iocb_flag & LPFC_DRIVER_ABORTED) {
		lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP,
			 "3389 SCSI Layer I/O Abort Request is pending\n");
		spin_unlock_irqrestore(&phba->hbalock, flags);
		goto wait_for_cmpl;
	}

	abtsiocb = __lpfc_sli_get_iocbq(phba);
	if (abtsiocb == NULL) {
		ret = FAILED;
		goto out_unlock;
	}

	/* Indicate the IO is being aborted by the driver. */
	iocb->iocb_flag |= LPFC_DRIVER_ABORTED;

	/*
	 * The scsi command can not be in txq and it is in flight because the
	 * pCmd is still pointing at the SCSI command we have to abort. There
	 * is no need to search the txcmplq. Just send an abort to the FW.
	 */

	cmd = &iocb->iocb;
	icmd = &abtsiocb->iocb;
	icmd->un.acxri.abortType = ABORT_TYPE_ABTS;
	icmd->un.acxri.abortContextTag = cmd->ulpContext;
	if (phba->sli_rev == LPFC_SLI_REV4)
		icmd->un.acxri.abortIoTag = iocb->sli4_xritag;
	else
		icmd->un.acxri.abortIoTag = cmd->ulpIoTag;

	icmd->ulpLe = 1;
	icmd->ulpClass = cmd->ulpClass;

	/* ABTS WQE must go to the same WQ as the WQE to be aborted */
	abtsiocb->hba_wqidx = iocb->hba_wqidx;
	abtsiocb->iocb_flag |= LPFC_USE_FCPWQIDX;
	if (iocb->iocb_flag & LPFC_IO_FOF)
		abtsiocb->iocb_flag |= LPFC_IO_FOF;

	if (lpfc_is_link_up(phba))
		icmd->ulpCommand = CMD_ABORT_XRI_CN;
	else
		icmd->ulpCommand = CMD_CLOSE_XRI_CN;

	abtsiocb->iocb_cmpl = lpfc_sli_abort_fcp_cmpl;
	abtsiocb->vport = vport;
	if (phba->sli_rev == LPFC_SLI_REV4) {
		pring_s4 = lpfc_sli4_calc_ring(phba, iocb);
		if (pring_s4 == NULL) {
			ret = FAILED;
			goto out_unlock;
		}
		/* Note: both hbalock and ring_lock must be set here */
		spin_lock_irqsave(&pring_s4->ring_lock, iflags);
		ret_val = __lpfc_sli_issue_iocb(phba, pring_s4->ringno,
						abtsiocb, 0);
		spin_unlock_irqrestore(&pring_s4->ring_lock, iflags);
	} else {
		ret_val = __lpfc_sli_issue_iocb(phba, LPFC_FCP_RING,
						abtsiocb, 0);
	}
	/* no longer need the lock after this point */
	spin_unlock_irqrestore(&phba->hbalock, flags);

	if (ret_val == IOCB_ERROR) {
		lpfc_sli_release_iocbq(phba, abtsiocb);
		ret = FAILED;
		goto out;
	}

	if (phba->cfg_poll & DISABLE_FCP_RING_INT)
		lpfc_sli_handle_fast_ring_event(phba,
			&phba->sli.sli3_ring[LPFC_FCP_RING], HA_R0RE_REQ);

wait_for_cmpl:
	lpfc_cmd->waitq = &waitq;
	/* Wait for abort to complete */
	wait_event_timeout(waitq,
			   (lpfc_cmd->pCmd != cmnd),
			   msecs_to_jiffies(2*vport->cfg_devloss_tmo*1000));

	spin_lock_irqsave(shost->host_lock, flags);
	lpfc_cmd->waitq = NULL;
	spin_unlock_irqrestore(shost->host_lock, flags);

	if (lpfc_cmd->pCmd == cmnd) {
		ret = FAILED;
		lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
				 "0748 abort handler timed out waiting "
				 "for aborting I/O (xri:x%x) to complete: "
				 "ret %#x, ID %d, LUN %llu\n",
				 iocb->sli4_xritag, ret,
				 cmnd->device->id, cmnd->device->lun);
	}
	goto out;

out_unlock:
	spin_unlock_irqrestore(&phba->hbalock, flags);
out:
	lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP,
			 "0749 SCSI Layer I/O Abort Request Status x%x ID %d "
			 "LUN %llu\n", ret, cmnd->device->id,
			 cmnd->device->lun);
	return ret;
}
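/*
 * Illustrative sketch (not part of the driver): the handler above parks on
 * a wait queue until the completion path clears pCmd, bounded by twice the
 * devloss timeout. The same pattern in isolation:
 */
#if 0
static bool example_wait_for_abort(wait_queue_head_t *wq,
				   struct lpfc_scsi_buf *psb,
				   struct scsi_cmnd *cmnd,
				   unsigned int devloss_tmo_secs)
{
	wait_event_timeout(*wq, psb->pCmd != cmnd,
			   msecs_to_jiffies(2 * devloss_tmo_secs * 1000));
	return psb->pCmd != cmnd;	/* true when the abort completed */
}
#endif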
static char *
lpfc_taskmgmt_name(uint8_t task_mgmt_cmd)
{
	switch (task_mgmt_cmd) {
	case FCP_ABORT_TASK_SET:
		return "ABORT_TASK_SET";
	case FCP_CLEAR_TASK_SET:
		return "FCP_CLEAR_TASK_SET";
	case FCP_BUS_RESET:
		return "FCP_BUS_RESET";
	case FCP_LUN_RESET:
		return "FCP_LUN_RESET";
	case FCP_TARGET_RESET:
		return "FCP_TARGET_RESET";
	case FCP_CLEAR_ACA:
		return "FCP_CLEAR_ACA";
	case FCP_TERMINATE_TASK:
		return "FCP_TERMINATE_TASK";
	default:
		return "unknown";
	}
}
/**
 * lpfc_check_fcp_rsp - check the returned fcp_rsp to see if task failed
 * @vport: The virtual port for which this call is being executed.
 * @lpfc_cmd: Pointer to lpfc_scsi_buf data structure.
 *
 * This routine checks the FCP RSP INFO to see if the task management command
 * succeeded.
 *
 * Return code :
 *   0x2003 - Error
 *   0x2002 - Success
 **/
static int
lpfc_check_fcp_rsp(struct lpfc_vport *vport, struct lpfc_scsi_buf *lpfc_cmd)
{
	struct fcp_rsp *fcprsp = lpfc_cmd->fcp_rsp;
	uint32_t rsp_info;
	uint32_t rsp_len;
	uint8_t rsp_info_code;
	int ret = FAILED;

	if (fcprsp == NULL)
		lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
				 "0703 fcp_rsp is missing\n");
	else {
		rsp_info = fcprsp->rspStatus2;
		rsp_len = be32_to_cpu(fcprsp->rspRspLen);
		rsp_info_code = fcprsp->rspInfo3;

		lpfc_printf_vlog(vport, KERN_INFO,
				 LOG_FCP,
				 "0706 fcp_rsp valid 0x%x,"
				 " rsp len=%d code 0x%x\n",
				 rsp_info,
				 rsp_len, rsp_info_code);

		if ((fcprsp->rspStatus2&RSP_LEN_VALID) && (rsp_len == 8)) {
			switch (rsp_info_code) {
			case RSP_NO_FAILURE:
				lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
						 "0715 Task Mgmt No Failure\n");
				ret = SUCCESS;
				break;
			case RSP_TM_NOT_SUPPORTED: /* TM rejected */
				lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
						 "0716 Task Mgmt Target "
						 "reject\n");
				break;
			case RSP_TM_NOT_COMPLETED: /* TM failed */
				lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
						 "0717 Task Mgmt Target "
						 "failed TM\n");
				break;
			case RSP_TM_INVALID_LU: /* TM to invalid LU! */
				lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
						 "0718 Task Mgmt to invalid "
						 "LUN\n");
				break;
			}
		}
	}
	return ret;
}
/**
 * lpfc_send_taskmgmt - Generic SCSI Task Mgmt Handler
 * @vport: The virtual port for which this call is being executed.
 * @cmnd: Pointer to scsi_cmnd data structure.
 * @tgt_id: Target ID of remote device.
 * @lun_id: Lun number for the TMF
 * @task_mgmt_cmd: type of TMF to send
 *
 * This routine builds and sends a TMF (SCSI Task Mgmt Function) to
 * a remote port.
 *
 * Return Code:
 *   0x2003 - Error
 *   0x2002 - Success.
 **/
static int
lpfc_send_taskmgmt(struct lpfc_vport *vport, struct scsi_cmnd *cmnd,
		   unsigned int tgt_id, uint64_t lun_id,
		   uint8_t task_mgmt_cmd)
{
	struct lpfc_hba *phba = vport->phba;
	struct lpfc_scsi_buf *lpfc_cmd;
	struct lpfc_iocbq *iocbq;
	struct lpfc_iocbq *iocbqrsp;
	struct lpfc_rport_data *rdata;
	struct lpfc_nodelist *pnode;
	int ret;
	int status;

	rdata = lpfc_rport_data_from_scsi_device(cmnd->device);
	if (!rdata || !rdata->pnode || !NLP_CHK_NODE_ACT(rdata->pnode))
		return FAILED;
	pnode = rdata->pnode;

	lpfc_cmd = lpfc_get_scsi_buf(phba, pnode);
	if (lpfc_cmd == NULL)
		return FAILED;
	lpfc_cmd->timeout = phba->cfg_task_mgmt_tmo;
	lpfc_cmd->rdata = rdata;
	lpfc_cmd->pCmd = cmnd;

	status = lpfc_scsi_prep_task_mgmt_cmd(vport, lpfc_cmd, lun_id,
					      task_mgmt_cmd);
	if (!status) {
		lpfc_release_scsi_buf(phba, lpfc_cmd);
		return FAILED;
	}

	iocbq = &lpfc_cmd->cur_iocbq;
	iocbqrsp = lpfc_sli_get_iocbq(phba);
	if (iocbqrsp == NULL) {
		lpfc_release_scsi_buf(phba, lpfc_cmd);
		return FAILED;
	}
	iocbq->iocb_cmpl = lpfc_tskmgmt_def_cmpl;

	lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
			 "0702 Issue %s to TGT %d LUN %llu "
			 "rpi x%x nlp_flag x%x Data: x%x x%x\n",
			 lpfc_taskmgmt_name(task_mgmt_cmd), tgt_id, lun_id,
			 pnode->nlp_rpi, pnode->nlp_flag, iocbq->sli4_xritag,
			 iocbq->iocb_flag);

	status = lpfc_sli_issue_iocb_wait(phba, LPFC_FCP_RING,
					  iocbq, iocbqrsp, lpfc_cmd->timeout);
	if ((status != IOCB_SUCCESS) ||
	    (iocbqrsp->iocb.ulpStatus != IOSTAT_SUCCESS)) {
		if (status != IOCB_SUCCESS ||
		    iocbqrsp->iocb.ulpStatus != IOSTAT_FCP_RSP_ERROR)
			lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
					 "0727 TMF %s to TGT %d LUN %llu "
					 "failed (%d, %d) iocb_flag x%x\n",
					 lpfc_taskmgmt_name(task_mgmt_cmd),
					 tgt_id, lun_id,
					 iocbqrsp->iocb.ulpStatus,
					 iocbqrsp->iocb.un.ulpWord[4],
					 iocbq->iocb_flag);
		/* if ulpStatus != IOCB_SUCCESS, then status == IOCB_SUCCESS */
		if (status == IOCB_SUCCESS) {
			if (iocbqrsp->iocb.ulpStatus == IOSTAT_FCP_RSP_ERROR)
				/* Something in the FCP_RSP was invalid.
				 * Check conditions */
				ret = lpfc_check_fcp_rsp(vport, lpfc_cmd);
			else
				ret = FAILED;
		} else if (status == IOCB_TIMEDOUT) {
			ret = TIMEOUT_ERROR;
		} else {
			ret = FAILED;
		}
	} else
		ret = SUCCESS;

	lpfc_sli_release_iocbq(phba, iocbqrsp);

	if (ret != TIMEOUT_ERROR)
		lpfc_release_scsi_buf(phba, lpfc_cmd);

	return ret;
}
/**
 * lpfc_chk_tgt_mapped -
 * @vport: The virtual port to check on
 * @cmnd: Pointer to scsi_cmnd data structure.
 *
 * This routine delays until the scsi target (aka rport) for the
 * command exists (is present and logged in) or we declare it non-existent.
 *
 * Return code :
 *   0x2003 - Error
 *   0x2002 - Success
 **/
static int
lpfc_chk_tgt_mapped(struct lpfc_vport *vport, struct scsi_cmnd *cmnd)
{
	struct lpfc_rport_data *rdata;
	struct lpfc_nodelist *pnode;
	unsigned long later;

	rdata = lpfc_rport_data_from_scsi_device(cmnd->device);
	if (!rdata) {
		lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
			"0797 Tgt Map rport failure: rdata x%p\n", rdata);
		return FAILED;
	}
	pnode = rdata->pnode;
	/*
	 * If target is not in a MAPPED state, delay until
	 * target is rediscovered or devloss timeout expires.
	 */
	later = msecs_to_jiffies(2 * vport->cfg_devloss_tmo * 1000) + jiffies;
	while (time_after(later, jiffies)) {
		if (!pnode || !NLP_CHK_NODE_ACT(pnode))
			return FAILED;
		if (pnode->nlp_state == NLP_STE_MAPPED_NODE)
			return SUCCESS;
		schedule_timeout_uninterruptible(msecs_to_jiffies(500));
		rdata = lpfc_rport_data_from_scsi_device(cmnd->device);
		if (!rdata)
			return FAILED;
		pnode = rdata->pnode;
	}
	if (!pnode || !NLP_CHK_NODE_ACT(pnode) ||
	    (pnode->nlp_state != NLP_STE_MAPPED_NODE))
		return FAILED;
	return SUCCESS;
}
/**
 * lpfc_reset_flush_io_context -
 * @vport: The virtual port (scsi_host) for the flush context
 * @tgt_id: If aborting by Target context - specifies the target id
 * @lun_id: If aborting by Lun context - specifies the lun id
 * @context: specifies the context level to flush at.
 *
 * After a reset condition via TMF, we need to flush orphaned i/o
 * contexts from the adapter. This routine aborts any contexts
 * outstanding, then waits for their completions. The wait is
 * bounded by devloss_tmo though.
 *
 * Return code :
 *   0x2003 - Error
 *   0x2002 - Success
 **/
static int
lpfc_reset_flush_io_context(struct lpfc_vport *vport, uint16_t tgt_id,
			    uint64_t lun_id, lpfc_ctx_cmd context)
{
	struct lpfc_hba *phba = vport->phba;
	unsigned long later;
	int cnt;

	cnt = lpfc_sli_sum_iocb(vport, tgt_id, lun_id, context);
	if (cnt)
		lpfc_sli_abort_taskmgmt(vport,
					&phba->sli.sli3_ring[LPFC_FCP_RING],
					tgt_id, lun_id, context);
	later = msecs_to_jiffies(2 * vport->cfg_devloss_tmo * 1000) + jiffies;
	while (time_after(later, jiffies) && cnt) {
		schedule_timeout_uninterruptible(msecs_to_jiffies(20));
		cnt = lpfc_sli_sum_iocb(vport, tgt_id, lun_id, context);
	}
	if (cnt) {
		lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
			"0724 I/O flush failure for context %s : cnt x%x\n",
			((context == LPFC_CTX_LUN) ? "LUN" :
			 ((context == LPFC_CTX_TGT) ? "TGT" :
			  ((context == LPFC_CTX_HOST) ? "HOST" : "Unknown"))),
			cnt);
		return FAILED;
	}
	return SUCCESS;
}
/**
 * lpfc_device_reset_handler - scsi_host_template eh_device_reset entry point
 * @cmnd: Pointer to scsi_cmnd data structure.
 *
 * This routine does a device reset by sending a LUN_RESET task management
 * command.
 *
 * Return code :
 *   0x2003 - Error
 *   0x2002 - Success
 **/
static int
lpfc_device_reset_handler(struct scsi_cmnd *cmnd)
{
	struct Scsi_Host *shost = cmnd->device->host;
	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
	struct lpfc_rport_data *rdata;
	struct lpfc_nodelist *pnode;
	unsigned tgt_id = cmnd->device->id;
	uint64_t lun_id = cmnd->device->lun;
	struct lpfc_scsi_event_header scsi_event;
	int status;

	rdata = lpfc_rport_data_from_scsi_device(cmnd->device);
	if (!rdata || !rdata->pnode) {
		lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
				 "0798 Device Reset rport failure: rdata x%p\n",
				 rdata);
		return FAILED;
	}
	pnode = rdata->pnode;
	status = fc_block_scsi_eh(cmnd);
	if (status != 0 && status != SUCCESS)
		return status;

	status = lpfc_chk_tgt_mapped(vport, cmnd);
	if (status == FAILED) {
		lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
			"0721 Device Reset rport failure: rdata x%p\n", rdata);
		return FAILED;
	}

	scsi_event.event_type = FC_REG_SCSI_EVENT;
	scsi_event.subcategory = LPFC_EVENT_LUNRESET;
	scsi_event.lun = lun_id;
	memcpy(scsi_event.wwpn, &pnode->nlp_portname, sizeof(struct lpfc_name));
	memcpy(scsi_event.wwnn, &pnode->nlp_nodename, sizeof(struct lpfc_name));

	fc_host_post_vendor_event(shost, fc_get_event_number(),
		sizeof(scsi_event), (char *)&scsi_event, LPFC_NL_VENDOR_ID);

	status = lpfc_send_taskmgmt(vport, cmnd, tgt_id, lun_id,
				    FCP_LUN_RESET);

	lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
			 "0713 SCSI layer issued Device Reset (%d, %llu) "
			 "return x%x\n", tgt_id, lun_id, status);

	/*
	 * We have to clean up i/o as : they may be orphaned by the TMF;
	 * or if the TMF failed, they may be in an indeterminate state.
	 * So, continue on.
	 * We will report success if all the i/o aborts successfully.
	 */
	if (status == SUCCESS)
		status = lpfc_reset_flush_io_context(vport, tgt_id, lun_id,
						     LPFC_CTX_LUN);

	return status;
}
/**
 * lpfc_target_reset_handler - scsi_host_template eh_target_reset entry point
 * @cmnd: Pointer to scsi_cmnd data structure.
 *
 * This routine does a target reset by sending a TARGET_RESET task management
 * command.
 *
 * Return code :
 *   0x2003 - Error
 *   0x2002 - Success
 **/
static int
lpfc_target_reset_handler(struct scsi_cmnd *cmnd)
{
	struct Scsi_Host *shost = cmnd->device->host;
	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
	struct lpfc_rport_data *rdata;
	struct lpfc_nodelist *pnode;
	unsigned tgt_id = cmnd->device->id;
	uint64_t lun_id = cmnd->device->lun;
	struct lpfc_scsi_event_header scsi_event;
	int status;

	rdata = lpfc_rport_data_from_scsi_device(cmnd->device);
	if (!rdata) {
		lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
			"0799 Target Reset rport failure: rdata x%p\n", rdata);
		return FAILED;
	}
	pnode = rdata->pnode;
	status = fc_block_scsi_eh(cmnd);
	if (status != 0 && status != SUCCESS)
		return status;

	status = lpfc_chk_tgt_mapped(vport, cmnd);
	if (status == FAILED) {
		lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
			"0722 Target Reset rport failure: rdata x%p\n", rdata);
		if (pnode) {
			spin_lock_irq(shost->host_lock);
			pnode->nlp_flag &= ~NLP_NPR_ADISC;
			pnode->nlp_fcp_info &= ~NLP_FCP_2_DEVICE;
			spin_unlock_irq(shost->host_lock);
		}
		lpfc_reset_flush_io_context(vport, tgt_id, lun_id,
					    LPFC_CTX_TGT);
		return FAST_IO_FAIL;
	}

	scsi_event.event_type = FC_REG_SCSI_EVENT;
	scsi_event.subcategory = LPFC_EVENT_TGTRESET;
	scsi_event.lun = 0;
	memcpy(scsi_event.wwpn, &pnode->nlp_portname, sizeof(struct lpfc_name));
	memcpy(scsi_event.wwnn, &pnode->nlp_nodename, sizeof(struct lpfc_name));

	fc_host_post_vendor_event(shost, fc_get_event_number(),
		sizeof(scsi_event), (char *)&scsi_event, LPFC_NL_VENDOR_ID);

	status = lpfc_send_taskmgmt(vport, cmnd, tgt_id, lun_id,
				    FCP_TARGET_RESET);

	lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
			 "0723 SCSI layer issued Target Reset (%d, %llu) "
			 "return x%x\n", tgt_id, lun_id, status);

	/*
	 * We have to clean up i/o as : they may be orphaned by the TMF;
	 * or if the TMF failed, they may be in an indeterminate state.
	 * So, continue on.
	 * We will report success if all the i/o aborts successfully.
	 */
	if (status == SUCCESS)
		status = lpfc_reset_flush_io_context(vport, tgt_id, lun_id,
						     LPFC_CTX_TGT);
	return status;
}
/**
 * lpfc_bus_reset_handler - scsi_host_template eh_bus_reset_handler entry point
 * @cmnd: Pointer to scsi_cmnd data structure.
 *
 * This routine does target reset to all targets on @cmnd->device->host.
 * This emulates Parallel SCSI Bus Reset Semantics.
 *
 * Return code :
 *   0x2003 - Error
 *   0x2002 - Success
 **/
static int
lpfc_bus_reset_handler(struct scsi_cmnd *cmnd)
{
	struct Scsi_Host *shost = cmnd->device->host;
	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
	struct lpfc_nodelist *ndlp = NULL;
	struct lpfc_scsi_event_header scsi_event;
	int match;
	int ret = SUCCESS, status, i;

	scsi_event.event_type = FC_REG_SCSI_EVENT;
	scsi_event.subcategory = LPFC_EVENT_BUSRESET;
	scsi_event.lun = 0;
	memcpy(scsi_event.wwpn, &vport->fc_portname, sizeof(struct lpfc_name));
	memcpy(scsi_event.wwnn, &vport->fc_nodename, sizeof(struct lpfc_name));

	fc_host_post_vendor_event(shost, fc_get_event_number(),
		sizeof(scsi_event), (char *)&scsi_event, LPFC_NL_VENDOR_ID);

	status = fc_block_scsi_eh(cmnd);
	if (status != 0 && status != SUCCESS)
		return status;

	/*
	 * Since the driver manages a single bus device, reset all
	 * targets known to the driver. Should any target reset
	 * fail, this routine returns failure to the midlayer.
	 */
	for (i = 0; i < LPFC_MAX_TARGET; i++) {
		/* Search for mapped node by target ID */
		match = 0;
		spin_lock_irq(shost->host_lock);
		list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) {
			if (!NLP_CHK_NODE_ACT(ndlp))
				continue;
			if (vport->phba->cfg_fcp2_no_tgt_reset &&
			    (ndlp->nlp_fcp_info & NLP_FCP_2_DEVICE))
				continue;
			if (ndlp->nlp_state == NLP_STE_MAPPED_NODE &&
			    ndlp->nlp_sid == i &&
			    ndlp->rport &&
			    ndlp->nlp_type & NLP_FCP_TARGET) {
				match = 1;
				break;
			}
		}
		spin_unlock_irq(shost->host_lock);
		if (!match)
			continue;

		status = lpfc_send_taskmgmt(vport, cmnd,
					    i, 0, FCP_TARGET_RESET);

		if (status != SUCCESS) {
			lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
					 "0700 Bus Reset on target %d failed\n",
					 i);
			ret = FAILED;
		}
	}
	/*
	 * We have to clean up i/o as : they may be orphaned by the TMFs
	 * above; or if any of the TMFs failed, they may be in an
	 * indeterminate state.
	 * We will report success if all the i/o aborts successfully.
	 */

	status = lpfc_reset_flush_io_context(vport, 0, 0, LPFC_CTX_HOST);
	if (status != SUCCESS)
		ret = FAILED;

	lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
			 "0714 SCSI layer issued Bus Reset Data: x%x\n", ret);
	return ret;
}
/**
 * lpfc_host_reset_handler - scsi_host_template eh_host_reset_handler entry pt
 * @cmnd: Pointer to scsi_cmnd data structure.
 *
 * This routine does a host reset of the adapter port. It brings the HBA
 * offline, performs a board restart, and then brings the board back online.
 * The lpfc_offline calls lpfc_sli_hba_down, which will abort and locally
 * reject all outstanding SCSI commands to the host, with the errors returned
 * back to the SCSI mid-level. As this is the SCSI mid-level's last resort
 * of error handling, it will only return an error if resetting the adapter
 * is not successful; in all other cases it will return success.
 *
 * Return code :
 *   0x2003 - Error
 *   0x2002 - Success
 **/
static int
lpfc_host_reset_handler(struct scsi_cmnd *cmnd)
{
	struct Scsi_Host *shost = cmnd->device->host;
	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
	struct lpfc_hba *phba = vport->phba;
	int rc, ret = SUCCESS;

	lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
			 "3172 SCSI layer issued Host Reset Data:\n");

	lpfc_offline_prep(phba, LPFC_MBX_WAIT);
	lpfc_offline(phba);
	rc = lpfc_sli_brdrestart(phba);
	if (rc)
		ret = FAILED;
	rc = lpfc_online(phba);
	if (rc)
		ret = FAILED;
	lpfc_unblock_mgmt_io(phba);

	if (ret == FAILED) {
		lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
				 "3323 Failed host reset, bring it offline\n");
		lpfc_sli4_offline_eratt(phba);
	}
	return ret;
}
/**
 * lpfc_slave_alloc - scsi_host_template slave_alloc entry point
 * @sdev: Pointer to scsi_device.
 *
 * This routine populates cmds_per_lun count + 2 scsi_bufs into this host's
 * globally available list of scsi buffers. It also makes sure that no more
 * scsi buffers are allocated than the HBA limit conveyed to the midlayer.
 * This list of scsi buffers exists for the lifetime of the driver.
 *
 * Return codes:
 *   non-0 - Error
 *   0 - Success
 **/
static int
lpfc_slave_alloc(struct scsi_device *sdev)
{
	struct lpfc_vport *vport = (struct lpfc_vport *) sdev->host->hostdata;
	struct lpfc_hba *phba = vport->phba;
	struct fc_rport *rport = starget_to_rport(scsi_target(sdev));
	uint32_t total = 0;
	uint32_t num_to_alloc = 0;
	int num_allocated = 0;
	uint32_t sdev_cnt;
	struct lpfc_device_data *device_data;
	unsigned long flags;
	struct lpfc_name target_wwpn;

	if (!rport || fc_remote_port_chkready(rport))
		return -ENXIO;

	if (phba->cfg_fof) {

		/*
		 * Check to see if the device data structure for the lun
		 * exists. If not, create one.
		 */

		u64_to_wwn(rport->port_name, target_wwpn.u.wwn);
		spin_lock_irqsave(&phba->devicelock, flags);
		device_data = __lpfc_get_device_data(phba,
						     &phba->luns,
						     &vport->fc_portname,
						     &target_wwpn,
						     sdev->lun);
		if (!device_data) {
			spin_unlock_irqrestore(&phba->devicelock, flags);
			device_data = lpfc_create_device_data(phba,
							&vport->fc_portname,
							&target_wwpn,
							sdev->lun,
							phba->cfg_XLanePriority,
							true);
			if (!device_data)
				return -ENOMEM;
			spin_lock_irqsave(&phba->devicelock, flags);
			list_add_tail(&device_data->listentry, &phba->luns);
		}
		device_data->rport_data = rport->dd_data;
		device_data->available = true;
		spin_unlock_irqrestore(&phba->devicelock, flags);
		sdev->hostdata = device_data;
	} else {
		sdev->hostdata = rport->dd_data;
	}
	sdev_cnt = atomic_inc_return(&phba->sdev_cnt);

	/*
	 * Populate the cmds_per_lun count scsi_bufs into this host's globally
	 * available list of scsi buffers. Don't allocate more than the
	 * HBA limit conveyed to the midlayer via the host structure. The
	 * formula accounts for the lun_queue_depth + error handlers + 1
	 * extra. This list of scsi bufs exists for the lifetime of the driver.
	 */
	total = phba->total_scsi_bufs;
	num_to_alloc = vport->cfg_lun_queue_depth + 2;

	/* If allocated buffers are enough do nothing */
	if ((sdev_cnt * (vport->cfg_lun_queue_depth + 2)) < total)
		return 0;

	/* Allow some exchanges to be available always to complete discovery */
	if (total >= phba->cfg_hba_queue_depth - LPFC_DISC_IOCB_BUFF_COUNT) {
		lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP,
				 "0704 At limitation of %d preallocated "
				 "command buffers\n", total);
		return 0;
	/* Allow some exchanges to be available always to complete discovery */
	} else if (total + num_to_alloc >
		phba->cfg_hba_queue_depth - LPFC_DISC_IOCB_BUFF_COUNT) {
		lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP,
				 "0705 Allocation request of %d "
				 "command buffers will exceed max of %d. "
				 "Reducing allocation request to %d.\n",
				 num_to_alloc, phba->cfg_hba_queue_depth,
				 (phba->cfg_hba_queue_depth - total));
		num_to_alloc = phba->cfg_hba_queue_depth - total;
	}
	num_allocated = lpfc_new_scsi_buf(vport, num_to_alloc);
	if (num_to_alloc != num_allocated) {
		lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
				 "0708 Allocation request of %d "
				 "command buffers did not succeed. "
				 "Allocated %d buffers.\n",
				 num_to_alloc, num_allocated);
	}
	if (num_allocated > 0)
		phba->total_scsi_bufs += num_allocated;
	return 0;
}
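/*
 * Illustrative sketch (not part of the driver): with the formula above, a
 * host with lun_queue_depth = 30 asks for 32 buffers per device, and the
 * pool stops growing once enough buffers exist or the HBA queue depth
 * (less the discovery reserve) would be exceeded. Parameter names here are
 * hypothetical.
 */
#if 0
static uint32_t example_num_to_alloc(uint32_t lun_qdepth, uint32_t total,
				     uint32_t hba_qdepth, uint32_t reserve)
{
	uint32_t want = lun_qdepth + 2;

	if (total >= hba_qdepth - reserve)
		return 0;			/* already at the cap */
	if (total + want > hba_qdepth - reserve)
		want = hba_qdepth - total;	/* clamp to what is left */
	return want;
}
#endif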
/**
 * lpfc_slave_configure - scsi_host_template slave_configure entry point
 * @sdev: Pointer to scsi_device.
 *
 * This routine configures the following items
 *   - Tag command queuing support for @sdev if supported.
 *   - Enable SLI polling for fcp ring if ENABLE_FCP_RING_POLLING flag is set.
 *
 * Return codes:
 *   0 - Success
 **/
static int
lpfc_slave_configure(struct scsi_device *sdev)
{
	struct lpfc_vport *vport = (struct lpfc_vport *) sdev->host->hostdata;
	struct lpfc_hba *phba = vport->phba;

	scsi_change_queue_depth(sdev, vport->cfg_lun_queue_depth);

	if (phba->cfg_poll & ENABLE_FCP_RING_POLLING) {
		lpfc_sli_handle_fast_ring_event(phba,
			&phba->sli.sli3_ring[LPFC_FCP_RING], HA_R0RE_REQ);
		if (phba->cfg_poll & DISABLE_FCP_RING_INT)
			lpfc_poll_rearm_timer(phba);
	}

	return 0;
}
/**
 * lpfc_slave_destroy - slave_destroy entry point of SHT data structure
 * @sdev: Pointer to scsi_device.
 *
 * This routine sets the @sdev hostdata field to NULL.
 **/
static void
lpfc_slave_destroy(struct scsi_device *sdev)
{
	struct lpfc_vport *vport = (struct lpfc_vport *) sdev->host->hostdata;
	struct lpfc_hba *phba = vport->phba;
	unsigned long flags;
	struct lpfc_device_data *device_data = sdev->hostdata;

	atomic_dec(&phba->sdev_cnt);
	if ((phba->cfg_fof) && (device_data)) {
		spin_lock_irqsave(&phba->devicelock, flags);
		device_data->available = false;
		if (!device_data->oas_enabled)
			lpfc_delete_device_data(phba, device_data);
		spin_unlock_irqrestore(&phba->devicelock, flags);
	}
	sdev->hostdata = NULL;
	return;
}
/**
 * lpfc_create_device_data - creates and initializes device data structure for OAS
 * @phba: Pointer to host bus adapter structure.
 * @vport_wwpn: Pointer to vport's wwpn information
 * @target_wwpn: Pointer to target's wwpn information
 * @lun: Lun on target
 * @atomic_create: Flag to indicate if memory should be allocated using the
 *		   GFP_ATOMIC flag or not.
 *
 * This routine creates a device data structure which will contain identifying
 * information for the device (host wwpn, target wwpn, lun), the state of OAS,
 * whether or not the corresponding lun is available to the system,
 * and a pointer to the rport data.
 *
 * Return codes:
 *   NULL - Error
 *   Pointer to lpfc_device_data - Success
 **/
struct lpfc_device_data *
lpfc_create_device_data(struct lpfc_hba *phba, struct lpfc_name *vport_wwpn,
			struct lpfc_name *target_wwpn, uint64_t lun,
			uint32_t pri, bool atomic_create)
{

	struct lpfc_device_data *lun_info;
	int memory_flags;

	if (unlikely(!phba) || !vport_wwpn || !target_wwpn ||
	    !(phba->cfg_fof))
		return NULL;

	/* Attempt to create the device data to contain lun info */

	if (atomic_create)
		memory_flags = GFP_ATOMIC;
	else
		memory_flags = GFP_KERNEL;
	lun_info = mempool_alloc(phba->device_data_mem_pool, memory_flags);
	if (!lun_info)
		return NULL;
	INIT_LIST_HEAD(&lun_info->listentry);
	lun_info->rport_data = NULL;
	memcpy(&lun_info->device_id.vport_wwpn, vport_wwpn,
	       sizeof(struct lpfc_name));
	memcpy(&lun_info->device_id.target_wwpn, target_wwpn,
	       sizeof(struct lpfc_name));
	lun_info->device_id.lun = lun;
	lun_info->oas_enabled = false;
	lun_info->priority = pri;
	lun_info->available = false;
	return lun_info;
}
/**
 * lpfc_delete_device_data - frees a device data structure for OAS
 * @phba: Pointer to host bus adapter structure.
 * @lun_info: Pointer to device data structure to free.
 *
 * This routine frees the previously allocated device data structure passed.
 *
 **/
void
lpfc_delete_device_data(struct lpfc_hba *phba,
			struct lpfc_device_data *lun_info)
{

	if (unlikely(!phba) || !lun_info ||
	    !(phba->cfg_fof))
		return;

	if (!list_empty(&lun_info->listentry))
		list_del(&lun_info->listentry);
	mempool_free(lun_info, phba->device_data_mem_pool);
	return;
}
/**
 * __lpfc_get_device_data - returns the device data for the specified lun
 * @phba: Pointer to host bus adapter structure.
 * @list: Pointer to list to search.
 * @vport_wwpn: Pointer to vport's wwpn information
 * @target_wwpn: Pointer to target's wwpn information
 * @lun: Lun on target
 *
 * This routine searches the list passed for the specified lun's device data.
 * This function does not hold locks, it is the responsibility of the caller
 * to ensure the proper lock is held before calling the function.
 *
 * Return codes:
 *   NULL - Error
 *   Pointer to lpfc_device_data - Success
 **/
struct lpfc_device_data *
__lpfc_get_device_data(struct lpfc_hba *phba, struct list_head *list,
		       struct lpfc_name *vport_wwpn,
		       struct lpfc_name *target_wwpn, uint64_t lun)
{

	struct lpfc_device_data *lun_info;

	if (unlikely(!phba) || !list || !vport_wwpn || !target_wwpn ||
	    !phba->cfg_fof)
		return NULL;

	/* Check to see if the lun is already enabled for OAS. */

	list_for_each_entry(lun_info, list, listentry) {
		if ((memcmp(&lun_info->device_id.vport_wwpn, vport_wwpn,
			    sizeof(struct lpfc_name)) == 0) &&
		    (memcmp(&lun_info->device_id.target_wwpn, target_wwpn,
			    sizeof(struct lpfc_name)) == 0) &&
		    (lun_info->device_id.lun == lun))
			return lun_info;
	}

	return NULL;
}
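/*
 * Illustrative sketch (not part of the driver): because the lookup takes no
 * locks itself, callers bracket it with the devicelock, as the OAS
 * enable/disable paths below do. Note the returned pointer is only
 * guaranteed stable while the lock is held.
 */
#if 0
static struct lpfc_device_data *
example_locked_lookup(struct lpfc_hba *phba, struct lpfc_name *vwwpn,
		      struct lpfc_name *twwpn, uint64_t lun)
{
	struct lpfc_device_data *lun_info;
	unsigned long flags;

	spin_lock_irqsave(&phba->devicelock, flags);
	lun_info = __lpfc_get_device_data(phba, &phba->luns, vwwpn,
					  twwpn, lun);
	/* ... use lun_info while still holding the lock ... */
	spin_unlock_irqrestore(&phba->devicelock, flags);
	return lun_info;
}
#endif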
/**
 * lpfc_find_next_oas_lun - searches for the next oas lun
 * @phba: Pointer to host bus adapter structure.
 * @vport_wwpn: Pointer to vport's wwpn information
 * @target_wwpn: Pointer to target's wwpn information
 * @starting_lun: Pointer to the lun to start searching for
 * @found_vport_wwpn: Pointer to the found lun's vport wwpn information
 * @found_target_wwpn: Pointer to the found lun's target wwpn information
 * @found_lun: Pointer to the found lun.
 * @found_lun_status: Pointer to status of the found lun.
 * @found_lun_pri: Pointer to the found lun's priority.
 *
 * This routine searches the luns list for the specified lun
 * or the first lun for the vport/target. If the vport wwpn contains
 * a zero value then a specific vport is not specified. In this case
 * any vport which contains the lun will be considered a match. If the
 * target wwpn contains a zero value then a specific target is not specified.
 * In this case any target which contains the lun will be considered a
 * match. If the lun is found, the lun, vport wwpn, target wwpn and lun status
 * are returned. The function will also return the next lun if available.
 * If the next lun is not found, the starting_lun parameter will be set to
 * NO_MORE_OAS_LUN.
 *
 * Return codes:
 *   non-0 - Error
 *   0 - Success
 **/
bool
lpfc_find_next_oas_lun(struct lpfc_hba *phba, struct lpfc_name *vport_wwpn,
		       struct lpfc_name *target_wwpn, uint64_t *starting_lun,
		       struct lpfc_name *found_vport_wwpn,
		       struct lpfc_name *found_target_wwpn,
		       uint64_t *found_lun,
		       uint32_t *found_lun_status,
		       uint32_t *found_lun_pri)
{

	unsigned long flags;
	struct lpfc_device_data *lun_info;
	struct lpfc_device_id *device_id;
	uint64_t lun;
	bool found = false;

	if (unlikely(!phba) || !vport_wwpn || !target_wwpn ||
	    !starting_lun || !found_vport_wwpn ||
	    !found_target_wwpn || !found_lun || !found_lun_status ||
	    (*starting_lun == NO_MORE_OAS_LUN) ||
	    !phba->cfg_fof)
		return false;

	lun = *starting_lun;
	*found_lun = NO_MORE_OAS_LUN;
	*starting_lun = NO_MORE_OAS_LUN;

	/* Search for lun or the lun closest in value */

	spin_lock_irqsave(&phba->devicelock, flags);
	list_for_each_entry(lun_info, &phba->luns, listentry) {
		if (((wwn_to_u64(vport_wwpn->u.wwn) == 0) ||
		     (memcmp(&lun_info->device_id.vport_wwpn, vport_wwpn,
			     sizeof(struct lpfc_name)) == 0)) &&
		    ((wwn_to_u64(target_wwpn->u.wwn) == 0) ||
		     (memcmp(&lun_info->device_id.target_wwpn, target_wwpn,
			     sizeof(struct lpfc_name)) == 0)) &&
		    (lun_info->oas_enabled)) {
			device_id = &lun_info->device_id;
			if ((!found) &&
			    ((lun == FIND_FIRST_OAS_LUN) ||
			     (device_id->lun == lun))) {
				*found_lun = device_id->lun;
				memcpy(found_vport_wwpn,
				       &device_id->vport_wwpn,
				       sizeof(struct lpfc_name));
				memcpy(found_target_wwpn,
				       &device_id->target_wwpn,
				       sizeof(struct lpfc_name));
				if (lun_info->available)
					*found_lun_status =
						OAS_LUN_STATUS_EXISTS;
				else
					*found_lun_status = 0;
				*found_lun_pri = lun_info->priority;
				if (phba->cfg_oas_flags & OAS_FIND_ANY_VPORT)
					memset(vport_wwpn, 0x0,
					       sizeof(struct lpfc_name));
				if (phba->cfg_oas_flags & OAS_FIND_ANY_TARGET)
					memset(target_wwpn, 0x0,
					       sizeof(struct lpfc_name));
				found = true;
			} else if (found) {
				*starting_lun = device_id->lun;
				memcpy(vport_wwpn, &device_id->vport_wwpn,
				       sizeof(struct lpfc_name));
				memcpy(target_wwpn, &device_id->target_wwpn,
				       sizeof(struct lpfc_name));
				break;
			}
		}
	}
	spin_unlock_irqrestore(&phba->devicelock, flags);
	return found;
}
/**
 * lpfc_enable_oas_lun - enables a lun for OAS operations
 * @phba: Pointer to host bus adapter structure.
 * @vport_wwpn: Pointer to vport's wwpn information
 * @target_wwpn: Pointer to target's wwpn information
 * @lun: Lun
 * @pri: Priority
 *
 * This routine enables a lun for oas operations. The routine does so by
 * doing the following :
 *
 *   1) Checks to see if the device data for the lun has been created.
 *   2) If found, sets the OAS enabled flag if not set and returns.
 *   3) Otherwise, creates a device data structure.
 *   4) If successfully created, indicates the device data is for an OAS lun,
 *   indicates the lun is not available and adds it to the list of luns.
 *
 * Return codes:
 *   false - Error
 *   true - Success
 **/
bool
lpfc_enable_oas_lun(struct lpfc_hba *phba, struct lpfc_name *vport_wwpn,
		    struct lpfc_name *target_wwpn, uint64_t lun, uint8_t pri)
{

	struct lpfc_device_data *lun_info;
	unsigned long flags;

	if (unlikely(!phba) || !vport_wwpn || !target_wwpn ||
	    !phba->cfg_fof)
		return false;

	spin_lock_irqsave(&phba->devicelock, flags);

	/* Check to see if the device data for the lun has been created */
	lun_info = __lpfc_get_device_data(phba, &phba->luns, vport_wwpn,
					  target_wwpn, lun);
	if (lun_info) {
		if (!lun_info->oas_enabled)
			lun_info->oas_enabled = true;
		lun_info->priority = pri;
		spin_unlock_irqrestore(&phba->devicelock, flags);
		return true;
	}

	/* Create an lun info structure and add to list of luns */
	lun_info = lpfc_create_device_data(phba, vport_wwpn, target_wwpn, lun,
					   pri, true);
	if (lun_info) {
		lun_info->oas_enabled = true;
		lun_info->priority = pri;
		lun_info->available = false;
		list_add_tail(&lun_info->listentry, &phba->luns);
		spin_unlock_irqrestore(&phba->devicelock, flags);
		return true;
	}
	spin_unlock_irqrestore(&phba->devicelock, flags);
	return false;
}
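/*
 * Illustrative sketch (not part of the driver): a caller enabling OAS for
 * one lun and rolling it back on a later failure might look like this.
 * The lun value is hypothetical; the wwpn pointers come from the caller.
 */
#if 0
static bool example_oas_toggle(struct lpfc_hba *phba,
			       struct lpfc_name *vport_wwpn,
			       struct lpfc_name *target_wwpn)
{
	uint64_t lun = 1;	/* hypothetical lun */
	uint8_t pri = phba->cfg_XLanePriority;

	if (!lpfc_enable_oas_lun(phba, vport_wwpn, target_wwpn, lun, pri))
		return false;
	/* ... on a subsequent error, undo the enable ... */
	return lpfc_disable_oas_lun(phba, vport_wwpn, target_wwpn, lun, pri);
}
#endif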
/**
 * lpfc_disable_oas_lun - disables a lun for OAS operations
 * @phba: Pointer to host bus adapter structure.
 * @vport_wwpn: Pointer to vport's wwpn information
 * @target_wwpn: Pointer to target's wwpn information
 * @lun: Lun
 * @pri: Priority
 *
 * This routine disables a lun for oas operations. The routine does so by
 * doing the following :
 *
 *   1) Checks to see if the device data for the lun is created.
 *   2) If present, clears the flag indicating this lun is for OAS.
 *   3) If the lun is not available to the system, the device data is
 *   freed.
 *
 * Return codes:
 *   false - Error
 *   true - Success
 **/
bool
lpfc_disable_oas_lun(struct lpfc_hba *phba, struct lpfc_name *vport_wwpn,
		     struct lpfc_name *target_wwpn, uint64_t lun, uint8_t pri)
{

	struct lpfc_device_data *lun_info;
	unsigned long flags;

	if (unlikely(!phba) || !vport_wwpn || !target_wwpn ||
	    !phba->cfg_fof)
		return false;

	spin_lock_irqsave(&phba->devicelock, flags);

	/* Check to see if the lun is available. */
	lun_info = __lpfc_get_device_data(phba,
					  &phba->luns, vport_wwpn,
					  target_wwpn, lun);
	if (lun_info) {
		lun_info->oas_enabled = false;
		lun_info->priority = pri;
		if (!lun_info->available)
			lpfc_delete_device_data(phba, lun_info);
		spin_unlock_irqrestore(&phba->devicelock, flags);
		return true;
	}

	spin_unlock_irqrestore(&phba->devicelock, flags);
	return false;
}
static int
lpfc_no_command(struct Scsi_Host *shost, struct scsi_cmnd *cmnd)
{
	return SCSI_MLQUEUE_HOST_BUSY;
}

static int
lpfc_no_handler(struct scsi_cmnd *cmnd)
{
	return FAILED;
}

static int
lpfc_no_slave(struct scsi_device *sdev)
{
	return -ENODEV;
}
struct scsi_host_template lpfc_template_nvme = {
	.module			= THIS_MODULE,
	.name			= LPFC_DRIVER_NAME,
	.proc_name		= LPFC_DRIVER_NAME,
	.info			= lpfc_info,
	.queuecommand		= lpfc_no_command,
	.eh_abort_handler	= lpfc_no_handler,
	.eh_device_reset_handler = lpfc_no_handler,
	.eh_target_reset_handler = lpfc_no_handler,
	.eh_bus_reset_handler	= lpfc_no_handler,
	.eh_host_reset_handler	= lpfc_no_handler,
	.slave_alloc		= lpfc_no_slave,
	.slave_configure	= lpfc_no_slave,
	.scan_finished		= lpfc_scan_finished,
	.this_id		= -1,
	.sg_tablesize		= 1,
	.cmd_per_lun		= 1,
	.use_clustering		= ENABLE_CLUSTERING,
	.shost_attrs		= lpfc_hba_attrs,
	.max_sectors		= 0xFFFF,
	.vendor_id		= LPFC_NL_VENDOR_ID,
	.track_queue_depth	= 0,
};
struct scsi_host_template lpfc_template_s3 = {
	.module			= THIS_MODULE,
	.name			= LPFC_DRIVER_NAME,
	.proc_name		= LPFC_DRIVER_NAME,
	.info			= lpfc_info,
	.queuecommand		= lpfc_queuecommand,
	.eh_abort_handler	= lpfc_abort_handler,
	.eh_device_reset_handler = lpfc_device_reset_handler,
	.eh_target_reset_handler = lpfc_target_reset_handler,
	.eh_bus_reset_handler	= lpfc_bus_reset_handler,
	.slave_alloc		= lpfc_slave_alloc,
	.slave_configure	= lpfc_slave_configure,
	.slave_destroy		= lpfc_slave_destroy,
	.scan_finished		= lpfc_scan_finished,
	.this_id		= -1,
	.sg_tablesize		= LPFC_DEFAULT_SG_SEG_CNT,
	.cmd_per_lun		= LPFC_CMD_PER_LUN,
	.use_clustering		= ENABLE_CLUSTERING,
	.shost_attrs		= lpfc_hba_attrs,
	.max_sectors		= 0xFFFF,
	.vendor_id		= LPFC_NL_VENDOR_ID,
	.change_queue_depth	= scsi_change_queue_depth,
	.track_queue_depth	= 1,
};
struct scsi_host_template lpfc_template = {
	.module			= THIS_MODULE,
	.name			= LPFC_DRIVER_NAME,
	.proc_name		= LPFC_DRIVER_NAME,
	.info			= lpfc_info,
	.queuecommand		= lpfc_queuecommand,
	.eh_timed_out		= fc_eh_timed_out,
	.eh_abort_handler	= lpfc_abort_handler,
	.eh_device_reset_handler = lpfc_device_reset_handler,
	.eh_target_reset_handler = lpfc_target_reset_handler,
	.eh_bus_reset_handler	= lpfc_bus_reset_handler,
	.eh_host_reset_handler	= lpfc_host_reset_handler,
	.slave_alloc		= lpfc_slave_alloc,
	.slave_configure	= lpfc_slave_configure,
	.slave_destroy		= lpfc_slave_destroy,
	.scan_finished		= lpfc_scan_finished,
	.this_id		= -1,
	.sg_tablesize		= LPFC_DEFAULT_SG_SEG_CNT,
	.cmd_per_lun		= LPFC_CMD_PER_LUN,
	.use_clustering		= ENABLE_CLUSTERING,
	.shost_attrs		= lpfc_hba_attrs,
	.max_sectors		= 0xFFFF,
	.vendor_id		= LPFC_NL_VENDOR_ID,
	.change_queue_depth	= scsi_change_queue_depth,
	.track_queue_depth	= 1,
};
struct scsi_host_template lpfc_vport_template = {
	.module			= THIS_MODULE,
	.name			= LPFC_DRIVER_NAME,
	.proc_name		= LPFC_DRIVER_NAME,
	.info			= lpfc_info,
	.queuecommand		= lpfc_queuecommand,
	.eh_timed_out		= fc_eh_timed_out,
	.eh_abort_handler	= lpfc_abort_handler,
	.eh_device_reset_handler = lpfc_device_reset_handler,
	.eh_target_reset_handler = lpfc_target_reset_handler,
	.eh_bus_reset_handler	= lpfc_bus_reset_handler,
	.slave_alloc		= lpfc_slave_alloc,
	.slave_configure	= lpfc_slave_configure,
	.slave_destroy		= lpfc_slave_destroy,
	.scan_finished		= lpfc_scan_finished,
	.this_id		= -1,
	.sg_tablesize		= LPFC_DEFAULT_SG_SEG_CNT,
	.cmd_per_lun		= LPFC_CMD_PER_LUN,
	.use_clustering		= ENABLE_CLUSTERING,
	.shost_attrs		= lpfc_vport_attrs,
	.max_sectors		= 0xFFFF,
	.change_queue_depth	= scsi_change_queue_depth,
	.track_queue_depth	= 1,
};