2 * qla_target.c SCSI LLD infrastructure for QLogic 22xx/23xx/24xx/25xx
4 * based on qla2x00t.c code:
6 * Copyright (C) 2004 - 2010 Vladislav Bolkhovitin <vst@vlnb.net>
7 * Copyright (C) 2004 - 2005 Leonid Stoljar
8 * Copyright (C) 2006 Nathaniel Clark <nate@misrule.us>
9 * Copyright (C) 2006 - 2010 ID7 Ltd.
11 * Forward port and refactoring to modern qla2xxx and target/configfs
13 * Copyright (C) 2010-2011 Nicholas A. Bellinger <nab@kernel.org>
15 * This program is free software; you can redistribute it and/or
16 * modify it under the terms of the GNU General Public License
17 * as published by the Free Software Foundation, version 2
20 * This program is distributed in the hope that it will be useful,
21 * but WITHOUT ANY WARRANTY; without even the implied warranty of
22 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
23 * GNU General Public License for more details.
26 #include <linux/module.h>
27 #include <linux/init.h>
28 #include <linux/types.h>
29 #include <linux/version.h>
30 #include <linux/blkdev.h>
31 #include <linux/interrupt.h>
32 #include <linux/pci.h>
33 #include <linux/delay.h>
34 #include <linux/list.h>
35 #include <linux/workqueue.h>
36 #include <asm/unaligned.h>
37 #include <scsi/scsi.h>
38 #include <scsi/scsi_host.h>
39 #include <scsi/scsi_tcq.h>
40 #include <target/target_core_base.h>
41 #include <target/target_core_fabric.h>
44 #include "qla_target.h"
46 static char *qlini_mode
= QLA2XXX_INI_MODE_STR_ENABLED
;
47 module_param(qlini_mode
, charp
, S_IRUGO
);
48 MODULE_PARM_DESC(qlini_mode
,
49 "Determines when initiator mode will be enabled. Possible values: "
50 "\"exclusive\" - initiator mode will be enabled on load, "
51 "disabled on enabling target mode and then on disabling target mode "
53 "\"disabled\" - initiator mode will never be enabled; "
54 "\"enabled\" (default) - initiator mode will always stay enabled.");
56 static int ql2x_ini_mode
= QLA2XXX_INI_MODE_EXCLUSIVE
;
/*
 * From scsi/fc/fc_fcp.h
 * FCP_RSP_INFO response codes. FCP_TMF_CMPL is referenced by the ABTS
 * response path later in this file.
 * NOTE(review): FCP_TMF_REJECTED/FCP_TMF_FAILED restored per fc_fcp.h -
 * the extraction dropped those lines; verify against the kernel header.
 */
enum fcp_resp_rsp_codes {
    FCP_TMF_CMPL = 0,
    FCP_DATA_LEN_INVALID = 1,
    FCP_CMND_FIELDS_INVALID = 2,
    FCP_DATA_PARAM_MISMATCH = 3,
    FCP_TMF_REJECTED = 4,
    FCP_TMF_FAILED = 5,
    FCP_TMF_INVALID_LUN = 9,
};
/*
 * fc_pri_ta from scsi/fc/fc_fcp.h
 * FCP_CMND task attribute / priority field encoding.
 */
#define FCP_PTA_SIMPLE      0   /* simple task attribute */
#define FCP_PTA_HEADQ       1   /* head of queue task attribute */
#define FCP_PTA_ORDERED     2   /* ordered task attribute */
#define FCP_PTA_ACA         4   /* auto. contingent allegiance */
#define FCP_PTA_MASK        7   /* mask for task attribute field */
#define FCP_PRI_SHIFT       3   /* priority field starts in bit 3 */
#define FCP_PRI_RESVD_MASK  0x80 /* reserved bits in priority field */
/*
 * This driver calls qla2x00_alloc_iocbs() and qla2x00_issue_marker(), which
 * must be called under HW lock and could unlock/lock it inside.
 * It isn't an issue, since in the current implementation at the time when
 * those functions are called:
 *
 * - Either the context is IRQ and only the IRQ handler can modify HW data,
 *   including rings related fields,
 *
 * - Or access to target mode variables from struct qla_tgt doesn't
 *   cross those functions' boundaries, except tgt_stop, which is
 *   additionally protected by irq_cmd_count.
 */
95 /* Predefs for callbacks handed to qla2xxx LLD */
96 static void qlt_24xx_atio_pkt(struct scsi_qla_host
*ha
,
97 struct atio_from_isp
*pkt
);
98 static void qlt_response_pkt(struct scsi_qla_host
*ha
, response_t
*pkt
);
99 static int qlt_issue_task_mgmt(struct qla_tgt_sess
*sess
, uint32_t lun
,
100 int fn
, void *iocb
, int flags
);
101 static void qlt_send_term_exchange(struct scsi_qla_host
*ha
, struct qla_tgt_cmd
102 *cmd
, struct atio_from_isp
*atio
, int ha_locked
);
103 static void qlt_reject_free_srr_imm(struct scsi_qla_host
*ha
,
104 struct qla_tgt_srr_imm
*imm
, int ha_lock
);
108 static struct kmem_cache
*qla_tgt_cmd_cachep
;
109 static struct kmem_cache
*qla_tgt_mgmt_cmd_cachep
;
110 static mempool_t
*qla_tgt_mgmt_cmd_mempool
;
111 static struct workqueue_struct
*qla_tgt_wq
;
112 static DEFINE_MUTEX(qla_tgt_mutex
);
113 static LIST_HEAD(qla_tgt_glist
);
115 /* ha->hardware_lock supposed to be held on entry (to protect tgt->sess_list) */
116 static struct qla_tgt_sess
*qlt_find_sess_by_port_name(
118 const uint8_t *port_name
)
120 struct qla_tgt_sess
*sess
;
122 list_for_each_entry(sess
, &tgt
->sess_list
, sess_list_entry
) {
123 if (!memcmp(sess
->port_name
, port_name
, WWN_SIZE
))
130 /* Might release hw lock, then reaquire!! */
131 static inline int qlt_issue_marker(struct scsi_qla_host
*vha
, int vha_locked
)
133 /* Send marker if required */
134 if (unlikely(vha
->marker_needed
!= 0)) {
135 int rc
= qla2x00_issue_marker(vha
, vha_locked
);
136 if (rc
!= QLA_SUCCESS
) {
137 ql_dbg(ql_dbg_tgt
, vha
, 0xe03d,
138 "qla_target(%d): issue_marker() failed\n",
147 struct scsi_qla_host
*qlt_find_host_by_d_id(struct scsi_qla_host
*vha
,
150 struct qla_hw_data
*ha
= vha
->hw
;
153 if ((vha
->d_id
.b
.area
!= d_id
[1]) || (vha
->d_id
.b
.domain
!= d_id
[0]))
156 if (vha
->d_id
.b
.al_pa
== d_id
[2])
159 BUG_ON(ha
->tgt
.tgt_vp_map
== NULL
);
160 vp_idx
= ha
->tgt
.tgt_vp_map
[d_id
[2]].idx
;
161 if (likely(test_bit(vp_idx
, ha
->vp_idx_map
)))
162 return ha
->tgt
.tgt_vp_map
[vp_idx
].vha
;
168 struct scsi_qla_host
*qlt_find_host_by_vp_idx(struct scsi_qla_host
*vha
,
171 struct qla_hw_data
*ha
= vha
->hw
;
173 if (vha
->vp_idx
== vp_idx
)
176 BUG_ON(ha
->tgt
.tgt_vp_map
== NULL
);
177 if (likely(test_bit(vp_idx
, ha
->vp_idx_map
)))
178 return ha
->tgt
.tgt_vp_map
[vp_idx
].vha
;
183 void qlt_24xx_atio_pkt_all_vps(struct scsi_qla_host
*vha
,
184 struct atio_from_isp
*atio
)
186 switch (atio
->u
.raw
.entry_type
) {
189 struct scsi_qla_host
*host
= qlt_find_host_by_d_id(vha
,
190 atio
->u
.isp24
.fcp_hdr
.d_id
);
191 if (unlikely(NULL
== host
)) {
192 ql_dbg(ql_dbg_tgt
, vha
, 0xe03e,
193 "qla_target(%d): Received ATIO_TYPE7 "
194 "with unknown d_id %x:%x:%x\n", vha
->vp_idx
,
195 atio
->u
.isp24
.fcp_hdr
.d_id
[0],
196 atio
->u
.isp24
.fcp_hdr
.d_id
[1],
197 atio
->u
.isp24
.fcp_hdr
.d_id
[2]);
200 qlt_24xx_atio_pkt(host
, atio
);
204 case IMMED_NOTIFY_TYPE
:
206 struct scsi_qla_host
*host
= vha
;
207 struct imm_ntfy_from_isp
*entry
=
208 (struct imm_ntfy_from_isp
*)atio
;
210 if ((entry
->u
.isp24
.vp_index
!= 0xFF) &&
211 (entry
->u
.isp24
.nport_handle
!= 0xFFFF)) {
212 host
= qlt_find_host_by_vp_idx(vha
,
213 entry
->u
.isp24
.vp_index
);
214 if (unlikely(!host
)) {
215 ql_dbg(ql_dbg_tgt
, vha
, 0xe03f,
216 "qla_target(%d): Received "
217 "ATIO (IMMED_NOTIFY_TYPE) "
218 "with unknown vp_index %d\n",
219 vha
->vp_idx
, entry
->u
.isp24
.vp_index
);
223 qlt_24xx_atio_pkt(host
, atio
);
228 ql_dbg(ql_dbg_tgt
, vha
, 0xe040,
229 "qla_target(%d): Received unknown ATIO atio "
230 "type %x\n", vha
->vp_idx
, atio
->u
.raw
.entry_type
);
237 void qlt_response_pkt_all_vps(struct scsi_qla_host
*vha
, response_t
*pkt
)
239 switch (pkt
->entry_type
) {
242 struct ctio7_from_24xx
*entry
= (struct ctio7_from_24xx
*)pkt
;
243 struct scsi_qla_host
*host
= qlt_find_host_by_vp_idx(vha
,
245 if (unlikely(!host
)) {
246 ql_dbg(ql_dbg_tgt
, vha
, 0xe041,
247 "qla_target(%d): Response pkt (CTIO_TYPE7) "
248 "received, with unknown vp_index %d\n",
249 vha
->vp_idx
, entry
->vp_index
);
252 qlt_response_pkt(host
, pkt
);
256 case IMMED_NOTIFY_TYPE
:
258 struct scsi_qla_host
*host
= vha
;
259 struct imm_ntfy_from_isp
*entry
=
260 (struct imm_ntfy_from_isp
*)pkt
;
262 host
= qlt_find_host_by_vp_idx(vha
, entry
->u
.isp24
.vp_index
);
263 if (unlikely(!host
)) {
264 ql_dbg(ql_dbg_tgt
, vha
, 0xe042,
265 "qla_target(%d): Response pkt (IMMED_NOTIFY_TYPE) "
266 "received, with unknown vp_index %d\n",
267 vha
->vp_idx
, entry
->u
.isp24
.vp_index
);
270 qlt_response_pkt(host
, pkt
);
274 case NOTIFY_ACK_TYPE
:
276 struct scsi_qla_host
*host
= vha
;
277 struct nack_to_isp
*entry
= (struct nack_to_isp
*)pkt
;
279 if (0xFF != entry
->u
.isp24
.vp_index
) {
280 host
= qlt_find_host_by_vp_idx(vha
,
281 entry
->u
.isp24
.vp_index
);
282 if (unlikely(!host
)) {
283 ql_dbg(ql_dbg_tgt
, vha
, 0xe043,
284 "qla_target(%d): Response "
285 "pkt (NOTIFY_ACK_TYPE) "
286 "received, with unknown "
287 "vp_index %d\n", vha
->vp_idx
,
288 entry
->u
.isp24
.vp_index
);
292 qlt_response_pkt(host
, pkt
);
298 struct abts_recv_from_24xx
*entry
=
299 (struct abts_recv_from_24xx
*)pkt
;
300 struct scsi_qla_host
*host
= qlt_find_host_by_vp_idx(vha
,
302 if (unlikely(!host
)) {
303 ql_dbg(ql_dbg_tgt
, vha
, 0xe044,
304 "qla_target(%d): Response pkt "
305 "(ABTS_RECV_24XX) received, with unknown "
306 "vp_index %d\n", vha
->vp_idx
, entry
->vp_index
);
309 qlt_response_pkt(host
, pkt
);
315 struct abts_resp_to_24xx
*entry
=
316 (struct abts_resp_to_24xx
*)pkt
;
317 struct scsi_qla_host
*host
= qlt_find_host_by_vp_idx(vha
,
319 if (unlikely(!host
)) {
320 ql_dbg(ql_dbg_tgt
, vha
, 0xe045,
321 "qla_target(%d): Response pkt "
322 "(ABTS_RECV_24XX) received, with unknown "
323 "vp_index %d\n", vha
->vp_idx
, entry
->vp_index
);
326 qlt_response_pkt(host
, pkt
);
331 qlt_response_pkt(vha
, pkt
);
337 static void qlt_free_session_done(struct work_struct
*work
)
339 struct qla_tgt_sess
*sess
= container_of(work
, struct qla_tgt_sess
,
341 struct qla_tgt
*tgt
= sess
->tgt
;
342 struct scsi_qla_host
*vha
= sess
->vha
;
343 struct qla_hw_data
*ha
= vha
->hw
;
347 * Release the target session for FC Nexus from fabric module code.
349 if (sess
->se_sess
!= NULL
)
350 ha
->tgt
.tgt_ops
->free_session(sess
);
352 ql_dbg(ql_dbg_tgt_mgt
, vha
, 0xf001,
353 "Unregistration of sess %p finished\n", sess
);
357 * We need to protect against race, when tgt is freed before or
361 if (tgt
->sess_count
== 0)
362 wake_up_all(&tgt
->waitQ
);
365 /* ha->hardware_lock supposed to be held on entry */
366 void qlt_unreg_sess(struct qla_tgt_sess
*sess
)
368 struct scsi_qla_host
*vha
= sess
->vha
;
370 vha
->hw
->tgt
.tgt_ops
->clear_nacl_from_fcport_map(sess
);
372 list_del(&sess
->sess_list_entry
);
374 list_del(&sess
->del_list_entry
);
376 INIT_WORK(&sess
->free_work
, qlt_free_session_done
);
377 schedule_work(&sess
->free_work
);
379 EXPORT_SYMBOL(qlt_unreg_sess
);
381 /* ha->hardware_lock supposed to be held on entry */
382 static int qlt_reset(struct scsi_qla_host
*vha
, void *iocb
, int mcmd
)
384 struct qla_hw_data
*ha
= vha
->hw
;
385 struct qla_tgt_sess
*sess
= NULL
;
386 uint32_t unpacked_lun
, lun
= 0;
389 struct imm_ntfy_from_isp
*n
= (struct imm_ntfy_from_isp
*)iocb
;
390 struct atio_from_isp
*a
= (struct atio_from_isp
*)iocb
;
392 loop_id
= le16_to_cpu(n
->u
.isp24
.nport_handle
);
393 if (loop_id
== 0xFFFF) {
394 #if 0 /* FIXME: Re-enable Global event handling.. */
396 atomic_inc(&ha
->tgt
.qla_tgt
->tgt_global_resets_count
);
397 qlt_clear_tgt_db(ha
->tgt
.qla_tgt
, 1);
398 if (!list_empty(&ha
->tgt
.qla_tgt
->sess_list
)) {
399 sess
= list_entry(ha
->tgt
.qla_tgt
->sess_list
.next
,
400 typeof(*sess
), sess_list_entry
);
402 case QLA_TGT_NEXUS_LOSS_SESS
:
403 mcmd
= QLA_TGT_NEXUS_LOSS
;
405 case QLA_TGT_ABORT_ALL_SESS
:
406 mcmd
= QLA_TGT_ABORT_ALL
;
408 case QLA_TGT_NEXUS_LOSS
:
409 case QLA_TGT_ABORT_ALL
:
412 ql_dbg(ql_dbg_tgt
, vha
, 0xe046,
413 "qla_target(%d): Not allowed "
414 "command %x in %s", vha
->vp_idx
,
423 sess
= ha
->tgt
.tgt_ops
->find_sess_by_loop_id(vha
, loop_id
);
426 ql_dbg(ql_dbg_tgt
, vha
, 0xe000,
427 "Using sess for qla_tgt_reset: %p\n", sess
);
433 ql_dbg(ql_dbg_tgt
, vha
, 0xe047,
434 "scsi(%ld): resetting (session %p from port "
435 "%02x:%02x:%02x:%02x:%02x:%02x:%02x:%02x, "
436 "mcmd %x, loop_id %d)\n", vha
->host_no
, sess
,
437 sess
->port_name
[0], sess
->port_name
[1],
438 sess
->port_name
[2], sess
->port_name
[3],
439 sess
->port_name
[4], sess
->port_name
[5],
440 sess
->port_name
[6], sess
->port_name
[7],
443 lun
= a
->u
.isp24
.fcp_cmnd
.lun
;
444 unpacked_lun
= scsilun_to_int((struct scsi_lun
*)&lun
);
446 return qlt_issue_task_mgmt(sess
, unpacked_lun
, mcmd
,
447 iocb
, QLA24XX_MGMT_SEND_NACK
);
450 /* ha->hardware_lock supposed to be held on entry */
451 static void qlt_schedule_sess_for_deletion(struct qla_tgt_sess
*sess
,
454 struct qla_tgt
*tgt
= sess
->tgt
;
455 uint32_t dev_loss_tmo
= tgt
->ha
->port_down_retry_count
+ 5;
460 ql_dbg(ql_dbg_tgt
, sess
->vha
, 0xe001,
461 "Scheduling sess %p for deletion\n", sess
);
462 list_add_tail(&sess
->del_list_entry
, &tgt
->del_sess_list
);
468 sess
->expires
= jiffies
+ dev_loss_tmo
* HZ
;
470 ql_dbg(ql_dbg_tgt
, sess
->vha
, 0xe048,
471 "qla_target(%d): session for port %02x:%02x:%02x:"
472 "%02x:%02x:%02x:%02x:%02x (loop ID %d) scheduled for "
473 "deletion in %u secs (expires: %lu) immed: %d\n",
475 sess
->port_name
[0], sess
->port_name
[1],
476 sess
->port_name
[2], sess
->port_name
[3],
477 sess
->port_name
[4], sess
->port_name
[5],
478 sess
->port_name
[6], sess
->port_name
[7],
479 sess
->loop_id
, dev_loss_tmo
, sess
->expires
, immediate
);
482 schedule_delayed_work(&tgt
->sess_del_work
, 0);
484 schedule_delayed_work(&tgt
->sess_del_work
,
485 jiffies
- sess
->expires
);
488 /* ha->hardware_lock supposed to be held on entry */
489 static void qlt_clear_tgt_db(struct qla_tgt
*tgt
, bool local_only
)
491 struct qla_tgt_sess
*sess
;
493 list_for_each_entry(sess
, &tgt
->sess_list
, sess_list_entry
)
494 qlt_schedule_sess_for_deletion(sess
, true);
496 /* At this point tgt could be already dead */
499 static int qla24xx_get_loop_id(struct scsi_qla_host
*vha
, const uint8_t *s_id
,
502 struct qla_hw_data
*ha
= vha
->hw
;
503 dma_addr_t gid_list_dma
;
504 struct gid_list_info
*gid_list
;
509 gid_list
= dma_alloc_coherent(&ha
->pdev
->dev
, qla2x00_gid_list_size(ha
),
510 &gid_list_dma
, GFP_KERNEL
);
512 ql_dbg(ql_dbg_tgt_mgt
, vha
, 0xf044,
513 "qla_target(%d): DMA Alloc failed of %u\n",
514 vha
->vp_idx
, qla2x00_gid_list_size(ha
));
518 /* Get list of logged in devices */
519 rc
= qla2x00_get_id_list(vha
, gid_list
, gid_list_dma
, &entries
);
520 if (rc
!= QLA_SUCCESS
) {
521 ql_dbg(ql_dbg_tgt_mgt
, vha
, 0xf045,
522 "qla_target(%d): get_id_list() failed: %x\n",
525 goto out_free_id_list
;
528 id_iter
= (char *)gid_list
;
530 for (i
= 0; i
< entries
; i
++) {
531 struct gid_list_info
*gid
= (struct gid_list_info
*)id_iter
;
532 if ((gid
->al_pa
== s_id
[2]) &&
533 (gid
->area
== s_id
[1]) &&
534 (gid
->domain
== s_id
[0])) {
535 *loop_id
= le16_to_cpu(gid
->loop_id
);
539 id_iter
+= ha
->gid_list_info_size
;
543 dma_free_coherent(&ha
->pdev
->dev
, qla2x00_gid_list_size(ha
),
544 gid_list
, gid_list_dma
);
548 static bool qlt_check_fcport_exist(struct scsi_qla_host
*vha
,
549 struct qla_tgt_sess
*sess
)
551 struct qla_hw_data
*ha
= vha
->hw
;
552 struct qla_port_24xx_data
*pmap24
;
553 bool res
, found
= false;
555 uint16_t loop_id
= 0xFFFF; /* to eliminate compiler's warning */
563 global_resets
= atomic_read(&ha
->tgt
.qla_tgt
->tgt_global_resets_count
);
565 rc
= qla2x00_get_node_name_list(vha
, &pmap
, &pmap_len
);
566 if (rc
!= QLA_SUCCESS
) {
572 entries
= pmap_len
/sizeof(*pmap24
);
574 for (i
= 0; i
< entries
; ++i
) {
575 if (!memcmp(sess
->port_name
, pmap24
[i
].port_name
, WWN_SIZE
)) {
576 loop_id
= le16_to_cpu(pmap24
[i
].loop_id
);
589 ql_dbg(ql_dbg_tgt_mgt
, vha
, 0xf046,
590 "qlt_check_fcport_exist(): loop_id %d", loop_id
);
592 fcport
= kzalloc(sizeof(*fcport
), GFP_KERNEL
);
593 if (fcport
== NULL
) {
594 ql_dbg(ql_dbg_tgt_mgt
, vha
, 0xf047,
595 "qla_target(%d): Allocation of tmp FC port failed",
601 fcport
->loop_id
= loop_id
;
603 rc
= qla2x00_get_port_database(vha
, fcport
, 0);
604 if (rc
!= QLA_SUCCESS
) {
605 ql_dbg(ql_dbg_tgt_mgt
, vha
, 0xf048,
606 "qla_target(%d): Failed to retrieve fcport "
607 "information -- get_port_database() returned %x "
608 "(loop_id=0x%04x)", vha
->vp_idx
, rc
, loop_id
);
610 goto out_free_fcport
;
614 atomic_read(&ha
->tgt
.qla_tgt
->tgt_global_resets_count
)) {
615 ql_dbg(ql_dbg_tgt_mgt
, vha
, 0xf002,
616 "qla_target(%d): global reset during session discovery"
617 " (counter was %d, new %d), retrying",
618 vha
->vp_idx
, global_resets
,
619 atomic_read(&ha
->tgt
.qla_tgt
->tgt_global_resets_count
));
623 ql_dbg(ql_dbg_tgt_mgt
, vha
, 0xf003,
624 "Updating sess %p s_id %x:%x:%x, loop_id %d) to d_id %x:%x:%x, "
625 "loop_id %d", sess
, sess
->s_id
.b
.domain
, sess
->s_id
.b
.al_pa
,
626 sess
->s_id
.b
.area
, sess
->loop_id
, fcport
->d_id
.b
.domain
,
627 fcport
->d_id
.b
.al_pa
, fcport
->d_id
.b
.area
, fcport
->loop_id
);
629 sess
->s_id
= fcport
->d_id
;
630 sess
->loop_id
= fcport
->loop_id
;
631 sess
->conf_compl_supported
= !!(fcport
->flags
&
632 FCF_CONF_COMP_SUPPORTED
);
643 /* ha->hardware_lock supposed to be held on entry */
644 static void qlt_undelete_sess(struct qla_tgt_sess
*sess
)
646 BUG_ON(!sess
->deleted
);
648 list_del(&sess
->del_list_entry
);
652 static void qlt_del_sess_work_fn(struct delayed_work
*work
)
654 struct qla_tgt
*tgt
= container_of(work
, struct qla_tgt
,
656 struct scsi_qla_host
*vha
= tgt
->vha
;
657 struct qla_hw_data
*ha
= vha
->hw
;
658 struct qla_tgt_sess
*sess
;
661 spin_lock_irqsave(&ha
->hardware_lock
, flags
);
662 while (!list_empty(&tgt
->del_sess_list
)) {
663 sess
= list_entry(tgt
->del_sess_list
.next
, typeof(*sess
),
665 if (time_after_eq(jiffies
, sess
->expires
)) {
668 qlt_undelete_sess(sess
);
670 spin_unlock_irqrestore(&ha
->hardware_lock
, flags
);
671 cancel
= qlt_check_fcport_exist(vha
, sess
);
676 * sess was again deleted while we were
679 spin_lock_irqsave(&ha
->hardware_lock
,
684 ql_dbg(ql_dbg_tgt_mgt
, vha
, 0xf049,
685 "qla_target(%d): cancel deletion of "
686 "session for port %02x:%02x:%02x:%02x:%02x:"
687 "%02x:%02x:%02x (loop ID %d), because "
688 " it isn't deleted by firmware",
689 vha
->vp_idx
, sess
->port_name
[0],
690 sess
->port_name
[1], sess
->port_name
[2],
691 sess
->port_name
[3], sess
->port_name
[4],
692 sess
->port_name
[5], sess
->port_name
[6],
693 sess
->port_name
[7], sess
->loop_id
);
695 ql_dbg(ql_dbg_tgt_mgt
, vha
, 0xf004,
696 "Timeout: sess %p about to be deleted\n",
698 ha
->tgt
.tgt_ops
->shutdown_sess(sess
);
699 ha
->tgt
.tgt_ops
->put_sess(sess
);
702 spin_lock_irqsave(&ha
->hardware_lock
, flags
);
704 schedule_delayed_work(&tgt
->sess_del_work
,
705 jiffies
- sess
->expires
);
709 spin_unlock_irqrestore(&ha
->hardware_lock
, flags
);
713 * Adds an extra ref to allow to drop hw lock after adding sess to the list.
714 * Caller must put it.
716 static struct qla_tgt_sess
*qlt_create_sess(
717 struct scsi_qla_host
*vha
,
721 struct qla_hw_data
*ha
= vha
->hw
;
722 struct qla_tgt_sess
*sess
;
724 unsigned char be_sid
[3];
726 /* Check to avoid double sessions */
727 spin_lock_irqsave(&ha
->hardware_lock
, flags
);
728 list_for_each_entry(sess
, &ha
->tgt
.qla_tgt
->sess_list
,
730 if (!memcmp(sess
->port_name
, fcport
->port_name
, WWN_SIZE
)) {
731 ql_dbg(ql_dbg_tgt_mgt
, vha
, 0xf005,
732 "Double sess %p found (s_id %x:%x:%x, "
733 "loop_id %d), updating to d_id %x:%x:%x, "
734 "loop_id %d", sess
, sess
->s_id
.b
.domain
,
735 sess
->s_id
.b
.al_pa
, sess
->s_id
.b
.area
,
736 sess
->loop_id
, fcport
->d_id
.b
.domain
,
737 fcport
->d_id
.b
.al_pa
, fcport
->d_id
.b
.area
,
741 qlt_undelete_sess(sess
);
743 kref_get(&sess
->se_sess
->sess_kref
);
744 sess
->s_id
= fcport
->d_id
;
745 sess
->loop_id
= fcport
->loop_id
;
746 sess
->conf_compl_supported
= !!(fcport
->flags
&
747 FCF_CONF_COMP_SUPPORTED
);
748 if (sess
->local
&& !local
)
750 spin_unlock_irqrestore(&ha
->hardware_lock
, flags
);
755 spin_unlock_irqrestore(&ha
->hardware_lock
, flags
);
757 sess
= kzalloc(sizeof(*sess
), GFP_KERNEL
);
759 ql_dbg(ql_dbg_tgt_mgt
, vha
, 0xf04a,
760 "qla_target(%u): session allocation failed, "
761 "all commands from port %02x:%02x:%02x:%02x:"
762 "%02x:%02x:%02x:%02x will be refused", vha
->vp_idx
,
763 fcport
->port_name
[0], fcport
->port_name
[1],
764 fcport
->port_name
[2], fcport
->port_name
[3],
765 fcport
->port_name
[4], fcport
->port_name
[5],
766 fcport
->port_name
[6], fcport
->port_name
[7]);
770 sess
->tgt
= ha
->tgt
.qla_tgt
;
772 sess
->s_id
= fcport
->d_id
;
773 sess
->loop_id
= fcport
->loop_id
;
776 ql_dbg(ql_dbg_tgt_mgt
, vha
, 0xf006,
777 "Adding sess %p to tgt %p via ->check_initiator_node_acl()\n",
778 sess
, ha
->tgt
.qla_tgt
);
780 be_sid
[0] = sess
->s_id
.b
.domain
;
781 be_sid
[1] = sess
->s_id
.b
.area
;
782 be_sid
[2] = sess
->s_id
.b
.al_pa
;
784 * Determine if this fc_port->port_name is allowed to access
785 * target mode using explict NodeACLs+MappedLUNs, or using
786 * TPG demo mode. If this is successful a target mode FC nexus
789 if (ha
->tgt
.tgt_ops
->check_initiator_node_acl(vha
,
790 &fcport
->port_name
[0], sess
, &be_sid
[0], fcport
->loop_id
) < 0) {
795 * Take an extra reference to ->sess_kref here to handle qla_tgt_sess
796 * access across ->hardware_lock reaquire.
798 kref_get(&sess
->se_sess
->sess_kref
);
800 sess
->conf_compl_supported
= !!(fcport
->flags
&
801 FCF_CONF_COMP_SUPPORTED
);
802 BUILD_BUG_ON(sizeof(sess
->port_name
) != sizeof(fcport
->port_name
));
803 memcpy(sess
->port_name
, fcport
->port_name
, sizeof(sess
->port_name
));
805 spin_lock_irqsave(&ha
->hardware_lock
, flags
);
806 list_add_tail(&sess
->sess_list_entry
, &ha
->tgt
.qla_tgt
->sess_list
);
807 ha
->tgt
.qla_tgt
->sess_count
++;
808 spin_unlock_irqrestore(&ha
->hardware_lock
, flags
);
810 ql_dbg(ql_dbg_tgt_mgt
, vha
, 0xf04b,
811 "qla_target(%d): %ssession for wwn %02x:%02x:%02x:%02x:"
812 "%02x:%02x:%02x:%02x (loop_id %d, s_id %x:%x:%x, confirmed"
813 " completion %ssupported) added\n",
814 vha
->vp_idx
, local
? "local " : "", fcport
->port_name
[0],
815 fcport
->port_name
[1], fcport
->port_name
[2], fcport
->port_name
[3],
816 fcport
->port_name
[4], fcport
->port_name
[5], fcport
->port_name
[6],
817 fcport
->port_name
[7], fcport
->loop_id
, sess
->s_id
.b
.domain
,
818 sess
->s_id
.b
.area
, sess
->s_id
.b
.al_pa
, sess
->conf_compl_supported
?
825 * Called from drivers/scsi/qla2xxx/qla_init.c:qla2x00_reg_remote_port()
827 void qlt_fc_port_added(struct scsi_qla_host
*vha
, fc_port_t
*fcport
)
829 struct qla_hw_data
*ha
= vha
->hw
;
830 struct qla_tgt
*tgt
= ha
->tgt
.qla_tgt
;
831 struct qla_tgt_sess
*sess
;
834 if (!vha
->hw
->tgt
.tgt_ops
)
837 if (!tgt
|| (fcport
->port_type
!= FCT_INITIATOR
))
840 spin_lock_irqsave(&ha
->hardware_lock
, flags
);
842 spin_unlock_irqrestore(&ha
->hardware_lock
, flags
);
845 sess
= qlt_find_sess_by_port_name(tgt
, fcport
->port_name
);
847 spin_unlock_irqrestore(&ha
->hardware_lock
, flags
);
849 mutex_lock(&ha
->tgt
.tgt_mutex
);
850 sess
= qlt_create_sess(vha
, fcport
, false);
851 mutex_unlock(&ha
->tgt
.tgt_mutex
);
853 spin_lock_irqsave(&ha
->hardware_lock
, flags
);
855 kref_get(&sess
->se_sess
->sess_kref
);
858 qlt_undelete_sess(sess
);
860 ql_dbg(ql_dbg_tgt_mgt
, vha
, 0xf04c,
861 "qla_target(%u): %ssession for port %02x:"
862 "%02x:%02x:%02x:%02x:%02x:%02x:%02x (loop ID %d) "
863 "reappeared\n", vha
->vp_idx
, sess
->local
? "local "
864 : "", sess
->port_name
[0], sess
->port_name
[1],
865 sess
->port_name
[2], sess
->port_name
[3],
866 sess
->port_name
[4], sess
->port_name
[5],
867 sess
->port_name
[6], sess
->port_name
[7],
870 ql_dbg(ql_dbg_tgt_mgt
, vha
, 0xf007,
871 "Reappeared sess %p\n", sess
);
873 sess
->s_id
= fcport
->d_id
;
874 sess
->loop_id
= fcport
->loop_id
;
875 sess
->conf_compl_supported
= !!(fcport
->flags
&
876 FCF_CONF_COMP_SUPPORTED
);
879 if (sess
&& sess
->local
) {
880 ql_dbg(ql_dbg_tgt_mgt
, vha
, 0xf04d,
881 "qla_target(%u): local session for "
882 "port %02x:%02x:%02x:%02x:%02x:%02x:%02x:%02x "
883 "(loop ID %d) became global\n", vha
->vp_idx
,
884 fcport
->port_name
[0], fcport
->port_name
[1],
885 fcport
->port_name
[2], fcport
->port_name
[3],
886 fcport
->port_name
[4], fcport
->port_name
[5],
887 fcport
->port_name
[6], fcport
->port_name
[7],
891 spin_unlock_irqrestore(&ha
->hardware_lock
, flags
);
893 ha
->tgt
.tgt_ops
->put_sess(sess
);
896 void qlt_fc_port_deleted(struct scsi_qla_host
*vha
, fc_port_t
*fcport
)
898 struct qla_hw_data
*ha
= vha
->hw
;
899 struct qla_tgt
*tgt
= ha
->tgt
.qla_tgt
;
900 struct qla_tgt_sess
*sess
;
903 if (!vha
->hw
->tgt
.tgt_ops
)
906 if (!tgt
|| (fcport
->port_type
!= FCT_INITIATOR
))
909 spin_lock_irqsave(&ha
->hardware_lock
, flags
);
911 spin_unlock_irqrestore(&ha
->hardware_lock
, flags
);
914 sess
= qlt_find_sess_by_port_name(tgt
, fcport
->port_name
);
916 spin_unlock_irqrestore(&ha
->hardware_lock
, flags
);
920 ql_dbg(ql_dbg_tgt_mgt
, vha
, 0xf008, "qla_tgt_fc_port_deleted %p", sess
);
923 qlt_schedule_sess_for_deletion(sess
, false);
924 spin_unlock_irqrestore(&ha
->hardware_lock
, flags
);
927 static inline int test_tgt_sess_count(struct qla_tgt
*tgt
)
929 struct qla_hw_data
*ha
= tgt
->ha
;
933 * We need to protect against race, when tgt is freed before or
936 spin_lock_irqsave(&ha
->hardware_lock
, flags
);
937 ql_dbg(ql_dbg_tgt
, tgt
->vha
, 0xe002,
938 "tgt %p, empty(sess_list)=%d sess_count=%d\n",
939 tgt
, list_empty(&tgt
->sess_list
), tgt
->sess_count
);
940 res
= (tgt
->sess_count
== 0);
941 spin_unlock_irqrestore(&ha
->hardware_lock
, flags
);
946 /* Called by tcm_qla2xxx configfs code */
947 void qlt_stop_phase1(struct qla_tgt
*tgt
)
949 struct scsi_qla_host
*vha
= tgt
->vha
;
950 struct qla_hw_data
*ha
= tgt
->ha
;
953 if (tgt
->tgt_stop
|| tgt
->tgt_stopped
) {
954 ql_dbg(ql_dbg_tgt_mgt
, vha
, 0xf04e,
955 "Already in tgt->tgt_stop or tgt_stopped state\n");
960 ql_dbg(ql_dbg_tgt
, vha
, 0xe003, "Stopping target for host %ld(%p)\n",
963 * Mutex needed to sync with qla_tgt_fc_port_[added,deleted].
964 * Lock is needed, because we still can get an incoming packet.
966 mutex_lock(&ha
->tgt
.tgt_mutex
);
967 spin_lock_irqsave(&ha
->hardware_lock
, flags
);
969 qlt_clear_tgt_db(tgt
, true);
970 spin_unlock_irqrestore(&ha
->hardware_lock
, flags
);
971 mutex_unlock(&ha
->tgt
.tgt_mutex
);
973 flush_delayed_work_sync(&tgt
->sess_del_work
);
975 ql_dbg(ql_dbg_tgt_mgt
, vha
, 0xf009,
976 "Waiting for sess works (tgt %p)", tgt
);
977 spin_lock_irqsave(&tgt
->sess_work_lock
, flags
);
978 while (!list_empty(&tgt
->sess_works_list
)) {
979 spin_unlock_irqrestore(&tgt
->sess_work_lock
, flags
);
980 flush_scheduled_work();
981 spin_lock_irqsave(&tgt
->sess_work_lock
, flags
);
983 spin_unlock_irqrestore(&tgt
->sess_work_lock
, flags
);
985 ql_dbg(ql_dbg_tgt_mgt
, vha
, 0xf00a,
986 "Waiting for tgt %p: list_empty(sess_list)=%d "
987 "sess_count=%d\n", tgt
, list_empty(&tgt
->sess_list
),
990 wait_event(tgt
->waitQ
, test_tgt_sess_count(tgt
));
993 if (!ha
->flags
.host_shutting_down
&& qla_tgt_mode_enabled(vha
))
994 qlt_disable_vha(vha
);
996 /* Wait for sessions to clear out (just in case) */
997 wait_event(tgt
->waitQ
, test_tgt_sess_count(tgt
));
999 EXPORT_SYMBOL(qlt_stop_phase1
);
1001 /* Called by tcm_qla2xxx configfs code */
1002 void qlt_stop_phase2(struct qla_tgt
*tgt
)
1004 struct qla_hw_data
*ha
= tgt
->ha
;
1005 unsigned long flags
;
1007 if (tgt
->tgt_stopped
) {
1008 ql_dbg(ql_dbg_tgt_mgt
, tgt
->vha
, 0xf04f,
1009 "Already in tgt->tgt_stopped state\n");
1014 ql_dbg(ql_dbg_tgt_mgt
, tgt
->vha
, 0xf00b,
1015 "Waiting for %d IRQ commands to complete (tgt %p)",
1016 tgt
->irq_cmd_count
, tgt
);
1018 mutex_lock(&ha
->tgt
.tgt_mutex
);
1019 spin_lock_irqsave(&ha
->hardware_lock
, flags
);
1020 while (tgt
->irq_cmd_count
!= 0) {
1021 spin_unlock_irqrestore(&ha
->hardware_lock
, flags
);
1023 spin_lock_irqsave(&ha
->hardware_lock
, flags
);
1026 tgt
->tgt_stopped
= 1;
1027 spin_unlock_irqrestore(&ha
->hardware_lock
, flags
);
1028 mutex_unlock(&ha
->tgt
.tgt_mutex
);
1030 ql_dbg(ql_dbg_tgt_mgt
, tgt
->vha
, 0xf00c, "Stop of tgt %p finished",
1033 EXPORT_SYMBOL(qlt_stop_phase2
);
1035 /* Called from qlt_remove_target() -> qla2x00_remove_one() */
1036 void qlt_release(struct qla_tgt
*tgt
)
1038 struct qla_hw_data
*ha
= tgt
->ha
;
1040 if ((ha
->tgt
.qla_tgt
!= NULL
) && !tgt
->tgt_stopped
)
1041 qlt_stop_phase2(tgt
);
1043 ha
->tgt
.qla_tgt
= NULL
;
1045 ql_dbg(ql_dbg_tgt_mgt
, tgt
->vha
, 0xf00d,
1046 "Release of tgt %p finished\n", tgt
);
1051 /* ha->hardware_lock supposed to be held on entry */
1052 static int qlt_sched_sess_work(struct qla_tgt
*tgt
, int type
,
1053 const void *param
, unsigned int param_size
)
1055 struct qla_tgt_sess_work_param
*prm
;
1056 unsigned long flags
;
1058 prm
= kzalloc(sizeof(*prm
), GFP_ATOMIC
);
1060 ql_dbg(ql_dbg_tgt_mgt
, tgt
->vha
, 0xf050,
1061 "qla_target(%d): Unable to create session "
1062 "work, command will be refused", 0);
1066 ql_dbg(ql_dbg_tgt_mgt
, tgt
->vha
, 0xf00e,
1067 "Scheduling work (type %d, prm %p)"
1068 " to find session for param %p (size %d, tgt %p)\n",
1069 type
, prm
, param
, param_size
, tgt
);
1072 memcpy(&prm
->tm_iocb
, param
, param_size
);
1074 spin_lock_irqsave(&tgt
->sess_work_lock
, flags
);
1075 list_add_tail(&prm
->sess_works_list_entry
, &tgt
->sess_works_list
);
1076 spin_unlock_irqrestore(&tgt
->sess_work_lock
, flags
);
1078 schedule_work(&tgt
->sess_work
);
1084 * ha->hardware_lock supposed to be held on entry. Might drop it, then reaquire
1086 static void qlt_send_notify_ack(struct scsi_qla_host
*vha
,
1087 struct imm_ntfy_from_isp
*ntfy
,
1088 uint32_t add_flags
, uint16_t resp_code
, int resp_code_valid
,
1089 uint16_t srr_flags
, uint16_t srr_reject_code
, uint8_t srr_explan
)
1091 struct qla_hw_data
*ha
= vha
->hw
;
1093 struct nack_to_isp
*nack
;
1095 ql_dbg(ql_dbg_tgt
, vha
, 0xe004, "Sending NOTIFY_ACK (ha=%p)\n", ha
);
1097 /* Send marker if required */
1098 if (qlt_issue_marker(vha
, 1) != QLA_SUCCESS
)
1101 pkt
= (request_t
*)qla2x00_alloc_iocbs(vha
, NULL
);
1103 ql_dbg(ql_dbg_tgt
, vha
, 0xe049,
1104 "qla_target(%d): %s failed: unable to allocate "
1105 "request packet\n", vha
->vp_idx
, __func__
);
1109 if (ha
->tgt
.qla_tgt
!= NULL
)
1110 ha
->tgt
.qla_tgt
->notify_ack_expected
++;
1112 pkt
->entry_type
= NOTIFY_ACK_TYPE
;
1113 pkt
->entry_count
= 1;
1115 nack
= (struct nack_to_isp
*)pkt
;
1116 nack
->ox_id
= ntfy
->ox_id
;
1118 nack
->u
.isp24
.nport_handle
= ntfy
->u
.isp24
.nport_handle
;
1119 if (le16_to_cpu(ntfy
->u
.isp24
.status
) == IMM_NTFY_ELS
) {
1120 nack
->u
.isp24
.flags
= ntfy
->u
.isp24
.flags
&
1121 __constant_cpu_to_le32(NOTIFY24XX_FLAGS_PUREX_IOCB
);
1123 nack
->u
.isp24
.srr_rx_id
= ntfy
->u
.isp24
.srr_rx_id
;
1124 nack
->u
.isp24
.status
= ntfy
->u
.isp24
.status
;
1125 nack
->u
.isp24
.status_subcode
= ntfy
->u
.isp24
.status_subcode
;
1126 nack
->u
.isp24
.exchange_address
= ntfy
->u
.isp24
.exchange_address
;
1127 nack
->u
.isp24
.srr_rel_offs
= ntfy
->u
.isp24
.srr_rel_offs
;
1128 nack
->u
.isp24
.srr_ui
= ntfy
->u
.isp24
.srr_ui
;
1129 nack
->u
.isp24
.srr_flags
= cpu_to_le16(srr_flags
);
1130 nack
->u
.isp24
.srr_reject_code
= srr_reject_code
;
1131 nack
->u
.isp24
.srr_reject_code_expl
= srr_explan
;
1132 nack
->u
.isp24
.vp_index
= ntfy
->u
.isp24
.vp_index
;
1134 ql_dbg(ql_dbg_tgt
, vha
, 0xe005,
1135 "qla_target(%d): Sending 24xx Notify Ack %d\n",
1136 vha
->vp_idx
, nack
->u
.isp24
.status
);
1138 qla2x00_start_iocbs(vha
, vha
->req
);
1142 * ha->hardware_lock supposed to be held on entry. Might drop it, then reaquire
1144 static void qlt_24xx_send_abts_resp(struct scsi_qla_host
*vha
,
1145 struct abts_recv_from_24xx
*abts
, uint32_t status
,
1148 struct qla_hw_data
*ha
= vha
->hw
;
1149 struct abts_resp_to_24xx
*resp
;
1153 ql_dbg(ql_dbg_tgt
, vha
, 0xe006,
1154 "Sending task mgmt ABTS response (ha=%p, atio=%p, status=%x\n",
1157 /* Send marker if required */
1158 if (qlt_issue_marker(vha
, 1) != QLA_SUCCESS
)
1161 resp
= (struct abts_resp_to_24xx
*)qla2x00_alloc_iocbs(vha
, NULL
);
1163 ql_dbg(ql_dbg_tgt
, vha
, 0xe04a,
1164 "qla_target(%d): %s failed: unable to allocate "
1165 "request packet", vha
->vp_idx
, __func__
);
1169 resp
->entry_type
= ABTS_RESP_24XX
;
1170 resp
->entry_count
= 1;
1171 resp
->nport_handle
= abts
->nport_handle
;
1172 resp
->vp_index
= vha
->vp_idx
;
1173 resp
->sof_type
= abts
->sof_type
;
1174 resp
->exchange_address
= abts
->exchange_address
;
1175 resp
->fcp_hdr_le
= abts
->fcp_hdr_le
;
1176 f_ctl
= __constant_cpu_to_le32(F_CTL_EXCH_CONTEXT_RESP
|
1177 F_CTL_LAST_SEQ
| F_CTL_END_SEQ
|
1178 F_CTL_SEQ_INITIATIVE
);
1179 p
= (uint8_t *)&f_ctl
;
1180 resp
->fcp_hdr_le
.f_ctl
[0] = *p
++;
1181 resp
->fcp_hdr_le
.f_ctl
[1] = *p
++;
1182 resp
->fcp_hdr_le
.f_ctl
[2] = *p
;
1184 resp
->fcp_hdr_le
.d_id
[0] = abts
->fcp_hdr_le
.d_id
[0];
1185 resp
->fcp_hdr_le
.d_id
[1] = abts
->fcp_hdr_le
.d_id
[1];
1186 resp
->fcp_hdr_le
.d_id
[2] = abts
->fcp_hdr_le
.d_id
[2];
1187 resp
->fcp_hdr_le
.s_id
[0] = abts
->fcp_hdr_le
.s_id
[0];
1188 resp
->fcp_hdr_le
.s_id
[1] = abts
->fcp_hdr_le
.s_id
[1];
1189 resp
->fcp_hdr_le
.s_id
[2] = abts
->fcp_hdr_le
.s_id
[2];
1191 resp
->fcp_hdr_le
.d_id
[0] = abts
->fcp_hdr_le
.s_id
[0];
1192 resp
->fcp_hdr_le
.d_id
[1] = abts
->fcp_hdr_le
.s_id
[1];
1193 resp
->fcp_hdr_le
.d_id
[2] = abts
->fcp_hdr_le
.s_id
[2];
1194 resp
->fcp_hdr_le
.s_id
[0] = abts
->fcp_hdr_le
.d_id
[0];
1195 resp
->fcp_hdr_le
.s_id
[1] = abts
->fcp_hdr_le
.d_id
[1];
1196 resp
->fcp_hdr_le
.s_id
[2] = abts
->fcp_hdr_le
.d_id
[2];
1198 resp
->exchange_addr_to_abort
= abts
->exchange_addr_to_abort
;
1199 if (status
== FCP_TMF_CMPL
) {
1200 resp
->fcp_hdr_le
.r_ctl
= R_CTL_BASIC_LINK_SERV
| R_CTL_B_ACC
;
1201 resp
->payload
.ba_acct
.seq_id_valid
= SEQ_ID_INVALID
;
1202 resp
->payload
.ba_acct
.low_seq_cnt
= 0x0000;
1203 resp
->payload
.ba_acct
.high_seq_cnt
= 0xFFFF;
1204 resp
->payload
.ba_acct
.ox_id
= abts
->fcp_hdr_le
.ox_id
;
1205 resp
->payload
.ba_acct
.rx_id
= abts
->fcp_hdr_le
.rx_id
;
1207 resp
->fcp_hdr_le
.r_ctl
= R_CTL_BASIC_LINK_SERV
| R_CTL_B_RJT
;
1208 resp
->payload
.ba_rjt
.reason_code
=
1209 BA_RJT_REASON_CODE_UNABLE_TO_PERFORM
;
1210 /* Other bytes are zero */
1213 ha
->tgt
.qla_tgt
->abts_resp_expected
++;
1215 qla2x00_start_iocbs(vha
, vha
->req
);
1219 * ha->hardware_lock supposed to be held on entry. Might drop it, then reaquire
1221 static void qlt_24xx_retry_term_exchange(struct scsi_qla_host
*vha
,
1222 struct abts_resp_from_24xx_fw
*entry
)
1224 struct ctio7_to_24xx
*ctio
;
1226 ql_dbg(ql_dbg_tgt
, vha
, 0xe007,
1227 "Sending retry TERM EXCH CTIO7 (ha=%p)\n", vha
->hw
);
1228 /* Send marker if required */
1229 if (qlt_issue_marker(vha
, 1) != QLA_SUCCESS
)
1232 ctio
= (struct ctio7_to_24xx
*)qla2x00_alloc_iocbs(vha
, NULL
);
1234 ql_dbg(ql_dbg_tgt
, vha
, 0xe04b,
1235 "qla_target(%d): %s failed: unable to allocate "
1236 "request packet\n", vha
->vp_idx
, __func__
);
1241 * We've got on entrance firmware's response on by us generated
1242 * ABTS response. So, in it ID fields are reversed.
1245 ctio
->entry_type
= CTIO_TYPE7
;
1246 ctio
->entry_count
= 1;
1247 ctio
->nport_handle
= entry
->nport_handle
;
1248 ctio
->handle
= QLA_TGT_SKIP_HANDLE
| CTIO_COMPLETION_HANDLE_MARK
;
1249 ctio
->timeout
= __constant_cpu_to_le16(QLA_TGT_TIMEOUT
);
1250 ctio
->vp_index
= vha
->vp_idx
;
1251 ctio
->initiator_id
[0] = entry
->fcp_hdr_le
.d_id
[0];
1252 ctio
->initiator_id
[1] = entry
->fcp_hdr_le
.d_id
[1];
1253 ctio
->initiator_id
[2] = entry
->fcp_hdr_le
.d_id
[2];
1254 ctio
->exchange_addr
= entry
->exchange_addr_to_abort
;
1255 ctio
->u
.status1
.flags
=
1256 __constant_cpu_to_le16(CTIO7_FLAGS_STATUS_MODE_1
|
1257 CTIO7_FLAGS_TERMINATE
);
1258 ctio
->u
.status1
.ox_id
= entry
->fcp_hdr_le
.ox_id
;
1260 qla2x00_start_iocbs(vha
, vha
->req
);
1262 qlt_24xx_send_abts_resp(vha
, (struct abts_recv_from_24xx
*)entry
,
1263 FCP_TMF_CMPL
, true);
1266 /* ha->hardware_lock supposed to be held on entry */
1267 static int __qlt_24xx_handle_abts(struct scsi_qla_host
*vha
,
1268 struct abts_recv_from_24xx
*abts
, struct qla_tgt_sess
*sess
)
1270 struct qla_hw_data
*ha
= vha
->hw
;
1271 struct qla_tgt_mgmt_cmd
*mcmd
;
1274 ql_dbg(ql_dbg_tgt_mgt
, vha
, 0xf00f,
1275 "qla_target(%d): task abort (tag=%d)\n",
1276 vha
->vp_idx
, abts
->exchange_addr_to_abort
);
1278 mcmd
= mempool_alloc(qla_tgt_mgmt_cmd_mempool
, GFP_ATOMIC
);
1280 ql_dbg(ql_dbg_tgt_mgt
, vha
, 0xf051,
1281 "qla_target(%d): %s: Allocation of ABORT cmd failed",
1282 vha
->vp_idx
, __func__
);
1285 memset(mcmd
, 0, sizeof(*mcmd
));
1288 memcpy(&mcmd
->orig_iocb
.abts
, abts
, sizeof(mcmd
->orig_iocb
.abts
));
1290 rc
= ha
->tgt
.tgt_ops
->handle_tmr(mcmd
, 0, TMR_ABORT_TASK
,
1291 abts
->exchange_addr_to_abort
);
1293 ql_dbg(ql_dbg_tgt_mgt
, vha
, 0xf052,
1294 "qla_target(%d): tgt_ops->handle_tmr()"
1295 " failed: %d", vha
->vp_idx
, rc
);
1296 mempool_free(mcmd
, qla_tgt_mgmt_cmd_mempool
);
1304 * ha->hardware_lock supposed to be held on entry. Might drop it, then reaquire
1306 static void qlt_24xx_handle_abts(struct scsi_qla_host
*vha
,
1307 struct abts_recv_from_24xx
*abts
)
1309 struct qla_hw_data
*ha
= vha
->hw
;
1310 struct qla_tgt_sess
*sess
;
1311 uint32_t tag
= abts
->exchange_addr_to_abort
;
1315 if (le32_to_cpu(abts
->fcp_hdr_le
.parameter
) & ABTS_PARAM_ABORT_SEQ
) {
1316 ql_dbg(ql_dbg_tgt_mgt
, vha
, 0xf053,
1317 "qla_target(%d): ABTS: Abort Sequence not "
1318 "supported\n", vha
->vp_idx
);
1319 qlt_24xx_send_abts_resp(vha
, abts
, FCP_TMF_REJECTED
, false);
1323 if (tag
== ATIO_EXCHANGE_ADDRESS_UNKNOWN
) {
1324 ql_dbg(ql_dbg_tgt_mgt
, vha
, 0xf010,
1325 "qla_target(%d): ABTS: Unknown Exchange "
1326 "Address received\n", vha
->vp_idx
);
1327 qlt_24xx_send_abts_resp(vha
, abts
, FCP_TMF_REJECTED
, false);
1331 ql_dbg(ql_dbg_tgt_mgt
, vha
, 0xf011,
1332 "qla_target(%d): task abort (s_id=%x:%x:%x, "
1333 "tag=%d, param=%x)\n", vha
->vp_idx
, abts
->fcp_hdr_le
.s_id
[2],
1334 abts
->fcp_hdr_le
.s_id
[1], abts
->fcp_hdr_le
.s_id
[0], tag
,
1335 le32_to_cpu(abts
->fcp_hdr_le
.parameter
));
1337 s_id
[0] = abts
->fcp_hdr_le
.s_id
[2];
1338 s_id
[1] = abts
->fcp_hdr_le
.s_id
[1];
1339 s_id
[2] = abts
->fcp_hdr_le
.s_id
[0];
1341 sess
= ha
->tgt
.tgt_ops
->find_sess_by_s_id(vha
, s_id
);
1343 ql_dbg(ql_dbg_tgt_mgt
, vha
, 0xf012,
1344 "qla_target(%d): task abort for non-existant session\n",
1346 rc
= qlt_sched_sess_work(ha
->tgt
.qla_tgt
,
1347 QLA_TGT_SESS_WORK_ABORT
, abts
, sizeof(*abts
));
1349 qlt_24xx_send_abts_resp(vha
, abts
, FCP_TMF_REJECTED
,
1355 rc
= __qlt_24xx_handle_abts(vha
, abts
, sess
);
1357 ql_dbg(ql_dbg_tgt_mgt
, vha
, 0xf054,
1358 "qla_target(%d): __qlt_24xx_handle_abts() failed: %d\n",
1360 qlt_24xx_send_abts_resp(vha
, abts
, FCP_TMF_REJECTED
, false);
1366 * ha->hardware_lock supposed to be held on entry. Might drop it, then reaquire
1368 static void qlt_24xx_send_task_mgmt_ctio(struct scsi_qla_host
*ha
,
1369 struct qla_tgt_mgmt_cmd
*mcmd
, uint32_t resp_code
)
1371 struct atio_from_isp
*atio
= &mcmd
->orig_iocb
.atio
;
1372 struct ctio7_to_24xx
*ctio
;
1374 ql_dbg(ql_dbg_tgt
, ha
, 0xe008,
1375 "Sending task mgmt CTIO7 (ha=%p, atio=%p, resp_code=%x\n",
1376 ha
, atio
, resp_code
);
1378 /* Send marker if required */
1379 if (qlt_issue_marker(ha
, 1) != QLA_SUCCESS
)
1382 ctio
= (struct ctio7_to_24xx
*)qla2x00_alloc_iocbs(ha
, NULL
);
1384 ql_dbg(ql_dbg_tgt
, ha
, 0xe04c,
1385 "qla_target(%d): %s failed: unable to allocate "
1386 "request packet\n", ha
->vp_idx
, __func__
);
1390 ctio
->entry_type
= CTIO_TYPE7
;
1391 ctio
->entry_count
= 1;
1392 ctio
->handle
= QLA_TGT_SKIP_HANDLE
| CTIO_COMPLETION_HANDLE_MARK
;
1393 ctio
->nport_handle
= mcmd
->sess
->loop_id
;
1394 ctio
->timeout
= __constant_cpu_to_le16(QLA_TGT_TIMEOUT
);
1395 ctio
->vp_index
= ha
->vp_idx
;
1396 ctio
->initiator_id
[0] = atio
->u
.isp24
.fcp_hdr
.s_id
[2];
1397 ctio
->initiator_id
[1] = atio
->u
.isp24
.fcp_hdr
.s_id
[1];
1398 ctio
->initiator_id
[2] = atio
->u
.isp24
.fcp_hdr
.s_id
[0];
1399 ctio
->exchange_addr
= atio
->u
.isp24
.exchange_addr
;
1400 ctio
->u
.status1
.flags
= (atio
->u
.isp24
.attr
<< 9) |
1401 __constant_cpu_to_le16(CTIO7_FLAGS_STATUS_MODE_1
|
1402 CTIO7_FLAGS_SEND_STATUS
);
1403 ctio
->u
.status1
.ox_id
= swab16(atio
->u
.isp24
.fcp_hdr
.ox_id
);
1404 ctio
->u
.status1
.scsi_status
=
1405 __constant_cpu_to_le16(SS_RESPONSE_INFO_LEN_VALID
);
1406 ctio
->u
.status1
.response_len
= __constant_cpu_to_le16(8);
1407 ((uint32_t *)ctio
->u
.status1
.sense_data
)[0] = cpu_to_be32(resp_code
);
1409 qla2x00_start_iocbs(ha
, ha
->req
);
1412 void qlt_free_mcmd(struct qla_tgt_mgmt_cmd
*mcmd
)
1414 mempool_free(mcmd
, qla_tgt_mgmt_cmd_mempool
);
1416 EXPORT_SYMBOL(qlt_free_mcmd
);
1418 /* callback from target fabric module code */
1419 void qlt_xmit_tm_rsp(struct qla_tgt_mgmt_cmd
*mcmd
)
1421 struct scsi_qla_host
*vha
= mcmd
->sess
->vha
;
1422 struct qla_hw_data
*ha
= vha
->hw
;
1423 unsigned long flags
;
1425 ql_dbg(ql_dbg_tgt_mgt
, vha
, 0xf013,
1426 "TM response mcmd (%p) status %#x state %#x",
1427 mcmd
, mcmd
->fc_tm_rsp
, mcmd
->flags
);
1429 spin_lock_irqsave(&ha
->hardware_lock
, flags
);
1430 if (mcmd
->flags
== QLA24XX_MGMT_SEND_NACK
)
1431 qlt_send_notify_ack(vha
, &mcmd
->orig_iocb
.imm_ntfy
,
1434 if (mcmd
->se_cmd
.se_tmr_req
->function
== TMR_ABORT_TASK
)
1435 qlt_24xx_send_abts_resp(vha
, &mcmd
->orig_iocb
.abts
,
1436 mcmd
->fc_tm_rsp
, false);
1438 qlt_24xx_send_task_mgmt_ctio(vha
, mcmd
,
1442 * Make the callback for ->free_mcmd() to queue_work() and invoke
1443 * target_put_sess_cmd() to drop cmd_kref to 1. The final
1444 * target_put_sess_cmd() call will be made from TFO->check_stop_free()
1445 * -> tcm_qla2xxx_check_stop_free() to release the TMR associated se_cmd
1446 * descriptor after TFO->queue_tm_rsp() -> tcm_qla2xxx_queue_tm_rsp() ->
1447 * qlt_xmit_tm_rsp() returns here..
1449 ha
->tgt
.tgt_ops
->free_mcmd(mcmd
);
1450 spin_unlock_irqrestore(&ha
->hardware_lock
, flags
);
1452 EXPORT_SYMBOL(qlt_xmit_tm_rsp
);
1455 static int qlt_pci_map_calc_cnt(struct qla_tgt_prm
*prm
)
1457 struct qla_tgt_cmd
*cmd
= prm
->cmd
;
1459 BUG_ON(cmd
->sg_cnt
== 0);
1461 prm
->sg
= (struct scatterlist
*)cmd
->sg
;
1462 prm
->seg_cnt
= pci_map_sg(prm
->tgt
->ha
->pdev
, cmd
->sg
,
1463 cmd
->sg_cnt
, cmd
->dma_data_direction
);
1464 if (unlikely(prm
->seg_cnt
== 0))
1467 prm
->cmd
->sg_mapped
= 1;
1470 * If greater than four sg entries then we need to allocate
1471 * the continuation entries
1473 if (prm
->seg_cnt
> prm
->tgt
->datasegs_per_cmd
)
1474 prm
->req_cnt
+= DIV_ROUND_UP(prm
->seg_cnt
-
1475 prm
->tgt
->datasegs_per_cmd
, prm
->tgt
->datasegs_per_cont
);
1477 ql_dbg(ql_dbg_tgt
, prm
->cmd
->vha
, 0xe009, "seg_cnt=%d, req_cnt=%d\n",
1478 prm
->seg_cnt
, prm
->req_cnt
);
1482 ql_dbg(ql_dbg_tgt
, prm
->cmd
->vha
, 0xe04d,
1483 "qla_target(%d): PCI mapping failed: sg_cnt=%d",
1484 0, prm
->cmd
->sg_cnt
);
1488 static inline void qlt_unmap_sg(struct scsi_qla_host
*vha
,
1489 struct qla_tgt_cmd
*cmd
)
1491 struct qla_hw_data
*ha
= vha
->hw
;
1493 BUG_ON(!cmd
->sg_mapped
);
1494 pci_unmap_sg(ha
->pdev
, cmd
->sg
, cmd
->sg_cnt
, cmd
->dma_data_direction
);
1498 static int qlt_check_reserve_free_req(struct scsi_qla_host
*vha
,
1501 struct qla_hw_data
*ha
= vha
->hw
;
1502 device_reg_t __iomem
*reg
= ha
->iobase
;
1505 if (vha
->req
->cnt
< (req_cnt
+ 2)) {
1506 cnt
= (uint16_t)RD_REG_DWORD(®
->isp24
.req_q_out
);
1508 ql_dbg(ql_dbg_tgt
, vha
, 0xe00a,
1509 "Request ring circled: cnt=%d, vha->->ring_index=%d, "
1510 "vha->req->cnt=%d, req_cnt=%d\n", cnt
,
1511 vha
->req
->ring_index
, vha
->req
->cnt
, req_cnt
);
1512 if (vha
->req
->ring_index
< cnt
)
1513 vha
->req
->cnt
= cnt
- vha
->req
->ring_index
;
1515 vha
->req
->cnt
= vha
->req
->length
-
1516 (vha
->req
->ring_index
- cnt
);
1519 if (unlikely(vha
->req
->cnt
< (req_cnt
+ 2))) {
1520 ql_dbg(ql_dbg_tgt
, vha
, 0xe00b,
1521 "qla_target(%d): There is no room in the "
1522 "request ring: vha->req->ring_index=%d, vha->req->cnt=%d, "
1523 "req_cnt=%d\n", vha
->vp_idx
, vha
->req
->ring_index
,
1524 vha
->req
->cnt
, req_cnt
);
1527 vha
->req
->cnt
-= req_cnt
;
1533 * ha->hardware_lock supposed to be held on entry. Might drop it, then reaquire
1535 static inline void *qlt_get_req_pkt(struct scsi_qla_host
*vha
)
1537 /* Adjust ring index. */
1538 vha
->req
->ring_index
++;
1539 if (vha
->req
->ring_index
== vha
->req
->length
) {
1540 vha
->req
->ring_index
= 0;
1541 vha
->req
->ring_ptr
= vha
->req
->ring
;
1543 vha
->req
->ring_ptr
++;
1545 return (cont_entry_t
*)vha
->req
->ring_ptr
;
1548 /* ha->hardware_lock supposed to be held on entry */
1549 static inline uint32_t qlt_make_handle(struct scsi_qla_host
*vha
)
1551 struct qla_hw_data
*ha
= vha
->hw
;
1554 h
= ha
->tgt
.current_handle
;
1555 /* always increment cmd handle */
1558 if (h
> MAX_OUTSTANDING_COMMANDS
)
1559 h
= 1; /* 0 is QLA_TGT_NULL_HANDLE */
1560 if (h
== ha
->tgt
.current_handle
) {
1561 ql_dbg(ql_dbg_tgt
, vha
, 0xe04e,
1562 "qla_target(%d): Ran out of "
1563 "empty cmd slots in ha %p\n", vha
->vp_idx
, ha
);
1564 h
= QLA_TGT_NULL_HANDLE
;
1567 } while ((h
== QLA_TGT_NULL_HANDLE
) ||
1568 (h
== QLA_TGT_SKIP_HANDLE
) ||
1569 (ha
->tgt
.cmds
[h
-1] != NULL
));
1571 if (h
!= QLA_TGT_NULL_HANDLE
)
1572 ha
->tgt
.current_handle
= h
;
1577 /* ha->hardware_lock supposed to be held on entry */
1578 static int qlt_24xx_build_ctio_pkt(struct qla_tgt_prm
*prm
,
1579 struct scsi_qla_host
*vha
)
1582 struct ctio7_to_24xx
*pkt
;
1583 struct qla_hw_data
*ha
= vha
->hw
;
1584 struct atio_from_isp
*atio
= &prm
->cmd
->atio
;
1586 pkt
= (struct ctio7_to_24xx
*)vha
->req
->ring_ptr
;
1588 memset(pkt
, 0, sizeof(*pkt
));
1590 pkt
->entry_type
= CTIO_TYPE7
;
1591 pkt
->entry_count
= (uint8_t)prm
->req_cnt
;
1592 pkt
->vp_index
= vha
->vp_idx
;
1594 h
= qlt_make_handle(vha
);
1595 if (unlikely(h
== QLA_TGT_NULL_HANDLE
)) {
1597 * CTIO type 7 from the firmware doesn't provide a way to
1598 * know the initiator's LOOP ID, hence we can't find
1599 * the session and, so, the command.
1603 ha
->tgt
.cmds
[h
-1] = prm
->cmd
;
1605 pkt
->handle
= h
| CTIO_COMPLETION_HANDLE_MARK
;
1606 pkt
->nport_handle
= prm
->cmd
->loop_id
;
1607 pkt
->timeout
= __constant_cpu_to_le16(QLA_TGT_TIMEOUT
);
1608 pkt
->initiator_id
[0] = atio
->u
.isp24
.fcp_hdr
.s_id
[2];
1609 pkt
->initiator_id
[1] = atio
->u
.isp24
.fcp_hdr
.s_id
[1];
1610 pkt
->initiator_id
[2] = atio
->u
.isp24
.fcp_hdr
.s_id
[0];
1611 pkt
->exchange_addr
= atio
->u
.isp24
.exchange_addr
;
1612 pkt
->u
.status0
.flags
|= (atio
->u
.isp24
.attr
<< 9);
1613 pkt
->u
.status0
.ox_id
= swab16(atio
->u
.isp24
.fcp_hdr
.ox_id
);
1614 pkt
->u
.status0
.relative_offset
= cpu_to_le32(prm
->cmd
->offset
);
1616 ql_dbg(ql_dbg_tgt
, vha
, 0xe00c,
1617 "qla_target(%d): handle(cmd) -> %08x, timeout %d, ox_id %#x\n",
1618 vha
->vp_idx
, pkt
->handle
, QLA_TGT_TIMEOUT
,
1619 le16_to_cpu(pkt
->u
.status0
.ox_id
));
1624 * ha->hardware_lock supposed to be held on entry. We have already made sure
1625 * that there is sufficient amount of request entries to not drop it.
1627 static void qlt_load_cont_data_segments(struct qla_tgt_prm
*prm
,
1628 struct scsi_qla_host
*vha
)
1631 uint32_t *dword_ptr
;
1632 int enable_64bit_addressing
= prm
->tgt
->tgt_enable_64bit_addr
;
1634 /* Build continuation packets */
1635 while (prm
->seg_cnt
> 0) {
1636 cont_a64_entry_t
*cont_pkt64
=
1637 (cont_a64_entry_t
*)qlt_get_req_pkt(vha
);
1640 * Make sure that from cont_pkt64 none of
1641 * 64-bit specific fields used for 32-bit
1642 * addressing. Cast to (cont_entry_t *) for
1646 memset(cont_pkt64
, 0, sizeof(*cont_pkt64
));
1648 cont_pkt64
->entry_count
= 1;
1649 cont_pkt64
->sys_define
= 0;
1651 if (enable_64bit_addressing
) {
1652 cont_pkt64
->entry_type
= CONTINUE_A64_TYPE
;
1654 (uint32_t *)&cont_pkt64
->dseg_0_address
;
1656 cont_pkt64
->entry_type
= CONTINUE_TYPE
;
1658 (uint32_t *)&((cont_entry_t
*)
1659 cont_pkt64
)->dseg_0_address
;
1662 /* Load continuation entry data segments */
1664 cnt
< prm
->tgt
->datasegs_per_cont
&& prm
->seg_cnt
;
1665 cnt
++, prm
->seg_cnt
--) {
1667 cpu_to_le32(pci_dma_lo32
1668 (sg_dma_address(prm
->sg
)));
1669 if (enable_64bit_addressing
) {
1671 cpu_to_le32(pci_dma_hi32
1675 *dword_ptr
++ = cpu_to_le32(sg_dma_len(prm
->sg
));
1677 ql_dbg(ql_dbg_tgt
, vha
, 0xe00d,
1678 "S/G Segment Cont. phys_addr=%llx:%llx, len=%d\n",
1679 (long long unsigned int)
1680 pci_dma_hi32(sg_dma_address(prm
->sg
)),
1681 (long long unsigned int)
1682 pci_dma_lo32(sg_dma_address(prm
->sg
)),
1683 (int)sg_dma_len(prm
->sg
));
1685 prm
->sg
= sg_next(prm
->sg
);
1691 * ha->hardware_lock supposed to be held on entry. We have already made sure
1692 * that there is sufficient amount of request entries to not drop it.
1694 static void qlt_load_data_segments(struct qla_tgt_prm
*prm
,
1695 struct scsi_qla_host
*vha
)
1698 uint32_t *dword_ptr
;
1699 int enable_64bit_addressing
= prm
->tgt
->tgt_enable_64bit_addr
;
1700 struct ctio7_to_24xx
*pkt24
= (struct ctio7_to_24xx
*)prm
->pkt
;
1702 ql_dbg(ql_dbg_tgt
, vha
, 0xe00e,
1703 "iocb->scsi_status=%x, iocb->flags=%x\n",
1704 le16_to_cpu(pkt24
->u
.status0
.scsi_status
),
1705 le16_to_cpu(pkt24
->u
.status0
.flags
));
1707 pkt24
->u
.status0
.transfer_length
= cpu_to_le32(prm
->cmd
->bufflen
);
1709 /* Setup packet address segment pointer */
1710 dword_ptr
= pkt24
->u
.status0
.dseg_0_address
;
1712 /* Set total data segment count */
1714 pkt24
->dseg_count
= cpu_to_le16(prm
->seg_cnt
);
1716 if (prm
->seg_cnt
== 0) {
1717 /* No data transfer */
1723 /* If scatter gather */
1724 ql_dbg(ql_dbg_tgt
, vha
, 0xe00f, "%s", "Building S/G data segments...");
1726 /* Load command entry data segments */
1728 (cnt
< prm
->tgt
->datasegs_per_cmd
) && prm
->seg_cnt
;
1729 cnt
++, prm
->seg_cnt
--) {
1731 cpu_to_le32(pci_dma_lo32(sg_dma_address(prm
->sg
)));
1732 if (enable_64bit_addressing
) {
1734 cpu_to_le32(pci_dma_hi32(
1735 sg_dma_address(prm
->sg
)));
1737 *dword_ptr
++ = cpu_to_le32(sg_dma_len(prm
->sg
));
1739 ql_dbg(ql_dbg_tgt
, vha
, 0xe010,
1740 "S/G Segment phys_addr=%llx:%llx, len=%d\n",
1741 (long long unsigned int)pci_dma_hi32(sg_dma_address(
1743 (long long unsigned int)pci_dma_lo32(sg_dma_address(
1745 (int)sg_dma_len(prm
->sg
));
1747 prm
->sg
= sg_next(prm
->sg
);
1750 qlt_load_cont_data_segments(prm
, vha
);
1753 static inline int qlt_has_data(struct qla_tgt_cmd
*cmd
)
1755 return cmd
->bufflen
> 0;
1759 * Called without ha->hardware_lock held
1761 static int qlt_pre_xmit_response(struct qla_tgt_cmd
*cmd
,
1762 struct qla_tgt_prm
*prm
, int xmit_type
, uint8_t scsi_status
,
1763 uint32_t *full_req_cnt
)
1765 struct qla_tgt
*tgt
= cmd
->tgt
;
1766 struct scsi_qla_host
*vha
= tgt
->vha
;
1767 struct qla_hw_data
*ha
= vha
->hw
;
1768 struct se_cmd
*se_cmd
= &cmd
->se_cmd
;
1770 if (unlikely(cmd
->aborted
)) {
1771 ql_dbg(ql_dbg_tgt_mgt
, vha
, 0xf014,
1772 "qla_target(%d): terminating exchange "
1773 "for aborted cmd=%p (se_cmd=%p, tag=%d)", vha
->vp_idx
, cmd
,
1776 cmd
->state
= QLA_TGT_STATE_ABORTED
;
1778 qlt_send_term_exchange(vha
, cmd
, &cmd
->atio
, 0);
1780 /* !! At this point cmd could be already freed !! */
1781 return QLA_TGT_PRE_XMIT_RESP_CMD_ABORTED
;
1784 ql_dbg(ql_dbg_tgt
, vha
, 0xe011, "qla_target(%d): tag=%u\n",
1785 vha
->vp_idx
, cmd
->tag
);
1789 prm
->rq_result
= scsi_status
;
1790 prm
->sense_buffer
= &cmd
->sense_buffer
[0];
1791 prm
->sense_buffer_len
= TRANSPORT_SENSE_BUFFER
;
1795 prm
->add_status_pkt
= 0;
1797 ql_dbg(ql_dbg_tgt
, vha
, 0xe012, "rq_result=%x, xmit_type=%x\n",
1798 prm
->rq_result
, xmit_type
);
1800 /* Send marker if required */
1801 if (qlt_issue_marker(vha
, 0) != QLA_SUCCESS
)
1804 ql_dbg(ql_dbg_tgt
, vha
, 0xe013, "CTIO start: vha(%d)\n", vha
->vp_idx
);
1806 if ((xmit_type
& QLA_TGT_XMIT_DATA
) && qlt_has_data(cmd
)) {
1807 if (qlt_pci_map_calc_cnt(prm
) != 0)
1811 *full_req_cnt
= prm
->req_cnt
;
1813 if (se_cmd
->se_cmd_flags
& SCF_UNDERFLOW_BIT
) {
1814 prm
->residual
= se_cmd
->residual_count
;
1815 ql_dbg(ql_dbg_tgt
, vha
, 0xe014,
1816 "Residual underflow: %d (tag %d, "
1817 "op %x, bufflen %d, rq_result %x)\n", prm
->residual
,
1818 cmd
->tag
, se_cmd
->t_task_cdb
? se_cmd
->t_task_cdb
[0] : 0,
1819 cmd
->bufflen
, prm
->rq_result
);
1820 prm
->rq_result
|= SS_RESIDUAL_UNDER
;
1821 } else if (se_cmd
->se_cmd_flags
& SCF_OVERFLOW_BIT
) {
1822 prm
->residual
= se_cmd
->residual_count
;
1823 ql_dbg(ql_dbg_tgt
, vha
, 0xe015,
1824 "Residual overflow: %d (tag %d, "
1825 "op %x, bufflen %d, rq_result %x)\n", prm
->residual
,
1826 cmd
->tag
, se_cmd
->t_task_cdb
? se_cmd
->t_task_cdb
[0] : 0,
1827 cmd
->bufflen
, prm
->rq_result
);
1828 prm
->rq_result
|= SS_RESIDUAL_OVER
;
1831 if (xmit_type
& QLA_TGT_XMIT_STATUS
) {
1833 * If QLA_TGT_XMIT_DATA is not set, add_status_pkt will be
1834 * ignored in *xmit_response() below
1836 if (qlt_has_data(cmd
)) {
1837 if (QLA_TGT_SENSE_VALID(prm
->sense_buffer
) ||
1838 (IS_FWI2_CAPABLE(ha
) &&
1839 (prm
->rq_result
!= 0))) {
1840 prm
->add_status_pkt
= 1;
1846 ql_dbg(ql_dbg_tgt
, vha
, 0xe016,
1847 "req_cnt=%d, full_req_cnt=%d, add_status_pkt=%d\n",
1848 prm
->req_cnt
, *full_req_cnt
, prm
->add_status_pkt
);
1853 static inline int qlt_need_explicit_conf(struct qla_hw_data
*ha
,
1854 struct qla_tgt_cmd
*cmd
, int sending_sense
)
1856 if (ha
->tgt
.enable_class_2
)
1860 return cmd
->conf_compl_supported
;
1862 return ha
->tgt
.enable_explicit_conf
&&
1863 cmd
->conf_compl_supported
;
#ifdef CONFIG_QLA_TGT_DEBUG_SRR
/*
 *  Original taken from the XFS code
 */
static unsigned long qlt_srr_random(void)
{
	static int rnd_inited;
	static unsigned long RandomValue;
	static DEFINE_SPINLOCK(lock);
	/* cycles pseudo-randomly through all values between 1 and 2^31 - 2 */
	register long rv;
	register long lo;
	register long hi;
	unsigned long flags;

	spin_lock_irqsave(&lock, flags);
	if (!rnd_inited) {
		RandomValue = jiffies;
		rnd_inited = 1;
	}
	rv = RandomValue;
	hi = rv / 127773;
	lo = rv % 127773;
	rv = 16807 * lo - 2836 * hi;
	if (rv <= 0)
		rv += 2147483646;
	RandomValue = rv;
	spin_unlock_irqrestore(&lock, flags);
	return rv;
}

/* Debug-only: randomly truncate the data buffer head/tail of a FCP READ
 * to provoke SRR (sequence retransmission request) handling. */
static void qlt_check_srr_debug(struct qla_tgt_cmd *cmd, int *xmit_type)
{
#if 0 /* This is not a real status packets lost, so it won't lead to SRR */
	if ((*xmit_type & QLA_TGT_XMIT_STATUS) && (qlt_srr_random() % 200)
	    == 50) {
		*xmit_type &= ~QLA_TGT_XMIT_STATUS;
		ql_dbg(ql_dbg_tgt_mgt, cmd->vha, 0xf015,
		    "Dropping cmd %p (tag %d) status", cmd, cmd->tag);
	}
#endif
	/*
	 * It's currently not possible to simulate SRRs for FCP_WRITE without
	 * a physical link layer failure, so don't even try here..
	 */
	if (cmd->dma_data_direction != DMA_FROM_DEVICE)
		return;

	if (qlt_has_data(cmd) && (cmd->sg_cnt > 1) &&
	    ((qlt_srr_random() % 100) == 20)) {
		int i, leave = 0;
		unsigned int tot_len = 0;

		while (leave == 0)
			leave = qlt_srr_random() % cmd->sg_cnt;

		for (i = 0; i < leave; i++)
			tot_len += cmd->sg[i].length;

		ql_dbg(ql_dbg_tgt_mgt, cmd->vha, 0xf016,
		    "Cutting cmd %p (tag %d) buffer"
		    " tail to len %d, sg_cnt %d (cmd->bufflen %d,"
		    " cmd->sg_cnt %d)", cmd, cmd->tag, tot_len, leave,
		    cmd->bufflen, cmd->sg_cnt);

		cmd->bufflen = tot_len;
		cmd->sg_cnt = leave;
	}

	if (qlt_has_data(cmd) && ((qlt_srr_random() % 100) == 70)) {
		unsigned int offset = qlt_srr_random() % cmd->bufflen;

		ql_dbg(ql_dbg_tgt_mgt, cmd->vha, 0xf017,
		    "Cutting cmd %p (tag %d) buffer head "
		    "to offset %d (cmd->bufflen %d)", cmd, cmd->tag, offset,
		    cmd->bufflen);
		if (offset == 0)
			*xmit_type &= ~QLA_TGT_XMIT_DATA;
		else if (qlt_set_data_offset(cmd, offset)) {
			ql_dbg(ql_dbg_tgt_mgt, cmd->vha, 0xf018,
			    "qlt_set_data_offset() failed (tag %d)", cmd->tag);
		}
	}
}
#else
static inline void qlt_check_srr_debug(struct qla_tgt_cmd *cmd, int *xmit_type)
{}
#endif
1955 static void qlt_24xx_init_ctio_to_isp(struct ctio7_to_24xx
*ctio
,
1956 struct qla_tgt_prm
*prm
)
1958 prm
->sense_buffer_len
= min_t(uint32_t, prm
->sense_buffer_len
,
1959 (uint32_t)sizeof(ctio
->u
.status1
.sense_data
));
1960 ctio
->u
.status0
.flags
|=
1961 __constant_cpu_to_le16(CTIO7_FLAGS_SEND_STATUS
);
1962 if (qlt_need_explicit_conf(prm
->tgt
->ha
, prm
->cmd
, 0)) {
1963 ctio
->u
.status0
.flags
|= __constant_cpu_to_le16(
1964 CTIO7_FLAGS_EXPLICIT_CONFORM
|
1965 CTIO7_FLAGS_CONFORM_REQ
);
1967 ctio
->u
.status0
.residual
= cpu_to_le32(prm
->residual
);
1968 ctio
->u
.status0
.scsi_status
= cpu_to_le16(prm
->rq_result
);
1969 if (QLA_TGT_SENSE_VALID(prm
->sense_buffer
)) {
1972 if (qlt_need_explicit_conf(prm
->tgt
->ha
, prm
->cmd
, 1)) {
1973 if (prm
->cmd
->se_cmd
.scsi_status
!= 0) {
1974 ql_dbg(ql_dbg_tgt
, prm
->cmd
->vha
, 0xe017,
1975 "Skipping EXPLICIT_CONFORM and "
1976 "CTIO7_FLAGS_CONFORM_REQ for FCP READ w/ "
1977 "non GOOD status\n");
1978 goto skip_explict_conf
;
1980 ctio
->u
.status1
.flags
|= __constant_cpu_to_le16(
1981 CTIO7_FLAGS_EXPLICIT_CONFORM
|
1982 CTIO7_FLAGS_CONFORM_REQ
);
1985 ctio
->u
.status1
.flags
&=
1986 ~__constant_cpu_to_le16(CTIO7_FLAGS_STATUS_MODE_0
);
1987 ctio
->u
.status1
.flags
|=
1988 __constant_cpu_to_le16(CTIO7_FLAGS_STATUS_MODE_1
);
1989 ctio
->u
.status1
.scsi_status
|=
1990 __constant_cpu_to_le16(SS_SENSE_LEN_VALID
);
1991 ctio
->u
.status1
.sense_length
=
1992 cpu_to_le16(prm
->sense_buffer_len
);
1993 for (i
= 0; i
< prm
->sense_buffer_len
/4; i
++)
1994 ((uint32_t *)ctio
->u
.status1
.sense_data
)[i
] =
1995 cpu_to_be32(((uint32_t *)prm
->sense_buffer
)[i
]);
1997 if (unlikely((prm
->sense_buffer_len
% 4) != 0)) {
2000 ql_dbg(ql_dbg_tgt
, vha
, 0xe04f,
2001 "qla_target(%d): %d bytes of sense "
2002 "lost", prm
->tgt
->ha
->vp_idx
,
2003 prm
->sense_buffer_len
% 4);
2009 ctio
->u
.status1
.flags
&=
2010 ~__constant_cpu_to_le16(CTIO7_FLAGS_STATUS_MODE_0
);
2011 ctio
->u
.status1
.flags
|=
2012 __constant_cpu_to_le16(CTIO7_FLAGS_STATUS_MODE_1
);
2013 ctio
->u
.status1
.sense_length
= 0;
2014 memset(ctio
->u
.status1
.sense_data
, 0,
2015 sizeof(ctio
->u
.status1
.sense_data
));
2018 /* Sense with len > 24, is it possible ??? */
2022 * Callback to setup response of xmit_type of QLA_TGT_XMIT_DATA and *
2023 * QLA_TGT_XMIT_STATUS for >= 24xx silicon
2025 int qlt_xmit_response(struct qla_tgt_cmd
*cmd
, int xmit_type
,
2026 uint8_t scsi_status
)
2028 struct scsi_qla_host
*vha
= cmd
->vha
;
2029 struct qla_hw_data
*ha
= vha
->hw
;
2030 struct ctio7_to_24xx
*pkt
;
2031 struct qla_tgt_prm prm
;
2032 uint32_t full_req_cnt
= 0;
2033 unsigned long flags
= 0;
2036 memset(&prm
, 0, sizeof(prm
));
2037 qlt_check_srr_debug(cmd
, &xmit_type
);
2039 ql_dbg(ql_dbg_tgt
, cmd
->vha
, 0xe018,
2040 "is_send_status=%d, cmd->bufflen=%d, cmd->sg_cnt=%d, "
2041 "cmd->dma_data_direction=%d\n", (xmit_type
& QLA_TGT_XMIT_STATUS
) ?
2042 1 : 0, cmd
->bufflen
, cmd
->sg_cnt
, cmd
->dma_data_direction
);
2044 res
= qlt_pre_xmit_response(cmd
, &prm
, xmit_type
, scsi_status
,
2046 if (unlikely(res
!= 0)) {
2047 if (res
== QLA_TGT_PRE_XMIT_RESP_CMD_ABORTED
)
2053 spin_lock_irqsave(&ha
->hardware_lock
, flags
);
2055 /* Does F/W have an IOCBs for this request */
2056 res
= qlt_check_reserve_free_req(vha
, full_req_cnt
);
2058 goto out_unmap_unlock
;
2060 res
= qlt_24xx_build_ctio_pkt(&prm
, vha
);
2061 if (unlikely(res
!= 0))
2062 goto out_unmap_unlock
;
2065 pkt
= (struct ctio7_to_24xx
*)prm
.pkt
;
2067 if (qlt_has_data(cmd
) && (xmit_type
& QLA_TGT_XMIT_DATA
)) {
2068 pkt
->u
.status0
.flags
|=
2069 __constant_cpu_to_le16(CTIO7_FLAGS_DATA_IN
|
2070 CTIO7_FLAGS_STATUS_MODE_0
);
2072 qlt_load_data_segments(&prm
, vha
);
2074 if (prm
.add_status_pkt
== 0) {
2075 if (xmit_type
& QLA_TGT_XMIT_STATUS
) {
2076 pkt
->u
.status0
.scsi_status
=
2077 cpu_to_le16(prm
.rq_result
);
2078 pkt
->u
.status0
.residual
=
2079 cpu_to_le32(prm
.residual
);
2080 pkt
->u
.status0
.flags
|= __constant_cpu_to_le16(
2081 CTIO7_FLAGS_SEND_STATUS
);
2082 if (qlt_need_explicit_conf(ha
, cmd
, 0)) {
2083 pkt
->u
.status0
.flags
|=
2084 __constant_cpu_to_le16(
2085 CTIO7_FLAGS_EXPLICIT_CONFORM
|
2086 CTIO7_FLAGS_CONFORM_REQ
);
2092 * We have already made sure that there is sufficient
2093 * amount of request entries to not drop HW lock in
2096 struct ctio7_to_24xx
*ctio
=
2097 (struct ctio7_to_24xx
*)qlt_get_req_pkt(vha
);
2099 ql_dbg(ql_dbg_tgt
, vha
, 0xe019,
2100 "Building additional status packet\n");
2102 memcpy(ctio
, pkt
, sizeof(*ctio
));
2103 ctio
->entry_count
= 1;
2104 ctio
->dseg_count
= 0;
2105 ctio
->u
.status1
.flags
&= ~__constant_cpu_to_le16(
2106 CTIO7_FLAGS_DATA_IN
);
2108 /* Real finish is ctio_m1's finish */
2109 pkt
->handle
|= CTIO_INTERMEDIATE_HANDLE_MARK
;
2110 pkt
->u
.status0
.flags
|= __constant_cpu_to_le16(
2111 CTIO7_FLAGS_DONT_RET_CTIO
);
2112 qlt_24xx_init_ctio_to_isp((struct ctio7_to_24xx
*)ctio
,
2114 pr_debug("Status CTIO7: %p\n", ctio
);
2117 qlt_24xx_init_ctio_to_isp(pkt
, &prm
);
2120 cmd
->state
= QLA_TGT_STATE_PROCESSED
; /* Mid-level is done processing */
2122 ql_dbg(ql_dbg_tgt
, vha
, 0xe01a,
2123 "Xmitting CTIO7 response pkt for 24xx: %p scsi_status: 0x%02x\n",
2126 qla2x00_start_iocbs(vha
, vha
->req
);
2127 spin_unlock_irqrestore(&ha
->hardware_lock
, flags
);
2133 qlt_unmap_sg(vha
, cmd
);
2134 spin_unlock_irqrestore(&ha
->hardware_lock
, flags
);
2138 EXPORT_SYMBOL(qlt_xmit_response
);
2140 int qlt_rdy_to_xfer(struct qla_tgt_cmd
*cmd
)
2142 struct ctio7_to_24xx
*pkt
;
2143 struct scsi_qla_host
*vha
= cmd
->vha
;
2144 struct qla_hw_data
*ha
= vha
->hw
;
2145 struct qla_tgt
*tgt
= cmd
->tgt
;
2146 struct qla_tgt_prm prm
;
2147 unsigned long flags
;
2150 memset(&prm
, 0, sizeof(prm
));
2156 /* Send marker if required */
2157 if (qlt_issue_marker(vha
, 0) != QLA_SUCCESS
)
2160 ql_dbg(ql_dbg_tgt
, vha
, 0xe01b, "CTIO_start: vha(%d)",
2163 /* Calculate number of entries and segments required */
2164 if (qlt_pci_map_calc_cnt(&prm
) != 0)
2167 spin_lock_irqsave(&ha
->hardware_lock
, flags
);
2169 /* Does F/W have an IOCBs for this request */
2170 res
= qlt_check_reserve_free_req(vha
, prm
.req_cnt
);
2172 goto out_unlock_free_unmap
;
2174 res
= qlt_24xx_build_ctio_pkt(&prm
, vha
);
2175 if (unlikely(res
!= 0))
2176 goto out_unlock_free_unmap
;
2177 pkt
= (struct ctio7_to_24xx
*)prm
.pkt
;
2178 pkt
->u
.status0
.flags
|= __constant_cpu_to_le16(CTIO7_FLAGS_DATA_OUT
|
2179 CTIO7_FLAGS_STATUS_MODE_0
);
2180 qlt_load_data_segments(&prm
, vha
);
2182 cmd
->state
= QLA_TGT_STATE_NEED_DATA
;
2184 qla2x00_start_iocbs(vha
, vha
->req
);
2185 spin_unlock_irqrestore(&ha
->hardware_lock
, flags
);
2189 out_unlock_free_unmap
:
2191 qlt_unmap_sg(vha
, cmd
);
2192 spin_unlock_irqrestore(&ha
->hardware_lock
, flags
);
2196 EXPORT_SYMBOL(qlt_rdy_to_xfer
);
2198 /* If hardware_lock held on entry, might drop it, then reaquire */
2199 /* This function sends the appropriate CTIO to ISP 2xxx or 24xx */
2200 static int __qlt_send_term_exchange(struct scsi_qla_host
*vha
,
2201 struct qla_tgt_cmd
*cmd
,
2202 struct atio_from_isp
*atio
)
2204 struct ctio7_to_24xx
*ctio24
;
2205 struct qla_hw_data
*ha
= vha
->hw
;
2209 ql_dbg(ql_dbg_tgt
, vha
, 0xe01c, "Sending TERM EXCH CTIO (ha=%p)\n", ha
);
2211 pkt
= (request_t
*)qla2x00_alloc_iocbs(vha
, NULL
);
2213 ql_dbg(ql_dbg_tgt
, vha
, 0xe050,
2214 "qla_target(%d): %s failed: unable to allocate "
2215 "request packet\n", vha
->vp_idx
, __func__
);
2220 if (cmd
->state
< QLA_TGT_STATE_PROCESSED
) {
2221 ql_dbg(ql_dbg_tgt
, vha
, 0xe051,
2222 "qla_target(%d): Terminating cmd %p with "
2223 "incorrect state %d\n", vha
->vp_idx
, cmd
,
2229 pkt
->entry_count
= 1;
2230 pkt
->handle
= QLA_TGT_SKIP_HANDLE
| CTIO_COMPLETION_HANDLE_MARK
;
2232 ctio24
= (struct ctio7_to_24xx
*)pkt
;
2233 ctio24
->entry_type
= CTIO_TYPE7
;
2234 ctio24
->nport_handle
= cmd
? cmd
->loop_id
: CTIO7_NHANDLE_UNRECOGNIZED
;
2235 ctio24
->timeout
= __constant_cpu_to_le16(QLA_TGT_TIMEOUT
);
2236 ctio24
->vp_index
= vha
->vp_idx
;
2237 ctio24
->initiator_id
[0] = atio
->u
.isp24
.fcp_hdr
.s_id
[2];
2238 ctio24
->initiator_id
[1] = atio
->u
.isp24
.fcp_hdr
.s_id
[1];
2239 ctio24
->initiator_id
[2] = atio
->u
.isp24
.fcp_hdr
.s_id
[0];
2240 ctio24
->exchange_addr
= atio
->u
.isp24
.exchange_addr
;
2241 ctio24
->u
.status1
.flags
= (atio
->u
.isp24
.attr
<< 9) |
2242 __constant_cpu_to_le16(CTIO7_FLAGS_STATUS_MODE_1
|
2243 CTIO7_FLAGS_TERMINATE
);
2244 ctio24
->u
.status1
.ox_id
= swab16(atio
->u
.isp24
.fcp_hdr
.ox_id
);
2246 /* Most likely, it isn't needed */
2247 ctio24
->u
.status1
.residual
= get_unaligned((uint32_t *)
2248 &atio
->u
.isp24
.fcp_cmnd
.add_cdb
[
2249 atio
->u
.isp24
.fcp_cmnd
.add_cdb_len
]);
2250 if (ctio24
->u
.status1
.residual
!= 0)
2251 ctio24
->u
.status1
.scsi_status
|= SS_RESIDUAL_UNDER
;
2253 qla2x00_start_iocbs(vha
, vha
->req
);
2257 static void qlt_send_term_exchange(struct scsi_qla_host
*vha
,
2258 struct qla_tgt_cmd
*cmd
, struct atio_from_isp
*atio
, int ha_locked
)
2260 unsigned long flags
;
2263 if (qlt_issue_marker(vha
, ha_locked
) < 0)
2267 rc
= __qlt_send_term_exchange(vha
, cmd
, atio
);
2270 spin_lock_irqsave(&vha
->hw
->hardware_lock
, flags
);
2271 rc
= __qlt_send_term_exchange(vha
, cmd
, atio
);
2272 spin_unlock_irqrestore(&vha
->hw
->hardware_lock
, flags
);
2275 if (!ha_locked
&& !in_interrupt())
2276 msleep(250); /* just in case */
2278 vha
->hw
->tgt
.tgt_ops
->free_cmd(cmd
);
2282 void qlt_free_cmd(struct qla_tgt_cmd
*cmd
)
2284 BUG_ON(cmd
->sg_mapped
);
2286 if (unlikely(cmd
->free_sg
))
2288 kmem_cache_free(qla_tgt_cmd_cachep
, cmd
);
2290 EXPORT_SYMBOL(qlt_free_cmd
);
2292 /* ha->hardware_lock supposed to be held on entry */
2293 static int qlt_prepare_srr_ctio(struct scsi_qla_host
*vha
,
2294 struct qla_tgt_cmd
*cmd
, void *ctio
)
2296 struct qla_tgt_srr_ctio
*sc
;
2297 struct qla_hw_data
*ha
= vha
->hw
;
2298 struct qla_tgt
*tgt
= ha
->tgt
.qla_tgt
;
2299 struct qla_tgt_srr_imm
*imm
;
2303 ql_dbg(ql_dbg_tgt_mgt
, vha
, 0xf019,
2304 "qla_target(%d): CTIO with SRR status received\n", vha
->vp_idx
);
2307 ql_dbg(ql_dbg_tgt_mgt
, vha
, 0xf055,
2308 "qla_target(%d): SRR CTIO, but ctio is NULL\n",
2313 sc
= kzalloc(sizeof(*sc
), GFP_ATOMIC
);
2316 /* IRQ is already OFF */
2317 spin_lock(&tgt
->srr_lock
);
2318 sc
->srr_id
= tgt
->ctio_srr_id
;
2319 list_add_tail(&sc
->srr_list_entry
,
2320 &tgt
->srr_ctio_list
);
2321 ql_dbg(ql_dbg_tgt_mgt
, vha
, 0xf01a,
2322 "CTIO SRR %p added (id %d)\n", sc
, sc
->srr_id
);
2323 if (tgt
->imm_srr_id
== tgt
->ctio_srr_id
) {
2325 list_for_each_entry(imm
, &tgt
->srr_imm_list
,
2327 if (imm
->srr_id
== sc
->srr_id
) {
2333 ql_dbg(ql_dbg_tgt_mgt
, vha
, 0xf01b,
2334 "Scheduling srr work\n");
2335 schedule_work(&tgt
->srr_work
);
2337 ql_dbg(ql_dbg_tgt_mgt
, vha
, 0xf056,
2338 "qla_target(%d): imm_srr_id "
2339 "== ctio_srr_id (%d), but there is no "
2340 "corresponding SRR IMM, deleting CTIO "
2341 "SRR %p\n", vha
->vp_idx
,
2342 tgt
->ctio_srr_id
, sc
);
2343 list_del(&sc
->srr_list_entry
);
2344 spin_unlock(&tgt
->srr_lock
);
2350 spin_unlock(&tgt
->srr_lock
);
2352 struct qla_tgt_srr_imm
*ti
;
2354 ql_dbg(ql_dbg_tgt_mgt
, vha
, 0xf057,
2355 "qla_target(%d): Unable to allocate SRR CTIO entry\n",
2357 spin_lock(&tgt
->srr_lock
);
2358 list_for_each_entry_safe(imm
, ti
, &tgt
->srr_imm_list
,
2360 if (imm
->srr_id
== tgt
->ctio_srr_id
) {
2361 ql_dbg(ql_dbg_tgt_mgt
, vha
, 0xf01c,
2362 "IMM SRR %p deleted (id %d)\n",
2364 list_del(&imm
->srr_list_entry
);
2365 qlt_reject_free_srr_imm(vha
, imm
, 1);
2368 spin_unlock(&tgt
->srr_lock
);
2377 * ha->hardware_lock supposed to be held on entry. Might drop it, then reaquire
2379 static int qlt_term_ctio_exchange(struct scsi_qla_host
*vha
, void *ctio
,
2380 struct qla_tgt_cmd
*cmd
, uint32_t status
)
2385 struct ctio7_from_24xx
*c
= (struct ctio7_from_24xx
*)ctio
;
2387 __constant_cpu_to_le16(OF_TERM_EXCH
));
2392 qlt_send_term_exchange(vha
, cmd
, &cmd
->atio
, 1);
2397 /* ha->hardware_lock supposed to be held on entry */
2398 static inline struct qla_tgt_cmd
*qlt_get_cmd(struct scsi_qla_host
*vha
,
2401 struct qla_hw_data
*ha
= vha
->hw
;
2404 if (ha
->tgt
.cmds
[handle
] != NULL
) {
2405 struct qla_tgt_cmd
*cmd
= ha
->tgt
.cmds
[handle
];
2406 ha
->tgt
.cmds
[handle
] = NULL
;
2412 /* ha->hardware_lock supposed to be held on entry */
2413 static struct qla_tgt_cmd
*qlt_ctio_to_cmd(struct scsi_qla_host
*vha
,
2414 uint32_t handle
, void *ctio
)
2416 struct qla_tgt_cmd
*cmd
= NULL
;
2418 /* Clear out internal marks */
2419 handle
&= ~(CTIO_COMPLETION_HANDLE_MARK
|
2420 CTIO_INTERMEDIATE_HANDLE_MARK
);
2422 if (handle
!= QLA_TGT_NULL_HANDLE
) {
2423 if (unlikely(handle
== QLA_TGT_SKIP_HANDLE
)) {
2424 ql_dbg(ql_dbg_tgt
, vha
, 0xe01d, "%s",
2425 "SKIP_HANDLE CTIO\n");
2428 /* handle-1 is actually used */
2429 if (unlikely(handle
> MAX_OUTSTANDING_COMMANDS
)) {
2430 ql_dbg(ql_dbg_tgt
, vha
, 0xe052,
2431 "qla_target(%d): Wrong handle %x received\n",
2432 vha
->vp_idx
, handle
);
2435 cmd
= qlt_get_cmd(vha
, handle
);
2436 if (unlikely(cmd
== NULL
)) {
2437 ql_dbg(ql_dbg_tgt
, vha
, 0xe053,
2438 "qla_target(%d): Suspicious: unable to "
2439 "find the command with handle %x\n", vha
->vp_idx
,
2443 } else if (ctio
!= NULL
) {
2444 /* We can't get loop ID from CTIO7 */
2445 ql_dbg(ql_dbg_tgt
, vha
, 0xe054,
2446 "qla_target(%d): Wrong CTIO received: QLA24xx doesn't "
2447 "support NULL handles\n", vha
->vp_idx
);
2455 * ha->hardware_lock supposed to be held on entry. Might drop it, then reaquire
2457 static void qlt_do_ctio_completion(struct scsi_qla_host
*vha
, uint32_t handle
,
2458 uint32_t status
, void *ctio
)
2460 struct qla_hw_data
*ha
= vha
->hw
;
2461 struct se_cmd
*se_cmd
;
2462 struct target_core_fabric_ops
*tfo
;
2463 struct qla_tgt_cmd
*cmd
;
2465 ql_dbg(ql_dbg_tgt
, vha
, 0xe01e,
2466 "qla_target(%d): handle(ctio %p status %#x) <- %08x\n",
2467 vha
->vp_idx
, ctio
, status
, handle
);
2469 if (handle
& CTIO_INTERMEDIATE_HANDLE_MARK
) {
2470 /* That could happen only in case of an error/reset/abort */
2471 if (status
!= CTIO_SUCCESS
) {
2472 ql_dbg(ql_dbg_tgt_mgt
, vha
, 0xf01d,
2473 "Intermediate CTIO received"
2474 " (status %x)\n", status
);
2479 cmd
= qlt_ctio_to_cmd(vha
, handle
, ctio
);
2481 if (status
!= CTIO_SUCCESS
)
2482 qlt_term_ctio_exchange(vha
, ctio
, NULL
, status
);
2485 se_cmd
= &cmd
->se_cmd
;
2486 tfo
= se_cmd
->se_tfo
;
2489 qlt_unmap_sg(vha
, cmd
);
2491 if (unlikely(status
!= CTIO_SUCCESS
)) {
2492 switch (status
& 0xFFFF) {
2493 case CTIO_LIP_RESET
:
2494 case CTIO_TARGET_RESET
:
2497 case CTIO_INVALID_RX_ID
:
2499 ql_dbg(ql_dbg_tgt_mgt
, vha
, 0xf058,
2500 "qla_target(%d): CTIO with "
2501 "status %#x received, state %x, se_cmd %p, "
2502 "(LIP_RESET=e, ABORTED=2, TARGET_RESET=17, "
2503 "TIMEOUT=b, INVALID_RX_ID=8)\n", vha
->vp_idx
,
2504 status
, cmd
->state
, se_cmd
);
2507 case CTIO_PORT_LOGGED_OUT
:
2508 case CTIO_PORT_UNAVAILABLE
:
2509 ql_dbg(ql_dbg_tgt_mgt
, vha
, 0xf059,
2510 "qla_target(%d): CTIO with PORT LOGGED "
2511 "OUT (29) or PORT UNAVAILABLE (28) status %x "
2512 "received (state %x, se_cmd %p)\n", vha
->vp_idx
,
2513 status
, cmd
->state
, se_cmd
);
2516 case CTIO_SRR_RECEIVED
:
2517 ql_dbg(ql_dbg_tgt_mgt
, vha
, 0xf05a,
2518 "qla_target(%d): CTIO with SRR_RECEIVED"
2519 " status %x received (state %x, se_cmd %p)\n",
2520 vha
->vp_idx
, status
, cmd
->state
, se_cmd
);
2521 if (qlt_prepare_srr_ctio(vha
, cmd
, ctio
) != 0)
2527 ql_dbg(ql_dbg_tgt_mgt
, vha
, 0xf05b,
2528 "qla_target(%d): CTIO with error status "
2529 "0x%x received (state %x, se_cmd %p\n",
2530 vha
->vp_idx
, status
, cmd
->state
, se_cmd
);
2534 if (cmd
->state
!= QLA_TGT_STATE_NEED_DATA
)
2535 if (qlt_term_ctio_exchange(vha
, ctio
, cmd
, status
))
2539 if (cmd
->state
== QLA_TGT_STATE_PROCESSED
) {
2540 ql_dbg(ql_dbg_tgt
, vha
, 0xe01f, "Command %p finished\n", cmd
);
2541 } else if (cmd
->state
== QLA_TGT_STATE_NEED_DATA
) {
2544 cmd
->state
= QLA_TGT_STATE_DATA_IN
;
2546 if (unlikely(status
!= CTIO_SUCCESS
))
2549 cmd
->write_data_transferred
= 1;
2551 ql_dbg(ql_dbg_tgt
, vha
, 0xe020,
2552 "Data received, context %x, rx_status %d\n",
2555 ha
->tgt
.tgt_ops
->handle_data(cmd
);
2557 } else if (cmd
->state
== QLA_TGT_STATE_ABORTED
) {
2558 ql_dbg(ql_dbg_tgt_mgt
, vha
, 0xf01e,
2559 "Aborted command %p (tag %d) finished\n", cmd
, cmd
->tag
);
2561 ql_dbg(ql_dbg_tgt_mgt
, vha
, 0xf05c,
2562 "qla_target(%d): A command in state (%d) should "
2563 "not return a CTIO complete\n", vha
->vp_idx
, cmd
->state
);
2566 if (unlikely(status
!= CTIO_SUCCESS
)) {
2567 ql_dbg(ql_dbg_tgt_mgt
, vha
, 0xf01f, "Finishing failed CTIO\n");
2571 ha
->tgt
.tgt_ops
->free_cmd(cmd
);
2574 /* ha->hardware_lock supposed to be held on entry */
2575 /* called via callback from qla2xxx */
2576 void qlt_ctio_completion(struct scsi_qla_host
*vha
, uint32_t handle
)
2578 struct qla_hw_data
*ha
= vha
->hw
;
2579 struct qla_tgt
*tgt
= ha
->tgt
.qla_tgt
;
2581 if (likely(tgt
== NULL
)) {
2582 ql_dbg(ql_dbg_tgt
, vha
, 0xe021,
2583 "CTIO, but target mode not enabled"
2584 " (ha %d %p handle %#x)", vha
->vp_idx
, ha
, handle
);
2588 tgt
->irq_cmd_count
++;
2589 qlt_do_ctio_completion(vha
, handle
, CTIO_SUCCESS
, NULL
);
2590 tgt
->irq_cmd_count
--;
2593 static inline int qlt_get_fcp_task_attr(struct scsi_qla_host
*vha
,
2598 switch (task_codes
) {
2599 case ATIO_SIMPLE_QUEUE
:
2600 fcp_task_attr
= MSG_SIMPLE_TAG
;
2602 case ATIO_HEAD_OF_QUEUE
:
2603 fcp_task_attr
= MSG_HEAD_TAG
;
2605 case ATIO_ORDERED_QUEUE
:
2606 fcp_task_attr
= MSG_ORDERED_TAG
;
2608 case ATIO_ACA_QUEUE
:
2609 fcp_task_attr
= MSG_ACA_TAG
;
2612 fcp_task_attr
= MSG_SIMPLE_TAG
;
2615 ql_dbg(ql_dbg_tgt_mgt
, vha
, 0xf05d,
2616 "qla_target: unknown task code %x, use ORDERED instead\n",
2618 fcp_task_attr
= MSG_ORDERED_TAG
;
2622 return fcp_task_attr
;
2625 static struct qla_tgt_sess
*qlt_make_local_sess(struct scsi_qla_host
*,
2628 * Process context for I/O path into tcm_qla2xxx code
2630 static void qlt_do_work(struct work_struct
*work
)
2632 struct qla_tgt_cmd
*cmd
= container_of(work
, struct qla_tgt_cmd
, work
);
2633 scsi_qla_host_t
*vha
= cmd
->vha
;
2634 struct qla_hw_data
*ha
= vha
->hw
;
2635 struct qla_tgt
*tgt
= ha
->tgt
.qla_tgt
;
2636 struct qla_tgt_sess
*sess
= NULL
;
2637 struct atio_from_isp
*atio
= &cmd
->atio
;
2639 unsigned long flags
;
2640 uint32_t data_length
;
2641 int ret
, fcp_task_attr
, data_dir
, bidi
= 0;
2646 spin_lock_irqsave(&ha
->hardware_lock
, flags
);
2647 sess
= ha
->tgt
.tgt_ops
->find_sess_by_s_id(vha
,
2648 atio
->u
.isp24
.fcp_hdr
.s_id
);
2650 if (unlikely(sess
->tearing_down
)) {
2652 spin_unlock_irqrestore(&ha
->hardware_lock
, flags
);
2656 * Do the extra kref_get() before dropping
2657 * qla_hw_data->hardware_lock.
2659 kref_get(&sess
->se_sess
->sess_kref
);
2662 spin_unlock_irqrestore(&ha
->hardware_lock
, flags
);
2664 if (unlikely(!sess
)) {
2665 uint8_t *s_id
= atio
->u
.isp24
.fcp_hdr
.s_id
;
2667 ql_dbg(ql_dbg_tgt_mgt
, vha
, 0xf022,
2668 "qla_target(%d): Unable to find wwn login"
2669 " (s_id %x:%x:%x), trying to create it manually\n",
2670 vha
->vp_idx
, s_id
[0], s_id
[1], s_id
[2]);
2672 if (atio
->u
.raw
.entry_count
> 1) {
2673 ql_dbg(ql_dbg_tgt_mgt
, vha
, 0xf023,
2674 "Dropping multy entry cmd %p\n", cmd
);
2678 mutex_lock(&ha
->tgt
.tgt_mutex
);
2679 sess
= qlt_make_local_sess(vha
, s_id
);
2680 /* sess has an extra creation ref. */
2681 mutex_unlock(&ha
->tgt
.tgt_mutex
);
2688 cmd
->loop_id
= sess
->loop_id
;
2689 cmd
->conf_compl_supported
= sess
->conf_compl_supported
;
2691 cdb
= &atio
->u
.isp24
.fcp_cmnd
.cdb
[0];
2692 cmd
->tag
= atio
->u
.isp24
.exchange_addr
;
2693 cmd
->unpacked_lun
= scsilun_to_int(
2694 (struct scsi_lun
*)&atio
->u
.isp24
.fcp_cmnd
.lun
);
2696 if (atio
->u
.isp24
.fcp_cmnd
.rddata
&&
2697 atio
->u
.isp24
.fcp_cmnd
.wrdata
) {
2699 data_dir
= DMA_TO_DEVICE
;
2700 } else if (atio
->u
.isp24
.fcp_cmnd
.rddata
)
2701 data_dir
= DMA_FROM_DEVICE
;
2702 else if (atio
->u
.isp24
.fcp_cmnd
.wrdata
)
2703 data_dir
= DMA_TO_DEVICE
;
2705 data_dir
= DMA_NONE
;
2707 fcp_task_attr
= qlt_get_fcp_task_attr(vha
,
2708 atio
->u
.isp24
.fcp_cmnd
.task_attr
);
2709 data_length
= be32_to_cpu(get_unaligned((uint32_t *)
2710 &atio
->u
.isp24
.fcp_cmnd
.add_cdb
[
2711 atio
->u
.isp24
.fcp_cmnd
.add_cdb_len
]));
2713 ql_dbg(ql_dbg_tgt
, vha
, 0xe022,
2714 "qla_target: START qla command: %p lun: 0x%04x (tag %d)\n",
2715 cmd
, cmd
->unpacked_lun
, cmd
->tag
);
2717 ret
= vha
->hw
->tgt
.tgt_ops
->handle_cmd(vha
, cmd
, cdb
, data_length
,
2718 fcp_task_attr
, data_dir
, bidi
);
2722 * Drop extra session reference from qla_tgt_handle_cmd_for_atio*(
2724 ha
->tgt
.tgt_ops
->put_sess(sess
);
2728 ql_dbg(ql_dbg_tgt_mgt
, vha
, 0xf020, "Terminating work cmd %p", cmd
);
2730 * cmd has not sent to target yet, so pass NULL as the second argument
2732 spin_lock_irqsave(&ha
->hardware_lock
, flags
);
2733 qlt_send_term_exchange(vha
, NULL
, &cmd
->atio
, 1);
2734 spin_unlock_irqrestore(&ha
->hardware_lock
, flags
);
2736 ha
->tgt
.tgt_ops
->put_sess(sess
);
2739 /* ha->hardware_lock supposed to be held on entry */
2740 static int qlt_handle_cmd_for_atio(struct scsi_qla_host
*vha
,
2741 struct atio_from_isp
*atio
)
2743 struct qla_hw_data
*ha
= vha
->hw
;
2744 struct qla_tgt
*tgt
= ha
->tgt
.qla_tgt
;
2745 struct qla_tgt_cmd
*cmd
;
2747 if (unlikely(tgt
->tgt_stop
)) {
2748 ql_dbg(ql_dbg_tgt_mgt
, vha
, 0xf021,
2749 "New command while device %p is shutting down\n", tgt
);
2753 cmd
= kmem_cache_zalloc(qla_tgt_cmd_cachep
, GFP_ATOMIC
);
2755 ql_dbg(ql_dbg_tgt_mgt
, vha
, 0xf05e,
2756 "qla_target(%d): Allocation of cmd failed\n", vha
->vp_idx
);
2760 INIT_LIST_HEAD(&cmd
->cmd_list
);
2762 memcpy(&cmd
->atio
, atio
, sizeof(*atio
));
2763 cmd
->state
= QLA_TGT_STATE_NEW
;
2764 cmd
->tgt
= ha
->tgt
.qla_tgt
;
2767 INIT_WORK(&cmd
->work
, qlt_do_work
);
2768 queue_work(qla_tgt_wq
, &cmd
->work
);
2773 /* ha->hardware_lock supposed to be held on entry */
2774 static int qlt_issue_task_mgmt(struct qla_tgt_sess
*sess
, uint32_t lun
,
2775 int fn
, void *iocb
, int flags
)
2777 struct scsi_qla_host
*vha
= sess
->vha
;
2778 struct qla_hw_data
*ha
= vha
->hw
;
2779 struct qla_tgt_mgmt_cmd
*mcmd
;
2783 mcmd
= mempool_alloc(qla_tgt_mgmt_cmd_mempool
, GFP_ATOMIC
);
2785 ql_dbg(ql_dbg_tgt_tmr
, vha
, 0x10009,
2786 "qla_target(%d): Allocation of management "
2787 "command failed, some commands and their data could "
2788 "leak\n", vha
->vp_idx
);
2791 memset(mcmd
, 0, sizeof(*mcmd
));
2795 memcpy(&mcmd
->orig_iocb
.imm_ntfy
, iocb
,
2796 sizeof(mcmd
->orig_iocb
.imm_ntfy
));
2798 mcmd
->tmr_func
= fn
;
2799 mcmd
->flags
= flags
;
2802 case QLA_TGT_CLEAR_ACA
:
2803 ql_dbg(ql_dbg_tgt_tmr
, vha
, 0x10000,
2804 "qla_target(%d): CLEAR_ACA received\n", sess
->vha
->vp_idx
);
2805 tmr_func
= TMR_CLEAR_ACA
;
2808 case QLA_TGT_TARGET_RESET
:
2809 ql_dbg(ql_dbg_tgt_tmr
, vha
, 0x10001,
2810 "qla_target(%d): TARGET_RESET received\n",
2812 tmr_func
= TMR_TARGET_WARM_RESET
;
2815 case QLA_TGT_LUN_RESET
:
2816 ql_dbg(ql_dbg_tgt_tmr
, vha
, 0x10002,
2817 "qla_target(%d): LUN_RESET received\n", sess
->vha
->vp_idx
);
2818 tmr_func
= TMR_LUN_RESET
;
2821 case QLA_TGT_CLEAR_TS
:
2822 ql_dbg(ql_dbg_tgt_tmr
, vha
, 0x10003,
2823 "qla_target(%d): CLEAR_TS received\n", sess
->vha
->vp_idx
);
2824 tmr_func
= TMR_CLEAR_TASK_SET
;
2827 case QLA_TGT_ABORT_TS
:
2828 ql_dbg(ql_dbg_tgt_tmr
, vha
, 0x10004,
2829 "qla_target(%d): ABORT_TS received\n", sess
->vha
->vp_idx
);
2830 tmr_func
= TMR_ABORT_TASK_SET
;
2833 case QLA_TGT_ABORT_ALL
:
2834 ql_dbg(ql_dbg_tgt_tmr
, vha
, 0x10005,
2835 "qla_target(%d): Doing ABORT_ALL_TASKS\n",
2840 case QLA_TGT_ABORT_ALL_SESS
:
2841 ql_dbg(ql_dbg_tgt_tmr
, vha
, 0x10006,
2842 "qla_target(%d): Doing ABORT_ALL_TASKS_SESS\n",
2847 case QLA_TGT_NEXUS_LOSS_SESS
:
2848 ql_dbg(ql_dbg_tgt_tmr
, vha
, 0x10007,
2849 "qla_target(%d): Doing NEXUS_LOSS_SESS\n",
2854 case QLA_TGT_NEXUS_LOSS
:
2855 ql_dbg(ql_dbg_tgt_tmr
, vha
, 0x10008,
2856 "qla_target(%d): Doing NEXUS_LOSS\n", sess
->vha
->vp_idx
);
2861 ql_dbg(ql_dbg_tgt_tmr
, vha
, 0x1000a,
2862 "qla_target(%d): Unknown task mgmt fn 0x%x\n",
2863 sess
->vha
->vp_idx
, fn
);
2864 mempool_free(mcmd
, qla_tgt_mgmt_cmd_mempool
);
2868 res
= ha
->tgt
.tgt_ops
->handle_tmr(mcmd
, lun
, tmr_func
, 0);
2870 ql_dbg(ql_dbg_tgt_tmr
, vha
, 0x1000b,
2871 "qla_target(%d): tgt.tgt_ops->handle_tmr() failed: %d\n",
2872 sess
->vha
->vp_idx
, res
);
2873 mempool_free(mcmd
, qla_tgt_mgmt_cmd_mempool
);
2880 /* ha->hardware_lock supposed to be held on entry */
2881 static int qlt_handle_task_mgmt(struct scsi_qla_host
*vha
, void *iocb
)
2883 struct atio_from_isp
*a
= (struct atio_from_isp
*)iocb
;
2884 struct qla_hw_data
*ha
= vha
->hw
;
2885 struct qla_tgt
*tgt
;
2886 struct qla_tgt_sess
*sess
;
2887 uint32_t lun
, unpacked_lun
;
2890 tgt
= ha
->tgt
.qla_tgt
;
2892 lun
= a
->u
.isp24
.fcp_cmnd
.lun
;
2893 lun_size
= sizeof(a
->u
.isp24
.fcp_cmnd
.lun
);
2894 fn
= a
->u
.isp24
.fcp_cmnd
.task_mgmt_flags
;
2895 sess
= ha
->tgt
.tgt_ops
->find_sess_by_s_id(vha
,
2896 a
->u
.isp24
.fcp_hdr
.s_id
);
2897 unpacked_lun
= scsilun_to_int((struct scsi_lun
*)&lun
);
2900 ql_dbg(ql_dbg_tgt_mgt
, vha
, 0xf024,
2901 "qla_target(%d): task mgmt fn 0x%x for "
2902 "non-existant session\n", vha
->vp_idx
, fn
);
2903 return qlt_sched_sess_work(tgt
, QLA_TGT_SESS_WORK_TM
, iocb
,
2904 sizeof(struct atio_from_isp
));
2907 return qlt_issue_task_mgmt(sess
, unpacked_lun
, fn
, iocb
, 0);
2910 /* ha->hardware_lock supposed to be held on entry */
2911 static int __qlt_abort_task(struct scsi_qla_host
*vha
,
2912 struct imm_ntfy_from_isp
*iocb
, struct qla_tgt_sess
*sess
)
2914 struct atio_from_isp
*a
= (struct atio_from_isp
*)iocb
;
2915 struct qla_hw_data
*ha
= vha
->hw
;
2916 struct qla_tgt_mgmt_cmd
*mcmd
;
2917 uint32_t lun
, unpacked_lun
;
2920 mcmd
= mempool_alloc(qla_tgt_mgmt_cmd_mempool
, GFP_ATOMIC
);
2922 ql_dbg(ql_dbg_tgt_mgt
, vha
, 0xf05f,
2923 "qla_target(%d): %s: Allocation of ABORT cmd failed\n",
2924 vha
->vp_idx
, __func__
);
2927 memset(mcmd
, 0, sizeof(*mcmd
));
2930 memcpy(&mcmd
->orig_iocb
.imm_ntfy
, iocb
,
2931 sizeof(mcmd
->orig_iocb
.imm_ntfy
));
2933 lun
= a
->u
.isp24
.fcp_cmnd
.lun
;
2934 unpacked_lun
= scsilun_to_int((struct scsi_lun
*)&lun
);
2936 rc
= ha
->tgt
.tgt_ops
->handle_tmr(mcmd
, unpacked_lun
, TMR_ABORT_TASK
,
2937 le16_to_cpu(iocb
->u
.isp2x
.seq_id
));
2939 ql_dbg(ql_dbg_tgt_mgt
, vha
, 0xf060,
2940 "qla_target(%d): tgt_ops->handle_tmr() failed: %d\n",
2942 mempool_free(mcmd
, qla_tgt_mgmt_cmd_mempool
);
2949 /* ha->hardware_lock supposed to be held on entry */
2950 static int qlt_abort_task(struct scsi_qla_host
*vha
,
2951 struct imm_ntfy_from_isp
*iocb
)
2953 struct qla_hw_data
*ha
= vha
->hw
;
2954 struct qla_tgt_sess
*sess
;
2957 loop_id
= GET_TARGET_ID(ha
, (struct atio_from_isp
*)iocb
);
2959 sess
= ha
->tgt
.tgt_ops
->find_sess_by_loop_id(vha
, loop_id
);
2961 ql_dbg(ql_dbg_tgt_mgt
, vha
, 0xf025,
2962 "qla_target(%d): task abort for unexisting "
2963 "session\n", vha
->vp_idx
);
2964 return qlt_sched_sess_work(ha
->tgt
.qla_tgt
,
2965 QLA_TGT_SESS_WORK_ABORT
, iocb
, sizeof(*iocb
));
2968 return __qlt_abort_task(vha
, iocb
, sess
);
2972 * ha->hardware_lock supposed to be held on entry. Might drop it, then reaquire
2974 static int qlt_24xx_handle_els(struct scsi_qla_host
*vha
,
2975 struct imm_ntfy_from_isp
*iocb
)
2977 struct qla_hw_data
*ha
= vha
->hw
;
2980 ql_dbg(ql_dbg_tgt_mgt
, vha
, 0xf026,
2981 "qla_target(%d): Port ID: 0x%02x:%02x:%02x"
2982 " ELS opcode: 0x%02x\n", vha
->vp_idx
, iocb
->u
.isp24
.port_id
[0],
2983 iocb
->u
.isp24
.port_id
[1], iocb
->u
.isp24
.port_id
[2],
2984 iocb
->u
.isp24
.status_subcode
);
2986 switch (iocb
->u
.isp24
.status_subcode
) {
2992 res
= qlt_reset(vha
, iocb
, QLA_TGT_NEXUS_LOSS_SESS
);
2997 struct qla_tgt
*tgt
= ha
->tgt
.qla_tgt
;
2998 if (tgt
->link_reinit_iocb_pending
) {
2999 qlt_send_notify_ack(vha
, &tgt
->link_reinit_iocb
,
3001 tgt
->link_reinit_iocb_pending
= 0;
3003 res
= 1; /* send notify ack */
3008 ql_dbg(ql_dbg_tgt_mgt
, vha
, 0xf061,
3009 "qla_target(%d): Unsupported ELS command %x "
3010 "received\n", vha
->vp_idx
, iocb
->u
.isp24
.status_subcode
);
3011 res
= qlt_reset(vha
, iocb
, QLA_TGT_NEXUS_LOSS_SESS
);
3018 static int qlt_set_data_offset(struct qla_tgt_cmd
*cmd
, uint32_t offset
)
3020 struct scatterlist
*sg
, *sgp
, *sg_srr
, *sg_srr_start
= NULL
;
3021 size_t first_offset
= 0, rem_offset
= offset
, tmp
= 0;
3022 int i
, sg_srr_cnt
, bufflen
= 0;
3024 ql_dbg(ql_dbg_tgt
, cmd
->vha
, 0xe023,
3025 "Entering qla_tgt_set_data_offset: cmd: %p, cmd->sg: %p, "
3026 "cmd->sg_cnt: %u, direction: %d\n",
3027 cmd
, cmd
->sg
, cmd
->sg_cnt
, cmd
->dma_data_direction
);
3030 * FIXME: Reject non zero SRR relative offset until we can test
3031 * this code properly.
3033 pr_debug("Rejecting non zero SRR rel_offs: %u\n", offset
);
3036 if (!cmd
->sg
|| !cmd
->sg_cnt
) {
3037 ql_dbg(ql_dbg_tgt
, cmd
->vha
, 0xe055,
3038 "Missing cmd->sg or zero cmd->sg_cnt in"
3039 " qla_tgt_set_data_offset\n");
3043 * Walk the current cmd->sg list until we locate the new sg_srr_start
3045 for_each_sg(cmd
->sg
, sg
, cmd
->sg_cnt
, i
) {
3046 ql_dbg(ql_dbg_tgt
, cmd
->vha
, 0xe024,
3047 "sg[%d]: %p page: %p, length: %d, offset: %d\n",
3048 i
, sg
, sg_page(sg
), sg
->length
, sg
->offset
);
3050 if ((sg
->length
+ tmp
) > offset
) {
3051 first_offset
= rem_offset
;
3053 ql_dbg(ql_dbg_tgt
, cmd
->vha
, 0xe025,
3054 "Found matching sg[%d], using %p as sg_srr_start, "
3055 "and using first_offset: %zu\n", i
, sg
,
3060 rem_offset
-= sg
->length
;
3063 if (!sg_srr_start
) {
3064 ql_dbg(ql_dbg_tgt
, cmd
->vha
, 0xe056,
3065 "Unable to locate sg_srr_start for offset: %u\n", offset
);
3068 sg_srr_cnt
= (cmd
->sg_cnt
- i
);
3070 sg_srr
= kzalloc(sizeof(struct scatterlist
) * sg_srr_cnt
, GFP_KERNEL
);
3072 ql_dbg(ql_dbg_tgt
, cmd
->vha
, 0xe057,
3073 "Unable to allocate sgp\n");
3076 sg_init_table(sg_srr
, sg_srr_cnt
);
3079 * Walk the remaining list for sg_srr_start, mapping to the newly
3080 * allocated sg_srr taking first_offset into account.
3082 for_each_sg(sg_srr_start
, sg
, sg_srr_cnt
, i
) {
3084 sg_set_page(sgp
, sg_page(sg
),
3085 (sg
->length
- first_offset
), first_offset
);
3088 sg_set_page(sgp
, sg_page(sg
), sg
->length
, 0);
3090 bufflen
+= sgp
->length
;
3098 cmd
->sg_cnt
= sg_srr_cnt
;
3099 cmd
->bufflen
= bufflen
;
3100 cmd
->offset
+= offset
;
3103 ql_dbg(ql_dbg_tgt
, cmd
->vha
, 0xe026, "New cmd->sg: %p\n", cmd
->sg
);
3104 ql_dbg(ql_dbg_tgt
, cmd
->vha
, 0xe027, "New cmd->sg_cnt: %u\n",
3106 ql_dbg(ql_dbg_tgt
, cmd
->vha
, 0xe028, "New cmd->bufflen: %u\n",
3108 ql_dbg(ql_dbg_tgt
, cmd
->vha
, 0xe029, "New cmd->offset: %u\n",
3111 if (cmd
->sg_cnt
< 0)
3114 if (cmd
->bufflen
< 0)
3120 static inline int qlt_srr_adjust_data(struct qla_tgt_cmd
*cmd
,
3121 uint32_t srr_rel_offs
, int *xmit_type
)
3123 int res
= 0, rel_offs
;
3125 rel_offs
= srr_rel_offs
- cmd
->offset
;
3126 ql_dbg(ql_dbg_tgt_mgt
, cmd
->vha
, 0xf027, "srr_rel_offs=%d, rel_offs=%d",
3127 srr_rel_offs
, rel_offs
);
3129 *xmit_type
= QLA_TGT_XMIT_ALL
;
3132 ql_dbg(ql_dbg_tgt_mgt
, cmd
->vha
, 0xf062,
3133 "qla_target(%d): SRR rel_offs (%d) < 0",
3134 cmd
->vha
->vp_idx
, rel_offs
);
3136 } else if (rel_offs
== cmd
->bufflen
)
3137 *xmit_type
= QLA_TGT_XMIT_STATUS
;
3138 else if (rel_offs
> 0)
3139 res
= qlt_set_data_offset(cmd
, rel_offs
);
3144 /* No locks, thread context */
3145 static void qlt_handle_srr(struct scsi_qla_host
*vha
,
3146 struct qla_tgt_srr_ctio
*sctio
, struct qla_tgt_srr_imm
*imm
)
3148 struct imm_ntfy_from_isp
*ntfy
=
3149 (struct imm_ntfy_from_isp
*)&imm
->imm_ntfy
;
3150 struct qla_hw_data
*ha
= vha
->hw
;
3151 struct qla_tgt_cmd
*cmd
= sctio
->cmd
;
3152 struct se_cmd
*se_cmd
= &cmd
->se_cmd
;
3153 unsigned long flags
;
3154 int xmit_type
= 0, resp
= 0;
3158 offset
= le32_to_cpu(ntfy
->u
.isp24
.srr_rel_offs
);
3159 srr_ui
= ntfy
->u
.isp24
.srr_ui
;
3161 ql_dbg(ql_dbg_tgt_mgt
, vha
, 0xf028, "SRR cmd %p, srr_ui %x\n",
3166 spin_lock_irqsave(&ha
->hardware_lock
, flags
);
3167 qlt_send_notify_ack(vha
, ntfy
,
3168 0, 0, 0, NOTIFY_ACK_SRR_FLAGS_ACCEPT
, 0, 0);
3169 spin_unlock_irqrestore(&ha
->hardware_lock
, flags
);
3170 xmit_type
= QLA_TGT_XMIT_STATUS
;
3173 case SRR_IU_DATA_IN
:
3174 if (!cmd
->sg
|| !cmd
->sg_cnt
) {
3175 ql_dbg(ql_dbg_tgt_mgt
, vha
, 0xf063,
3176 "Unable to process SRR_IU_DATA_IN due to"
3177 " missing cmd->sg, state: %d\n", cmd
->state
);
3181 if (se_cmd
->scsi_status
!= 0) {
3182 ql_dbg(ql_dbg_tgt
, vha
, 0xe02a,
3183 "Rejecting SRR_IU_DATA_IN with non GOOD "
3187 cmd
->bufflen
= se_cmd
->data_length
;
3189 if (qlt_has_data(cmd
)) {
3190 if (qlt_srr_adjust_data(cmd
, offset
, &xmit_type
) != 0)
3192 spin_lock_irqsave(&ha
->hardware_lock
, flags
);
3193 qlt_send_notify_ack(vha
, ntfy
,
3194 0, 0, 0, NOTIFY_ACK_SRR_FLAGS_ACCEPT
, 0, 0);
3195 spin_unlock_irqrestore(&ha
->hardware_lock
, flags
);
3198 ql_dbg(ql_dbg_tgt_mgt
, vha
, 0xf064,
3199 "qla_target(%d): SRR for in data for cmd "
3200 "without them (tag %d, SCSI status %d), "
3201 "reject", vha
->vp_idx
, cmd
->tag
,
3202 cmd
->se_cmd
.scsi_status
);
3206 case SRR_IU_DATA_OUT
:
3207 if (!cmd
->sg
|| !cmd
->sg_cnt
) {
3208 ql_dbg(ql_dbg_tgt_mgt
, vha
, 0xf065,
3209 "Unable to process SRR_IU_DATA_OUT due to"
3210 " missing cmd->sg\n");
3214 if (se_cmd
->scsi_status
!= 0) {
3215 ql_dbg(ql_dbg_tgt
, vha
, 0xe02b,
3216 "Rejecting SRR_IU_DATA_OUT"
3217 " with non GOOD scsi_status\n");
3220 cmd
->bufflen
= se_cmd
->data_length
;
3222 if (qlt_has_data(cmd
)) {
3223 if (qlt_srr_adjust_data(cmd
, offset
, &xmit_type
) != 0)
3225 spin_lock_irqsave(&ha
->hardware_lock
, flags
);
3226 qlt_send_notify_ack(vha
, ntfy
,
3227 0, 0, 0, NOTIFY_ACK_SRR_FLAGS_ACCEPT
, 0, 0);
3228 spin_unlock_irqrestore(&ha
->hardware_lock
, flags
);
3229 if (xmit_type
& QLA_TGT_XMIT_DATA
)
3230 qlt_rdy_to_xfer(cmd
);
3232 ql_dbg(ql_dbg_tgt_mgt
, vha
, 0xf066,
3233 "qla_target(%d): SRR for out data for cmd "
3234 "without them (tag %d, SCSI status %d), "
3235 "reject", vha
->vp_idx
, cmd
->tag
,
3236 cmd
->se_cmd
.scsi_status
);
3241 ql_dbg(ql_dbg_tgt_mgt
, vha
, 0xf067,
3242 "qla_target(%d): Unknown srr_ui value %x",
3243 vha
->vp_idx
, srr_ui
);
3247 /* Transmit response in case of status and data-in cases */
3249 qlt_xmit_response(cmd
, xmit_type
, se_cmd
->scsi_status
);
3254 spin_lock_irqsave(&ha
->hardware_lock
, flags
);
3255 qlt_send_notify_ack(vha
, ntfy
, 0, 0, 0,
3256 NOTIFY_ACK_SRR_FLAGS_REJECT
,
3257 NOTIFY_ACK_SRR_REJECT_REASON_UNABLE_TO_PERFORM
,
3258 NOTIFY_ACK_SRR_FLAGS_REJECT_EXPL_NO_EXPL
);
3259 if (cmd
->state
== QLA_TGT_STATE_NEED_DATA
) {
3260 cmd
->state
= QLA_TGT_STATE_DATA_IN
;
3263 qlt_send_term_exchange(vha
, cmd
, &cmd
->atio
, 1);
3264 spin_unlock_irqrestore(&ha
->hardware_lock
, flags
);
3267 static void qlt_reject_free_srr_imm(struct scsi_qla_host
*vha
,
3268 struct qla_tgt_srr_imm
*imm
, int ha_locked
)
3270 struct qla_hw_data
*ha
= vha
->hw
;
3271 unsigned long flags
= 0;
3274 spin_lock_irqsave(&ha
->hardware_lock
, flags
);
3276 qlt_send_notify_ack(vha
, (void *)&imm
->imm_ntfy
, 0, 0, 0,
3277 NOTIFY_ACK_SRR_FLAGS_REJECT
,
3278 NOTIFY_ACK_SRR_REJECT_REASON_UNABLE_TO_PERFORM
,
3279 NOTIFY_ACK_SRR_FLAGS_REJECT_EXPL_NO_EXPL
);
3282 spin_unlock_irqrestore(&ha
->hardware_lock
, flags
);
3287 static void qlt_handle_srr_work(struct work_struct
*work
)
3289 struct qla_tgt
*tgt
= container_of(work
, struct qla_tgt
, srr_work
);
3290 struct scsi_qla_host
*vha
= tgt
->vha
;
3291 struct qla_tgt_srr_ctio
*sctio
;
3292 unsigned long flags
;
3294 ql_dbg(ql_dbg_tgt_mgt
, vha
, 0xf029, "Entering SRR work (tgt %p)\n",
3298 spin_lock_irqsave(&tgt
->srr_lock
, flags
);
3299 list_for_each_entry(sctio
, &tgt
->srr_ctio_list
, srr_list_entry
) {
3300 struct qla_tgt_srr_imm
*imm
, *i
, *ti
;
3301 struct qla_tgt_cmd
*cmd
;
3302 struct se_cmd
*se_cmd
;
3305 list_for_each_entry_safe(i
, ti
, &tgt
->srr_imm_list
,
3307 if (i
->srr_id
== sctio
->srr_id
) {
3308 list_del(&i
->srr_list_entry
);
3310 ql_dbg(ql_dbg_tgt_mgt
, vha
, 0xf068,
3311 "qla_target(%d): There must be "
3312 "only one IMM SRR per CTIO SRR "
3313 "(IMM SRR %p, id %d, CTIO %p\n",
3314 vha
->vp_idx
, i
, i
->srr_id
, sctio
);
3315 qlt_reject_free_srr_imm(tgt
->vha
, i
, 0);
3321 ql_dbg(ql_dbg_tgt_mgt
, vha
, 0xf02a,
3322 "IMM SRR %p, CTIO SRR %p (id %d)\n", imm
, sctio
,
3326 ql_dbg(ql_dbg_tgt_mgt
, vha
, 0xf02b,
3327 "Not found matching IMM for SRR CTIO (id %d)\n",
3331 list_del(&sctio
->srr_list_entry
);
3333 spin_unlock_irqrestore(&tgt
->srr_lock
, flags
);
3337 * Reset qla_tgt_cmd SRR values and SGL pointer+count to follow
3338 * tcm_qla2xxx_write_pending() and tcm_qla2xxx_queue_data_in()
3347 se_cmd
= &cmd
->se_cmd
;
3349 cmd
->sg_cnt
= se_cmd
->t_data_nents
;
3350 cmd
->sg
= se_cmd
->t_data_sg
;
3352 ql_dbg(ql_dbg_tgt_mgt
, vha
, 0xf02c,
3353 "SRR cmd %p (se_cmd %p, tag %d, op %x), "
3354 "sg_cnt=%d, offset=%d", cmd
, &cmd
->se_cmd
, cmd
->tag
,
3355 se_cmd
->t_task_cdb
[0], cmd
->sg_cnt
, cmd
->offset
);
3357 qlt_handle_srr(vha
, sctio
, imm
);
3363 spin_unlock_irqrestore(&tgt
->srr_lock
, flags
);
3366 /* ha->hardware_lock supposed to be held on entry */
3367 static void qlt_prepare_srr_imm(struct scsi_qla_host
*vha
,
3368 struct imm_ntfy_from_isp
*iocb
)
3370 struct qla_tgt_srr_imm
*imm
;
3371 struct qla_hw_data
*ha
= vha
->hw
;
3372 struct qla_tgt
*tgt
= ha
->tgt
.qla_tgt
;
3373 struct qla_tgt_srr_ctio
*sctio
;
3377 ql_dbg(ql_dbg_tgt_mgt
, vha
, 0xf02d, "qla_target(%d): SRR received\n",
3380 imm
= kzalloc(sizeof(*imm
), GFP_ATOMIC
);
3382 memcpy(&imm
->imm_ntfy
, iocb
, sizeof(imm
->imm_ntfy
));
3384 /* IRQ is already OFF */
3385 spin_lock(&tgt
->srr_lock
);
3386 imm
->srr_id
= tgt
->imm_srr_id
;
3387 list_add_tail(&imm
->srr_list_entry
,
3388 &tgt
->srr_imm_list
);
3389 ql_dbg(ql_dbg_tgt_mgt
, vha
, 0xf02e,
3390 "IMM NTFY SRR %p added (id %d, ui %x)\n",
3391 imm
, imm
->srr_id
, iocb
->u
.isp24
.srr_ui
);
3392 if (tgt
->imm_srr_id
== tgt
->ctio_srr_id
) {
3394 list_for_each_entry(sctio
, &tgt
->srr_ctio_list
,
3396 if (sctio
->srr_id
== imm
->srr_id
) {
3402 ql_dbg(ql_dbg_tgt_mgt
, vha
, 0xf02f, "%s",
3403 "Scheduling srr work\n");
3404 schedule_work(&tgt
->srr_work
);
3406 ql_dbg(ql_dbg_tgt_mgt
, vha
, 0xf030,
3407 "qla_target(%d): imm_srr_id "
3408 "== ctio_srr_id (%d), but there is no "
3409 "corresponding SRR CTIO, deleting IMM "
3410 "SRR %p\n", vha
->vp_idx
, tgt
->ctio_srr_id
,
3412 list_del(&imm
->srr_list_entry
);
3416 spin_unlock(&tgt
->srr_lock
);
3420 spin_unlock(&tgt
->srr_lock
);
3422 struct qla_tgt_srr_ctio
*ts
;
3424 ql_dbg(ql_dbg_tgt_mgt
, vha
, 0xf069,
3425 "qla_target(%d): Unable to allocate SRR IMM "
3426 "entry, SRR request will be rejected\n", vha
->vp_idx
);
3428 /* IRQ is already OFF */
3429 spin_lock(&tgt
->srr_lock
);
3430 list_for_each_entry_safe(sctio
, ts
, &tgt
->srr_ctio_list
,
3432 if (sctio
->srr_id
== tgt
->imm_srr_id
) {
3433 ql_dbg(ql_dbg_tgt_mgt
, vha
, 0xf031,
3434 "CTIO SRR %p deleted (id %d)\n",
3435 sctio
, sctio
->srr_id
);
3436 list_del(&sctio
->srr_list_entry
);
3437 qlt_send_term_exchange(vha
, sctio
->cmd
,
3438 &sctio
->cmd
->atio
, 1);
3442 spin_unlock(&tgt
->srr_lock
);
3449 qlt_send_notify_ack(vha
, iocb
, 0, 0, 0,
3450 NOTIFY_ACK_SRR_FLAGS_REJECT
,
3451 NOTIFY_ACK_SRR_REJECT_REASON_UNABLE_TO_PERFORM
,
3452 NOTIFY_ACK_SRR_FLAGS_REJECT_EXPL_NO_EXPL
);
3456 * ha->hardware_lock supposed to be held on entry. Might drop it, then reaquire
3458 static void qlt_handle_imm_notify(struct scsi_qla_host
*vha
,
3459 struct imm_ntfy_from_isp
*iocb
)
3461 struct qla_hw_data
*ha
= vha
->hw
;
3462 uint32_t add_flags
= 0;
3463 int send_notify_ack
= 1;
3466 status
= le16_to_cpu(iocb
->u
.isp2x
.status
);
3468 case IMM_NTFY_LIP_RESET
:
3470 ql_dbg(ql_dbg_tgt_mgt
, vha
, 0xf032,
3471 "qla_target(%d): LIP reset (loop %#x), subcode %x\n",
3472 vha
->vp_idx
, le16_to_cpu(iocb
->u
.isp24
.nport_handle
),
3473 iocb
->u
.isp24
.status_subcode
);
3475 if (qlt_reset(vha
, iocb
, QLA_TGT_ABORT_ALL
) == 0)
3476 send_notify_ack
= 0;
3480 case IMM_NTFY_LIP_LINK_REINIT
:
3482 struct qla_tgt
*tgt
= ha
->tgt
.qla_tgt
;
3483 ql_dbg(ql_dbg_tgt_mgt
, vha
, 0xf033,
3484 "qla_target(%d): LINK REINIT (loop %#x, "
3485 "subcode %x)\n", vha
->vp_idx
,
3486 le16_to_cpu(iocb
->u
.isp24
.nport_handle
),
3487 iocb
->u
.isp24
.status_subcode
);
3488 if (tgt
->link_reinit_iocb_pending
) {
3489 qlt_send_notify_ack(vha
, &tgt
->link_reinit_iocb
,
3492 memcpy(&tgt
->link_reinit_iocb
, iocb
, sizeof(*iocb
));
3493 tgt
->link_reinit_iocb_pending
= 1;
3495 * QLogic requires to wait after LINK REINIT for possible
3496 * PDISC or ADISC ELS commands
3498 send_notify_ack
= 0;
3502 case IMM_NTFY_PORT_LOGOUT
:
3503 ql_dbg(ql_dbg_tgt_mgt
, vha
, 0xf034,
3504 "qla_target(%d): Port logout (loop "
3505 "%#x, subcode %x)\n", vha
->vp_idx
,
3506 le16_to_cpu(iocb
->u
.isp24
.nport_handle
),
3507 iocb
->u
.isp24
.status_subcode
);
3509 if (qlt_reset(vha
, iocb
, QLA_TGT_NEXUS_LOSS_SESS
) == 0)
3510 send_notify_ack
= 0;
3511 /* The sessions will be cleared in the callback, if needed */
3514 case IMM_NTFY_GLBL_TPRLO
:
3515 ql_dbg(ql_dbg_tgt_mgt
, vha
, 0xf035,
3516 "qla_target(%d): Global TPRLO (%x)\n", vha
->vp_idx
, status
);
3517 if (qlt_reset(vha
, iocb
, QLA_TGT_NEXUS_LOSS
) == 0)
3518 send_notify_ack
= 0;
3519 /* The sessions will be cleared in the callback, if needed */
3522 case IMM_NTFY_PORT_CONFIG
:
3523 ql_dbg(ql_dbg_tgt_mgt
, vha
, 0xf036,
3524 "qla_target(%d): Port config changed (%x)\n", vha
->vp_idx
,
3526 if (qlt_reset(vha
, iocb
, QLA_TGT_ABORT_ALL
) == 0)
3527 send_notify_ack
= 0;
3528 /* The sessions will be cleared in the callback, if needed */
3531 case IMM_NTFY_GLBL_LOGO
:
3532 ql_dbg(ql_dbg_tgt_mgt
, vha
, 0xf06a,
3533 "qla_target(%d): Link failure detected\n",
3535 /* I_T nexus loss */
3536 if (qlt_reset(vha
, iocb
, QLA_TGT_NEXUS_LOSS
) == 0)
3537 send_notify_ack
= 0;
3540 case IMM_NTFY_IOCB_OVERFLOW
:
3541 ql_dbg(ql_dbg_tgt_mgt
, vha
, 0xf06b,
3542 "qla_target(%d): Cannot provide requested "
3543 "capability (IOCB overflowed the immediate notify "
3544 "resource count)\n", vha
->vp_idx
);
3547 case IMM_NTFY_ABORT_TASK
:
3548 ql_dbg(ql_dbg_tgt_mgt
, vha
, 0xf037,
3549 "qla_target(%d): Abort Task (S %08x I %#x -> "
3550 "L %#x)\n", vha
->vp_idx
,
3551 le16_to_cpu(iocb
->u
.isp2x
.seq_id
),
3552 GET_TARGET_ID(ha
, (struct atio_from_isp
*)iocb
),
3553 le16_to_cpu(iocb
->u
.isp2x
.lun
));
3554 if (qlt_abort_task(vha
, iocb
) == 0)
3555 send_notify_ack
= 0;
3558 case IMM_NTFY_RESOURCE
:
3559 ql_dbg(ql_dbg_tgt_mgt
, vha
, 0xf06c,
3560 "qla_target(%d): Out of resources, host %ld\n",
3561 vha
->vp_idx
, vha
->host_no
);
3564 case IMM_NTFY_MSG_RX
:
3565 ql_dbg(ql_dbg_tgt_mgt
, vha
, 0xf038,
3566 "qla_target(%d): Immediate notify task %x\n",
3567 vha
->vp_idx
, iocb
->u
.isp2x
.task_flags
);
3568 if (qlt_handle_task_mgmt(vha
, iocb
) == 0)
3569 send_notify_ack
= 0;
3573 if (qlt_24xx_handle_els(vha
, iocb
) == 0)
3574 send_notify_ack
= 0;
3578 qlt_prepare_srr_imm(vha
, iocb
);
3579 send_notify_ack
= 0;
3583 ql_dbg(ql_dbg_tgt_mgt
, vha
, 0xf06d,
3584 "qla_target(%d): Received unknown immediate "
3585 "notify status %x\n", vha
->vp_idx
, status
);
3589 if (send_notify_ack
)
3590 qlt_send_notify_ack(vha
, iocb
, add_flags
, 0, 0, 0, 0, 0);
3594 * ha->hardware_lock supposed to be held on entry. Might drop it, then reaquire
3595 * This function sends busy to ISP 2xxx or 24xx.
3597 static void qlt_send_busy(struct scsi_qla_host
*vha
,
3598 struct atio_from_isp
*atio
, uint16_t status
)
3600 struct ctio7_to_24xx
*ctio24
;
3601 struct qla_hw_data
*ha
= vha
->hw
;
3603 struct qla_tgt_sess
*sess
= NULL
;
3605 sess
= ha
->tgt
.tgt_ops
->find_sess_by_s_id(vha
,
3606 atio
->u
.isp24
.fcp_hdr
.s_id
);
3608 qlt_send_term_exchange(vha
, NULL
, atio
, 1);
3611 /* Sending marker isn't necessary, since we called from ISR */
3613 pkt
= (request_t
*)qla2x00_alloc_iocbs(vha
, NULL
);
3615 ql_dbg(ql_dbg_tgt_mgt
, vha
, 0xf06e,
3616 "qla_target(%d): %s failed: unable to allocate "
3617 "request packet", vha
->vp_idx
, __func__
);
3621 pkt
->entry_count
= 1;
3622 pkt
->handle
= QLA_TGT_SKIP_HANDLE
| CTIO_COMPLETION_HANDLE_MARK
;
3624 ctio24
= (struct ctio7_to_24xx
*)pkt
;
3625 ctio24
->entry_type
= CTIO_TYPE7
;
3626 ctio24
->nport_handle
= sess
->loop_id
;
3627 ctio24
->timeout
= __constant_cpu_to_le16(QLA_TGT_TIMEOUT
);
3628 ctio24
->vp_index
= vha
->vp_idx
;
3629 ctio24
->initiator_id
[0] = atio
->u
.isp24
.fcp_hdr
.s_id
[2];
3630 ctio24
->initiator_id
[1] = atio
->u
.isp24
.fcp_hdr
.s_id
[1];
3631 ctio24
->initiator_id
[2] = atio
->u
.isp24
.fcp_hdr
.s_id
[0];
3632 ctio24
->exchange_addr
= atio
->u
.isp24
.exchange_addr
;
3633 ctio24
->u
.status1
.flags
= (atio
->u
.isp24
.attr
<< 9) |
3634 __constant_cpu_to_le16(
3635 CTIO7_FLAGS_STATUS_MODE_1
| CTIO7_FLAGS_SEND_STATUS
|
3636 CTIO7_FLAGS_DONT_RET_CTIO
);
3638 * CTIO from fw w/o se_cmd doesn't provide enough info to retry it,
3639 * if the explicit conformation is used.
3641 ctio24
->u
.status1
.ox_id
= swab16(atio
->u
.isp24
.fcp_hdr
.ox_id
);
3642 ctio24
->u
.status1
.scsi_status
= cpu_to_le16(status
);
3643 ctio24
->u
.status1
.residual
= get_unaligned((uint32_t *)
3644 &atio
->u
.isp24
.fcp_cmnd
.add_cdb
[
3645 atio
->u
.isp24
.fcp_cmnd
.add_cdb_len
]);
3646 if (ctio24
->u
.status1
.residual
!= 0)
3647 ctio24
->u
.status1
.scsi_status
|= SS_RESIDUAL_UNDER
;
3649 qla2x00_start_iocbs(vha
, vha
->req
);
3652 /* ha->hardware_lock supposed to be held on entry */
3653 /* called via callback from qla2xxx */
3654 static void qlt_24xx_atio_pkt(struct scsi_qla_host
*vha
,
3655 struct atio_from_isp
*atio
)
3657 struct qla_hw_data
*ha
= vha
->hw
;
3658 struct qla_tgt
*tgt
= ha
->tgt
.qla_tgt
;
3661 if (unlikely(tgt
== NULL
)) {
3662 ql_dbg(ql_dbg_tgt_mgt
, vha
, 0xf039,
3663 "ATIO pkt, but no tgt (ha %p)", ha
);
3666 ql_dbg(ql_dbg_tgt
, vha
, 0xe02c,
3667 "qla_target(%d): ATIO pkt %p: type %02x count %02x",
3668 vha
->vp_idx
, atio
, atio
->u
.raw
.entry_type
,
3669 atio
->u
.raw
.entry_count
);
3671 * In tgt_stop mode we also should allow all requests to pass.
3672 * Otherwise, some commands can stuck.
3675 tgt
->irq_cmd_count
++;
3677 switch (atio
->u
.raw
.entry_type
) {
3679 ql_dbg(ql_dbg_tgt
, vha
, 0xe02d,
3680 "ATIO_TYPE7 instance %d, lun %Lx, read/write %d/%d, "
3681 "add_cdb_len %d, data_length %04x, s_id %x:%x:%x\n",
3682 vha
->vp_idx
, atio
->u
.isp24
.fcp_cmnd
.lun
,
3683 atio
->u
.isp24
.fcp_cmnd
.rddata
,
3684 atio
->u
.isp24
.fcp_cmnd
.wrdata
,
3685 atio
->u
.isp24
.fcp_cmnd
.add_cdb_len
,
3686 be32_to_cpu(get_unaligned((uint32_t *)
3687 &atio
->u
.isp24
.fcp_cmnd
.add_cdb
[
3688 atio
->u
.isp24
.fcp_cmnd
.add_cdb_len
])),
3689 atio
->u
.isp24
.fcp_hdr
.s_id
[0],
3690 atio
->u
.isp24
.fcp_hdr
.s_id
[1],
3691 atio
->u
.isp24
.fcp_hdr
.s_id
[2]);
3693 if (unlikely(atio
->u
.isp24
.exchange_addr
==
3694 ATIO_EXCHANGE_ADDRESS_UNKNOWN
)) {
3695 ql_dbg(ql_dbg_tgt
, vha
, 0xe058,
3696 "qla_target(%d): ATIO_TYPE7 "
3697 "received with UNKNOWN exchange address, "
3698 "sending QUEUE_FULL\n", vha
->vp_idx
);
3699 qlt_send_busy(vha
, atio
, SAM_STAT_TASK_SET_FULL
);
3702 if (likely(atio
->u
.isp24
.fcp_cmnd
.task_mgmt_flags
== 0))
3703 rc
= qlt_handle_cmd_for_atio(vha
, atio
);
3705 rc
= qlt_handle_task_mgmt(vha
, atio
);
3706 if (unlikely(rc
!= 0)) {
3708 #if 1 /* With TERM EXCHANGE some FC cards refuse to boot */
3709 qlt_send_busy(vha
, atio
, SAM_STAT_BUSY
);
3711 qlt_send_term_exchange(vha
, NULL
, atio
, 1);
3714 if (tgt
->tgt_stop
) {
3715 ql_dbg(ql_dbg_tgt
, vha
, 0xe059,
3716 "qla_target: Unable to send "
3717 "command to target for req, "
3720 ql_dbg(ql_dbg_tgt
, vha
, 0xe05a,
3721 "qla_target(%d): Unable to send "
3722 "command to target, sending BUSY "
3723 "status.\n", vha
->vp_idx
);
3724 qlt_send_busy(vha
, atio
, SAM_STAT_BUSY
);
3730 case IMMED_NOTIFY_TYPE
:
3732 if (unlikely(atio
->u
.isp2x
.entry_status
!= 0)) {
3733 ql_dbg(ql_dbg_tgt
, vha
, 0xe05b,
3734 "qla_target(%d): Received ATIO packet %x "
3735 "with error status %x\n", vha
->vp_idx
,
3736 atio
->u
.raw
.entry_type
,
3737 atio
->u
.isp2x
.entry_status
);
3740 ql_dbg(ql_dbg_tgt
, vha
, 0xe02e, "%s", "IMMED_NOTIFY ATIO");
3741 qlt_handle_imm_notify(vha
, (struct imm_ntfy_from_isp
*)atio
);
3746 ql_dbg(ql_dbg_tgt
, vha
, 0xe05c,
3747 "qla_target(%d): Received unknown ATIO atio "
3748 "type %x\n", vha
->vp_idx
, atio
->u
.raw
.entry_type
);
3752 tgt
->irq_cmd_count
--;
3755 /* ha->hardware_lock supposed to be held on entry */
3756 /* called via callback from qla2xxx */
3757 static void qlt_response_pkt(struct scsi_qla_host
*vha
, response_t
*pkt
)
3759 struct qla_hw_data
*ha
= vha
->hw
;
3760 struct qla_tgt
*tgt
= ha
->tgt
.qla_tgt
;
3762 if (unlikely(tgt
== NULL
)) {
3763 ql_dbg(ql_dbg_tgt
, vha
, 0xe05d,
3764 "qla_target(%d): Response pkt %x received, but no "
3765 "tgt (ha %p)\n", vha
->vp_idx
, pkt
->entry_type
, ha
);
3769 ql_dbg(ql_dbg_tgt
, vha
, 0xe02f,
3770 "qla_target(%d): response pkt %p: T %02x C %02x S %02x "
3771 "handle %#x\n", vha
->vp_idx
, pkt
, pkt
->entry_type
,
3772 pkt
->entry_count
, pkt
->entry_status
, pkt
->handle
);
3775 * In tgt_stop mode we also should allow all requests to pass.
3776 * Otherwise, some commands can stuck.
3779 tgt
->irq_cmd_count
++;
3781 switch (pkt
->entry_type
) {
3784 struct ctio7_from_24xx
*entry
= (struct ctio7_from_24xx
*)pkt
;
3785 ql_dbg(ql_dbg_tgt
, vha
, 0xe030, "CTIO_TYPE7: instance %d\n",
3787 qlt_do_ctio_completion(vha
, entry
->handle
,
3788 le16_to_cpu(entry
->status
)|(pkt
->entry_status
<< 16),
3793 case ACCEPT_TGT_IO_TYPE
:
3795 struct atio_from_isp
*atio
= (struct atio_from_isp
*)pkt
;
3797 ql_dbg(ql_dbg_tgt
, vha
, 0xe031,
3798 "ACCEPT_TGT_IO instance %d status %04x "
3799 "lun %04x read/write %d data_length %04x "
3800 "target_id %02x rx_id %04x\n ", vha
->vp_idx
,
3801 le16_to_cpu(atio
->u
.isp2x
.status
),
3802 le16_to_cpu(atio
->u
.isp2x
.lun
),
3803 atio
->u
.isp2x
.execution_codes
,
3804 le32_to_cpu(atio
->u
.isp2x
.data_length
), GET_TARGET_ID(ha
,
3805 atio
), atio
->u
.isp2x
.rx_id
);
3806 if (atio
->u
.isp2x
.status
!=
3807 __constant_cpu_to_le16(ATIO_CDB_VALID
)) {
3808 ql_dbg(ql_dbg_tgt
, vha
, 0xe05e,
3809 "qla_target(%d): ATIO with error "
3810 "status %x received\n", vha
->vp_idx
,
3811 le16_to_cpu(atio
->u
.isp2x
.status
));
3814 ql_dbg(ql_dbg_tgt
, vha
, 0xe032,
3815 "FCP CDB: 0x%02x, sizeof(cdb): %lu",
3816 atio
->u
.isp2x
.cdb
[0], (unsigned long
3817 int)sizeof(atio
->u
.isp2x
.cdb
));
3819 rc
= qlt_handle_cmd_for_atio(vha
, atio
);
3820 if (unlikely(rc
!= 0)) {
3822 #if 1 /* With TERM EXCHANGE some FC cards refuse to boot */
3823 qlt_send_busy(vha
, atio
, 0);
3825 qlt_send_term_exchange(vha
, NULL
, atio
, 1);
3828 if (tgt
->tgt_stop
) {
3829 ql_dbg(ql_dbg_tgt
, vha
, 0xe05f,
3830 "qla_target: Unable to send "
3831 "command to target, sending TERM "
3832 "EXCHANGE for rsp\n");
3833 qlt_send_term_exchange(vha
, NULL
,
3836 ql_dbg(ql_dbg_tgt
, vha
, 0xe060,
3837 "qla_target(%d): Unable to send "
3838 "command to target, sending BUSY "
3839 "status\n", vha
->vp_idx
);
3840 qlt_send_busy(vha
, atio
, 0);
3847 case CONTINUE_TGT_IO_TYPE
:
3849 struct ctio_to_2xxx
*entry
= (struct ctio_to_2xxx
*)pkt
;
3850 ql_dbg(ql_dbg_tgt
, vha
, 0xe033,
3851 "CONTINUE_TGT_IO: instance %d\n", vha
->vp_idx
);
3852 qlt_do_ctio_completion(vha
, entry
->handle
,
3853 le16_to_cpu(entry
->status
)|(pkt
->entry_status
<< 16),
3860 struct ctio_to_2xxx
*entry
= (struct ctio_to_2xxx
*)pkt
;
3861 ql_dbg(ql_dbg_tgt
, vha
, 0xe034, "CTIO_A64: instance %d\n",
3863 qlt_do_ctio_completion(vha
, entry
->handle
,
3864 le16_to_cpu(entry
->status
)|(pkt
->entry_status
<< 16),
3869 case IMMED_NOTIFY_TYPE
:
3870 ql_dbg(ql_dbg_tgt
, vha
, 0xe035, "%s", "IMMED_NOTIFY\n");
3871 qlt_handle_imm_notify(vha
, (struct imm_ntfy_from_isp
*)pkt
);
3874 case NOTIFY_ACK_TYPE
:
3875 if (tgt
->notify_ack_expected
> 0) {
3876 struct nack_to_isp
*entry
= (struct nack_to_isp
*)pkt
;
3877 ql_dbg(ql_dbg_tgt
, vha
, 0xe036,
3878 "NOTIFY_ACK seq %08x status %x\n",
3879 le16_to_cpu(entry
->u
.isp2x
.seq_id
),
3880 le16_to_cpu(entry
->u
.isp2x
.status
));
3881 tgt
->notify_ack_expected
--;
3882 if (entry
->u
.isp2x
.status
!=
3883 __constant_cpu_to_le16(NOTIFY_ACK_SUCCESS
)) {
3884 ql_dbg(ql_dbg_tgt
, vha
, 0xe061,
3885 "qla_target(%d): NOTIFY_ACK "
3886 "failed %x\n", vha
->vp_idx
,
3887 le16_to_cpu(entry
->u
.isp2x
.status
));
3890 ql_dbg(ql_dbg_tgt
, vha
, 0xe062,
3891 "qla_target(%d): Unexpected NOTIFY_ACK received\n",
3896 case ABTS_RECV_24XX
:
3897 ql_dbg(ql_dbg_tgt
, vha
, 0xe037,
3898 "ABTS_RECV_24XX: instance %d\n", vha
->vp_idx
);
3899 qlt_24xx_handle_abts(vha
, (struct abts_recv_from_24xx
*)pkt
);
3902 case ABTS_RESP_24XX
:
3903 if (tgt
->abts_resp_expected
> 0) {
3904 struct abts_resp_from_24xx_fw
*entry
=
3905 (struct abts_resp_from_24xx_fw
*)pkt
;
3906 ql_dbg(ql_dbg_tgt
, vha
, 0xe038,
3907 "ABTS_RESP_24XX: compl_status %x\n",
3908 entry
->compl_status
);
3909 tgt
->abts_resp_expected
--;
3910 if (le16_to_cpu(entry
->compl_status
) !=
3911 ABTS_RESP_COMPL_SUCCESS
) {
3912 if ((entry
->error_subcode1
== 0x1E) &&
3913 (entry
->error_subcode2
== 0)) {
3915 * We've got a race here: aborted
3916 * exchange not terminated, i.e.
3917 * response for the aborted command was
3918 * sent between the abort request was
3919 * received and processed.
3920 * Unfortunately, the firmware has a
3921 * silly requirement that all aborted
3922 * exchanges must be explicitely
3923 * terminated, otherwise it refuses to
3924 * send responses for the abort
3925 * requests. So, we have to
3926 * (re)terminate the exchange and retry
3927 * the abort response.
3929 qlt_24xx_retry_term_exchange(vha
,
3932 ql_dbg(ql_dbg_tgt
, vha
, 0xe063,
3933 "qla_target(%d): ABTS_RESP_24XX "
3934 "failed %x (subcode %x:%x)",
3935 vha
->vp_idx
, entry
->compl_status
,
3936 entry
->error_subcode1
,
3937 entry
->error_subcode2
);
3940 ql_dbg(ql_dbg_tgt
, vha
, 0xe064,
3941 "qla_target(%d): Unexpected ABTS_RESP_24XX "
3942 "received\n", vha
->vp_idx
);
3947 ql_dbg(ql_dbg_tgt
, vha
, 0xe065,
3948 "qla_target(%d): Received unknown response pkt "
3949 "type %x\n", vha
->vp_idx
, pkt
->entry_type
);
3953 tgt
->irq_cmd_count
--;
3957 * ha->hardware_lock supposed to be held on entry. Might drop it, then reaquire
3959 void qlt_async_event(uint16_t code
, struct scsi_qla_host
*vha
,
3962 struct qla_hw_data
*ha
= vha
->hw
;
3963 struct qla_tgt
*tgt
= ha
->tgt
.qla_tgt
;
3966 ql_dbg(ql_dbg_tgt
, vha
, 0xe039,
3967 "scsi(%ld): ha state %d init_done %d oper_mode %d topo %d\n",
3968 vha
->host_no
, atomic_read(&vha
->loop_state
), vha
->flags
.init_done
,
3969 ha
->operating_mode
, ha
->current_topology
);
3971 if (!ha
->tgt
.tgt_ops
)
3974 if (unlikely(tgt
== NULL
)) {
3975 ql_dbg(ql_dbg_tgt
, vha
, 0xe03a,
3976 "ASYNC EVENT %#x, but no tgt (ha %p)\n", code
, ha
);
3980 if (((code
== MBA_POINT_TO_POINT
) || (code
== MBA_CHG_IN_CONNECTION
)) &&
3984 * In tgt_stop mode we also should allow all requests to pass.
3985 * Otherwise, some commands can stuck.
3988 tgt
->irq_cmd_count
++;
3991 case MBA_RESET
: /* Reset */
3992 case MBA_SYSTEM_ERR
: /* System Error */
3993 case MBA_REQ_TRANSFER_ERR
: /* Request Transfer Error */
3994 case MBA_RSP_TRANSFER_ERR
: /* Response Transfer Error */
3995 ql_dbg(ql_dbg_tgt_mgt
, vha
, 0xf03a,
3996 "qla_target(%d): System error async event %#x "
3997 "occured", vha
->vp_idx
, code
);
3999 case MBA_WAKEUP_THRES
: /* Request Queue Wake-up. */
4000 set_bit(ISP_ABORT_NEEDED
, &vha
->dpc_flags
);
4005 ql_dbg(ql_dbg_tgt_mgt
, vha
, 0xf03b,
4006 "qla_target(%d): Async LOOP_UP occured "
4007 "(m[1]=%x, m[2]=%x, m[3]=%x, m[4]=%x)", vha
->vp_idx
,
4008 le16_to_cpu(mailbox
[1]), le16_to_cpu(mailbox
[2]),
4009 le16_to_cpu(mailbox
[3]), le16_to_cpu(mailbox
[4]));
4010 if (tgt
->link_reinit_iocb_pending
) {
4011 qlt_send_notify_ack(vha
, (void *)&tgt
->link_reinit_iocb
,
4013 tgt
->link_reinit_iocb_pending
= 0;
4018 case MBA_LIP_OCCURRED
:
4021 case MBA_RSCN_UPDATE
:
4022 ql_dbg(ql_dbg_tgt_mgt
, vha
, 0xf03c,
4023 "qla_target(%d): Async event %#x occured "
4024 "(m[1]=%x, m[2]=%x, m[3]=%x, m[4]=%x)", vha
->vp_idx
, code
,
4025 le16_to_cpu(mailbox
[1]), le16_to_cpu(mailbox
[2]),
4026 le16_to_cpu(mailbox
[3]), le16_to_cpu(mailbox
[4]));
4029 case MBA_PORT_UPDATE
:
4030 ql_dbg(ql_dbg_tgt_mgt
, vha
, 0xf03d,
4031 "qla_target(%d): Port update async event %#x "
4032 "occured: updating the ports database (m[1]=%x, m[2]=%x, "
4033 "m[3]=%x, m[4]=%x)", vha
->vp_idx
, code
,
4034 le16_to_cpu(mailbox
[1]), le16_to_cpu(mailbox
[2]),
4035 le16_to_cpu(mailbox
[3]), le16_to_cpu(mailbox
[4]));
4036 reason_code
= le16_to_cpu(mailbox
[2]);
4037 if (reason_code
== 0x4)
4038 ql_dbg(ql_dbg_tgt_mgt
, vha
, 0xf03e,
4039 "Async MB 2: Got PLOGI Complete\n");
4040 else if (reason_code
== 0x7)
4041 ql_dbg(ql_dbg_tgt_mgt
, vha
, 0xf03f,
4042 "Async MB 2: Port Logged Out\n");
4046 ql_dbg(ql_dbg_tgt_mgt
, vha
, 0xf040,
4047 "qla_target(%d): Async event %#x occured: "
4048 "ignore (m[1]=%x, m[2]=%x, m[3]=%x, m[4]=%x)", vha
->vp_idx
,
4049 code
, le16_to_cpu(mailbox
[1]), le16_to_cpu(mailbox
[2]),
4050 le16_to_cpu(mailbox
[3]), le16_to_cpu(mailbox
[4]));
4054 tgt
->irq_cmd_count
--;
4057 static fc_port_t
*qlt_get_port_database(struct scsi_qla_host
*vha
,
4063 fcport
= kzalloc(sizeof(*fcport
), GFP_KERNEL
);
4065 ql_dbg(ql_dbg_tgt_mgt
, vha
, 0xf06f,
4066 "qla_target(%d): Allocation of tmp FC port failed",
4071 ql_dbg(ql_dbg_tgt_mgt
, vha
, 0xf041, "loop_id %d", loop_id
);
4073 fcport
->loop_id
= loop_id
;
4075 rc
= qla2x00_get_port_database(vha
, fcport
, 0);
4076 if (rc
!= QLA_SUCCESS
) {
4077 ql_dbg(ql_dbg_tgt_mgt
, vha
, 0xf070,
4078 "qla_target(%d): Failed to retrieve fcport "
4079 "information -- get_port_database() returned %x "
4080 "(loop_id=0x%04x)", vha
->vp_idx
, rc
, loop_id
);
4088 /* Must be called under tgt_mutex */
4089 static struct qla_tgt_sess
*qlt_make_local_sess(struct scsi_qla_host
*vha
,
4092 struct qla_hw_data
*ha
= vha
->hw
;
4093 struct qla_tgt_sess
*sess
= NULL
;
4094 fc_port_t
*fcport
= NULL
;
4095 int rc
, global_resets
;
4096 uint16_t loop_id
= 0;
4099 global_resets
= atomic_read(&ha
->tgt
.qla_tgt
->tgt_global_resets_count
);
4101 rc
= qla24xx_get_loop_id(vha
, s_id
, &loop_id
);
4103 if ((s_id
[0] == 0xFF) &&
4104 (s_id
[1] == 0xFC)) {
4106 * This is Domain Controller, so it should be
4107 * OK to drop SCSI commands from it.
4109 ql_dbg(ql_dbg_tgt_mgt
, vha
, 0xf042,
4110 "Unable to find initiator with S_ID %x:%x:%x",
4111 s_id
[0], s_id
[1], s_id
[2]);
4113 ql_dbg(ql_dbg_tgt_mgt
, vha
, 0xf071,
4114 "qla_target(%d): Unable to find "
4115 "initiator with S_ID %x:%x:%x",
4116 vha
->vp_idx
, s_id
[0], s_id
[1],
4121 fcport
= qlt_get_port_database(vha
, loop_id
);
4125 if (global_resets
!=
4126 atomic_read(&ha
->tgt
.qla_tgt
->tgt_global_resets_count
)) {
4127 ql_dbg(ql_dbg_tgt_mgt
, vha
, 0xf043,
4128 "qla_target(%d): global reset during session discovery "
4129 "(counter was %d, new %d), retrying", vha
->vp_idx
,
4131 atomic_read(&ha
->tgt
.qla_tgt
->tgt_global_resets_count
));
4135 sess
= qlt_create_sess(vha
, fcport
, true);
4141 static void qlt_abort_work(struct qla_tgt
*tgt
,
4142 struct qla_tgt_sess_work_param
*prm
)
4144 struct scsi_qla_host
*vha
= tgt
->vha
;
4145 struct qla_hw_data
*ha
= vha
->hw
;
4146 struct qla_tgt_sess
*sess
= NULL
;
4147 unsigned long flags
;
4152 spin_lock_irqsave(&ha
->hardware_lock
, flags
);
4157 s_id
[0] = prm
->abts
.fcp_hdr_le
.s_id
[2];
4158 s_id
[1] = prm
->abts
.fcp_hdr_le
.s_id
[1];
4159 s_id
[2] = prm
->abts
.fcp_hdr_le
.s_id
[0];
4161 sess
= ha
->tgt
.tgt_ops
->find_sess_by_s_id(vha
,
4162 (unsigned char *)&be_s_id
);
4164 spin_unlock_irqrestore(&ha
->hardware_lock
, flags
);
4166 mutex_lock(&ha
->tgt
.tgt_mutex
);
4167 sess
= qlt_make_local_sess(vha
, s_id
);
4168 /* sess has got an extra creation ref */
4169 mutex_unlock(&ha
->tgt
.tgt_mutex
);
4171 spin_lock_irqsave(&ha
->hardware_lock
, flags
);
4175 kref_get(&sess
->se_sess
->sess_kref
);
4181 rc
= __qlt_24xx_handle_abts(vha
, &prm
->abts
, sess
);
4184 spin_unlock_irqrestore(&ha
->hardware_lock
, flags
);
4186 ha
->tgt
.tgt_ops
->put_sess(sess
);
4190 qlt_24xx_send_abts_resp(vha
, &prm
->abts
, FCP_TMF_REJECTED
, false);
4191 spin_unlock_irqrestore(&ha
->hardware_lock
, flags
);
4193 ha
->tgt
.tgt_ops
->put_sess(sess
);
4196 static void qlt_tmr_work(struct qla_tgt
*tgt
,
4197 struct qla_tgt_sess_work_param
*prm
)
4199 struct atio_from_isp
*a
= &prm
->tm_iocb2
;
4200 struct scsi_qla_host
*vha
= tgt
->vha
;
4201 struct qla_hw_data
*ha
= vha
->hw
;
4202 struct qla_tgt_sess
*sess
= NULL
;
4203 unsigned long flags
;
4204 uint8_t *s_id
= NULL
; /* to hide compiler warnings */
4206 uint32_t lun
, unpacked_lun
;
4210 spin_lock_irqsave(&ha
->hardware_lock
, flags
);
4215 s_id
= prm
->tm_iocb2
.u
.isp24
.fcp_hdr
.s_id
;
4216 sess
= ha
->tgt
.tgt_ops
->find_sess_by_s_id(vha
, s_id
);
4218 spin_unlock_irqrestore(&ha
->hardware_lock
, flags
);
4220 mutex_lock(&ha
->tgt
.tgt_mutex
);
4221 sess
= qlt_make_local_sess(vha
, s_id
);
4222 /* sess has got an extra creation ref */
4223 mutex_unlock(&ha
->tgt
.tgt_mutex
);
4225 spin_lock_irqsave(&ha
->hardware_lock
, flags
);
4229 kref_get(&sess
->se_sess
->sess_kref
);
4233 lun
= a
->u
.isp24
.fcp_cmnd
.lun
;
4234 lun_size
= sizeof(lun
);
4235 fn
= a
->u
.isp24
.fcp_cmnd
.task_mgmt_flags
;
4236 unpacked_lun
= scsilun_to_int((struct scsi_lun
*)&lun
);
4238 rc
= qlt_issue_task_mgmt(sess
, unpacked_lun
, fn
, iocb
, 0);
4241 spin_unlock_irqrestore(&ha
->hardware_lock
, flags
);
4243 ha
->tgt
.tgt_ops
->put_sess(sess
);
4247 qlt_send_term_exchange(vha
, NULL
, &prm
->tm_iocb2
, 1);
4248 spin_unlock_irqrestore(&ha
->hardware_lock
, flags
);
4250 ha
->tgt
.tgt_ops
->put_sess(sess
);
4253 static void qlt_sess_work_fn(struct work_struct
*work
)
4255 struct qla_tgt
*tgt
= container_of(work
, struct qla_tgt
, sess_work
);
4256 struct scsi_qla_host
*vha
= tgt
->vha
;
4257 unsigned long flags
;
4259 ql_dbg(ql_dbg_tgt_mgt
, vha
, 0xf000, "Sess work (tgt %p)", tgt
);
4261 spin_lock_irqsave(&tgt
->sess_work_lock
, flags
);
4262 while (!list_empty(&tgt
->sess_works_list
)) {
4263 struct qla_tgt_sess_work_param
*prm
= list_entry(
4264 tgt
->sess_works_list
.next
, typeof(*prm
),
4265 sess_works_list_entry
);
4268 * This work can be scheduled on several CPUs at time, so we
4269 * must delete the entry to eliminate double processing
4271 list_del(&prm
->sess_works_list_entry
);
4273 spin_unlock_irqrestore(&tgt
->sess_work_lock
, flags
);
4275 switch (prm
->type
) {
4276 case QLA_TGT_SESS_WORK_ABORT
:
4277 qlt_abort_work(tgt
, prm
);
4279 case QLA_TGT_SESS_WORK_TM
:
4280 qlt_tmr_work(tgt
, prm
);
4287 spin_lock_irqsave(&tgt
->sess_work_lock
, flags
);
4291 spin_unlock_irqrestore(&tgt
->sess_work_lock
, flags
);
4294 /* Must be called under tgt_host_action_mutex */
4295 int qlt_add_target(struct qla_hw_data
*ha
, struct scsi_qla_host
*base_vha
)
4297 struct qla_tgt
*tgt
;
4299 if (!QLA_TGT_MODE_ENABLED())
4302 ql_dbg(ql_dbg_tgt
, base_vha
, 0xe03b,
4303 "Registering target for host %ld(%p)", base_vha
->host_no
, ha
);
4305 BUG_ON((ha
->tgt
.qla_tgt
!= NULL
) || (ha
->tgt
.tgt_ops
!= NULL
));
4307 tgt
= kzalloc(sizeof(struct qla_tgt
), GFP_KERNEL
);
4309 ql_dbg(ql_dbg_tgt
, base_vha
, 0xe066,
4310 "Unable to allocate struct qla_tgt\n");
4314 if (!(base_vha
->host
->hostt
->supported_mode
& MODE_TARGET
))
4315 base_vha
->host
->hostt
->supported_mode
|= MODE_TARGET
;
4318 tgt
->vha
= base_vha
;
4319 init_waitqueue_head(&tgt
->waitQ
);
4320 INIT_LIST_HEAD(&tgt
->sess_list
);
4321 INIT_LIST_HEAD(&tgt
->del_sess_list
);
4322 INIT_DELAYED_WORK(&tgt
->sess_del_work
,
4323 (void (*)(struct work_struct
*))qlt_del_sess_work_fn
);
4324 spin_lock_init(&tgt
->sess_work_lock
);
4325 INIT_WORK(&tgt
->sess_work
, qlt_sess_work_fn
);
4326 INIT_LIST_HEAD(&tgt
->sess_works_list
);
4327 spin_lock_init(&tgt
->srr_lock
);
4328 INIT_LIST_HEAD(&tgt
->srr_ctio_list
);
4329 INIT_LIST_HEAD(&tgt
->srr_imm_list
);
4330 INIT_WORK(&tgt
->srr_work
, qlt_handle_srr_work
);
4331 atomic_set(&tgt
->tgt_global_resets_count
, 0);
4333 ha
->tgt
.qla_tgt
= tgt
;
4335 ql_dbg(ql_dbg_tgt
, base_vha
, 0xe067,
4336 "qla_target(%d): using 64 Bit PCI addressing",
4338 tgt
->tgt_enable_64bit_addr
= 1;
4340 tgt
->sg_tablesize
= QLA_TGT_MAX_SG_24XX(base_vha
->req
->length
- 3);
4341 tgt
->datasegs_per_cmd
= QLA_TGT_DATASEGS_PER_CMD_24XX
;
4342 tgt
->datasegs_per_cont
= QLA_TGT_DATASEGS_PER_CONT_24XX
;
4344 mutex_lock(&qla_tgt_mutex
);
4345 list_add_tail(&tgt
->tgt_list_entry
, &qla_tgt_glist
);
4346 mutex_unlock(&qla_tgt_mutex
);
4351 /* Must be called under tgt_host_action_mutex */
4352 int qlt_remove_target(struct qla_hw_data
*ha
, struct scsi_qla_host
*vha
)
4354 if (!ha
->tgt
.qla_tgt
)
4357 mutex_lock(&qla_tgt_mutex
);
4358 list_del(&ha
->tgt
.qla_tgt
->tgt_list_entry
);
4359 mutex_unlock(&qla_tgt_mutex
);
4361 ql_dbg(ql_dbg_tgt
, vha
, 0xe03c, "Unregistering target for host %ld(%p)",
4363 qlt_release(ha
->tgt
.qla_tgt
);
4368 static void qlt_lport_dump(struct scsi_qla_host
*vha
, u64 wwpn
,
4373 pr_debug("qla2xxx HW vha->node_name: ");
4374 for (i
= 0; i
< WWN_SIZE
; i
++)
4375 pr_debug("%02x ", vha
->node_name
[i
]);
4377 pr_debug("qla2xxx HW vha->port_name: ");
4378 for (i
= 0; i
< WWN_SIZE
; i
++)
4379 pr_debug("%02x ", vha
->port_name
[i
]);
4382 pr_debug("qla2xxx passed configfs WWPN: ");
4383 put_unaligned_be64(wwpn
, b
);
4384 for (i
= 0; i
< WWN_SIZE
; i
++)
4385 pr_debug("%02x ", b
[i
]);
4390 * qla_tgt_lport_register - register lport with external module
4392 * @qla_tgt_ops: Pointer for tcm_qla2xxx qla_tgt_ops
4393 * @wwpn: Passwd FC target WWPN
4394 * @callback: lport initialization callback for tcm_qla2xxx code
4395 * @target_lport_ptr: pointer for tcm_qla2xxx specific lport data
4397 int qlt_lport_register(struct qla_tgt_func_tmpl
*qla_tgt_ops
, u64 wwpn
,
4398 int (*callback
)(struct scsi_qla_host
*), void *target_lport_ptr
)
4400 struct qla_tgt
*tgt
;
4401 struct scsi_qla_host
*vha
;
4402 struct qla_hw_data
*ha
;
4403 struct Scsi_Host
*host
;
4404 unsigned long flags
;
4408 mutex_lock(&qla_tgt_mutex
);
4409 list_for_each_entry(tgt
, &qla_tgt_glist
, tgt_list_entry
) {
4417 if (ha
->tgt
.tgt_ops
!= NULL
)
4420 if (!(host
->hostt
->supported_mode
& MODE_TARGET
))
4423 spin_lock_irqsave(&ha
->hardware_lock
, flags
);
4424 if (host
->active_mode
& MODE_TARGET
) {
4425 pr_debug("MODE_TARGET already active on qla2xxx(%d)\n",
4427 spin_unlock_irqrestore(&ha
->hardware_lock
, flags
);
4430 spin_unlock_irqrestore(&ha
->hardware_lock
, flags
);
4432 if (!scsi_host_get(host
)) {
4433 ql_dbg(ql_dbg_tgt
, vha
, 0xe068,
4434 "Unable to scsi_host_get() for"
4435 " qla2xxx scsi_host\n");
4438 qlt_lport_dump(vha
, wwpn
, b
);
4440 if (memcmp(vha
->port_name
, b
, WWN_SIZE
)) {
4441 scsi_host_put(host
);
4445 * Setup passed parameters ahead of invoking callback
4447 ha
->tgt
.tgt_ops
= qla_tgt_ops
;
4448 ha
->tgt
.target_lport_ptr
= target_lport_ptr
;
4449 rc
= (*callback
)(vha
);
4451 ha
->tgt
.tgt_ops
= NULL
;
4452 ha
->tgt
.target_lport_ptr
= NULL
;
4454 mutex_unlock(&qla_tgt_mutex
);
4457 mutex_unlock(&qla_tgt_mutex
);
4461 EXPORT_SYMBOL(qlt_lport_register
);
4464 * qla_tgt_lport_deregister - Degister lport
4466 * @vha: Registered scsi_qla_host pointer
4468 void qlt_lport_deregister(struct scsi_qla_host
*vha
)
4470 struct qla_hw_data
*ha
= vha
->hw
;
4471 struct Scsi_Host
*sh
= vha
->host
;
4473 * Clear the target_lport_ptr qla_target_template pointer in qla_hw_data
4475 ha
->tgt
.target_lport_ptr
= NULL
;
4476 ha
->tgt
.tgt_ops
= NULL
;
4478 * Release the Scsi_Host reference for the underlying qla2xxx host
4482 EXPORT_SYMBOL(qlt_lport_deregister
);
4484 /* Must be called under HW lock */
4485 void qlt_set_mode(struct scsi_qla_host
*vha
)
4487 struct qla_hw_data
*ha
= vha
->hw
;
4489 switch (ql2x_ini_mode
) {
4490 case QLA2XXX_INI_MODE_DISABLED
:
4491 case QLA2XXX_INI_MODE_EXCLUSIVE
:
4492 vha
->host
->active_mode
= MODE_TARGET
;
4494 case QLA2XXX_INI_MODE_ENABLED
:
4495 vha
->host
->active_mode
|= MODE_TARGET
;
4501 if (ha
->tgt
.ini_mode_force_reverse
)
4502 qla_reverse_ini_mode(vha
);
4505 /* Must be called under HW lock */
4506 void qlt_clear_mode(struct scsi_qla_host
*vha
)
4508 struct qla_hw_data
*ha
= vha
->hw
;
4510 switch (ql2x_ini_mode
) {
4511 case QLA2XXX_INI_MODE_DISABLED
:
4512 vha
->host
->active_mode
= MODE_UNKNOWN
;
4514 case QLA2XXX_INI_MODE_EXCLUSIVE
:
4515 vha
->host
->active_mode
= MODE_INITIATOR
;
4517 case QLA2XXX_INI_MODE_ENABLED
:
4518 vha
->host
->active_mode
&= ~MODE_TARGET
;
4524 if (ha
->tgt
.ini_mode_force_reverse
)
4525 qla_reverse_ini_mode(vha
);
4529 * qla_tgt_enable_vha - NO LOCK HELD
4531 * host_reset, bring up w/ Target Mode Enabled
4534 qlt_enable_vha(struct scsi_qla_host
*vha
)
4536 struct qla_hw_data
*ha
= vha
->hw
;
4537 struct qla_tgt
*tgt
= ha
->tgt
.qla_tgt
;
4538 unsigned long flags
;
4541 ql_dbg(ql_dbg_tgt
, vha
, 0xe069,
4542 "Unable to locate qla_tgt pointer from"
4543 " struct qla_hw_data\n");
4548 spin_lock_irqsave(&ha
->hardware_lock
, flags
);
4549 tgt
->tgt_stopped
= 0;
4551 spin_unlock_irqrestore(&ha
->hardware_lock
, flags
);
4553 set_bit(ISP_ABORT_NEEDED
, &vha
->dpc_flags
);
4554 qla2xxx_wake_dpc(vha
);
4555 qla2x00_wait_for_hba_online(vha
);
4557 EXPORT_SYMBOL(qlt_enable_vha
);
4560 * qla_tgt_disable_vha - NO LOCK HELD
4562 * Disable Target Mode and reset the adapter
4565 qlt_disable_vha(struct scsi_qla_host
*vha
)
4567 struct qla_hw_data
*ha
= vha
->hw
;
4568 struct qla_tgt
*tgt
= ha
->tgt
.qla_tgt
;
4569 unsigned long flags
;
4572 ql_dbg(ql_dbg_tgt
, vha
, 0xe06a,
4573 "Unable to locate qla_tgt pointer from"
4574 " struct qla_hw_data\n");
4579 spin_lock_irqsave(&ha
->hardware_lock
, flags
);
4580 qlt_clear_mode(vha
);
4581 spin_unlock_irqrestore(&ha
->hardware_lock
, flags
);
4583 set_bit(ISP_ABORT_NEEDED
, &vha
->dpc_flags
);
4584 qla2xxx_wake_dpc(vha
);
4585 qla2x00_wait_for_hba_online(vha
);
4589 * Called from qla_init.c:qla24xx_vport_create() contex to setup
4590 * the target mode specific struct scsi_qla_host and struct qla_hw_data
4594 qlt_vport_create(struct scsi_qla_host
*vha
, struct qla_hw_data
*ha
)
4596 if (!qla_tgt_mode_enabled(vha
))
4599 mutex_init(&ha
->tgt
.tgt_mutex
);
4600 mutex_init(&ha
->tgt
.tgt_host_action_mutex
);
4602 qlt_clear_mode(vha
);
4605 * NOTE: Currently the value is kept the same for <24xx and
4606 * >=24xx ISPs. If it is necessary to change it,
4607 * the check should be added for specific ISPs,
4608 * assigning the value appropriately.
4610 ha
->tgt
.atio_q_length
= ATIO_ENTRY_CNT_24XX
;
4614 qlt_rff_id(struct scsi_qla_host
*vha
, struct ct_sns_req
*ct_req
)
4617 * FC-4 Feature bit 0 indicates target functionality to the name server.
4619 if (qla_tgt_mode_enabled(vha
)) {
4620 if (qla_ini_mode_enabled(vha
))
4621 ct_req
->req
.rff_id
.fc4_feature
= BIT_0
| BIT_1
;
4623 ct_req
->req
.rff_id
.fc4_feature
= BIT_0
;
4624 } else if (qla_ini_mode_enabled(vha
)) {
4625 ct_req
->req
.rff_id
.fc4_feature
= BIT_1
;
4630 * qlt_init_atio_q_entries() - Initializes ATIO queue entries.
4633 * Beginning of ATIO ring has initialization control block already built
4634 * by nvram config routine.
4636 * Returns 0 on success.
4639 qlt_init_atio_q_entries(struct scsi_qla_host
*vha
)
4641 struct qla_hw_data
*ha
= vha
->hw
;
4643 struct atio_from_isp
*pkt
= (struct atio_from_isp
*)ha
->tgt
.atio_ring
;
4645 if (!qla_tgt_mode_enabled(vha
))
4648 for (cnt
= 0; cnt
< ha
->tgt
.atio_q_length
; cnt
++) {
4649 pkt
->u
.raw
.signature
= ATIO_PROCESSED
;
4656 * qlt_24xx_process_atio_queue() - Process ATIO queue entries.
4657 * @ha: SCSI driver HA context
4660 qlt_24xx_process_atio_queue(struct scsi_qla_host
*vha
)
4662 struct qla_hw_data
*ha
= vha
->hw
;
4663 struct device_reg_24xx __iomem
*reg
= &ha
->iobase
->isp24
;
4664 struct atio_from_isp
*pkt
;
4667 if (!vha
->flags
.online
)
4670 while (ha
->tgt
.atio_ring_ptr
->signature
!= ATIO_PROCESSED
) {
4671 pkt
= (struct atio_from_isp
*)ha
->tgt
.atio_ring_ptr
;
4672 cnt
= pkt
->u
.raw
.entry_count
;
4674 qlt_24xx_atio_pkt_all_vps(vha
, (struct atio_from_isp
*)pkt
);
4676 for (i
= 0; i
< cnt
; i
++) {
4677 ha
->tgt
.atio_ring_index
++;
4678 if (ha
->tgt
.atio_ring_index
== ha
->tgt
.atio_q_length
) {
4679 ha
->tgt
.atio_ring_index
= 0;
4680 ha
->tgt
.atio_ring_ptr
= ha
->tgt
.atio_ring
;
4682 ha
->tgt
.atio_ring_ptr
++;
4684 pkt
->u
.raw
.signature
= ATIO_PROCESSED
;
4685 pkt
= (struct atio_from_isp
*)ha
->tgt
.atio_ring_ptr
;
4690 /* Adjust ring index */
4691 WRT_REG_DWORD(®
->atio_q_out
, ha
->tgt
.atio_ring_index
);
4695 qlt_24xx_config_rings(struct scsi_qla_host
*vha
, device_reg_t __iomem
*reg
)
4697 struct qla_hw_data
*ha
= vha
->hw
;
4699 /* FIXME: atio_q in/out for ha->mqenable=1..? */
4702 WRT_REG_DWORD(®
->isp25mq
.atio_q_in
, 0);
4703 WRT_REG_DWORD(®
->isp25mq
.atio_q_out
, 0);
4704 RD_REG_DWORD(®
->isp25mq
.atio_q_out
);
4707 /* Setup APTIO registers for target mode */
4708 WRT_REG_DWORD(®
->isp24
.atio_q_in
, 0);
4709 WRT_REG_DWORD(®
->isp24
.atio_q_out
, 0);
4710 RD_REG_DWORD(®
->isp24
.atio_q_out
);
4715 qlt_24xx_config_nvram_stage1(struct scsi_qla_host
*vha
, struct nvram_24xx
*nv
)
4717 struct qla_hw_data
*ha
= vha
->hw
;
4719 if (qla_tgt_mode_enabled(vha
)) {
4720 if (!ha
->tgt
.saved_set
) {
4721 /* We save only once */
4722 ha
->tgt
.saved_exchange_count
= nv
->exchange_count
;
4723 ha
->tgt
.saved_firmware_options_1
=
4724 nv
->firmware_options_1
;
4725 ha
->tgt
.saved_firmware_options_2
=
4726 nv
->firmware_options_2
;
4727 ha
->tgt
.saved_firmware_options_3
=
4728 nv
->firmware_options_3
;
4729 ha
->tgt
.saved_set
= 1;
4732 nv
->exchange_count
= __constant_cpu_to_le16(0xFFFF);
4734 /* Enable target mode */
4735 nv
->firmware_options_1
|= __constant_cpu_to_le32(BIT_4
);
4737 /* Disable ini mode, if requested */
4738 if (!qla_ini_mode_enabled(vha
))
4739 nv
->firmware_options_1
|= __constant_cpu_to_le32(BIT_5
);
4741 /* Disable Full Login after LIP */
4742 nv
->firmware_options_1
&= __constant_cpu_to_le32(~BIT_13
);
4743 /* Enable initial LIP */
4744 nv
->firmware_options_1
&= __constant_cpu_to_le32(~BIT_9
);
4745 /* Enable FC tapes support */
4746 nv
->firmware_options_2
|= __constant_cpu_to_le32(BIT_12
);
4747 /* Disable Full Login after LIP */
4748 nv
->host_p
&= __constant_cpu_to_le32(~BIT_10
);
4749 /* Enable target PRLI control */
4750 nv
->firmware_options_2
|= __constant_cpu_to_le32(BIT_14
);
4752 if (ha
->tgt
.saved_set
) {
4753 nv
->exchange_count
= ha
->tgt
.saved_exchange_count
;
4754 nv
->firmware_options_1
=
4755 ha
->tgt
.saved_firmware_options_1
;
4756 nv
->firmware_options_2
=
4757 ha
->tgt
.saved_firmware_options_2
;
4758 nv
->firmware_options_3
=
4759 ha
->tgt
.saved_firmware_options_3
;
4764 /* out-of-order frames reassembly */
4765 nv
->firmware_options_3
|= BIT_6
|BIT_9
;
4767 if (ha
->tgt
.enable_class_2
) {
4768 if (vha
->flags
.init_done
)
4769 fc_host_supported_classes(vha
->host
) =
4770 FC_COS_CLASS2
| FC_COS_CLASS3
;
4772 nv
->firmware_options_2
|= __constant_cpu_to_le32(BIT_8
);
4774 if (vha
->flags
.init_done
)
4775 fc_host_supported_classes(vha
->host
) = FC_COS_CLASS3
;
4777 nv
->firmware_options_2
&= ~__constant_cpu_to_le32(BIT_8
);
4782 qlt_24xx_config_nvram_stage2(struct scsi_qla_host
*vha
,
4783 struct init_cb_24xx
*icb
)
4785 struct qla_hw_data
*ha
= vha
->hw
;
4787 if (ha
->tgt
.node_name_set
) {
4788 memcpy(icb
->node_name
, ha
->tgt
.tgt_node_name
, WWN_SIZE
);
4789 icb
->firmware_options_1
|= __constant_cpu_to_le32(BIT_14
);
4794 qlt_24xx_process_response_error(struct scsi_qla_host
*vha
,
4795 struct sts_entry_24xx
*pkt
)
4797 switch (pkt
->entry_type
) {
4798 case ABTS_RECV_24XX
:
4799 case ABTS_RESP_24XX
:
4801 case NOTIFY_ACK_TYPE
:
4809 qlt_modify_vp_config(struct scsi_qla_host
*vha
,
4810 struct vp_config_entry_24xx
*vpmod
)
4812 if (qla_tgt_mode_enabled(vha
))
4813 vpmod
->options_idx1
&= ~BIT_5
;
4814 /* Disable ini mode, if requested */
4815 if (!qla_ini_mode_enabled(vha
))
4816 vpmod
->options_idx1
&= ~BIT_4
;
4820 qlt_probe_one_stage1(struct scsi_qla_host
*base_vha
, struct qla_hw_data
*ha
)
4822 if (!QLA_TGT_MODE_ENABLED())
4825 mutex_init(&ha
->tgt
.tgt_mutex
);
4826 mutex_init(&ha
->tgt
.tgt_host_action_mutex
);
4827 qlt_clear_mode(base_vha
);
4831 qlt_mem_alloc(struct qla_hw_data
*ha
)
4833 if (!QLA_TGT_MODE_ENABLED())
4836 ha
->tgt
.tgt_vp_map
= kzalloc(sizeof(struct qla_tgt_vp_map
) *
4837 MAX_MULTI_ID_FABRIC
, GFP_KERNEL
);
4838 if (!ha
->tgt
.tgt_vp_map
)
4841 ha
->tgt
.atio_ring
= dma_alloc_coherent(&ha
->pdev
->dev
,
4842 (ha
->tgt
.atio_q_length
+ 1) * sizeof(struct atio_from_isp
),
4843 &ha
->tgt
.atio_dma
, GFP_KERNEL
);
4844 if (!ha
->tgt
.atio_ring
) {
4845 kfree(ha
->tgt
.tgt_vp_map
);
4852 qlt_mem_free(struct qla_hw_data
*ha
)
4854 if (!QLA_TGT_MODE_ENABLED())
4857 if (ha
->tgt
.atio_ring
) {
4858 dma_free_coherent(&ha
->pdev
->dev
, (ha
->tgt
.atio_q_length
+ 1) *
4859 sizeof(struct atio_from_isp
), ha
->tgt
.atio_ring
,
4862 kfree(ha
->tgt
.tgt_vp_map
);
4865 /* vport_slock to be held by the caller */
4867 qlt_update_vp_map(struct scsi_qla_host
*vha
, int cmd
)
4869 if (!QLA_TGT_MODE_ENABLED())
4874 vha
->hw
->tgt
.tgt_vp_map
[vha
->vp_idx
].vha
= vha
;
4877 vha
->hw
->tgt
.tgt_vp_map
[vha
->d_id
.b
.al_pa
].idx
= vha
->vp_idx
;
4880 vha
->hw
->tgt
.tgt_vp_map
[vha
->vp_idx
].vha
= NULL
;
4883 vha
->hw
->tgt
.tgt_vp_map
[vha
->d_id
.b
.al_pa
].idx
= 0;
4888 static int __init
qlt_parse_ini_mode(void)
4890 if (strcasecmp(qlini_mode
, QLA2XXX_INI_MODE_STR_EXCLUSIVE
) == 0)
4891 ql2x_ini_mode
= QLA2XXX_INI_MODE_EXCLUSIVE
;
4892 else if (strcasecmp(qlini_mode
, QLA2XXX_INI_MODE_STR_DISABLED
) == 0)
4893 ql2x_ini_mode
= QLA2XXX_INI_MODE_DISABLED
;
4894 else if (strcasecmp(qlini_mode
, QLA2XXX_INI_MODE_STR_ENABLED
) == 0)
4895 ql2x_ini_mode
= QLA2XXX_INI_MODE_ENABLED
;
4902 int __init
qlt_init(void)
4906 if (!qlt_parse_ini_mode()) {
4907 ql_log(ql_log_fatal
, NULL
, 0xe06b,
4908 "qlt_parse_ini_mode() failed\n");
4912 if (!QLA_TGT_MODE_ENABLED())
4915 qla_tgt_cmd_cachep
= kmem_cache_create("qla_tgt_cmd_cachep",
4916 sizeof(struct qla_tgt_cmd
), __alignof__(struct qla_tgt_cmd
), 0,
4918 if (!qla_tgt_cmd_cachep
) {
4919 ql_log(ql_log_fatal
, NULL
, 0xe06c,
4920 "kmem_cache_create for qla_tgt_cmd_cachep failed\n");
4924 qla_tgt_mgmt_cmd_cachep
= kmem_cache_create("qla_tgt_mgmt_cmd_cachep",
4925 sizeof(struct qla_tgt_mgmt_cmd
), __alignof__(struct
4926 qla_tgt_mgmt_cmd
), 0, NULL
);
4927 if (!qla_tgt_mgmt_cmd_cachep
) {
4928 ql_log(ql_log_fatal
, NULL
, 0xe06d,
4929 "kmem_cache_create for qla_tgt_mgmt_cmd_cachep failed\n");
4934 qla_tgt_mgmt_cmd_mempool
= mempool_create(25, mempool_alloc_slab
,
4935 mempool_free_slab
, qla_tgt_mgmt_cmd_cachep
);
4936 if (!qla_tgt_mgmt_cmd_mempool
) {
4937 ql_log(ql_log_fatal
, NULL
, 0xe06e,
4938 "mempool_create for qla_tgt_mgmt_cmd_mempool failed\n");
4940 goto out_mgmt_cmd_cachep
;
4943 qla_tgt_wq
= alloc_workqueue("qla_tgt_wq", 0, 0);
4945 ql_log(ql_log_fatal
, NULL
, 0xe06f,
4946 "alloc_workqueue for qla_tgt_wq failed\n");
4948 goto out_cmd_mempool
;
4951 * Return 1 to signal that initiator-mode is being disabled
4953 return (ql2x_ini_mode
== QLA2XXX_INI_MODE_DISABLED
) ? 1 : 0;
4956 mempool_destroy(qla_tgt_mgmt_cmd_mempool
);
4957 out_mgmt_cmd_cachep
:
4958 kmem_cache_destroy(qla_tgt_mgmt_cmd_cachep
);
4960 kmem_cache_destroy(qla_tgt_cmd_cachep
);
4966 if (!QLA_TGT_MODE_ENABLED())
4969 destroy_workqueue(qla_tgt_wq
);
4970 mempool_destroy(qla_tgt_mgmt_cmd_mempool
);
4971 kmem_cache_destroy(qla_tgt_mgmt_cmd_cachep
);
4972 kmem_cache_destroy(qla_tgt_cmd_cachep
);