/*
 *  qla_target.c SCSI LLD infrastructure for QLogic 22xx/23xx/24xx/25xx
 *
 *  based on qla2x00t.c code:
 *
 *  Copyright (C) 2004 - 2010 Vladislav Bolkhovitin <vst@vlnb.net>
 *  Copyright (C) 2004 - 2005 Leonid Stoljar
 *  Copyright (C) 2006 Nathaniel Clark <nate@misrule.us>
 *  Copyright (C) 2006 - 2010 ID7 Ltd.
 *
 *  Forward port and refactoring to modern qla2xxx and target/configfs
 *
 *  Copyright (C) 2010-2013 Nicholas A. Bellinger <nab@kernel.org>
 *
 *  This program is free software; you can redistribute it and/or
 *  modify it under the terms of the GNU General Public License
 *  as published by the Free Software Foundation, version 2
 *  of the License.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 *  GNU General Public License for more details.
 */
#include <linux/module.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/blkdev.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/delay.h>
#include <linux/list.h>
#include <linux/workqueue.h>
#include <asm/unaligned.h>
#include <scsi/scsi.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_tcq.h>
#include <target/target_core_base.h>
#include <target/target_core_fabric.h>

#include "qla_target.h"
static char *qlini_mode = QLA2XXX_INI_MODE_STR_ENABLED;
module_param(qlini_mode, charp, S_IRUGO);
MODULE_PARM_DESC(qlini_mode,
    "Determines when initiator mode will be enabled. Possible values: "
    "\"exclusive\" - initiator mode will be enabled on load, "
    "disabled on enabling target mode and then on disabling target mode "
    "\"disabled\" - initiator mode will never be enabled; "
    "\"enabled\" (default) - initiator mode will always stay enabled.");

int ql2x_ini_mode = QLA2XXX_INI_MODE_EXCLUSIVE;
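/*
 * Note: ql2x_ini_mode holds the parsed, numeric form of the qlini_mode
 * module parameter above; it is what the rest of the driver checks when
 * deciding whether initiator mode should stay enabled alongside target mode.
 */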
/*
 * From scsi/fc/fc_fcp.h
 */
enum fcp_resp_rsp_codes {
    FCP_DATA_LEN_INVALID = 1,
    FCP_CMND_FIELDS_INVALID = 2,
    FCP_DATA_PARAM_MISMATCH = 3,
    FCP_TMF_INVALID_LUN = 9,
};
/*
 * fc_pri_ta from scsi/fc/fc_fcp.h
 */
#define FCP_PTA_SIMPLE          0    /* simple task attribute */
#define FCP_PTA_HEADQ           1    /* head of queue task attribute */
#define FCP_PTA_ORDERED         2    /* ordered task attribute */
#define FCP_PTA_ACA             4    /* auto. contingent allegiance */
#define FCP_PTA_MASK            7    /* mask for task attribute field */
#define FCP_PRI_SHIFT           3    /* priority field starts in bit 3 */
#define FCP_PRI_RESVD_MASK      0x80 /* reserved bits in priority field */
/*
 * This driver calls qla2x00_alloc_iocbs() and qla2x00_issue_marker(), which
 * must be called under the HW lock and may drop and re-acquire it internally.
 * That is not a problem, because in the current implementation, at the time
 * those functions are called:
 *
 *  - either the context is IRQ and only the IRQ handler can modify the HW
 *    data, including ring-related fields,
 *
 *  - or access to the target mode variables in struct qla_tgt does not
 *    cross those functions' boundaries, except for tgt_stop, which is
 *    additionally protected by irq_cmd_count.
 */
/* Predefs for callbacks handed to qla2xxx LLD */
static void qlt_24xx_atio_pkt(struct scsi_qla_host *ha,
    struct atio_from_isp *pkt);
static void qlt_response_pkt(struct scsi_qla_host *ha, response_t *pkt);
static int qlt_issue_task_mgmt(struct qla_tgt_sess *sess, uint32_t lun,
    int fn, void *iocb, int flags);
static void qlt_send_term_exchange(struct scsi_qla_host *ha, struct qla_tgt_cmd
    *cmd, struct atio_from_isp *atio, int ha_locked);
static void qlt_reject_free_srr_imm(struct scsi_qla_host *ha,
    struct qla_tgt_srr_imm *imm, int ha_lock);

static struct kmem_cache *qla_tgt_mgmt_cmd_cachep;
static mempool_t *qla_tgt_mgmt_cmd_mempool;
static struct workqueue_struct *qla_tgt_wq;
static DEFINE_MUTEX(qla_tgt_mutex);
static LIST_HEAD(qla_tgt_glist);
/* ha->hardware_lock supposed to be held on entry (to protect tgt->sess_list) */
static struct qla_tgt_sess *qlt_find_sess_by_port_name(
    struct qla_tgt *tgt,
    const uint8_t *port_name)
{
    struct qla_tgt_sess *sess;

    list_for_each_entry(sess, &tgt->sess_list, sess_list_entry) {
        if (!memcmp(sess->port_name, port_name, WWN_SIZE))
            return sess;
    }

    return NULL;
}
/* Might release hw lock, then reacquire!! */
static inline int qlt_issue_marker(struct scsi_qla_host *vha, int vha_locked)
{
    /* Send marker if required */
    if (unlikely(vha->marker_needed != 0)) {
        int rc = qla2x00_issue_marker(vha, vha_locked);
        if (rc != QLA_SUCCESS) {
            ql_dbg(ql_dbg_tgt, vha, 0xe03d,
                "qla_target(%d): issue_marker() failed\n",
                vha->vp_idx);
        }
        return rc;
    }
    return QLA_SUCCESS;
}
struct scsi_qla_host *qlt_find_host_by_d_id(struct scsi_qla_host *vha,
    uint8_t *d_id)
{
    struct qla_hw_data *ha = vha->hw;
    uint8_t vp_idx;

    if ((vha->d_id.b.area != d_id[1]) || (vha->d_id.b.domain != d_id[0]))
        return NULL;

    if (vha->d_id.b.al_pa == d_id[2])
        return vha;

    BUG_ON(ha->tgt.tgt_vp_map == NULL);
    vp_idx = ha->tgt.tgt_vp_map[d_id[2]].idx;
    if (likely(test_bit(vp_idx, ha->vp_idx_map)))
        return ha->tgt.tgt_vp_map[vp_idx].vha;

    return NULL;
}
struct scsi_qla_host *qlt_find_host_by_vp_idx(struct scsi_qla_host *vha,
    uint16_t vp_idx)
{
    struct qla_hw_data *ha = vha->hw;

    if (vha->vp_idx == vp_idx)
        return vha;

    BUG_ON(ha->tgt.tgt_vp_map == NULL);
    if (likely(test_bit(vp_idx, ha->vp_idx_map)))
        return ha->tgt.tgt_vp_map[vp_idx].vha;

    return NULL;
}
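/*
 * Both lookup helpers above resolve the scsi_qla_host that owns an incoming
 * exchange: qlt_find_host_by_d_id() maps the destination port ID
 * (domain/area/al_pa) through ha->tgt.tgt_vp_map to a virtual-port index,
 * while qlt_find_host_by_vp_idx() uses a vp_index carried in the IOCB
 * directly. A NULL return means no matching vport is known.
 */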
void qlt_24xx_atio_pkt_all_vps(struct scsi_qla_host *vha,
    struct atio_from_isp *atio)
{
    ql_dbg(ql_dbg_tgt, vha, 0xe072,
        "%s: qla_target(%d): type %x ox_id %04x\n",
        __func__, vha->vp_idx, atio->u.raw.entry_type,
        be16_to_cpu(atio->u.isp24.fcp_hdr.ox_id));

    switch (atio->u.raw.entry_type) {
    case ATIO_TYPE7:
    {
        struct scsi_qla_host *host = qlt_find_host_by_d_id(vha,
            atio->u.isp24.fcp_hdr.d_id);
        if (unlikely(NULL == host)) {
            ql_dbg(ql_dbg_tgt, vha, 0xe03e,
                "qla_target(%d): Received ATIO_TYPE7 "
                "with unknown d_id %x:%x:%x\n", vha->vp_idx,
                atio->u.isp24.fcp_hdr.d_id[0],
                atio->u.isp24.fcp_hdr.d_id[1],
                atio->u.isp24.fcp_hdr.d_id[2]);
            break;
        }
        qlt_24xx_atio_pkt(host, atio);
        break;
    }

    case IMMED_NOTIFY_TYPE:
    {
        struct scsi_qla_host *host = vha;
        struct imm_ntfy_from_isp *entry =
            (struct imm_ntfy_from_isp *)atio;

        if ((entry->u.isp24.vp_index != 0xFF) &&
            (entry->u.isp24.nport_handle != 0xFFFF)) {
            host = qlt_find_host_by_vp_idx(vha,
                entry->u.isp24.vp_index);
            if (unlikely(!host)) {
                ql_dbg(ql_dbg_tgt, vha, 0xe03f,
                    "qla_target(%d): Received "
                    "ATIO (IMMED_NOTIFY_TYPE) "
                    "with unknown vp_index %d\n",
                    vha->vp_idx, entry->u.isp24.vp_index);
                break;
            }
        }
        qlt_24xx_atio_pkt(host, atio);
        break;
    }

    default:
        ql_dbg(ql_dbg_tgt, vha, 0xe040,
            "qla_target(%d): Received unknown ATIO atio "
            "type %x\n", vha->vp_idx, atio->u.raw.entry_type);
        break;
    }
}
void qlt_response_pkt_all_vps(struct scsi_qla_host *vha, response_t *pkt)
{
    switch (pkt->entry_type) {
    case CTIO_CRC2:
        ql_dbg(ql_dbg_tgt, vha, 0xe073,
            "qla_target(%d):%s: CRC2 Response pkt\n",
            vha->vp_idx, __func__);
    case CTIO_TYPE7:
    {
        struct ctio7_from_24xx *entry = (struct ctio7_from_24xx *)pkt;
        struct scsi_qla_host *host = qlt_find_host_by_vp_idx(vha,
            entry->vp_index);
        if (unlikely(!host)) {
            ql_dbg(ql_dbg_tgt, vha, 0xe041,
                "qla_target(%d): Response pkt (CTIO_TYPE7) "
                "received, with unknown vp_index %d\n",
                vha->vp_idx, entry->vp_index);
            break;
        }
        qlt_response_pkt(host, pkt);
        break;
    }

    case IMMED_NOTIFY_TYPE:
    {
        struct scsi_qla_host *host = vha;
        struct imm_ntfy_from_isp *entry =
            (struct imm_ntfy_from_isp *)pkt;

        host = qlt_find_host_by_vp_idx(vha, entry->u.isp24.vp_index);
        if (unlikely(!host)) {
            ql_dbg(ql_dbg_tgt, vha, 0xe042,
                "qla_target(%d): Response pkt (IMMED_NOTIFY_TYPE) "
                "received, with unknown vp_index %d\n",
                vha->vp_idx, entry->u.isp24.vp_index);
            break;
        }
        qlt_response_pkt(host, pkt);
        break;
    }

    case NOTIFY_ACK_TYPE:
    {
        struct scsi_qla_host *host = vha;
        struct nack_to_isp *entry = (struct nack_to_isp *)pkt;

        if (0xFF != entry->u.isp24.vp_index) {
            host = qlt_find_host_by_vp_idx(vha,
                entry->u.isp24.vp_index);
            if (unlikely(!host)) {
                ql_dbg(ql_dbg_tgt, vha, 0xe043,
                    "qla_target(%d): Response "
                    "pkt (NOTIFY_ACK_TYPE) "
                    "received, with unknown "
                    "vp_index %d\n", vha->vp_idx,
                    entry->u.isp24.vp_index);
                break;
            }
        }
        qlt_response_pkt(host, pkt);
        break;
    }

    case ABTS_RECV_24XX:
    {
        struct abts_recv_from_24xx *entry =
            (struct abts_recv_from_24xx *)pkt;
        struct scsi_qla_host *host = qlt_find_host_by_vp_idx(vha,
            entry->vp_index);
        if (unlikely(!host)) {
            ql_dbg(ql_dbg_tgt, vha, 0xe044,
                "qla_target(%d): Response pkt "
                "(ABTS_RECV_24XX) received, with unknown "
                "vp_index %d\n", vha->vp_idx, entry->vp_index);
            break;
        }
        qlt_response_pkt(host, pkt);
        break;
    }

    case ABTS_RESP_24XX:
    {
        struct abts_resp_to_24xx *entry =
            (struct abts_resp_to_24xx *)pkt;
        struct scsi_qla_host *host = qlt_find_host_by_vp_idx(vha,
            entry->vp_index);
        if (unlikely(!host)) {
            ql_dbg(ql_dbg_tgt, vha, 0xe045,
                "qla_target(%d): Response pkt "
                "(ABTS_RECV_24XX) received, with unknown "
                "vp_index %d\n", vha->vp_idx, entry->vp_index);
            break;
        }
        qlt_response_pkt(host, pkt);
        break;
    }

    default:
        qlt_response_pkt(vha, pkt);
        break;
    }
}
static void qlt_free_session_done(struct work_struct *work)
{
    struct qla_tgt_sess *sess = container_of(work, struct qla_tgt_sess,
        free_work);
    struct qla_tgt *tgt = sess->tgt;
    struct scsi_qla_host *vha = sess->vha;
    struct qla_hw_data *ha = vha->hw;

    /*
     * Release the target session for FC Nexus from fabric module code.
     */
    if (sess->se_sess != NULL)
        ha->tgt.tgt_ops->free_session(sess);

    ql_dbg(ql_dbg_tgt_mgt, vha, 0xf001,
        "Unregistration of sess %p finished\n", sess);

    /*
     * We need to protect against a race when tgt is freed before or
     * inside wake_up().
     */
    if (tgt->sess_count == 0)
        wake_up_all(&tgt->waitQ);
}
/* ha->hardware_lock supposed to be held on entry */
void qlt_unreg_sess(struct qla_tgt_sess *sess)
{
    struct scsi_qla_host *vha = sess->vha;

    vha->hw->tgt.tgt_ops->clear_nacl_from_fcport_map(sess);

    list_del(&sess->sess_list_entry);
    list_del(&sess->del_list_entry);

    INIT_WORK(&sess->free_work, qlt_free_session_done);
    schedule_work(&sess->free_work);
}
EXPORT_SYMBOL(qlt_unreg_sess);
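/*
 * Session teardown is split in two steps: qlt_unreg_sess() runs under
 * ha->hardware_lock, unhooks the session from the fcport map and the tgt
 * lists, and defers the actual release to process context by scheduling
 * ->free_work, which runs qlt_free_session_done() above.
 */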
388 /* ha->hardware_lock supposed to be held on entry */
389 static int qlt_reset(struct scsi_qla_host
*vha
, void *iocb
, int mcmd
)
391 struct qla_hw_data
*ha
= vha
->hw
;
392 struct qla_tgt_sess
*sess
= NULL
;
393 uint32_t unpacked_lun
, lun
= 0;
396 struct imm_ntfy_from_isp
*n
= (struct imm_ntfy_from_isp
*)iocb
;
397 struct atio_from_isp
*a
= (struct atio_from_isp
*)iocb
;
399 loop_id
= le16_to_cpu(n
->u
.isp24
.nport_handle
);
400 if (loop_id
== 0xFFFF) {
401 #if 0 /* FIXME: Re-enable Global event handling.. */
403 atomic_inc(&ha
->tgt
.qla_tgt
->tgt_global_resets_count
);
404 qlt_clear_tgt_db(ha
->tgt
.qla_tgt
, 1);
405 if (!list_empty(&ha
->tgt
.qla_tgt
->sess_list
)) {
406 sess
= list_entry(ha
->tgt
.qla_tgt
->sess_list
.next
,
407 typeof(*sess
), sess_list_entry
);
409 case QLA_TGT_NEXUS_LOSS_SESS
:
410 mcmd
= QLA_TGT_NEXUS_LOSS
;
412 case QLA_TGT_ABORT_ALL_SESS
:
413 mcmd
= QLA_TGT_ABORT_ALL
;
415 case QLA_TGT_NEXUS_LOSS
:
416 case QLA_TGT_ABORT_ALL
:
419 ql_dbg(ql_dbg_tgt
, vha
, 0xe046,
420 "qla_target(%d): Not allowed "
421 "command %x in %s", vha
->vp_idx
,
430 sess
= ha
->tgt
.tgt_ops
->find_sess_by_loop_id(vha
, loop_id
);
433 ql_dbg(ql_dbg_tgt
, vha
, 0xe000,
434 "Using sess for qla_tgt_reset: %p\n", sess
);
440 ql_dbg(ql_dbg_tgt
, vha
, 0xe047,
441 "scsi(%ld): resetting (session %p from port %8phC mcmd %x, "
442 "loop_id %d)\n", vha
->host_no
, sess
, sess
->port_name
,
445 lun
= a
->u
.isp24
.fcp_cmnd
.lun
;
446 unpacked_lun
= scsilun_to_int((struct scsi_lun
*)&lun
);
448 return qlt_issue_task_mgmt(sess
, unpacked_lun
, mcmd
,
449 iocb
, QLA24XX_MGMT_SEND_NACK
);
452 /* ha->hardware_lock supposed to be held on entry */
453 static void qlt_schedule_sess_for_deletion(struct qla_tgt_sess
*sess
,
456 struct qla_tgt
*tgt
= sess
->tgt
;
457 uint32_t dev_loss_tmo
= tgt
->ha
->port_down_retry_count
+ 5;
462 ql_dbg(ql_dbg_tgt
, sess
->vha
, 0xe001,
463 "Scheduling sess %p for deletion\n", sess
);
464 list_add_tail(&sess
->del_list_entry
, &tgt
->del_sess_list
);
470 sess
->expires
= jiffies
+ dev_loss_tmo
* HZ
;
472 ql_dbg(ql_dbg_tgt
, sess
->vha
, 0xe048,
473 "qla_target(%d): session for port %8phC (loop ID %d) scheduled for "
474 "deletion in %u secs (expires: %lu) immed: %d\n",
475 sess
->vha
->vp_idx
, sess
->port_name
, sess
->loop_id
, dev_loss_tmo
,
476 sess
->expires
, immediate
);
479 schedule_delayed_work(&tgt
->sess_del_work
, 0);
481 schedule_delayed_work(&tgt
->sess_del_work
,
482 sess
->expires
- jiffies
);
485 /* ha->hardware_lock supposed to be held on entry */
486 static void qlt_clear_tgt_db(struct qla_tgt
*tgt
, bool local_only
)
488 struct qla_tgt_sess
*sess
;
490 list_for_each_entry(sess
, &tgt
->sess_list
, sess_list_entry
)
491 qlt_schedule_sess_for_deletion(sess
, true);
493 /* At this point tgt could be already dead */
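/*
 * Note: the loop above only schedules each session for deletion; the actual
 * teardown is done from tgt->sess_del_work (see qlt_del_sess_work_fn()).
 */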
496 static int qla24xx_get_loop_id(struct scsi_qla_host
*vha
, const uint8_t *s_id
,
499 struct qla_hw_data
*ha
= vha
->hw
;
500 dma_addr_t gid_list_dma
;
501 struct gid_list_info
*gid_list
;
506 gid_list
= dma_alloc_coherent(&ha
->pdev
->dev
, qla2x00_gid_list_size(ha
),
507 &gid_list_dma
, GFP_KERNEL
);
509 ql_dbg(ql_dbg_tgt_mgt
, vha
, 0xf044,
510 "qla_target(%d): DMA Alloc failed of %u\n",
511 vha
->vp_idx
, qla2x00_gid_list_size(ha
));
515 /* Get list of logged in devices */
516 rc
= qla2x00_get_id_list(vha
, gid_list
, gid_list_dma
, &entries
);
517 if (rc
!= QLA_SUCCESS
) {
518 ql_dbg(ql_dbg_tgt_mgt
, vha
, 0xf045,
519 "qla_target(%d): get_id_list() failed: %x\n",
522 goto out_free_id_list
;
525 id_iter
= (char *)gid_list
;
527 for (i
= 0; i
< entries
; i
++) {
528 struct gid_list_info
*gid
= (struct gid_list_info
*)id_iter
;
529 if ((gid
->al_pa
== s_id
[2]) &&
530 (gid
->area
== s_id
[1]) &&
531 (gid
->domain
== s_id
[0])) {
532 *loop_id
= le16_to_cpu(gid
->loop_id
);
536 id_iter
+= ha
->gid_list_info_size
;
540 dma_free_coherent(&ha
->pdev
->dev
, qla2x00_gid_list_size(ha
),
541 gid_list
, gid_list_dma
);
545 /* ha->hardware_lock supposed to be held on entry */
546 static void qlt_undelete_sess(struct qla_tgt_sess
*sess
)
548 BUG_ON(!sess
->deleted
);
550 list_del(&sess
->del_list_entry
);
554 static void qlt_del_sess_work_fn(struct delayed_work
*work
)
556 struct qla_tgt
*tgt
= container_of(work
, struct qla_tgt
,
558 struct scsi_qla_host
*vha
= tgt
->vha
;
559 struct qla_hw_data
*ha
= vha
->hw
;
560 struct qla_tgt_sess
*sess
;
561 unsigned long flags
, elapsed
;
563 spin_lock_irqsave(&ha
->hardware_lock
, flags
);
564 while (!list_empty(&tgt
->del_sess_list
)) {
565 sess
= list_entry(tgt
->del_sess_list
.next
, typeof(*sess
),
568 if (time_after_eq(elapsed
, sess
->expires
)) {
569 qlt_undelete_sess(sess
);
571 ql_dbg(ql_dbg_tgt_mgt
, vha
, 0xf004,
572 "Timeout: sess %p about to be deleted\n",
574 ha
->tgt
.tgt_ops
->shutdown_sess(sess
);
575 ha
->tgt
.tgt_ops
->put_sess(sess
);
577 schedule_delayed_work(&tgt
->sess_del_work
,
578 sess
->expires
- elapsed
);
582 spin_unlock_irqrestore(&ha
->hardware_lock
, flags
);
 * Adds an extra ref to allow dropping the hw lock after adding the sess to the list.
 * The caller must put it.
589 static struct qla_tgt_sess
*qlt_create_sess(
590 struct scsi_qla_host
*vha
,
594 struct qla_hw_data
*ha
= vha
->hw
;
595 struct qla_tgt_sess
*sess
;
597 unsigned char be_sid
[3];
599 /* Check to avoid double sessions */
600 spin_lock_irqsave(&ha
->hardware_lock
, flags
);
601 list_for_each_entry(sess
, &vha
->vha_tgt
.qla_tgt
->sess_list
,
603 if (!memcmp(sess
->port_name
, fcport
->port_name
, WWN_SIZE
)) {
604 ql_dbg(ql_dbg_tgt_mgt
, vha
, 0xf005,
605 "Double sess %p found (s_id %x:%x:%x, "
606 "loop_id %d), updating to d_id %x:%x:%x, "
607 "loop_id %d", sess
, sess
->s_id
.b
.domain
,
608 sess
->s_id
.b
.al_pa
, sess
->s_id
.b
.area
,
609 sess
->loop_id
, fcport
->d_id
.b
.domain
,
610 fcport
->d_id
.b
.al_pa
, fcport
->d_id
.b
.area
,
614 qlt_undelete_sess(sess
);
616 kref_get(&sess
->se_sess
->sess_kref
);
617 ha
->tgt
.tgt_ops
->update_sess(sess
, fcport
->d_id
, fcport
->loop_id
,
618 (fcport
->flags
& FCF_CONF_COMP_SUPPORTED
));
620 if (sess
->local
&& !local
)
622 spin_unlock_irqrestore(&ha
->hardware_lock
, flags
);
627 spin_unlock_irqrestore(&ha
->hardware_lock
, flags
);
629 sess
= kzalloc(sizeof(*sess
), GFP_KERNEL
);
631 ql_dbg(ql_dbg_tgt_mgt
, vha
, 0xf04a,
632 "qla_target(%u): session allocation failed, all commands "
633 "from port %8phC will be refused", vha
->vp_idx
,
638 sess
->tgt
= vha
->vha_tgt
.qla_tgt
;
640 sess
->s_id
= fcport
->d_id
;
641 sess
->loop_id
= fcport
->loop_id
;
644 ql_dbg(ql_dbg_tgt_mgt
, vha
, 0xf006,
645 "Adding sess %p to tgt %p via ->check_initiator_node_acl()\n",
646 sess
, vha
->vha_tgt
.qla_tgt
);
648 be_sid
[0] = sess
->s_id
.b
.domain
;
649 be_sid
[1] = sess
->s_id
.b
.area
;
650 be_sid
[2] = sess
->s_id
.b
.al_pa
;
 * Determine if this fc_port->port_name is allowed to access
 * target mode using explicit NodeACLs+MappedLUNs, or using
 * TPG demo mode. If this is successful, a target mode FC nexus
657 if (ha
->tgt
.tgt_ops
->check_initiator_node_acl(vha
,
658 &fcport
->port_name
[0], sess
, &be_sid
[0], fcport
->loop_id
) < 0) {
 * Take an extra reference to ->sess_kref here to handle qla_tgt_sess
 * access across ->hardware_lock reacquire.
666 kref_get(&sess
->se_sess
->sess_kref
);
668 sess
->conf_compl_supported
= (fcport
->flags
& FCF_CONF_COMP_SUPPORTED
);
669 BUILD_BUG_ON(sizeof(sess
->port_name
) != sizeof(fcport
->port_name
));
670 memcpy(sess
->port_name
, fcport
->port_name
, sizeof(sess
->port_name
));
672 spin_lock_irqsave(&ha
->hardware_lock
, flags
);
673 list_add_tail(&sess
->sess_list_entry
, &vha
->vha_tgt
.qla_tgt
->sess_list
);
674 vha
->vha_tgt
.qla_tgt
->sess_count
++;
675 spin_unlock_irqrestore(&ha
->hardware_lock
, flags
);
677 ql_dbg(ql_dbg_tgt_mgt
, vha
, 0xf04b,
678 "qla_target(%d): %ssession for wwn %8phC (loop_id %d, "
679 "s_id %x:%x:%x, confirmed completion %ssupported) added\n",
680 vha
->vp_idx
, local
? "local " : "", fcport
->port_name
,
681 fcport
->loop_id
, sess
->s_id
.b
.domain
, sess
->s_id
.b
.area
,
682 sess
->s_id
.b
.al_pa
, sess
->conf_compl_supported
? "" : "not ");
688 * Called from drivers/scsi/qla2xxx/qla_init.c:qla2x00_reg_remote_port()
690 void qlt_fc_port_added(struct scsi_qla_host
*vha
, fc_port_t
*fcport
)
692 struct qla_hw_data
*ha
= vha
->hw
;
693 struct qla_tgt
*tgt
= vha
->vha_tgt
.qla_tgt
;
694 struct qla_tgt_sess
*sess
;
697 if (!vha
->hw
->tgt
.tgt_ops
)
700 if (!tgt
|| (fcport
->port_type
!= FCT_INITIATOR
))
703 if (qla_ini_mode_enabled(vha
))
706 spin_lock_irqsave(&ha
->hardware_lock
, flags
);
708 spin_unlock_irqrestore(&ha
->hardware_lock
, flags
);
711 sess
= qlt_find_sess_by_port_name(tgt
, fcport
->port_name
);
713 spin_unlock_irqrestore(&ha
->hardware_lock
, flags
);
715 mutex_lock(&vha
->vha_tgt
.tgt_mutex
);
716 sess
= qlt_create_sess(vha
, fcport
, false);
717 mutex_unlock(&vha
->vha_tgt
.tgt_mutex
);
719 spin_lock_irqsave(&ha
->hardware_lock
, flags
);
721 kref_get(&sess
->se_sess
->sess_kref
);
724 qlt_undelete_sess(sess
);
726 ql_dbg(ql_dbg_tgt_mgt
, vha
, 0xf04c,
727 "qla_target(%u): %ssession for port %8phC "
728 "(loop ID %d) reappeared\n", vha
->vp_idx
,
729 sess
->local
? "local " : "", sess
->port_name
,
732 ql_dbg(ql_dbg_tgt_mgt
, vha
, 0xf007,
733 "Reappeared sess %p\n", sess
);
735 ha
->tgt
.tgt_ops
->update_sess(sess
, fcport
->d_id
, fcport
->loop_id
,
736 (fcport
->flags
& FCF_CONF_COMP_SUPPORTED
));
739 if (sess
&& sess
->local
) {
740 ql_dbg(ql_dbg_tgt_mgt
, vha
, 0xf04d,
741 "qla_target(%u): local session for "
742 "port %8phC (loop ID %d) became global\n", vha
->vp_idx
,
743 fcport
->port_name
, sess
->loop_id
);
746 ha
->tgt
.tgt_ops
->put_sess(sess
);
747 spin_unlock_irqrestore(&ha
->hardware_lock
, flags
);
750 void qlt_fc_port_deleted(struct scsi_qla_host
*vha
, fc_port_t
*fcport
)
752 struct qla_hw_data
*ha
= vha
->hw
;
753 struct qla_tgt
*tgt
= vha
->vha_tgt
.qla_tgt
;
754 struct qla_tgt_sess
*sess
;
757 if (!vha
->hw
->tgt
.tgt_ops
)
760 if (!tgt
|| (fcport
->port_type
!= FCT_INITIATOR
))
763 spin_lock_irqsave(&ha
->hardware_lock
, flags
);
765 spin_unlock_irqrestore(&ha
->hardware_lock
, flags
);
768 sess
= qlt_find_sess_by_port_name(tgt
, fcport
->port_name
);
770 spin_unlock_irqrestore(&ha
->hardware_lock
, flags
);
774 ql_dbg(ql_dbg_tgt_mgt
, vha
, 0xf008, "qla_tgt_fc_port_deleted %p", sess
);
777 qlt_schedule_sess_for_deletion(sess
, false);
778 spin_unlock_irqrestore(&ha
->hardware_lock
, flags
);
781 static inline int test_tgt_sess_count(struct qla_tgt
*tgt
)
783 struct qla_hw_data
*ha
= tgt
->ha
;
 * We need to protect against a race when tgt is freed before or
 * inside wake_up().
790 spin_lock_irqsave(&ha
->hardware_lock
, flags
);
791 ql_dbg(ql_dbg_tgt
, tgt
->vha
, 0xe002,
792 "tgt %p, empty(sess_list)=%d sess_count=%d\n",
793 tgt
, list_empty(&tgt
->sess_list
), tgt
->sess_count
);
794 res
= (tgt
->sess_count
== 0);
795 spin_unlock_irqrestore(&ha
->hardware_lock
, flags
);
800 /* Called by tcm_qla2xxx configfs code */
801 int qlt_stop_phase1(struct qla_tgt
*tgt
)
803 struct scsi_qla_host
*vha
= tgt
->vha
;
804 struct qla_hw_data
*ha
= tgt
->ha
;
807 mutex_lock(&qla_tgt_mutex
);
808 if (!vha
->fc_vport
) {
809 struct Scsi_Host
*sh
= vha
->host
;
810 struct fc_host_attrs
*fc_host
= shost_to_fc_host(sh
);
813 spin_lock_irqsave(sh
->host_lock
, flags
);
814 npiv_vports
= (fc_host
->npiv_vports_inuse
);
815 spin_unlock_irqrestore(sh
->host_lock
, flags
);
818 mutex_unlock(&qla_tgt_mutex
);
822 if (tgt
->tgt_stop
|| tgt
->tgt_stopped
) {
823 ql_dbg(ql_dbg_tgt_mgt
, vha
, 0xf04e,
824 "Already in tgt->tgt_stop or tgt_stopped state\n");
825 mutex_unlock(&qla_tgt_mutex
);
829 ql_dbg(ql_dbg_tgt
, vha
, 0xe003, "Stopping target for host %ld(%p)\n",
 * Mutex needed to sync with qla_tgt_fc_port_[added,deleted].
 * Lock is needed, because we can still get an incoming packet.
835 mutex_lock(&vha
->vha_tgt
.tgt_mutex
);
836 spin_lock_irqsave(&ha
->hardware_lock
, flags
);
838 qlt_clear_tgt_db(tgt
, true);
839 spin_unlock_irqrestore(&ha
->hardware_lock
, flags
);
840 mutex_unlock(&vha
->vha_tgt
.tgt_mutex
);
841 mutex_unlock(&qla_tgt_mutex
);
843 flush_delayed_work(&tgt
->sess_del_work
);
845 ql_dbg(ql_dbg_tgt_mgt
, vha
, 0xf009,
846 "Waiting for sess works (tgt %p)", tgt
);
847 spin_lock_irqsave(&tgt
->sess_work_lock
, flags
);
848 while (!list_empty(&tgt
->sess_works_list
)) {
849 spin_unlock_irqrestore(&tgt
->sess_work_lock
, flags
);
850 flush_scheduled_work();
851 spin_lock_irqsave(&tgt
->sess_work_lock
, flags
);
853 spin_unlock_irqrestore(&tgt
->sess_work_lock
, flags
);
855 ql_dbg(ql_dbg_tgt_mgt
, vha
, 0xf00a,
856 "Waiting for tgt %p: list_empty(sess_list)=%d "
857 "sess_count=%d\n", tgt
, list_empty(&tgt
->sess_list
),
860 wait_event(tgt
->waitQ
, test_tgt_sess_count(tgt
));
863 if (!ha
->flags
.host_shutting_down
&& qla_tgt_mode_enabled(vha
))
864 qlt_disable_vha(vha
);
866 /* Wait for sessions to clear out (just in case) */
867 wait_event(tgt
->waitQ
, test_tgt_sess_count(tgt
));
870 EXPORT_SYMBOL(qlt_stop_phase1
);
872 /* Called by tcm_qla2xxx configfs code */
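/*
 * Target shutdown is a two-phase operation: qlt_stop_phase1() above marks
 * the tgt as stopping, schedules every session for deletion and waits for
 * tgt->sess_count to reach zero, while qlt_stop_phase2() below waits for
 * tgt->irq_cmd_count to drain before finally setting tgt->tgt_stopped.
 */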
873 void qlt_stop_phase2(struct qla_tgt
*tgt
)
875 struct qla_hw_data
*ha
= tgt
->ha
;
876 scsi_qla_host_t
*vha
= pci_get_drvdata(ha
->pdev
);
879 if (tgt
->tgt_stopped
) {
880 ql_dbg(ql_dbg_tgt_mgt
, vha
, 0xf04f,
881 "Already in tgt->tgt_stopped state\n");
886 ql_dbg(ql_dbg_tgt_mgt
, vha
, 0xf00b,
887 "Waiting for %d IRQ commands to complete (tgt %p)",
888 tgt
->irq_cmd_count
, tgt
);
890 mutex_lock(&vha
->vha_tgt
.tgt_mutex
);
891 spin_lock_irqsave(&ha
->hardware_lock
, flags
);
892 while (tgt
->irq_cmd_count
!= 0) {
893 spin_unlock_irqrestore(&ha
->hardware_lock
, flags
);
895 spin_lock_irqsave(&ha
->hardware_lock
, flags
);
898 tgt
->tgt_stopped
= 1;
899 spin_unlock_irqrestore(&ha
->hardware_lock
, flags
);
900 mutex_unlock(&vha
->vha_tgt
.tgt_mutex
);
902 ql_dbg(ql_dbg_tgt_mgt
, vha
, 0xf00c, "Stop of tgt %p finished",
905 EXPORT_SYMBOL(qlt_stop_phase2
);
907 /* Called from qlt_remove_target() -> qla2x00_remove_one() */
908 static void qlt_release(struct qla_tgt
*tgt
)
910 scsi_qla_host_t
*vha
= tgt
->vha
;
912 if ((vha
->vha_tgt
.qla_tgt
!= NULL
) && !tgt
->tgt_stopped
)
913 qlt_stop_phase2(tgt
);
915 vha
->vha_tgt
.qla_tgt
= NULL
;
917 ql_dbg(ql_dbg_tgt_mgt
, vha
, 0xf00d,
918 "Release of tgt %p finished\n", tgt
);
923 /* ha->hardware_lock supposed to be held on entry */
924 static int qlt_sched_sess_work(struct qla_tgt
*tgt
, int type
,
925 const void *param
, unsigned int param_size
)
927 struct qla_tgt_sess_work_param
*prm
;
930 prm
= kzalloc(sizeof(*prm
), GFP_ATOMIC
);
932 ql_dbg(ql_dbg_tgt_mgt
, tgt
->vha
, 0xf050,
933 "qla_target(%d): Unable to create session "
934 "work, command will be refused", 0);
938 ql_dbg(ql_dbg_tgt_mgt
, tgt
->vha
, 0xf00e,
939 "Scheduling work (type %d, prm %p)"
940 " to find session for param %p (size %d, tgt %p)\n",
941 type
, prm
, param
, param_size
, tgt
);
944 memcpy(&prm
->tm_iocb
, param
, param_size
);
946 spin_lock_irqsave(&tgt
->sess_work_lock
, flags
);
947 list_add_tail(&prm
->sess_works_list_entry
, &tgt
->sess_works_list
);
948 spin_unlock_irqrestore(&tgt
->sess_work_lock
, flags
);
950 schedule_work(&tgt
->sess_work
);
 * ha->hardware_lock supposed to be held on entry. Might drop it, then reacquire.
958 static void qlt_send_notify_ack(struct scsi_qla_host
*vha
,
959 struct imm_ntfy_from_isp
*ntfy
,
960 uint32_t add_flags
, uint16_t resp_code
, int resp_code_valid
,
961 uint16_t srr_flags
, uint16_t srr_reject_code
, uint8_t srr_explan
)
963 struct qla_hw_data
*ha
= vha
->hw
;
965 struct nack_to_isp
*nack
;
967 ql_dbg(ql_dbg_tgt
, vha
, 0xe004, "Sending NOTIFY_ACK (ha=%p)\n", ha
);
969 /* Send marker if required */
970 if (qlt_issue_marker(vha
, 1) != QLA_SUCCESS
)
973 pkt
= (request_t
*)qla2x00_alloc_iocbs(vha
, NULL
);
975 ql_dbg(ql_dbg_tgt
, vha
, 0xe049,
976 "qla_target(%d): %s failed: unable to allocate "
977 "request packet\n", vha
->vp_idx
, __func__
);
981 if (vha
->vha_tgt
.qla_tgt
!= NULL
)
982 vha
->vha_tgt
.qla_tgt
->notify_ack_expected
++;
984 pkt
->entry_type
= NOTIFY_ACK_TYPE
;
985 pkt
->entry_count
= 1;
987 nack
= (struct nack_to_isp
*)pkt
;
988 nack
->ox_id
= ntfy
->ox_id
;
990 nack
->u
.isp24
.nport_handle
= ntfy
->u
.isp24
.nport_handle
;
991 if (le16_to_cpu(ntfy
->u
.isp24
.status
) == IMM_NTFY_ELS
) {
992 nack
->u
.isp24
.flags
= ntfy
->u
.isp24
.flags
&
993 __constant_cpu_to_le32(NOTIFY24XX_FLAGS_PUREX_IOCB
);
995 nack
->u
.isp24
.srr_rx_id
= ntfy
->u
.isp24
.srr_rx_id
;
996 nack
->u
.isp24
.status
= ntfy
->u
.isp24
.status
;
997 nack
->u
.isp24
.status_subcode
= ntfy
->u
.isp24
.status_subcode
;
998 nack
->u
.isp24
.fw_handle
= ntfy
->u
.isp24
.fw_handle
;
999 nack
->u
.isp24
.exchange_address
= ntfy
->u
.isp24
.exchange_address
;
1000 nack
->u
.isp24
.srr_rel_offs
= ntfy
->u
.isp24
.srr_rel_offs
;
1001 nack
->u
.isp24
.srr_ui
= ntfy
->u
.isp24
.srr_ui
;
1002 nack
->u
.isp24
.srr_flags
= cpu_to_le16(srr_flags
);
1003 nack
->u
.isp24
.srr_reject_code
= srr_reject_code
;
1004 nack
->u
.isp24
.srr_reject_code_expl
= srr_explan
;
1005 nack
->u
.isp24
.vp_index
= ntfy
->u
.isp24
.vp_index
;
1007 ql_dbg(ql_dbg_tgt
, vha
, 0xe005,
1008 "qla_target(%d): Sending 24xx Notify Ack %d\n",
1009 vha
->vp_idx
, nack
->u
.isp24
.status
);
1011 qla2x00_start_iocbs(vha
, vha
->req
);
 * ha->hardware_lock supposed to be held on entry. Might drop it, then reacquire.
1017 static void qlt_24xx_send_abts_resp(struct scsi_qla_host
*vha
,
1018 struct abts_recv_from_24xx
*abts
, uint32_t status
,
1021 struct qla_hw_data
*ha
= vha
->hw
;
1022 struct abts_resp_to_24xx
*resp
;
1026 ql_dbg(ql_dbg_tgt
, vha
, 0xe006,
1027 "Sending task mgmt ABTS response (ha=%p, atio=%p, status=%x\n",
1030 /* Send marker if required */
1031 if (qlt_issue_marker(vha
, 1) != QLA_SUCCESS
)
1034 resp
= (struct abts_resp_to_24xx
*)qla2x00_alloc_iocbs(vha
, NULL
);
1036 ql_dbg(ql_dbg_tgt
, vha
, 0xe04a,
1037 "qla_target(%d): %s failed: unable to allocate "
1038 "request packet", vha
->vp_idx
, __func__
);
1042 resp
->entry_type
= ABTS_RESP_24XX
;
1043 resp
->entry_count
= 1;
1044 resp
->nport_handle
= abts
->nport_handle
;
1045 resp
->vp_index
= vha
->vp_idx
;
1046 resp
->sof_type
= abts
->sof_type
;
1047 resp
->exchange_address
= abts
->exchange_address
;
1048 resp
->fcp_hdr_le
= abts
->fcp_hdr_le
;
1049 f_ctl
= __constant_cpu_to_le32(F_CTL_EXCH_CONTEXT_RESP
|
1050 F_CTL_LAST_SEQ
| F_CTL_END_SEQ
|
1051 F_CTL_SEQ_INITIATIVE
);
1052 p
= (uint8_t *)&f_ctl
;
1053 resp
->fcp_hdr_le
.f_ctl
[0] = *p
++;
1054 resp
->fcp_hdr_le
.f_ctl
[1] = *p
++;
1055 resp
->fcp_hdr_le
.f_ctl
[2] = *p
;
1057 resp
->fcp_hdr_le
.d_id
[0] = abts
->fcp_hdr_le
.d_id
[0];
1058 resp
->fcp_hdr_le
.d_id
[1] = abts
->fcp_hdr_le
.d_id
[1];
1059 resp
->fcp_hdr_le
.d_id
[2] = abts
->fcp_hdr_le
.d_id
[2];
1060 resp
->fcp_hdr_le
.s_id
[0] = abts
->fcp_hdr_le
.s_id
[0];
1061 resp
->fcp_hdr_le
.s_id
[1] = abts
->fcp_hdr_le
.s_id
[1];
1062 resp
->fcp_hdr_le
.s_id
[2] = abts
->fcp_hdr_le
.s_id
[2];
1064 resp
->fcp_hdr_le
.d_id
[0] = abts
->fcp_hdr_le
.s_id
[0];
1065 resp
->fcp_hdr_le
.d_id
[1] = abts
->fcp_hdr_le
.s_id
[1];
1066 resp
->fcp_hdr_le
.d_id
[2] = abts
->fcp_hdr_le
.s_id
[2];
1067 resp
->fcp_hdr_le
.s_id
[0] = abts
->fcp_hdr_le
.d_id
[0];
1068 resp
->fcp_hdr_le
.s_id
[1] = abts
->fcp_hdr_le
.d_id
[1];
1069 resp
->fcp_hdr_le
.s_id
[2] = abts
->fcp_hdr_le
.d_id
[2];
1071 resp
->exchange_addr_to_abort
= abts
->exchange_addr_to_abort
;
1072 if (status
== FCP_TMF_CMPL
) {
1073 resp
->fcp_hdr_le
.r_ctl
= R_CTL_BASIC_LINK_SERV
| R_CTL_B_ACC
;
1074 resp
->payload
.ba_acct
.seq_id_valid
= SEQ_ID_INVALID
;
1075 resp
->payload
.ba_acct
.low_seq_cnt
= 0x0000;
1076 resp
->payload
.ba_acct
.high_seq_cnt
= 0xFFFF;
1077 resp
->payload
.ba_acct
.ox_id
= abts
->fcp_hdr_le
.ox_id
;
1078 resp
->payload
.ba_acct
.rx_id
= abts
->fcp_hdr_le
.rx_id
;
1080 resp
->fcp_hdr_le
.r_ctl
= R_CTL_BASIC_LINK_SERV
| R_CTL_B_RJT
;
1081 resp
->payload
.ba_rjt
.reason_code
=
1082 BA_RJT_REASON_CODE_UNABLE_TO_PERFORM
;
1083 /* Other bytes are zero */
1086 vha
->vha_tgt
.qla_tgt
->abts_resp_expected
++;
1088 qla2x00_start_iocbs(vha
, vha
->req
);
 * ha->hardware_lock supposed to be held on entry. Might drop it, then reacquire.
1094 static void qlt_24xx_retry_term_exchange(struct scsi_qla_host
*vha
,
1095 struct abts_resp_from_24xx_fw
*entry
)
1097 struct ctio7_to_24xx
*ctio
;
1099 ql_dbg(ql_dbg_tgt
, vha
, 0xe007,
1100 "Sending retry TERM EXCH CTIO7 (ha=%p)\n", vha
->hw
);
1101 /* Send marker if required */
1102 if (qlt_issue_marker(vha
, 1) != QLA_SUCCESS
)
1105 ctio
= (struct ctio7_to_24xx
*)qla2x00_alloc_iocbs(vha
, NULL
);
1107 ql_dbg(ql_dbg_tgt
, vha
, 0xe04b,
1108 "qla_target(%d): %s failed: unable to allocate "
1109 "request packet\n", vha
->vp_idx
, __func__
);
 * On entry we have the firmware's response to the ABTS response that we
 * generated ourselves, so the ID fields in it are reversed.
1118 ctio
->entry_type
= CTIO_TYPE7
;
1119 ctio
->entry_count
= 1;
1120 ctio
->nport_handle
= entry
->nport_handle
;
1121 ctio
->handle
= QLA_TGT_SKIP_HANDLE
| CTIO_COMPLETION_HANDLE_MARK
;
1122 ctio
->timeout
= __constant_cpu_to_le16(QLA_TGT_TIMEOUT
);
1123 ctio
->vp_index
= vha
->vp_idx
;
1124 ctio
->initiator_id
[0] = entry
->fcp_hdr_le
.d_id
[0];
1125 ctio
->initiator_id
[1] = entry
->fcp_hdr_le
.d_id
[1];
1126 ctio
->initiator_id
[2] = entry
->fcp_hdr_le
.d_id
[2];
1127 ctio
->exchange_addr
= entry
->exchange_addr_to_abort
;
1128 ctio
->u
.status1
.flags
=
1129 __constant_cpu_to_le16(CTIO7_FLAGS_STATUS_MODE_1
|
1130 CTIO7_FLAGS_TERMINATE
);
1131 ctio
->u
.status1
.ox_id
= entry
->fcp_hdr_le
.ox_id
;
1133 qla2x00_start_iocbs(vha
, vha
->req
);
1135 qlt_24xx_send_abts_resp(vha
, (struct abts_recv_from_24xx
*)entry
,
1136 FCP_TMF_CMPL
, true);
1139 /* ha->hardware_lock supposed to be held on entry */
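/*
 * __qlt_24xx_handle_abts() looks up the command to abort by matching the
 * exchange address against the tags of the session's outstanding se_cmds,
 * then allocates a qla_tgt_mgmt_cmd and hands it to the fabric module via
 * tgt_ops->handle_tmr(..., TMR_ABORT_TASK).
 */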
1140 static int __qlt_24xx_handle_abts(struct scsi_qla_host
*vha
,
1141 struct abts_recv_from_24xx
*abts
, struct qla_tgt_sess
*sess
)
1143 struct qla_hw_data
*ha
= vha
->hw
;
1144 struct se_session
*se_sess
= sess
->se_sess
;
1145 struct qla_tgt_mgmt_cmd
*mcmd
;
1146 struct se_cmd
*se_cmd
;
1149 bool found_lun
= false;
1151 spin_lock(&se_sess
->sess_cmd_lock
);
1152 list_for_each_entry(se_cmd
, &se_sess
->sess_cmd_list
, se_cmd_list
) {
1153 struct qla_tgt_cmd
*cmd
=
1154 container_of(se_cmd
, struct qla_tgt_cmd
, se_cmd
);
1155 if (cmd
->tag
== abts
->exchange_addr_to_abort
) {
1156 lun
= cmd
->unpacked_lun
;
1161 spin_unlock(&se_sess
->sess_cmd_lock
);
1166 ql_dbg(ql_dbg_tgt_mgt
, vha
, 0xf00f,
1167 "qla_target(%d): task abort (tag=%d)\n",
1168 vha
->vp_idx
, abts
->exchange_addr_to_abort
);
1170 mcmd
= mempool_alloc(qla_tgt_mgmt_cmd_mempool
, GFP_ATOMIC
);
1172 ql_dbg(ql_dbg_tgt_mgt
, vha
, 0xf051,
1173 "qla_target(%d): %s: Allocation of ABORT cmd failed",
1174 vha
->vp_idx
, __func__
);
1177 memset(mcmd
, 0, sizeof(*mcmd
));
1180 memcpy(&mcmd
->orig_iocb
.abts
, abts
, sizeof(mcmd
->orig_iocb
.abts
));
1182 rc
= ha
->tgt
.tgt_ops
->handle_tmr(mcmd
, lun
, TMR_ABORT_TASK
,
1183 abts
->exchange_addr_to_abort
);
1185 ql_dbg(ql_dbg_tgt_mgt
, vha
, 0xf052,
1186 "qla_target(%d): tgt_ops->handle_tmr()"
1187 " failed: %d", vha
->vp_idx
, rc
);
1188 mempool_free(mcmd
, qla_tgt_mgmt_cmd_mempool
);
 * ha->hardware_lock supposed to be held on entry. Might drop it, then reacquire.
1198 static void qlt_24xx_handle_abts(struct scsi_qla_host
*vha
,
1199 struct abts_recv_from_24xx
*abts
)
1201 struct qla_hw_data
*ha
= vha
->hw
;
1202 struct qla_tgt_sess
*sess
;
1203 uint32_t tag
= abts
->exchange_addr_to_abort
;
1207 if (le32_to_cpu(abts
->fcp_hdr_le
.parameter
) & ABTS_PARAM_ABORT_SEQ
) {
1208 ql_dbg(ql_dbg_tgt_mgt
, vha
, 0xf053,
1209 "qla_target(%d): ABTS: Abort Sequence not "
1210 "supported\n", vha
->vp_idx
);
1211 qlt_24xx_send_abts_resp(vha
, abts
, FCP_TMF_REJECTED
, false);
1215 if (tag
== ATIO_EXCHANGE_ADDRESS_UNKNOWN
) {
1216 ql_dbg(ql_dbg_tgt_mgt
, vha
, 0xf010,
1217 "qla_target(%d): ABTS: Unknown Exchange "
1218 "Address received\n", vha
->vp_idx
);
1219 qlt_24xx_send_abts_resp(vha
, abts
, FCP_TMF_REJECTED
, false);
1223 ql_dbg(ql_dbg_tgt_mgt
, vha
, 0xf011,
1224 "qla_target(%d): task abort (s_id=%x:%x:%x, "
1225 "tag=%d, param=%x)\n", vha
->vp_idx
, abts
->fcp_hdr_le
.s_id
[2],
1226 abts
->fcp_hdr_le
.s_id
[1], abts
->fcp_hdr_le
.s_id
[0], tag
,
1227 le32_to_cpu(abts
->fcp_hdr_le
.parameter
));
1229 s_id
[0] = abts
->fcp_hdr_le
.s_id
[2];
1230 s_id
[1] = abts
->fcp_hdr_le
.s_id
[1];
1231 s_id
[2] = abts
->fcp_hdr_le
.s_id
[0];
1233 sess
= ha
->tgt
.tgt_ops
->find_sess_by_s_id(vha
, s_id
);
1235 ql_dbg(ql_dbg_tgt_mgt
, vha
, 0xf012,
1236 "qla_target(%d): task abort for non-existant session\n",
1238 rc
= qlt_sched_sess_work(vha
->vha_tgt
.qla_tgt
,
1239 QLA_TGT_SESS_WORK_ABORT
, abts
, sizeof(*abts
));
1241 qlt_24xx_send_abts_resp(vha
, abts
, FCP_TMF_REJECTED
,
1247 rc
= __qlt_24xx_handle_abts(vha
, abts
, sess
);
1249 ql_dbg(ql_dbg_tgt_mgt
, vha
, 0xf054,
1250 "qla_target(%d): __qlt_24xx_handle_abts() failed: %d\n",
1252 qlt_24xx_send_abts_resp(vha
, abts
, FCP_TMF_REJECTED
, false);
 * ha->hardware_lock supposed to be held on entry. Might drop it, then reacquire.
1260 static void qlt_24xx_send_task_mgmt_ctio(struct scsi_qla_host
*ha
,
1261 struct qla_tgt_mgmt_cmd
*mcmd
, uint32_t resp_code
)
1263 struct atio_from_isp
*atio
= &mcmd
->orig_iocb
.atio
;
1264 struct ctio7_to_24xx
*ctio
;
1266 ql_dbg(ql_dbg_tgt
, ha
, 0xe008,
1267 "Sending task mgmt CTIO7 (ha=%p, atio=%p, resp_code=%x\n",
1268 ha
, atio
, resp_code
);
1270 /* Send marker if required */
1271 if (qlt_issue_marker(ha
, 1) != QLA_SUCCESS
)
1274 ctio
= (struct ctio7_to_24xx
*)qla2x00_alloc_iocbs(ha
, NULL
);
1276 ql_dbg(ql_dbg_tgt
, ha
, 0xe04c,
1277 "qla_target(%d): %s failed: unable to allocate "
1278 "request packet\n", ha
->vp_idx
, __func__
);
1282 ctio
->entry_type
= CTIO_TYPE7
;
1283 ctio
->entry_count
= 1;
1284 ctio
->handle
= QLA_TGT_SKIP_HANDLE
| CTIO_COMPLETION_HANDLE_MARK
;
1285 ctio
->nport_handle
= mcmd
->sess
->loop_id
;
1286 ctio
->timeout
= __constant_cpu_to_le16(QLA_TGT_TIMEOUT
);
1287 ctio
->vp_index
= ha
->vp_idx
;
1288 ctio
->initiator_id
[0] = atio
->u
.isp24
.fcp_hdr
.s_id
[2];
1289 ctio
->initiator_id
[1] = atio
->u
.isp24
.fcp_hdr
.s_id
[1];
1290 ctio
->initiator_id
[2] = atio
->u
.isp24
.fcp_hdr
.s_id
[0];
1291 ctio
->exchange_addr
= atio
->u
.isp24
.exchange_addr
;
1292 ctio
->u
.status1
.flags
= (atio
->u
.isp24
.attr
<< 9) |
1293 __constant_cpu_to_le16(CTIO7_FLAGS_STATUS_MODE_1
|
1294 CTIO7_FLAGS_SEND_STATUS
);
1295 ctio
->u
.status1
.ox_id
= swab16(atio
->u
.isp24
.fcp_hdr
.ox_id
);
1296 ctio
->u
.status1
.scsi_status
=
1297 __constant_cpu_to_le16(SS_RESPONSE_INFO_LEN_VALID
);
1298 ctio
->u
.status1
.response_len
= __constant_cpu_to_le16(8);
1299 ctio
->u
.status1
.sense_data
[0] = resp_code
;
1301 qla2x00_start_iocbs(ha
, ha
->req
);
1304 void qlt_free_mcmd(struct qla_tgt_mgmt_cmd
*mcmd
)
1306 mempool_free(mcmd
, qla_tgt_mgmt_cmd_mempool
);
1308 EXPORT_SYMBOL(qlt_free_mcmd
);
1310 /* callback from target fabric module code */
1311 void qlt_xmit_tm_rsp(struct qla_tgt_mgmt_cmd
*mcmd
)
1313 struct scsi_qla_host
*vha
= mcmd
->sess
->vha
;
1314 struct qla_hw_data
*ha
= vha
->hw
;
1315 unsigned long flags
;
1317 ql_dbg(ql_dbg_tgt_mgt
, vha
, 0xf013,
1318 "TM response mcmd (%p) status %#x state %#x",
1319 mcmd
, mcmd
->fc_tm_rsp
, mcmd
->flags
);
1321 spin_lock_irqsave(&ha
->hardware_lock
, flags
);
1322 if (mcmd
->flags
== QLA24XX_MGMT_SEND_NACK
)
1323 qlt_send_notify_ack(vha
, &mcmd
->orig_iocb
.imm_ntfy
,
1326 if (mcmd
->se_cmd
.se_tmr_req
->function
== TMR_ABORT_TASK
)
1327 qlt_24xx_send_abts_resp(vha
, &mcmd
->orig_iocb
.abts
,
1328 mcmd
->fc_tm_rsp
, false);
1330 qlt_24xx_send_task_mgmt_ctio(vha
, mcmd
,
1334 * Make the callback for ->free_mcmd() to queue_work() and invoke
1335 * target_put_sess_cmd() to drop cmd_kref to 1. The final
1336 * target_put_sess_cmd() call will be made from TFO->check_stop_free()
1337 * -> tcm_qla2xxx_check_stop_free() to release the TMR associated se_cmd
1338 * descriptor after TFO->queue_tm_rsp() -> tcm_qla2xxx_queue_tm_rsp() ->
1339 * qlt_xmit_tm_rsp() returns here..
1341 ha
->tgt
.tgt_ops
->free_mcmd(mcmd
);
1342 spin_unlock_irqrestore(&ha
->hardware_lock
, flags
);
1344 EXPORT_SYMBOL(qlt_xmit_tm_rsp
);
1347 static int qlt_pci_map_calc_cnt(struct qla_tgt_prm
*prm
)
1349 struct qla_tgt_cmd
*cmd
= prm
->cmd
;
1351 BUG_ON(cmd
->sg_cnt
== 0);
1353 prm
->sg
= (struct scatterlist
*)cmd
->sg
;
1354 prm
->seg_cnt
= pci_map_sg(prm
->tgt
->ha
->pdev
, cmd
->sg
,
1355 cmd
->sg_cnt
, cmd
->dma_data_direction
);
1356 if (unlikely(prm
->seg_cnt
== 0))
1359 prm
->cmd
->sg_mapped
= 1;
1361 if (cmd
->se_cmd
.prot_op
== TARGET_PROT_NORMAL
) {
 * If there are more than four sg entries then we need to allocate
 * the continuation entries
1366 if (prm
->seg_cnt
> prm
->tgt
->datasegs_per_cmd
)
1367 prm
->req_cnt
+= DIV_ROUND_UP(prm
->seg_cnt
-
1368 prm
->tgt
->datasegs_per_cmd
,
1369 prm
->tgt
->datasegs_per_cont
);
1372 if ((cmd
->se_cmd
.prot_op
== TARGET_PROT_DIN_INSERT
) ||
1373 (cmd
->se_cmd
.prot_op
== TARGET_PROT_DOUT_STRIP
)) {
1374 prm
->seg_cnt
= DIV_ROUND_UP(cmd
->bufflen
, cmd
->blk_sz
);
1375 prm
->tot_dsds
= prm
->seg_cnt
;
1377 prm
->tot_dsds
= prm
->seg_cnt
;
1379 if (cmd
->prot_sg_cnt
) {
1380 prm
->prot_sg
= cmd
->prot_sg
;
1381 prm
->prot_seg_cnt
= pci_map_sg(prm
->tgt
->ha
->pdev
,
1382 cmd
->prot_sg
, cmd
->prot_sg_cnt
,
1383 cmd
->dma_data_direction
);
1384 if (unlikely(prm
->prot_seg_cnt
== 0))
1387 if ((cmd
->se_cmd
.prot_op
== TARGET_PROT_DIN_INSERT
) ||
1388 (cmd
->se_cmd
.prot_op
== TARGET_PROT_DOUT_STRIP
)) {
/* DIF bundling is not supported here */
1390 prm
->prot_seg_cnt
= DIV_ROUND_UP(cmd
->bufflen
,
1392 prm
->tot_dsds
+= prm
->prot_seg_cnt
;
1394 prm
->tot_dsds
+= prm
->prot_seg_cnt
;
1398 ql_dbg(ql_dbg_tgt
, prm
->cmd
->vha
, 0xe009, "seg_cnt=%d, req_cnt=%d\n",
1399 prm
->seg_cnt
, prm
->req_cnt
);
1403 ql_dbg(ql_dbg_tgt
, prm
->cmd
->vha
, 0xe04d,
1404 "qla_target(%d): PCI mapping failed: sg_cnt=%d",
1405 0, prm
->cmd
->sg_cnt
);
1409 static inline void qlt_unmap_sg(struct scsi_qla_host
*vha
,
1410 struct qla_tgt_cmd
*cmd
)
1412 struct qla_hw_data
*ha
= vha
->hw
;
1414 BUG_ON(!cmd
->sg_mapped
);
1415 pci_unmap_sg(ha
->pdev
, cmd
->sg
, cmd
->sg_cnt
, cmd
->dma_data_direction
);
1418 if (cmd
->prot_sg_cnt
)
1419 pci_unmap_sg(ha
->pdev
, cmd
->prot_sg
, cmd
->prot_sg_cnt
,
1420 cmd
->dma_data_direction
);
1422 if (cmd
->ctx_dsd_alloced
)
1423 qla2x00_clean_dsd_pool(ha
, NULL
, cmd
);
1426 dma_pool_free(ha
->dl_dma_pool
, cmd
->ctx
, cmd
->ctx
->crc_ctx_dma
);
1429 static int qlt_check_reserve_free_req(struct scsi_qla_host
*vha
,
1432 struct qla_hw_data
*ha
= vha
->hw
;
1433 device_reg_t __iomem
*reg
= ha
->iobase
;
1436 if (vha
->req
->cnt
< (req_cnt
+ 2)) {
1437 cnt
= (uint16_t)RD_REG_DWORD(®
->isp24
.req_q_out
);
1439 ql_dbg(ql_dbg_tgt
, vha
, 0xe00a,
1440 "Request ring circled: cnt=%d, vha->->ring_index=%d, "
1441 "vha->req->cnt=%d, req_cnt=%d\n", cnt
,
1442 vha
->req
->ring_index
, vha
->req
->cnt
, req_cnt
);
1443 if (vha
->req
->ring_index
< cnt
)
1444 vha
->req
->cnt
= cnt
- vha
->req
->ring_index
;
1446 vha
->req
->cnt
= vha
->req
->length
-
1447 (vha
->req
->ring_index
- cnt
);
1450 if (unlikely(vha
->req
->cnt
< (req_cnt
+ 2))) {
1451 ql_dbg(ql_dbg_tgt
, vha
, 0xe00b,
1452 "qla_target(%d): There is no room in the "
1453 "request ring: vha->req->ring_index=%d, vha->req->cnt=%d, "
1454 "req_cnt=%d\n", vha
->vp_idx
, vha
->req
->ring_index
,
1455 vha
->req
->cnt
, req_cnt
);
1458 vha
->req
->cnt
-= req_cnt
;
 * ha->hardware_lock supposed to be held on entry. Might drop it, then reacquire.
1466 static inline void *qlt_get_req_pkt(struct scsi_qla_host
*vha
)
1468 /* Adjust ring index. */
1469 vha
->req
->ring_index
++;
1470 if (vha
->req
->ring_index
== vha
->req
->length
) {
1471 vha
->req
->ring_index
= 0;
1472 vha
->req
->ring_ptr
= vha
->req
->ring
;
1474 vha
->req
->ring_ptr
++;
1476 return (cont_entry_t
*)vha
->req
->ring_ptr
;
1479 /* ha->hardware_lock supposed to be held on entry */
1480 static inline uint32_t qlt_make_handle(struct scsi_qla_host
*vha
)
1482 struct qla_hw_data
*ha
= vha
->hw
;
1485 h
= ha
->tgt
.current_handle
;
1486 /* always increment cmd handle */
1489 if (h
> DEFAULT_OUTSTANDING_COMMANDS
)
1490 h
= 1; /* 0 is QLA_TGT_NULL_HANDLE */
1491 if (h
== ha
->tgt
.current_handle
) {
1492 ql_dbg(ql_dbg_tgt
, vha
, 0xe04e,
1493 "qla_target(%d): Ran out of "
1494 "empty cmd slots in ha %p\n", vha
->vp_idx
, ha
);
1495 h
= QLA_TGT_NULL_HANDLE
;
1498 } while ((h
== QLA_TGT_NULL_HANDLE
) ||
1499 (h
== QLA_TGT_SKIP_HANDLE
) ||
1500 (ha
->tgt
.cmds
[h
-1] != NULL
));
1502 if (h
!= QLA_TGT_NULL_HANDLE
)
1503 ha
->tgt
.current_handle
= h
;
1508 /* ha->hardware_lock supposed to be held on entry */
1509 static int qlt_24xx_build_ctio_pkt(struct qla_tgt_prm
*prm
,
1510 struct scsi_qla_host
*vha
)
1513 struct ctio7_to_24xx
*pkt
;
1514 struct qla_hw_data
*ha
= vha
->hw
;
1515 struct atio_from_isp
*atio
= &prm
->cmd
->atio
;
1517 pkt
= (struct ctio7_to_24xx
*)vha
->req
->ring_ptr
;
1519 memset(pkt
, 0, sizeof(*pkt
));
1521 pkt
->entry_type
= CTIO_TYPE7
;
1522 pkt
->entry_count
= (uint8_t)prm
->req_cnt
;
1523 pkt
->vp_index
= vha
->vp_idx
;
1525 h
= qlt_make_handle(vha
);
1526 if (unlikely(h
== QLA_TGT_NULL_HANDLE
)) {
 * CTIO type 7 from the firmware doesn't provide a way to
 * know the initiator's LOOP ID, hence we can't find
 * the session and, therefore, the command.
1534 ha
->tgt
.cmds
[h
-1] = prm
->cmd
;
1536 pkt
->handle
= h
| CTIO_COMPLETION_HANDLE_MARK
;
1537 pkt
->nport_handle
= prm
->cmd
->loop_id
;
1538 pkt
->timeout
= __constant_cpu_to_le16(QLA_TGT_TIMEOUT
);
1539 pkt
->initiator_id
[0] = atio
->u
.isp24
.fcp_hdr
.s_id
[2];
1540 pkt
->initiator_id
[1] = atio
->u
.isp24
.fcp_hdr
.s_id
[1];
1541 pkt
->initiator_id
[2] = atio
->u
.isp24
.fcp_hdr
.s_id
[0];
1542 pkt
->exchange_addr
= atio
->u
.isp24
.exchange_addr
;
1543 pkt
->u
.status0
.flags
|= (atio
->u
.isp24
.attr
<< 9);
1544 pkt
->u
.status0
.ox_id
= swab16(atio
->u
.isp24
.fcp_hdr
.ox_id
);
1545 pkt
->u
.status0
.relative_offset
= cpu_to_le32(prm
->cmd
->offset
);
1547 ql_dbg(ql_dbg_tgt
, vha
, 0xe00c,
1548 "qla_target(%d): handle(cmd) -> %08x, timeout %d, ox_id %#x\n",
1549 vha
->vp_idx
, pkt
->handle
, QLA_TGT_TIMEOUT
,
1550 le16_to_cpu(pkt
->u
.status0
.ox_id
));
 * ha->hardware_lock supposed to be held on entry. We have already made sure
 * that there is a sufficient number of request entries to not drop it.
1558 static void qlt_load_cont_data_segments(struct qla_tgt_prm
*prm
,
1559 struct scsi_qla_host
*vha
)
1562 uint32_t *dword_ptr
;
1563 int enable_64bit_addressing
= prm
->tgt
->tgt_enable_64bit_addr
;
1565 /* Build continuation packets */
1566 while (prm
->seg_cnt
> 0) {
1567 cont_a64_entry_t
*cont_pkt64
=
1568 (cont_a64_entry_t
*)qlt_get_req_pkt(vha
);
 * Make sure that none of the 64-bit specific fields of cont_pkt64
 * are used for 32-bit addressing. Cast to (cont_entry_t *) for
 * that.
1577 memset(cont_pkt64
, 0, sizeof(*cont_pkt64
));
1579 cont_pkt64
->entry_count
= 1;
1580 cont_pkt64
->sys_define
= 0;
1582 if (enable_64bit_addressing
) {
1583 cont_pkt64
->entry_type
= CONTINUE_A64_TYPE
;
1585 (uint32_t *)&cont_pkt64
->dseg_0_address
;
1587 cont_pkt64
->entry_type
= CONTINUE_TYPE
;
1589 (uint32_t *)&((cont_entry_t
*)
1590 cont_pkt64
)->dseg_0_address
;
1593 /* Load continuation entry data segments */
1595 cnt
< prm
->tgt
->datasegs_per_cont
&& prm
->seg_cnt
;
1596 cnt
++, prm
->seg_cnt
--) {
1598 cpu_to_le32(pci_dma_lo32
1599 (sg_dma_address(prm
->sg
)));
1600 if (enable_64bit_addressing
) {
1602 cpu_to_le32(pci_dma_hi32
1606 *dword_ptr
++ = cpu_to_le32(sg_dma_len(prm
->sg
));
1608 ql_dbg(ql_dbg_tgt
, vha
, 0xe00d,
1609 "S/G Segment Cont. phys_addr=%llx:%llx, len=%d\n",
1610 (long long unsigned int)
1611 pci_dma_hi32(sg_dma_address(prm
->sg
)),
1612 (long long unsigned int)
1613 pci_dma_lo32(sg_dma_address(prm
->sg
)),
1614 (int)sg_dma_len(prm
->sg
));
1616 prm
->sg
= sg_next(prm
->sg
);
 * ha->hardware_lock supposed to be held on entry. We have already made sure
 * that there is a sufficient number of request entries to not drop it.
1625 static void qlt_load_data_segments(struct qla_tgt_prm
*prm
,
1626 struct scsi_qla_host
*vha
)
1629 uint32_t *dword_ptr
;
1630 int enable_64bit_addressing
= prm
->tgt
->tgt_enable_64bit_addr
;
1631 struct ctio7_to_24xx
*pkt24
= (struct ctio7_to_24xx
*)prm
->pkt
;
1633 ql_dbg(ql_dbg_tgt
, vha
, 0xe00e,
1634 "iocb->scsi_status=%x, iocb->flags=%x\n",
1635 le16_to_cpu(pkt24
->u
.status0
.scsi_status
),
1636 le16_to_cpu(pkt24
->u
.status0
.flags
));
1638 pkt24
->u
.status0
.transfer_length
= cpu_to_le32(prm
->cmd
->bufflen
);
1640 /* Setup packet address segment pointer */
1641 dword_ptr
= pkt24
->u
.status0
.dseg_0_address
;
1643 /* Set total data segment count */
1645 pkt24
->dseg_count
= cpu_to_le16(prm
->seg_cnt
);
1647 if (prm
->seg_cnt
== 0) {
1648 /* No data transfer */
1654 /* If scatter gather */
1655 ql_dbg(ql_dbg_tgt
, vha
, 0xe00f, "%s", "Building S/G data segments...");
1657 /* Load command entry data segments */
1659 (cnt
< prm
->tgt
->datasegs_per_cmd
) && prm
->seg_cnt
;
1660 cnt
++, prm
->seg_cnt
--) {
1662 cpu_to_le32(pci_dma_lo32(sg_dma_address(prm
->sg
)));
1663 if (enable_64bit_addressing
) {
1665 cpu_to_le32(pci_dma_hi32(
1666 sg_dma_address(prm
->sg
)));
1668 *dword_ptr
++ = cpu_to_le32(sg_dma_len(prm
->sg
));
1670 ql_dbg(ql_dbg_tgt
, vha
, 0xe010,
1671 "S/G Segment phys_addr=%llx:%llx, len=%d\n",
1672 (long long unsigned int)pci_dma_hi32(sg_dma_address(
1674 (long long unsigned int)pci_dma_lo32(sg_dma_address(
1676 (int)sg_dma_len(prm
->sg
));
1678 prm
->sg
= sg_next(prm
->sg
);
1681 qlt_load_cont_data_segments(prm
, vha
);
1684 static inline int qlt_has_data(struct qla_tgt_cmd
*cmd
)
1686 return cmd
->bufflen
> 0;
1690 * Called without ha->hardware_lock held
1692 static int qlt_pre_xmit_response(struct qla_tgt_cmd
*cmd
,
1693 struct qla_tgt_prm
*prm
, int xmit_type
, uint8_t scsi_status
,
1694 uint32_t *full_req_cnt
)
1696 struct qla_tgt
*tgt
= cmd
->tgt
;
1697 struct scsi_qla_host
*vha
= tgt
->vha
;
1698 struct qla_hw_data
*ha
= vha
->hw
;
1699 struct se_cmd
*se_cmd
= &cmd
->se_cmd
;
1701 if (unlikely(cmd
->aborted
)) {
1702 ql_dbg(ql_dbg_tgt_mgt
, vha
, 0xf014,
1703 "qla_target(%d): terminating exchange "
1704 "for aborted cmd=%p (se_cmd=%p, tag=%d)", vha
->vp_idx
, cmd
,
1707 cmd
->state
= QLA_TGT_STATE_ABORTED
;
1709 qlt_send_term_exchange(vha
, cmd
, &cmd
->atio
, 0);
1711 /* !! At this point cmd could be already freed !! */
1712 return QLA_TGT_PRE_XMIT_RESP_CMD_ABORTED
;
1715 ql_dbg(ql_dbg_tgt
, vha
, 0xe011, "qla_target(%d): tag=%u ox_id %04x\n",
1716 vha
->vp_idx
, cmd
->tag
,
1717 be16_to_cpu(cmd
->atio
.u
.isp24
.fcp_hdr
.ox_id
));
1721 prm
->rq_result
= scsi_status
;
1722 prm
->sense_buffer
= &cmd
->sense_buffer
[0];
1723 prm
->sense_buffer_len
= TRANSPORT_SENSE_BUFFER
;
1727 prm
->add_status_pkt
= 0;
1729 ql_dbg(ql_dbg_tgt
, vha
, 0xe012, "rq_result=%x, xmit_type=%x\n",
1730 prm
->rq_result
, xmit_type
);
1732 /* Send marker if required */
1733 if (qlt_issue_marker(vha
, 0) != QLA_SUCCESS
)
1736 ql_dbg(ql_dbg_tgt
, vha
, 0xe013, "CTIO start: vha(%d)\n", vha
->vp_idx
);
1738 if ((xmit_type
& QLA_TGT_XMIT_DATA
) && qlt_has_data(cmd
)) {
1739 if (qlt_pci_map_calc_cnt(prm
) != 0)
1743 *full_req_cnt
= prm
->req_cnt
;
1745 if (se_cmd
->se_cmd_flags
& SCF_UNDERFLOW_BIT
) {
1746 prm
->residual
= se_cmd
->residual_count
;
1747 ql_dbg(ql_dbg_tgt
, vha
, 0xe014,
1748 "Residual underflow: %d (tag %d, "
1749 "op %x, bufflen %d, rq_result %x)\n", prm
->residual
,
1750 cmd
->tag
, se_cmd
->t_task_cdb
? se_cmd
->t_task_cdb
[0] : 0,
1751 cmd
->bufflen
, prm
->rq_result
);
1752 prm
->rq_result
|= SS_RESIDUAL_UNDER
;
1753 } else if (se_cmd
->se_cmd_flags
& SCF_OVERFLOW_BIT
) {
1754 prm
->residual
= se_cmd
->residual_count
;
1755 ql_dbg(ql_dbg_tgt
, vha
, 0xe015,
1756 "Residual overflow: %d (tag %d, "
1757 "op %x, bufflen %d, rq_result %x)\n", prm
->residual
,
1758 cmd
->tag
, se_cmd
->t_task_cdb
? se_cmd
->t_task_cdb
[0] : 0,
1759 cmd
->bufflen
, prm
->rq_result
);
1760 prm
->rq_result
|= SS_RESIDUAL_OVER
;
1763 if (xmit_type
& QLA_TGT_XMIT_STATUS
) {
1765 * If QLA_TGT_XMIT_DATA is not set, add_status_pkt will be
1766 * ignored in *xmit_response() below
1768 if (qlt_has_data(cmd
)) {
1769 if (QLA_TGT_SENSE_VALID(prm
->sense_buffer
) ||
1770 (IS_FWI2_CAPABLE(ha
) &&
1771 (prm
->rq_result
!= 0))) {
1772 prm
->add_status_pkt
= 1;
1778 ql_dbg(ql_dbg_tgt
, vha
, 0xe016,
1779 "req_cnt=%d, full_req_cnt=%d, add_status_pkt=%d\n",
1780 prm
->req_cnt
, *full_req_cnt
, prm
->add_status_pkt
);
1785 static inline int qlt_need_explicit_conf(struct qla_hw_data
*ha
,
1786 struct qla_tgt_cmd
*cmd
, int sending_sense
)
1788 if (ha
->tgt
.enable_class_2
)
1792 return cmd
->conf_compl_supported
;
1794 return ha
->tgt
.enable_explicit_conf
&&
1795 cmd
->conf_compl_supported
;
1798 #ifdef CONFIG_QLA_TGT_DEBUG_SRR
 * Originally taken from the XFS code
1802 static unsigned long qlt_srr_random(void)
1805 static unsigned long RandomValue
;
1806 static DEFINE_SPINLOCK(lock
);
1807 /* cycles pseudo-randomly through all values between 1 and 2^31 - 2 */
1811 unsigned long flags
;
1813 spin_lock_irqsave(&lock
, flags
);
1815 RandomValue
= jiffies
;
1821 rv
= 16807 * lo
- 2836 * hi
;
1825 spin_unlock_irqrestore(&lock
, flags
);
1829 static void qlt_check_srr_debug(struct qla_tgt_cmd
*cmd
, int *xmit_type
)
1831 #if 0 /* This is not a real status packets lost, so it won't lead to SRR */
1832 if ((*xmit_type
& QLA_TGT_XMIT_STATUS
) && (qlt_srr_random() % 200)
1834 *xmit_type
&= ~QLA_TGT_XMIT_STATUS
;
1835 ql_dbg(ql_dbg_tgt_mgt
, cmd
->vha
, 0xf015,
1836 "Dropping cmd %p (tag %d) status", cmd
, cmd
->tag
);
1840 * It's currently not possible to simulate SRRs for FCP_WRITE without
1841 * a physical link layer failure, so don't even try here..
1843 if (cmd
->dma_data_direction
!= DMA_FROM_DEVICE
)
1846 if (qlt_has_data(cmd
) && (cmd
->sg_cnt
> 1) &&
1847 ((qlt_srr_random() % 100) == 20)) {
1849 unsigned int tot_len
= 0;
1852 leave
= qlt_srr_random() % cmd
->sg_cnt
;
1854 for (i
= 0; i
< leave
; i
++)
1855 tot_len
+= cmd
->sg
[i
].length
;
1857 ql_dbg(ql_dbg_tgt_mgt
, cmd
->vha
, 0xf016,
1858 "Cutting cmd %p (tag %d) buffer"
1859 " tail to len %d, sg_cnt %d (cmd->bufflen %d,"
1860 " cmd->sg_cnt %d)", cmd
, cmd
->tag
, tot_len
, leave
,
1861 cmd
->bufflen
, cmd
->sg_cnt
);
1863 cmd
->bufflen
= tot_len
;
1864 cmd
->sg_cnt
= leave
;
1867 if (qlt_has_data(cmd
) && ((qlt_srr_random() % 100) == 70)) {
1868 unsigned int offset
= qlt_srr_random() % cmd
->bufflen
;
1870 ql_dbg(ql_dbg_tgt_mgt
, cmd
->vha
, 0xf017,
1871 "Cutting cmd %p (tag %d) buffer head "
1872 "to offset %d (cmd->bufflen %d)", cmd
, cmd
->tag
, offset
,
1875 *xmit_type
&= ~QLA_TGT_XMIT_DATA
;
1876 else if (qlt_set_data_offset(cmd
, offset
)) {
1877 ql_dbg(ql_dbg_tgt_mgt
, cmd
->vha
, 0xf018,
1878 "qlt_set_data_offset() failed (tag %d)", cmd
->tag
);
1883 static inline void qlt_check_srr_debug(struct qla_tgt_cmd
*cmd
, int *xmit_type
)
1887 static void qlt_24xx_init_ctio_to_isp(struct ctio7_to_24xx
*ctio
,
1888 struct qla_tgt_prm
*prm
)
1890 prm
->sense_buffer_len
= min_t(uint32_t, prm
->sense_buffer_len
,
1891 (uint32_t)sizeof(ctio
->u
.status1
.sense_data
));
1892 ctio
->u
.status0
.flags
|=
1893 __constant_cpu_to_le16(CTIO7_FLAGS_SEND_STATUS
);
1894 if (qlt_need_explicit_conf(prm
->tgt
->ha
, prm
->cmd
, 0)) {
1895 ctio
->u
.status0
.flags
|= __constant_cpu_to_le16(
1896 CTIO7_FLAGS_EXPLICIT_CONFORM
|
1897 CTIO7_FLAGS_CONFORM_REQ
);
1899 ctio
->u
.status0
.residual
= cpu_to_le32(prm
->residual
);
1900 ctio
->u
.status0
.scsi_status
= cpu_to_le16(prm
->rq_result
);
1901 if (QLA_TGT_SENSE_VALID(prm
->sense_buffer
)) {
1904 if (qlt_need_explicit_conf(prm
->tgt
->ha
, prm
->cmd
, 1)) {
1905 if (prm
->cmd
->se_cmd
.scsi_status
!= 0) {
1906 ql_dbg(ql_dbg_tgt
, prm
->cmd
->vha
, 0xe017,
1907 "Skipping EXPLICIT_CONFORM and "
1908 "CTIO7_FLAGS_CONFORM_REQ for FCP READ w/ "
1909 "non GOOD status\n");
1910 goto skip_explict_conf
;
1912 ctio
->u
.status1
.flags
|= __constant_cpu_to_le16(
1913 CTIO7_FLAGS_EXPLICIT_CONFORM
|
1914 CTIO7_FLAGS_CONFORM_REQ
);
1917 ctio
->u
.status1
.flags
&=
1918 ~__constant_cpu_to_le16(CTIO7_FLAGS_STATUS_MODE_0
);
1919 ctio
->u
.status1
.flags
|=
1920 __constant_cpu_to_le16(CTIO7_FLAGS_STATUS_MODE_1
);
1921 ctio
->u
.status1
.scsi_status
|=
1922 __constant_cpu_to_le16(SS_SENSE_LEN_VALID
);
1923 ctio
->u
.status1
.sense_length
=
1924 cpu_to_le16(prm
->sense_buffer_len
);
1925 for (i
= 0; i
< prm
->sense_buffer_len
/4; i
++)
1926 ((uint32_t *)ctio
->u
.status1
.sense_data
)[i
] =
1927 cpu_to_be32(((uint32_t *)prm
->sense_buffer
)[i
]);
1929 if (unlikely((prm
->sense_buffer_len
% 4) != 0)) {
1932 ql_dbg(ql_dbg_tgt
, vha
, 0xe04f,
1933 "qla_target(%d): %d bytes of sense "
1934 "lost", prm
->tgt
->ha
->vp_idx
,
1935 prm
->sense_buffer_len
% 4);
1941 ctio
->u
.status1
.flags
&=
1942 ~__constant_cpu_to_le16(CTIO7_FLAGS_STATUS_MODE_0
);
1943 ctio
->u
.status1
.flags
|=
1944 __constant_cpu_to_le16(CTIO7_FLAGS_STATUS_MODE_1
);
1945 ctio
->u
.status1
.sense_length
= 0;
1946 memset(ctio
->u
.status1
.sense_data
, 0,
1947 sizeof(ctio
->u
.status1
.sense_data
));
1950 /* Sense with len > 24, is it possible ??? */
1957 qlt_hba_err_chk_enabled(struct se_cmd
*se_cmd
)
1960 * Uncomment when corresponding SCSI changes are done.
1962 if (!sp->cmd->prot_chk)
1966 switch (se_cmd
->prot_op
) {
1967 case TARGET_PROT_DOUT_INSERT
:
1968 case TARGET_PROT_DIN_STRIP
:
1969 if (ql2xenablehba_err_chk
>= 1)
1972 case TARGET_PROT_DOUT_PASS
:
1973 case TARGET_PROT_DIN_PASS
:
1974 if (ql2xenablehba_err_chk
>= 2)
1977 case TARGET_PROT_DIN_INSERT
:
1978 case TARGET_PROT_DOUT_STRIP
:
1987 * qla24xx_set_t10dif_tags_from_cmd - Extract Ref and App tags from SCSI command
1991 qlt_set_t10dif_tags(struct se_cmd
*se_cmd
, struct crc_context
*ctx
)
1993 uint32_t lba
= 0xffffffff & se_cmd
->t_task_lba
;
/* Wait until the Mode Sense/Select cmd, modepage Ah, subpage 2
 * have been implemented by TCM, before AppTag is available.
 * Look for modesense_handlers[]
 */
1999 ctx
->app_tag
= __constant_cpu_to_le16(0);
2000 ctx
->app_tag_mask
[0] = 0x0;
2001 ctx
->app_tag_mask
[1] = 0x0;
2003 switch (se_cmd
->prot_type
) {
2004 case TARGET_DIF_TYPE0_PROT
:
2006 * No check for ql2xenablehba_err_chk, as it would be an
2007 * I/O error if hba tag generation is not done.
2009 ctx
->ref_tag
= cpu_to_le32(lba
);
2011 if (!qlt_hba_err_chk_enabled(se_cmd
))
2014 /* enable ALL bytes of the ref tag */
2015 ctx
->ref_tag_mask
[0] = 0xff;
2016 ctx
->ref_tag_mask
[1] = 0xff;
2017 ctx
->ref_tag_mask
[2] = 0xff;
2018 ctx
->ref_tag_mask
[3] = 0xff;
2021 * For TYpe 1 protection: 16 bit GUARD tag, 32 bit REF tag, and
2024 case TARGET_DIF_TYPE1_PROT
:
2025 ctx
->ref_tag
= cpu_to_le32(lba
);
2027 if (!qlt_hba_err_chk_enabled(se_cmd
))
2030 /* enable ALL bytes of the ref tag */
2031 ctx
->ref_tag_mask
[0] = 0xff;
2032 ctx
->ref_tag_mask
[1] = 0xff;
2033 ctx
->ref_tag_mask
[2] = 0xff;
2034 ctx
->ref_tag_mask
[3] = 0xff;
2037 * For TYPE 2 protection: 16 bit GUARD + 32 bit REF tag has to
2038 * match LBA in CDB + N
2040 case TARGET_DIF_TYPE2_PROT
:
2041 ctx
->ref_tag
= cpu_to_le32(lba
);
2043 if (!qlt_hba_err_chk_enabled(se_cmd
))
2046 /* enable ALL bytes of the ref tag */
2047 ctx
->ref_tag_mask
[0] = 0xff;
2048 ctx
->ref_tag_mask
[1] = 0xff;
2049 ctx
->ref_tag_mask
[2] = 0xff;
2050 ctx
->ref_tag_mask
[3] = 0xff;
2053 /* For Type 3 protection: 16 bit GUARD only */
2054 case TARGET_DIF_TYPE3_PROT
:
2055 ctx
->ref_tag_mask
[0] = ctx
->ref_tag_mask
[1] =
2056 ctx
->ref_tag_mask
[2] = ctx
->ref_tag_mask
[3] = 0x00;
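
/*
 * Build a CTIO CRC_2 IOCB for a command that carries T10-DIF protection
 * data: pick the firmware protection mode from the TCM prot_op, allocate a
 * CRC context from the DMA pool and walk the data (and, when bundling, the
 * protection) scatterlists into DSDs.
 */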
static inline int
qlt_build_ctio_crc2_pkt(struct qla_tgt_prm *prm, scsi_qla_host_t *vha)
{
	uint32_t		*cur_dsd;
	uint32_t		transfer_length = 0;
	uint32_t		data_bytes;
	uint32_t		dif_bytes;
	uint8_t			bundling = 1;
	uint8_t			*clr_ptr;
	struct crc_context	*crc_ctx_pkt = NULL;
	struct qla_hw_data	*ha;
	struct ctio_crc2_to_fw	*pkt;
	dma_addr_t		crc_ctx_dma;
	uint16_t		fw_prot_opts = 0;
	struct qla_tgt_cmd	*cmd = prm->cmd;
	struct se_cmd		*se_cmd = &cmd->se_cmd;
	uint32_t		h;
	struct atio_from_isp	*atio = &prm->cmd->atio;

	ha = vha->hw;

	pkt = (struct ctio_crc2_to_fw *)vha->req->ring_ptr;
	prm->pkt = pkt;
	memset(pkt, 0, sizeof(*pkt));

	ql_dbg(ql_dbg_tgt, vha, 0xe071,
		"qla_target(%d):%s: se_cmd[%p] CRC2 prot_op[0x%x] cmd prot sg:cnt[%p:%x] lba[%llu]\n",
		vha->vp_idx, __func__, se_cmd, se_cmd->prot_op,
		prm->prot_sg, prm->prot_seg_cnt, se_cmd->t_task_lba);

	if ((se_cmd->prot_op == TARGET_PROT_DIN_INSERT) ||
	    (se_cmd->prot_op == TARGET_PROT_DOUT_STRIP))
		bundling = 0;

	/* Compute dif len and adjust data len to include protection */
	data_bytes = cmd->bufflen;
	dif_bytes  = (data_bytes / cmd->blk_sz) * 8;

	switch (se_cmd->prot_op) {
	case TARGET_PROT_DIN_INSERT:
	case TARGET_PROT_DOUT_STRIP:
		transfer_length = data_bytes;
		data_bytes += dif_bytes;
		break;

	case TARGET_PROT_DIN_STRIP:
	case TARGET_PROT_DOUT_INSERT:
	case TARGET_PROT_DIN_PASS:
	case TARGET_PROT_DOUT_PASS:
		transfer_length = data_bytes + dif_bytes;
		break;

	default:
		break;
	}

	if (!qlt_hba_err_chk_enabled(se_cmd))
		fw_prot_opts |= 0x10; /* Disable Guard tag checking */
	/* HBA error checking enabled */
	else if (IS_PI_UNINIT_CAPABLE(ha)) {
		if ((se_cmd->prot_type == TARGET_DIF_TYPE1_PROT) ||
		    (se_cmd->prot_type == TARGET_DIF_TYPE2_PROT))
			fw_prot_opts |= PO_DIS_VALD_APP_ESC;
		else if (se_cmd->prot_type == TARGET_DIF_TYPE3_PROT)
			fw_prot_opts |= PO_DIS_VALD_APP_REF_ESC;
	}

	switch (se_cmd->prot_op) {
	case TARGET_PROT_DIN_INSERT:
	case TARGET_PROT_DOUT_INSERT:
		fw_prot_opts |= PO_MODE_DIF_INSERT;
		break;
	case TARGET_PROT_DIN_STRIP:
	case TARGET_PROT_DOUT_STRIP:
		fw_prot_opts |= PO_MODE_DIF_REMOVE;
		break;
	case TARGET_PROT_DIN_PASS:
	case TARGET_PROT_DOUT_PASS:
		fw_prot_opts |= PO_MODE_DIF_PASS;
		/* FUTURE: does tcm require T10CRC<->IPCKSUM conversion? */
		break;
	default:	/* Normal Request */
		fw_prot_opts |= PO_MODE_DIF_PASS;
		break;
	}

	/* Update entry type to indicate Command Type CRC_2 IOCB */
	pkt->entry_type  = CTIO_CRC2;
	pkt->entry_count = 1;
	pkt->vp_index = vha->vp_idx;

	h = qlt_make_handle(vha);
	if (unlikely(h == QLA_TGT_NULL_HANDLE)) {
		/*
		 * CTIO type 7 from the firmware doesn't provide a way to
		 * know the initiator's LOOP ID, hence we can't find
		 * the session and, so, the command.
		 */
		return -EAGAIN;
	} else
		ha->tgt.cmds[h-1] = prm->cmd;

	pkt->handle  = h | CTIO_COMPLETION_HANDLE_MARK;
	pkt->nport_handle = prm->cmd->loop_id;
	pkt->timeout = __constant_cpu_to_le16(QLA_TGT_TIMEOUT);
	pkt->initiator_id[0] = atio->u.isp24.fcp_hdr.s_id[2];
	pkt->initiator_id[1] = atio->u.isp24.fcp_hdr.s_id[1];
	pkt->initiator_id[2] = atio->u.isp24.fcp_hdr.s_id[0];
	pkt->exchange_addr   = atio->u.isp24.exchange_addr;
	pkt->ox_id  = swab16(atio->u.isp24.fcp_hdr.ox_id);
	pkt->flags |= (atio->u.isp24.attr << 9);
	pkt->relative_offset = cpu_to_le32(prm->cmd->offset);

	/* Set transfer direction */
	if (cmd->dma_data_direction == DMA_TO_DEVICE)
		pkt->flags = __constant_cpu_to_le16(CTIO7_FLAGS_DATA_IN);
	else if (cmd->dma_data_direction == DMA_FROM_DEVICE)
		pkt->flags = __constant_cpu_to_le16(CTIO7_FLAGS_DATA_OUT);

	pkt->dseg_count = prm->tot_dsds;
	/* Fibre channel byte count */
	pkt->transfer_length = cpu_to_le32(transfer_length);

	/* ----- CRC context -------- */

	/* Allocate CRC context from global pool */
	crc_ctx_pkt = cmd->ctx =
	    dma_pool_alloc(ha->dl_dma_pool, GFP_ATOMIC, &crc_ctx_dma);

	if (!crc_ctx_pkt)
		goto crc_queuing_error;

	/* Zero out CTX area. */
	clr_ptr = (uint8_t *)crc_ctx_pkt;
	memset(clr_ptr, 0, sizeof(*crc_ctx_pkt));

	crc_ctx_pkt->crc_ctx_dma = crc_ctx_dma;
	INIT_LIST_HEAD(&crc_ctx_pkt->dsd_list);

	/* Set handle */
	crc_ctx_pkt->handle = pkt->handle;

	qlt_set_t10dif_tags(se_cmd, crc_ctx_pkt);

	pkt->crc_context_address[0] = cpu_to_le32(LSD(crc_ctx_dma));
	pkt->crc_context_address[1] = cpu_to_le32(MSD(crc_ctx_dma));
	pkt->crc_context_len = CRC_CONTEXT_LEN_FW;

	if (!bundling) {
		cur_dsd = (uint32_t *) &crc_ctx_pkt->u.nobundling.data_address;
	} else {
		/*
		 * Configure Bundling if we need to fetch interleaving
		 * protection PCI accesses
		 */
		fw_prot_opts |= PO_ENABLE_DIF_BUNDLING;
		crc_ctx_pkt->u.bundling.dif_byte_count = cpu_to_le32(dif_bytes);
		crc_ctx_pkt->u.bundling.dseg_count =
			cpu_to_le16(prm->tot_dsds - prm->prot_seg_cnt);
		cur_dsd = (uint32_t *) &crc_ctx_pkt->u.bundling.data_address;
	}

	/* Finish the common fields of CRC pkt */
	crc_ctx_pkt->blk_size   = cpu_to_le16(cmd->blk_sz);
	crc_ctx_pkt->prot_opts  = cpu_to_le16(fw_prot_opts);
	crc_ctx_pkt->byte_count = cpu_to_le32(data_bytes);
	crc_ctx_pkt->guard_seed = __constant_cpu_to_le16(0);

	/* Walks data segments */
	pkt->flags |= __constant_cpu_to_le16(CTIO7_FLAGS_DSD_PTR);

	if (!bundling && prm->prot_seg_cnt) {
		if (qla24xx_walk_and_build_sglist_no_difb(ha, NULL, cur_dsd,
			prm->tot_dsds, cmd))
			goto crc_queuing_error;
	} else if (qla24xx_walk_and_build_sglist(ha, NULL, cur_dsd,
		(prm->tot_dsds - prm->prot_seg_cnt), cmd))
		goto crc_queuing_error;

	if (bundling && prm->prot_seg_cnt) {
		/* Walks dif segments */
		pkt->add_flags |=
			__constant_cpu_to_le16(CTIO_CRC2_AF_DIF_DSD_ENA);

		cur_dsd = (uint32_t *) &crc_ctx_pkt->u.bundling.dif_address;
		if (qla24xx_walk_and_build_prot_sglist(ha, NULL, cur_dsd,
			prm->prot_seg_cnt, cmd))
			goto crc_queuing_error;
	}
	return QLA_SUCCESS;

crc_queuing_error:
	/* Cleanup will be performed by the caller */

	return QLA_FUNCTION_FAILED;
}
/*
 * Callback to setup response of xmit_type of QLA_TGT_XMIT_DATA and
 * QLA_TGT_XMIT_STATUS for >= 24xx silicon
 */
int qlt_xmit_response(struct qla_tgt_cmd *cmd, int xmit_type,
	uint8_t scsi_status)
{
	struct scsi_qla_host *vha = cmd->vha;
	struct qla_hw_data *ha = vha->hw;
	struct ctio7_to_24xx *pkt;
	struct qla_tgt_prm prm;
	uint32_t full_req_cnt = 0;
	unsigned long flags = 0;
	int res;

	memset(&prm, 0, sizeof(prm));
	qlt_check_srr_debug(cmd, &xmit_type);

	ql_dbg(ql_dbg_tgt, cmd->vha, 0xe018,
	    "is_send_status=%d, cmd->bufflen=%d, cmd->sg_cnt=%d, cmd->dma_data_direction=%d se_cmd[%p]\n",
	    (xmit_type & QLA_TGT_XMIT_STATUS) ?
	    1 : 0, cmd->bufflen, cmd->sg_cnt, cmd->dma_data_direction,
	    &cmd->se_cmd);

	res = qlt_pre_xmit_response(cmd, &prm, xmit_type, scsi_status,
	    &full_req_cnt);
	if (unlikely(res != 0)) {
		if (res == QLA_TGT_PRE_XMIT_RESP_CMD_ABORTED)
			return 0;

		return res;
	}

	spin_lock_irqsave(&ha->hardware_lock, flags);

	/* Does F/W have an IOCBs for this request */
	res = qlt_check_reserve_free_req(vha, full_req_cnt);
	if (unlikely(res))
		goto out_unmap_unlock;

	if (cmd->se_cmd.prot_op && (xmit_type & QLA_TGT_XMIT_DATA))
		res = qlt_build_ctio_crc2_pkt(&prm, vha);
	else
		res = qlt_24xx_build_ctio_pkt(&prm, vha);
	if (unlikely(res != 0))
		goto out_unmap_unlock;

	pkt = (struct ctio7_to_24xx *)prm.pkt;

	if (qlt_has_data(cmd) && (xmit_type & QLA_TGT_XMIT_DATA)) {
		pkt->u.status0.flags |=
		    __constant_cpu_to_le16(CTIO7_FLAGS_DATA_IN |
			CTIO7_FLAGS_STATUS_MODE_0);

		if (cmd->se_cmd.prot_op == TARGET_PROT_NORMAL)
			qlt_load_data_segments(&prm, vha);

		if (prm.add_status_pkt == 0) {
			if (xmit_type & QLA_TGT_XMIT_STATUS) {
				pkt->u.status0.scsi_status =
				    cpu_to_le16(prm.rq_result);
				pkt->u.status0.residual =
				    cpu_to_le32(prm.residual);
				pkt->u.status0.flags |= __constant_cpu_to_le16(
				    CTIO7_FLAGS_SEND_STATUS);
				if (qlt_need_explicit_conf(ha, cmd, 0)) {
					pkt->u.status0.flags |=
					    __constant_cpu_to_le16(
						CTIO7_FLAGS_EXPLICIT_CONFORM |
						CTIO7_FLAGS_CONFORM_REQ);
				}
			}

		} else {
			/*
			 * We have already made sure that there is sufficient
			 * amount of request entries to not drop HW lock in
			 * req_pkt().
			 */
			struct ctio7_to_24xx *ctio =
				(struct ctio7_to_24xx *)qlt_get_req_pkt(vha);

			ql_dbg(ql_dbg_tgt, vha, 0xe019,
			    "Building additional status packet\n");

			/*
			 * T10Dif: ctio_crc2_to_fw overlay on top of
			 * ctio7_to_24xx
			 */
			memcpy(ctio, pkt, sizeof(*ctio));
			/* reset back to CTIO7 */
			ctio->entry_count = 1;
			ctio->entry_type = CTIO_TYPE7;
			ctio->dseg_count = 0;
			ctio->u.status1.flags &= ~__constant_cpu_to_le16(
			    CTIO7_FLAGS_DATA_IN);

			/* Real finish is ctio_m1's finish */
			pkt->handle |= CTIO_INTERMEDIATE_HANDLE_MARK;
			pkt->u.status0.flags |= __constant_cpu_to_le16(
			    CTIO7_FLAGS_DONT_RET_CTIO);

			/* qlt_24xx_init_ctio_to_isp will correct
			 * all necessary fields that's part of CTIO7.
			 * There should be no residual of CTIO-CRC2 data.
			 */
			qlt_24xx_init_ctio_to_isp((struct ctio7_to_24xx *)ctio,
			    &prm);
			pr_debug("Status CTIO7: %p\n", ctio);
		}
	} else
		qlt_24xx_init_ctio_to_isp(pkt, &prm);

	cmd->state = QLA_TGT_STATE_PROCESSED; /* Mid-level is done processing */

	ql_dbg(ql_dbg_tgt, vha, 0xe01a,
	    "Xmitting CTIO7 response pkt for 24xx: %p scsi_status: 0x%02x\n",
	    pkt, scsi_status);

	qla2x00_start_iocbs(vha, vha->req);
	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	return 0;

out_unmap_unlock:
	if (cmd->sg_mapped)
		qlt_unmap_sg(vha, cmd);
	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	return res;
}
EXPORT_SYMBOL(qlt_xmit_response);
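
/*
 * Post a CTIO requesting write data from the initiator (xfer-ready path):
 * reserve request-queue space, build either a regular or a CRC_2 CTIO
 * depending on prot_op, and leave the command in QLA_TGT_STATE_NEED_DATA.
 */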
int qlt_rdy_to_xfer(struct qla_tgt_cmd *cmd)
{
	struct ctio7_to_24xx *pkt;
	struct scsi_qla_host *vha = cmd->vha;
	struct qla_hw_data *ha = vha->hw;
	struct qla_tgt *tgt = cmd->tgt;
	struct qla_tgt_prm prm;
	unsigned long flags;
	int res = 0;

	memset(&prm, 0, sizeof(prm));
	prm.cmd = cmd;
	prm.tgt = tgt;
	prm.sg = NULL;
	prm.req_cnt = 1;

	/* Send marker if required */
	if (qlt_issue_marker(vha, 0) != QLA_SUCCESS)
		return -EIO;

	ql_dbg(ql_dbg_tgt, vha, 0xe01b,
	    "%s: CTIO_start: vha(%d) se_cmd %p ox_id %04x\n",
	    __func__, (int)vha->vp_idx, &cmd->se_cmd,
	    be16_to_cpu(cmd->atio.u.isp24.fcp_hdr.ox_id));

	/* Calculate number of entries and segments required */
	if (qlt_pci_map_calc_cnt(&prm) != 0)
		return -EAGAIN;

	spin_lock_irqsave(&ha->hardware_lock, flags);

	/* Does F/W have an IOCBs for this request */
	res = qlt_check_reserve_free_req(vha, prm.req_cnt);
	if (res != 0)
		goto out_unlock_free_unmap;
	if (cmd->se_cmd.prot_op)
		res = qlt_build_ctio_crc2_pkt(&prm, vha);
	else
		res = qlt_24xx_build_ctio_pkt(&prm, vha);

	if (unlikely(res != 0))
		goto out_unlock_free_unmap;
	pkt = (struct ctio7_to_24xx *)prm.pkt;
	pkt->u.status0.flags |= __constant_cpu_to_le16(CTIO7_FLAGS_DATA_OUT |
	    CTIO7_FLAGS_STATUS_MODE_0);

	if (cmd->se_cmd.prot_op == TARGET_PROT_NORMAL)
		qlt_load_data_segments(&prm, vha);

	cmd->state = QLA_TGT_STATE_NEED_DATA;

	qla2x00_start_iocbs(vha, vha->req);
	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	return res;

out_unlock_free_unmap:
	if (cmd->sg_mapped)
		qlt_unmap_sg(vha, cmd);
	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	return res;
}
EXPORT_SYMBOL(qlt_rdy_to_xfer);
/*
 * Checks the guard or meta-data for the type of error
 * detected by the HBA.
 */
static inline int
qlt_handle_dif_error(struct scsi_qla_host *vha, struct qla_tgt_cmd *cmd,
	struct ctio_crc_from_fw *sts)
{
	uint8_t		*ap = &sts->actual_dif[0];
	uint8_t		*ep = &sts->expected_dif[0];
	uint32_t	e_ref_tag, a_ref_tag;
	uint16_t	e_app_tag, a_app_tag;
	uint16_t	e_guard, a_guard;
	uint64_t	lba = cmd->se_cmd.t_task_lba;

	a_guard   = be16_to_cpu(*(uint16_t *)(ap + 0));
	a_app_tag = be16_to_cpu(*(uint16_t *)(ap + 2));
	a_ref_tag = be32_to_cpu(*(uint32_t *)(ap + 4));

	e_guard   = be16_to_cpu(*(uint16_t *)(ep + 0));
	e_app_tag = be16_to_cpu(*(uint16_t *)(ep + 2));
	e_ref_tag = be32_to_cpu(*(uint32_t *)(ep + 4));

	ql_dbg(ql_dbg_tgt, vha, 0xe075,
	    "iocb(s) %p Returned STATUS.\n", sts);

	ql_dbg(ql_dbg_tgt, vha, 0xf075,
	    "dif check TGT cdb 0x%x lba 0x%llu: [Actual|Expected] Ref Tag[0x%x|0x%x], App Tag [0x%x|0x%x], Guard [0x%x|0x%x]\n",
	    cmd->atio.u.isp24.fcp_cmnd.cdb[0], lba,
	    a_ref_tag, e_ref_tag, a_app_tag, e_app_tag, a_guard, e_guard);

	/*
	 * Ignore sector if:
	 * For type     3: ref & app tag is all 'f's
	 * For type 0,1,2: app tag is all 'f's
	 */
	if ((a_app_tag == 0xffff) &&
	    ((cmd->se_cmd.prot_type != TARGET_DIF_TYPE3_PROT) ||
	     (a_ref_tag == 0xffffffff))) {
		uint32_t blocks_done;

		/* 2TB boundary case covered automatically with this */
		blocks_done = e_ref_tag - (uint32_t)lba + 1;
		cmd->se_cmd.bad_sector = e_ref_tag;
		cmd->se_cmd.pi_err = 0;
		ql_dbg(ql_dbg_tgt, vha, 0xf074,
			"need to return scsi good\n");

		/* Update protection tag */
		if (cmd->prot_sg_cnt) {
			uint32_t i, j = 0, k = 0, num_ent;
			struct scatterlist *sg, *sgl;

			sgl = cmd->prot_sg;

			/* Patch the corresponding protection tags */
			for_each_sg(sgl, sg, cmd->prot_sg_cnt, i) {
				num_ent = sg_dma_len(sg) / 8;
				if (k + num_ent < blocks_done) {
					k += num_ent;
					continue;
				}
				j = blocks_done - k - 1;
				k = blocks_done;
				break;
			}

			if (k != blocks_done) {
				ql_log(ql_log_warn, vha, 0xf076,
				    "unexpected tag values tag:lba=%u:%llu)\n",
				    e_ref_tag, (unsigned long long)lba);
				goto out;
			}

#if 0
			struct sd_dif_tuple *spt;
			/* TODO:
			 * This section came from initiator. Is it valid here?
			 * should ulp be override with actual val???
			 */
			spt = page_address(sg_page(sg)) + sg->offset;
			spt += j;

			spt->app_tag = 0xffff;
			if (cmd->se_cmd.prot_type == SCSI_PROT_DIF_TYPE3)
				spt->ref_tag = 0xffffffff;
#endif
		}

		goto out;
	}

	/* check guard */
	if (e_guard != a_guard) {
		cmd->se_cmd.pi_err = TCM_LOGICAL_BLOCK_GUARD_CHECK_FAILED;
		cmd->se_cmd.bad_sector = cmd->se_cmd.t_task_lba;

		ql_log(ql_log_warn, vha, 0xe076,
		    "Guard ERR: cdb 0x%x lba 0x%llx: [Actual|Expected] Ref Tag[0x%x|0x%x], App Tag [0x%x|0x%x], Guard [0x%x|0x%x] cmd=%p\n",
		    cmd->atio.u.isp24.fcp_cmnd.cdb[0], lba,
		    a_ref_tag, e_ref_tag, a_app_tag, e_app_tag,
		    a_guard, e_guard, cmd);
		goto out;
	}

	/* check ref tag */
	if (e_ref_tag != a_ref_tag) {
		cmd->se_cmd.pi_err = TCM_LOGICAL_BLOCK_REF_TAG_CHECK_FAILED;
		cmd->se_cmd.bad_sector = e_ref_tag;

		ql_log(ql_log_warn, vha, 0xe077,
		    "Ref Tag ERR: cdb 0x%x lba 0x%llx: [Actual|Expected] Ref Tag[0x%x|0x%x], App Tag [0x%x|0x%x], Guard [0x%x|0x%x] cmd=%p\n",
		    cmd->atio.u.isp24.fcp_cmnd.cdb[0], lba,
		    a_ref_tag, e_ref_tag, a_app_tag, e_app_tag,
		    a_guard, e_guard, cmd);
		goto out;
	}

	/* check appl tag */
	if (e_app_tag != a_app_tag) {
		cmd->se_cmd.pi_err = TCM_LOGICAL_BLOCK_APP_TAG_CHECK_FAILED;
		cmd->se_cmd.bad_sector = cmd->se_cmd.t_task_lba;

		ql_log(ql_log_warn, vha, 0xe078,
		    "App Tag ERR: cdb 0x%x lba 0x%llx: [Actual|Expected] Ref Tag[0x%x|0x%x], App Tag [0x%x|0x%x], Guard [0x%x|0x%x] cmd=%p\n",
		    cmd->atio.u.isp24.fcp_cmnd.cdb[0], lba,
		    a_ref_tag, e_ref_tag, a_app_tag, e_app_tag,
		    a_guard, e_guard, cmd);
		goto out;
	}
out:
	return 1;
}
/* If hardware_lock held on entry, might drop it, then reacquire */
/* This function sends the appropriate CTIO to ISP 2xxx or 24xx */
static int __qlt_send_term_exchange(struct scsi_qla_host *vha,
	struct qla_tgt_cmd *cmd,
	struct atio_from_isp *atio)
{
	struct ctio7_to_24xx *ctio24;
	struct qla_hw_data *ha = vha->hw;
	request_t *pkt;
	int ret = 0;

	ql_dbg(ql_dbg_tgt, vha, 0xe01c, "Sending TERM EXCH CTIO (ha=%p)\n", ha);

	pkt = (request_t *)qla2x00_alloc_iocbs(vha, NULL);
	if (pkt == NULL) {
		ql_dbg(ql_dbg_tgt, vha, 0xe050,
		    "qla_target(%d): %s failed: unable to allocate "
		    "request packet\n", vha->vp_idx, __func__);
		return -ENOMEM;
	}

	if (cmd != NULL) {
		if (cmd->state < QLA_TGT_STATE_PROCESSED) {
			ql_dbg(ql_dbg_tgt, vha, 0xe051,
			    "qla_target(%d): Terminating cmd %p with "
			    "incorrect state %d\n", vha->vp_idx, cmd,
			    cmd->state);
		} else
			ret = 1;
	}

	pkt->entry_count = 1;
	pkt->handle = QLA_TGT_SKIP_HANDLE | CTIO_COMPLETION_HANDLE_MARK;

	ctio24 = (struct ctio7_to_24xx *)pkt;
	ctio24->entry_type = CTIO_TYPE7;
	ctio24->nport_handle = cmd ? cmd->loop_id : CTIO7_NHANDLE_UNRECOGNIZED;
	ctio24->timeout = __constant_cpu_to_le16(QLA_TGT_TIMEOUT);
	ctio24->vp_index = vha->vp_idx;
	ctio24->initiator_id[0] = atio->u.isp24.fcp_hdr.s_id[2];
	ctio24->initiator_id[1] = atio->u.isp24.fcp_hdr.s_id[1];
	ctio24->initiator_id[2] = atio->u.isp24.fcp_hdr.s_id[0];
	ctio24->exchange_addr = atio->u.isp24.exchange_addr;
	ctio24->u.status1.flags = (atio->u.isp24.attr << 9) |
	    __constant_cpu_to_le16(CTIO7_FLAGS_STATUS_MODE_1 |
		CTIO7_FLAGS_TERMINATE);
	ctio24->u.status1.ox_id = swab16(atio->u.isp24.fcp_hdr.ox_id);

	/* Most likely, it isn't needed */
	ctio24->u.status1.residual = get_unaligned((uint32_t *)
	    &atio->u.isp24.fcp_cmnd.add_cdb[
	    atio->u.isp24.fcp_cmnd.add_cdb_len]);
	if (ctio24->u.status1.residual != 0)
		ctio24->u.status1.scsi_status |= SS_RESIDUAL_UNDER;

	qla2x00_start_iocbs(vha, vha->req);
	return ret;
}
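
/*
 * Wrapper around __qlt_send_term_exchange() that takes the hardware lock
 * when the caller does not already hold it and, when the terminated
 * exchange will not produce another CTIO from the firmware, unmaps and
 * frees the command here.
 */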
static void qlt_send_term_exchange(struct scsi_qla_host *vha,
	struct qla_tgt_cmd *cmd, struct atio_from_isp *atio, int ha_locked)
{
	unsigned long flags;
	int rc;

	if (qlt_issue_marker(vha, ha_locked) < 0)
		return;

	if (ha_locked) {
		rc = __qlt_send_term_exchange(vha, cmd, atio);
		goto done;
	}
	spin_lock_irqsave(&vha->hw->hardware_lock, flags);
	rc = __qlt_send_term_exchange(vha, cmd, atio);
	spin_unlock_irqrestore(&vha->hw->hardware_lock, flags);
done:
	/*
	 * Terminate exchange will tell fw to release any active CTIO
	 * that's in FW possession and cleanup the exchange.
	 *
	 * "cmd->state == QLA_TGT_STATE_ABORTED" means CTIO is still
	 * down at FW. Free the cmd later when CTIO comes back later
	 * w/aborted(0x2) status.
	 *
	 * "cmd->state != QLA_TGT_STATE_ABORTED" means CTIO is already
	 * back w/some err. Free the cmd now.
	 */
	if ((rc == 1) && (cmd->state != QLA_TGT_STATE_ABORTED)) {
		if (!ha_locked && !in_interrupt())
			msleep(250); /* just in case */

		if (cmd->sg_mapped)
			qlt_unmap_sg(vha, cmd);
		vha->hw->tgt.tgt_ops->free_cmd(cmd);
	}
	return;
}
void qlt_free_cmd(struct qla_tgt_cmd *cmd)
{
	struct qla_tgt_sess *sess = cmd->sess;

	ql_dbg(ql_dbg_tgt, cmd->vha, 0xe074,
	    "%s: se_cmd[%p] ox_id %04x\n",
	    __func__, &cmd->se_cmd,
	    be16_to_cpu(cmd->atio.u.isp24.fcp_hdr.ox_id));

	BUG_ON(cmd->sg_mapped);
	if (unlikely(cmd->free_sg))
		kfree(cmd->sg);

	if (!sess || !sess->se_sess) {
		WARN_ON(1);
		return;
	}
	percpu_ida_free(&sess->se_sess->sess_tag_pool, cmd->se_cmd.map_tag);
}
EXPORT_SYMBOL(qlt_free_cmd);
/* ha->hardware_lock supposed to be held on entry */
static int qlt_prepare_srr_ctio(struct scsi_qla_host *vha,
	struct qla_tgt_cmd *cmd, void *ctio)
{
	struct qla_tgt_srr_ctio *sc;
	struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
	struct qla_tgt_srr_imm *imm;

	tgt->ctio_srr_id++;

	ql_dbg(ql_dbg_tgt_mgt, vha, 0xf019,
	    "qla_target(%d): CTIO with SRR status received\n", vha->vp_idx);

	if (!ctio) {
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf055,
		    "qla_target(%d): SRR CTIO, but ctio is NULL\n",
		    vha->vp_idx);
		return -EINVAL;
	}

	sc = kzalloc(sizeof(*sc), GFP_ATOMIC);
	if (sc != NULL) {
		sc->cmd = cmd;
		/* IRQ is already OFF */
		spin_lock(&tgt->srr_lock);
		sc->srr_id = tgt->ctio_srr_id;
		list_add_tail(&sc->srr_list_entry,
		    &tgt->srr_ctio_list);
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf01a,
		    "CTIO SRR %p added (id %d)\n", sc, sc->srr_id);
		if (tgt->imm_srr_id == tgt->ctio_srr_id) {
			int found = 0;
			list_for_each_entry(imm, &tgt->srr_imm_list,
			    srr_list_entry) {
				if (imm->srr_id == sc->srr_id) {
					found = 1;
					break;
				}
			}
			if (found) {
				ql_dbg(ql_dbg_tgt_mgt, vha, 0xf01b,
				    "Scheduling srr work\n");
				schedule_work(&tgt->srr_work);
			} else {
				ql_dbg(ql_dbg_tgt_mgt, vha, 0xf056,
				    "qla_target(%d): imm_srr_id "
				    "== ctio_srr_id (%d), but there is no "
				    "corresponding SRR IMM, deleting CTIO "
				    "SRR %p\n", vha->vp_idx,
				    tgt->ctio_srr_id, sc);
				list_del(&sc->srr_list_entry);
				spin_unlock(&tgt->srr_lock);

				kfree(sc);
				return -EINVAL;
			}
		}
		spin_unlock(&tgt->srr_lock);
	} else {
		struct qla_tgt_srr_imm *ti;

		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf057,
		    "qla_target(%d): Unable to allocate SRR CTIO entry\n",
		    vha->vp_idx);
		spin_lock(&tgt->srr_lock);
		list_for_each_entry_safe(imm, ti, &tgt->srr_imm_list,
		    srr_list_entry) {
			if (imm->srr_id == tgt->ctio_srr_id) {
				ql_dbg(ql_dbg_tgt_mgt, vha, 0xf01c,
				    "IMM SRR %p deleted (id %d)\n",
				    imm, imm->srr_id);
				list_del(&imm->srr_list_entry);
				qlt_reject_free_srr_imm(vha, imm, 1);
			}
		}
		spin_unlock(&tgt->srr_lock);

		return -ENOMEM;
	}

	return 0;
}
/*
 * ha->hardware_lock supposed to be held on entry. Might drop it, then reacquire
 */
static int qlt_term_ctio_exchange(struct scsi_qla_host *vha, void *ctio,
	struct qla_tgt_cmd *cmd, uint32_t status)
{
	int term = 0;

	if (ctio != NULL) {
		struct ctio7_from_24xx *c = (struct ctio7_from_24xx *)ctio;
		term = !(c->flags &
		    __constant_cpu_to_le16(OF_TERM_EXCH));
	} else
		term = 1;

	if (term)
		qlt_send_term_exchange(vha, cmd, &cmd->atio, 1);

	return term;
}
/* ha->hardware_lock supposed to be held on entry */
static inline struct qla_tgt_cmd *qlt_get_cmd(struct scsi_qla_host *vha,
	uint32_t handle)
{
	struct qla_hw_data *ha = vha->hw;

	handle--;
	if (ha->tgt.cmds[handle] != NULL) {
		struct qla_tgt_cmd *cmd = ha->tgt.cmds[handle];
		ha->tgt.cmds[handle] = NULL;
		return cmd;
	} else
		return NULL;
}
/* ha->hardware_lock supposed to be held on entry */
static struct qla_tgt_cmd *qlt_ctio_to_cmd(struct scsi_qla_host *vha,
	uint32_t handle, void *ctio)
{
	struct qla_tgt_cmd *cmd = NULL;

	/* Clear out internal marks */
	handle &= ~(CTIO_COMPLETION_HANDLE_MARK |
	    CTIO_INTERMEDIATE_HANDLE_MARK);

	if (handle != QLA_TGT_NULL_HANDLE) {
		if (unlikely(handle == QLA_TGT_SKIP_HANDLE)) {
			ql_dbg(ql_dbg_tgt, vha, 0xe01d, "%s",
			    "SKIP_HANDLE CTIO\n");
			return NULL;
		}
		/* handle-1 is actually used */
		if (unlikely(handle > DEFAULT_OUTSTANDING_COMMANDS)) {
			ql_dbg(ql_dbg_tgt, vha, 0xe052,
			    "qla_target(%d): Wrong handle %x received\n",
			    vha->vp_idx, handle);
			return NULL;
		}
		cmd = qlt_get_cmd(vha, handle);
		if (unlikely(cmd == NULL)) {
			ql_dbg(ql_dbg_tgt, vha, 0xe053,
			    "qla_target(%d): Suspicious: unable to "
			    "find the command with handle %x\n", vha->vp_idx,
			    handle);
			return NULL;
		}
	} else if (ctio != NULL) {
		/* We can't get loop ID from CTIO7 */
		ql_dbg(ql_dbg_tgt, vha, 0xe054,
		    "qla_target(%d): Wrong CTIO received: QLA24xx doesn't "
		    "support NULL handles\n", vha->vp_idx);
		return NULL;
	}

	return cmd;
}
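
/*
 * Completion path for CTIOs returned by the firmware: map the handle back
 * to the command, classify error statuses (resets, logouts, SRR, DIF
 * errors), terminate the exchange when needed and hand data-in/status
 * completions up to the tcm_qla2xxx layer.
 */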
/*
 * ha->hardware_lock supposed to be held on entry. Might drop it, then reacquire
 */
static void qlt_do_ctio_completion(struct scsi_qla_host *vha, uint32_t handle,
	uint32_t status, void *ctio)
{
	struct qla_hw_data *ha = vha->hw;
	struct se_cmd *se_cmd;
	struct target_core_fabric_ops *tfo;
	struct qla_tgt_cmd *cmd;

	ql_dbg(ql_dbg_tgt, vha, 0xe01e,
	    "qla_target(%d): handle(ctio %p status %#x) <- %08x\n",
	    vha->vp_idx, ctio, status, handle);

	if (handle & CTIO_INTERMEDIATE_HANDLE_MARK) {
		/* That could happen only in case of an error/reset/abort */
		if (status != CTIO_SUCCESS) {
			ql_dbg(ql_dbg_tgt_mgt, vha, 0xf01d,
			    "Intermediate CTIO received"
			    " (status %x)\n", status);
		}
		return;
	}

	cmd = qlt_ctio_to_cmd(vha, handle, ctio);
	if (cmd == NULL)
		return;

	se_cmd = &cmd->se_cmd;
	tfo = se_cmd->se_tfo;

	if (cmd->sg_mapped)
		qlt_unmap_sg(vha, cmd);

	if (unlikely(status != CTIO_SUCCESS)) {
		switch (status & 0xFFFF) {
		case CTIO_LIP_RESET:
		case CTIO_TARGET_RESET:
		case CTIO_ABORTED:
			/* driver request abort via Terminate exchange */
		case CTIO_TIMEOUT:
		case CTIO_INVALID_RX_ID:
			/* They are OK */
			ql_dbg(ql_dbg_tgt_mgt, vha, 0xf058,
			    "qla_target(%d): CTIO with "
			    "status %#x received, state %x, se_cmd %p, "
			    "(LIP_RESET=e, ABORTED=2, TARGET_RESET=17, "
			    "TIMEOUT=b, INVALID_RX_ID=8)\n", vha->vp_idx,
			    status, cmd->state, se_cmd);
			break;

		case CTIO_PORT_LOGGED_OUT:
		case CTIO_PORT_UNAVAILABLE:
			ql_dbg(ql_dbg_tgt_mgt, vha, 0xf059,
			    "qla_target(%d): CTIO with PORT LOGGED "
			    "OUT (29) or PORT UNAVAILABLE (28) status %x "
			    "received (state %x, se_cmd %p)\n", vha->vp_idx,
			    status, cmd->state, se_cmd);
			break;

		case CTIO_SRR_RECEIVED:
			ql_dbg(ql_dbg_tgt_mgt, vha, 0xf05a,
			    "qla_target(%d): CTIO with SRR_RECEIVED"
			    " status %x received (state %x, se_cmd %p)\n",
			    vha->vp_idx, status, cmd->state, se_cmd);
			if (qlt_prepare_srr_ctio(vha, cmd, ctio) != 0)
				break;
			else
				return;

		case CTIO_DIF_ERROR: {
			struct ctio_crc_from_fw *crc =
				(struct ctio_crc_from_fw *)ctio;
			ql_dbg(ql_dbg_tgt_mgt, vha, 0xf073,
			    "qla_target(%d): CTIO with DIF_ERROR status %x received (state %x, se_cmd %p) actual_dif[0x%llx] expect_dif[0x%llx]\n",
			    vha->vp_idx, status, cmd->state, se_cmd,
			    *((u64 *)&crc->actual_dif[0]),
			    *((u64 *)&crc->expected_dif[0]));

			if (qlt_handle_dif_error(vha, cmd, ctio)) {
				if (cmd->state == QLA_TGT_STATE_NEED_DATA) {
					/* scsi Write/xfer rdy complete */
					goto skip_term;
				} else {
					/* scsi read/xmit respond complete
					 * call handle dif to send scsi status
					 * rather than terminate exchange.
					 */
					cmd->state = QLA_TGT_STATE_PROCESSED;
					ha->tgt.tgt_ops->handle_dif_err(cmd);
					return;
				}
			} else {
				/* Need to generate a SCSI good completion.
				 * because FW did not send scsi status.
				 */
				status = 0;
				goto skip_term;
			}
			break;
		}
		default:
			ql_dbg(ql_dbg_tgt_mgt, vha, 0xf05b,
			    "qla_target(%d): CTIO with error status 0x%x received (state %x, se_cmd %p\n",
			    vha->vp_idx, status, cmd->state, se_cmd);
			break;
		}

		/* "cmd->state == QLA_TGT_STATE_ABORTED" means
		 * cmd is already aborted/terminated, we don't
		 * need to terminate again. The exchange is already
		 * cleaned up/freed at FW level. Just cleanup at driver
		 * level.
		 */
		if ((cmd->state != QLA_TGT_STATE_NEED_DATA) &&
		    (cmd->state != QLA_TGT_STATE_ABORTED)) {
			if (qlt_term_ctio_exchange(vha, ctio, cmd, status))
				return;
		}
	}
skip_term:

	if (cmd->state == QLA_TGT_STATE_PROCESSED) {
		ql_dbg(ql_dbg_tgt, vha, 0xe01f, "Command %p finished\n", cmd);
	} else if (cmd->state == QLA_TGT_STATE_NEED_DATA) {
		int rx_status = 0;

		cmd->state = QLA_TGT_STATE_DATA_IN;

		if (unlikely(status != CTIO_SUCCESS))
			rx_status = -EIO;
		else
			cmd->write_data_transferred = 1;

		ql_dbg(ql_dbg_tgt, vha, 0xe020,
		    "Data received, context %x, rx_status %d\n",
		    0x0, rx_status);

		ha->tgt.tgt_ops->handle_data(cmd);
		return;
	} else if (cmd->state == QLA_TGT_STATE_ABORTED) {
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf01e,
		    "Aborted command %p (tag %d) finished\n", cmd, cmd->tag);
	} else {
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf05c,
		    "qla_target(%d): A command in state (%d) should "
		    "not return a CTIO complete\n", vha->vp_idx, cmd->state);
	}

	if (unlikely(status != CTIO_SUCCESS) &&
	    (cmd->state != QLA_TGT_STATE_ABORTED)) {
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf01f, "Finishing failed CTIO\n");
	}

	ha->tgt.tgt_ops->free_cmd(cmd);
}
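
/* Translate the FCP_CMND task attribute into the TCM MSG_*_TAG value. */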
static inline int qlt_get_fcp_task_attr(struct scsi_qla_host *vha,
	uint8_t task_codes)
{
	int fcp_task_attr;

	switch (task_codes) {
	case ATIO_SIMPLE_QUEUE:
		fcp_task_attr = MSG_SIMPLE_TAG;
		break;
	case ATIO_HEAD_OF_QUEUE:
		fcp_task_attr = MSG_HEAD_TAG;
		break;
	case ATIO_ORDERED_QUEUE:
		fcp_task_attr = MSG_ORDERED_TAG;
		break;
	case ATIO_ACA_QUEUE:
		fcp_task_attr = MSG_ACA_TAG;
		break;
	case ATIO_UNTAGGED:
		fcp_task_attr = MSG_SIMPLE_TAG;
		break;
	default:
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf05d,
		    "qla_target: unknown task code %x, use ORDERED instead\n",
		    task_codes);
		fcp_task_attr = MSG_ORDERED_TAG;
		break;
	}

	return fcp_task_attr;
}
static struct qla_tgt_sess *qlt_make_local_sess(struct scsi_qla_host *,
	uint8_t *);

/*
 * Process context for I/O path into tcm_qla2xxx code
 */
static void __qlt_do_work(struct qla_tgt_cmd *cmd)
{
	scsi_qla_host_t *vha = cmd->vha;
	struct qla_hw_data *ha = vha->hw;
	struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
	struct qla_tgt_sess *sess = cmd->sess;
	struct atio_from_isp *atio = &cmd->atio;
	unsigned char *cdb;
	unsigned long flags;
	uint32_t data_length;
	int ret, fcp_task_attr, data_dir, bidi = 0;

	if (tgt->tgt_stop)
		goto out_term;

	cdb = &atio->u.isp24.fcp_cmnd.cdb[0];
	cmd->tag = atio->u.isp24.exchange_addr;
	cmd->unpacked_lun = scsilun_to_int(
	    (struct scsi_lun *)&atio->u.isp24.fcp_cmnd.lun);

	if (atio->u.isp24.fcp_cmnd.rddata &&
	    atio->u.isp24.fcp_cmnd.wrdata) {
		bidi = 1;
		data_dir = DMA_TO_DEVICE;
	} else if (atio->u.isp24.fcp_cmnd.rddata)
		data_dir = DMA_FROM_DEVICE;
	else if (atio->u.isp24.fcp_cmnd.wrdata)
		data_dir = DMA_TO_DEVICE;
	else
		data_dir = DMA_NONE;

	fcp_task_attr = qlt_get_fcp_task_attr(vha,
	    atio->u.isp24.fcp_cmnd.task_attr);
	data_length = be32_to_cpu(get_unaligned((uint32_t *)
	    &atio->u.isp24.fcp_cmnd.add_cdb[
	    atio->u.isp24.fcp_cmnd.add_cdb_len]));

	ql_dbg(ql_dbg_tgt, vha, 0xe022,
		"qla_target: START qla cmd: %p se_cmd %p lun: 0x%04x (tag %d) len(%d) ox_id %x\n",
		cmd, &cmd->se_cmd, cmd->unpacked_lun, cmd->tag, data_length,
		cmd->atio.u.isp24.fcp_hdr.ox_id);

	ret = ha->tgt.tgt_ops->handle_cmd(vha, cmd, cdb, data_length,
	    fcp_task_attr, data_dir, bidi);
	if (ret != 0)
		goto out_term;
	/*
	 * Drop extra session reference from qla_tgt_handle_cmd_for_atio*(
	 */
	spin_lock_irqsave(&ha->hardware_lock, flags);
	ha->tgt.tgt_ops->put_sess(sess);
	spin_unlock_irqrestore(&ha->hardware_lock, flags);
	return;

out_term:
	ql_dbg(ql_dbg_tgt_mgt, vha, 0xf020, "Terminating work cmd %p", cmd);
	/*
	 * cmd has not sent to target yet, so pass NULL as the second
	 * argument to qlt_send_term_exchange() and free the memory here.
	 */
	spin_lock_irqsave(&ha->hardware_lock, flags);
	qlt_send_term_exchange(vha, NULL, &cmd->atio, 1);
	percpu_ida_free(&sess->se_sess->sess_tag_pool, cmd->se_cmd.map_tag);
	ha->tgt.tgt_ops->put_sess(sess);
	spin_unlock_irqrestore(&ha->hardware_lock, flags);
}

static void qlt_do_work(struct work_struct *work)
{
	struct qla_tgt_cmd *cmd = container_of(work, struct qla_tgt_cmd, work);

	__qlt_do_work(cmd);
}
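
/*
 * Allocate a pre-registered qla_tgt_cmd from the session tag pool and
 * initialize it from the incoming ATIO.
 */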
static struct qla_tgt_cmd *qlt_get_tag(scsi_qla_host_t *vha,
	struct qla_tgt_sess *sess,
	struct atio_from_isp *atio)
{
	struct se_session *se_sess = sess->se_sess;
	struct qla_tgt_cmd *cmd;
	int tag;

	tag = percpu_ida_alloc(&se_sess->sess_tag_pool, TASK_RUNNING);
	if (tag < 0)
		return NULL;

	cmd = &((struct qla_tgt_cmd *)se_sess->sess_cmd_map)[tag];
	memset(cmd, 0, sizeof(struct qla_tgt_cmd));

	memcpy(&cmd->atio, atio, sizeof(*atio));
	cmd->state = QLA_TGT_STATE_NEW;
	cmd->tgt = vha->vha_tgt.qla_tgt;
	cmd->vha = vha;
	cmd->se_cmd.map_tag = tag;
	cmd->sess = sess;
	cmd->loop_id = sess->loop_id;
	cmd->conf_compl_supported = sess->conf_compl_supported;

	return cmd;
}
static void qlt_send_busy(struct scsi_qla_host *, struct atio_from_isp *,
	uint16_t);

static void qlt_create_sess_from_atio(struct work_struct *work)
{
	struct qla_tgt_sess_op *op = container_of(work,
				struct qla_tgt_sess_op, work);
	scsi_qla_host_t *vha = op->vha;
	struct qla_hw_data *ha = vha->hw;
	struct qla_tgt_sess *sess;
	struct qla_tgt_cmd *cmd;
	unsigned long flags;
	uint8_t *s_id = op->atio.u.isp24.fcp_hdr.s_id;

	ql_dbg(ql_dbg_tgt_mgt, vha, 0xf022,
		"qla_target(%d): Unable to find wwn login"
		" (s_id %x:%x:%x), trying to create it manually\n",
		vha->vp_idx, s_id[0], s_id[1], s_id[2]);

	if (op->atio.u.raw.entry_count > 1) {
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf023,
			"Dropping multy entry atio %p\n", &op->atio);
		goto out_term;
	}

	mutex_lock(&vha->vha_tgt.tgt_mutex);
	sess = qlt_make_local_sess(vha, s_id);
	/* sess has an extra creation ref. */
	mutex_unlock(&vha->vha_tgt.tgt_mutex);

	if (!sess)
		goto out_term;
	/*
	 * Now obtain a pre-allocated session tag using the original op->atio
	 * packet header, and dispatch into __qlt_do_work() using the existing
	 * FCP exchange.
	 */
	cmd = qlt_get_tag(vha, sess, &op->atio);
	if (!cmd) {
		spin_lock_irqsave(&ha->hardware_lock, flags);
		qlt_send_busy(vha, &op->atio, SAM_STAT_BUSY);
		ha->tgt.tgt_ops->put_sess(sess);
		spin_unlock_irqrestore(&ha->hardware_lock, flags);
		kfree(op);
		return;
	}
	/*
	 * __qlt_do_work() will call ha->tgt.tgt_ops->put_sess() to release
	 * the extra reference taken above by qlt_make_local_sess()
	 */
	__qlt_do_work(cmd);
	kfree(op);
	return;

out_term:
	spin_lock_irqsave(&ha->hardware_lock, flags);
	qlt_send_term_exchange(vha, NULL, &op->atio, 1);
	spin_unlock_irqrestore(&ha->hardware_lock, flags);
	kfree(op);
}
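
/*
 * Entry point for new SCSI commands arriving in ATIO packets: look up the
 * session by S_ID (deferring to a workqueue when it does not exist yet),
 * take a session reference and queue the command for process context.
 */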
/* ha->hardware_lock supposed to be held on entry */
static int qlt_handle_cmd_for_atio(struct scsi_qla_host *vha,
	struct atio_from_isp *atio)
{
	struct qla_hw_data *ha = vha->hw;
	struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
	struct qla_tgt_sess *sess;
	struct qla_tgt_cmd *cmd;

	if (unlikely(tgt->tgt_stop)) {
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf021,
		    "New command while device %p is shutting down\n", tgt);
		return -EFAULT;
	}

	sess = ha->tgt.tgt_ops->find_sess_by_s_id(vha, atio->u.isp24.fcp_hdr.s_id);
	if (unlikely(!sess)) {
		struct qla_tgt_sess_op *op = kzalloc(sizeof(struct qla_tgt_sess_op),
						     GFP_ATOMIC);

		if (!op)
			return -ENOMEM;
		memcpy(&op->atio, atio, sizeof(*atio));
		INIT_WORK(&op->work, qlt_create_sess_from_atio);
		queue_work(qla_tgt_wq, &op->work);
		return 0;
	}
	/*
	 * Do kref_get() before returning + dropping qla_hw_data->hardware_lock.
	 */
	kref_get(&sess->se_sess->sess_kref);

	cmd = qlt_get_tag(vha, sess, atio);
	if (!cmd) {
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf05e,
		    "qla_target(%d): Allocation of cmd failed\n", vha->vp_idx);
		ha->tgt.tgt_ops->put_sess(sess);
		return -ENOMEM;
	}

	INIT_WORK(&cmd->work, qlt_do_work);
	queue_work(qla_tgt_wq, &cmd->work);
	return 0;
}
/* ha->hardware_lock supposed to be held on entry */
static int qlt_issue_task_mgmt(struct qla_tgt_sess *sess, uint32_t lun,
	int fn, void *iocb, int flags)
{
	struct scsi_qla_host *vha = sess->vha;
	struct qla_hw_data *ha = vha->hw;
	struct qla_tgt_mgmt_cmd *mcmd;
	int res;
	uint8_t tmr_func;

	mcmd = mempool_alloc(qla_tgt_mgmt_cmd_mempool, GFP_ATOMIC);
	if (!mcmd) {
		ql_dbg(ql_dbg_tgt_tmr, vha, 0x10009,
		    "qla_target(%d): Allocation of management "
		    "command failed, some commands and their data could "
		    "leak\n", vha->vp_idx);
		return -ENOMEM;
	}
	memset(mcmd, 0, sizeof(*mcmd));
	mcmd->sess = sess;

	if (iocb) {
		memcpy(&mcmd->orig_iocb.imm_ntfy, iocb,
		    sizeof(mcmd->orig_iocb.imm_ntfy));
	}
	mcmd->tmr_func = fn;
	mcmd->flags = flags;

	switch (fn) {
	case QLA_TGT_CLEAR_ACA:
		ql_dbg(ql_dbg_tgt_tmr, vha, 0x10000,
		    "qla_target(%d): CLEAR_ACA received\n", sess->vha->vp_idx);
		tmr_func = TMR_CLEAR_ACA;
		break;

	case QLA_TGT_TARGET_RESET:
		ql_dbg(ql_dbg_tgt_tmr, vha, 0x10001,
		    "qla_target(%d): TARGET_RESET received\n",
		    sess->vha->vp_idx);
		tmr_func = TMR_TARGET_WARM_RESET;
		break;

	case QLA_TGT_LUN_RESET:
		ql_dbg(ql_dbg_tgt_tmr, vha, 0x10002,
		    "qla_target(%d): LUN_RESET received\n", sess->vha->vp_idx);
		tmr_func = TMR_LUN_RESET;
		break;

	case QLA_TGT_CLEAR_TS:
		ql_dbg(ql_dbg_tgt_tmr, vha, 0x10003,
		    "qla_target(%d): CLEAR_TS received\n", sess->vha->vp_idx);
		tmr_func = TMR_CLEAR_TASK_SET;
		break;

	case QLA_TGT_ABORT_TS:
		ql_dbg(ql_dbg_tgt_tmr, vha, 0x10004,
		    "qla_target(%d): ABORT_TS received\n", sess->vha->vp_idx);
		tmr_func = TMR_ABORT_TASK_SET;
		break;
#if 0
	case QLA_TGT_ABORT_ALL:
		ql_dbg(ql_dbg_tgt_tmr, vha, 0x10005,
		    "qla_target(%d): Doing ABORT_ALL_TASKS\n",
		    sess->vha->vp_idx);
		tmr_func = 0;
		break;

	case QLA_TGT_ABORT_ALL_SESS:
		ql_dbg(ql_dbg_tgt_tmr, vha, 0x10006,
		    "qla_target(%d): Doing ABORT_ALL_TASKS_SESS\n",
		    sess->vha->vp_idx);
		tmr_func = 0;
		break;

	case QLA_TGT_NEXUS_LOSS_SESS:
		ql_dbg(ql_dbg_tgt_tmr, vha, 0x10007,
		    "qla_target(%d): Doing NEXUS_LOSS_SESS\n",
		    sess->vha->vp_idx);
		tmr_func = 0;
		break;

	case QLA_TGT_NEXUS_LOSS:
		ql_dbg(ql_dbg_tgt_tmr, vha, 0x10008,
		    "qla_target(%d): Doing NEXUS_LOSS\n", sess->vha->vp_idx);
		tmr_func = 0;
		break;
#endif
	default:
		ql_dbg(ql_dbg_tgt_tmr, vha, 0x1000a,
		    "qla_target(%d): Unknown task mgmt fn 0x%x\n",
		    sess->vha->vp_idx, fn);
		mempool_free(mcmd, qla_tgt_mgmt_cmd_mempool);
		return -ENOSYS;
	}

	res = ha->tgt.tgt_ops->handle_tmr(mcmd, lun, tmr_func, 0);
	if (res != 0) {
		ql_dbg(ql_dbg_tgt_tmr, vha, 0x1000b,
		    "qla_target(%d): tgt.tgt_ops->handle_tmr() failed: %d\n",
		    sess->vha->vp_idx, res);
		mempool_free(mcmd, qla_tgt_mgmt_cmd_mempool);
		return -EFAULT;
	}

	return 0;
}
/* ha->hardware_lock supposed to be held on entry */
static int qlt_handle_task_mgmt(struct scsi_qla_host *vha, void *iocb)
{
	struct atio_from_isp *a = (struct atio_from_isp *)iocb;
	struct qla_hw_data *ha = vha->hw;
	struct qla_tgt *tgt;
	struct qla_tgt_sess *sess;
	uint32_t lun, unpacked_lun;
	int lun_size, fn;

	tgt = vha->vha_tgt.qla_tgt;

	lun = a->u.isp24.fcp_cmnd.lun;
	lun_size = sizeof(a->u.isp24.fcp_cmnd.lun);
	fn = a->u.isp24.fcp_cmnd.task_mgmt_flags;
	sess = ha->tgt.tgt_ops->find_sess_by_s_id(vha,
	    a->u.isp24.fcp_hdr.s_id);
	unpacked_lun = scsilun_to_int((struct scsi_lun *)&lun);

	if (!sess) {
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf024,
		    "qla_target(%d): task mgmt fn 0x%x for "
		    "non-existant session\n", vha->vp_idx, fn);
		return qlt_sched_sess_work(tgt, QLA_TGT_SESS_WORK_TM, iocb,
		    sizeof(struct atio_from_isp));
	}

	return qlt_issue_task_mgmt(sess, unpacked_lun, fn, iocb, 0);
}
/* ha->hardware_lock supposed to be held on entry */
static int __qlt_abort_task(struct scsi_qla_host *vha,
	struct imm_ntfy_from_isp *iocb, struct qla_tgt_sess *sess)
{
	struct atio_from_isp *a = (struct atio_from_isp *)iocb;
	struct qla_hw_data *ha = vha->hw;
	struct qla_tgt_mgmt_cmd *mcmd;
	uint32_t lun, unpacked_lun;
	int rc;

	mcmd = mempool_alloc(qla_tgt_mgmt_cmd_mempool, GFP_ATOMIC);
	if (mcmd == NULL) {
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf05f,
		    "qla_target(%d): %s: Allocation of ABORT cmd failed\n",
		    vha->vp_idx, __func__);
		return -ENOMEM;
	}
	memset(mcmd, 0, sizeof(*mcmd));

	mcmd->sess = sess;
	memcpy(&mcmd->orig_iocb.imm_ntfy, iocb,
	    sizeof(mcmd->orig_iocb.imm_ntfy));

	lun = a->u.isp24.fcp_cmnd.lun;
	unpacked_lun = scsilun_to_int((struct scsi_lun *)&lun);

	rc = ha->tgt.tgt_ops->handle_tmr(mcmd, unpacked_lun, TMR_ABORT_TASK,
	    le16_to_cpu(iocb->u.isp2x.seq_id));
	if (rc != 0) {
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf060,
		    "qla_target(%d): tgt_ops->handle_tmr() failed: %d\n",
		    vha->vp_idx, rc);
		mempool_free(mcmd, qla_tgt_mgmt_cmd_mempool);
		return -EFAULT;
	}

	return 0;
}
/* ha->hardware_lock supposed to be held on entry */
static int qlt_abort_task(struct scsi_qla_host *vha,
	struct imm_ntfy_from_isp *iocb)
{
	struct qla_hw_data *ha = vha->hw;
	struct qla_tgt_sess *sess;
	int loop_id;

	loop_id = GET_TARGET_ID(ha, (struct atio_from_isp *)iocb);

	sess = ha->tgt.tgt_ops->find_sess_by_loop_id(vha, loop_id);
	if (sess == NULL) {
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf025,
		    "qla_target(%d): task abort for unexisting "
		    "session\n", vha->vp_idx);
		return qlt_sched_sess_work(vha->vha_tgt.qla_tgt,
		    QLA_TGT_SESS_WORK_ABORT, iocb, sizeof(*iocb));
	}

	return __qlt_abort_task(vha, iocb, sess);
}
/*
 * ha->hardware_lock supposed to be held on entry. Might drop it, then reacquire
 */
static int qlt_24xx_handle_els(struct scsi_qla_host *vha,
	struct imm_ntfy_from_isp *iocb)
{
	int res = 0;

	ql_dbg(ql_dbg_tgt_mgt, vha, 0xf026,
	    "qla_target(%d): Port ID: 0x%3phC ELS opcode: 0x%02x\n",
	    vha->vp_idx, iocb->u.isp24.port_id, iocb->u.isp24.status_subcode);

	switch (iocb->u.isp24.status_subcode) {
	case ELS_PLOGI:
	case ELS_FLOGI:
	case ELS_PRLI:
	case ELS_LOGO:
	case ELS_PRLO:
		res = qlt_reset(vha, iocb, QLA_TGT_NEXUS_LOSS_SESS);
		break;
	case ELS_PDISC:
	case ELS_ADISC:
	{
		struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
		if (tgt->link_reinit_iocb_pending) {
			qlt_send_notify_ack(vha, &tgt->link_reinit_iocb,
			    0, 0, 0, 0, 0, 0);
			tgt->link_reinit_iocb_pending = 0;
		}
		res = 1; /* send notify ack */
		break;
	}

	default:
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf061,
		    "qla_target(%d): Unsupported ELS command %x "
		    "received\n", vha->vp_idx, iocb->u.isp24.status_subcode);
		res = qlt_reset(vha, iocb, QLA_TGT_NEXUS_LOSS_SESS);
		break;
	}

	return res;
}
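
/*
 * Rebuild cmd->sg so that it starts at the SRR relative offset.  Note that
 * non-zero offsets are currently rejected early (see the FIXME below)
 * until this path has been tested.
 */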
static int qlt_set_data_offset(struct qla_tgt_cmd *cmd, uint32_t offset)
{
	struct scatterlist *sg, *sgp, *sg_srr, *sg_srr_start = NULL;
	size_t first_offset = 0, rem_offset = offset, tmp = 0;
	int i, sg_srr_cnt, bufflen = 0;

	ql_dbg(ql_dbg_tgt, cmd->vha, 0xe023,
	    "Entering qla_tgt_set_data_offset: cmd: %p, cmd->sg: %p, "
	    "cmd->sg_cnt: %u, direction: %d\n",
	    cmd, cmd->sg, cmd->sg_cnt, cmd->dma_data_direction);

	/*
	 * FIXME: Reject non zero SRR relative offset until we can test
	 * this code properly.
	 */
	pr_debug("Rejecting non zero SRR rel_offs: %u\n", offset);
	return -1;

	if (!cmd->sg || !cmd->sg_cnt) {
		ql_dbg(ql_dbg_tgt, cmd->vha, 0xe055,
		    "Missing cmd->sg or zero cmd->sg_cnt in"
		    " qla_tgt_set_data_offset\n");
		return -EINVAL;
	}
	/*
	 * Walk the current cmd->sg list until we locate the new sg_srr_start
	 */
	for_each_sg(cmd->sg, sg, cmd->sg_cnt, i) {
		ql_dbg(ql_dbg_tgt, cmd->vha, 0xe024,
		    "sg[%d]: %p page: %p, length: %d, offset: %d\n",
		    i, sg, sg_page(sg), sg->length, sg->offset);

		if ((sg->length + tmp) > offset) {
			first_offset = rem_offset;
			sg_srr_start = sg;
			ql_dbg(ql_dbg_tgt, cmd->vha, 0xe025,
			    "Found matching sg[%d], using %p as sg_srr_start, "
			    "and using first_offset: %zu\n", i, sg,
			    first_offset);
			break;
		}
		tmp += sg->length;
		rem_offset -= sg->length;
	}

	if (!sg_srr_start) {
		ql_dbg(ql_dbg_tgt, cmd->vha, 0xe056,
		    "Unable to locate sg_srr_start for offset: %u\n", offset);
		return -EINVAL;
	}
	sg_srr_cnt = (cmd->sg_cnt - i);

	sg_srr = kzalloc(sizeof(struct scatterlist) * sg_srr_cnt, GFP_KERNEL);
	if (!sg_srr) {
		ql_dbg(ql_dbg_tgt, cmd->vha, 0xe057,
		    "Unable to allocate sgp\n");
		return -ENOMEM;
	}
	sg_init_table(sg_srr, sg_srr_cnt);
	sgp = &sg_srr[0];
	/*
	 * Walk the remaining list for sg_srr_start, mapping to the newly
	 * allocated sg_srr taking first_offset into account.
	 */
	for_each_sg(sg_srr_start, sg, sg_srr_cnt, i) {
		if (first_offset) {
			sg_set_page(sgp, sg_page(sg),
			    (sg->length - first_offset), first_offset);
			first_offset = 0;
		} else {
			sg_set_page(sgp, sg_page(sg), sg->length, 0);
		}
		bufflen += sgp->length;

		sgp = sg_next(sgp);
		if (!sgp)
			break;
	}

	cmd->sg = sg_srr;
	cmd->sg_cnt = sg_srr_cnt;
	cmd->bufflen = bufflen;
	cmd->offset += offset;
	cmd->free_sg = 1;

	ql_dbg(ql_dbg_tgt, cmd->vha, 0xe026, "New cmd->sg: %p\n", cmd->sg);
	ql_dbg(ql_dbg_tgt, cmd->vha, 0xe027, "New cmd->sg_cnt: %u\n",
	    cmd->sg_cnt);
	ql_dbg(ql_dbg_tgt, cmd->vha, 0xe028, "New cmd->bufflen: %u\n",
	    cmd->bufflen);
	ql_dbg(ql_dbg_tgt, cmd->vha, 0xe029, "New cmd->offset: %u\n",
	    cmd->offset);

	if (cmd->sg_cnt < 0)
		BUG();

	if (cmd->bufflen < 0)
		BUG();

	return 0;
}
static inline int qlt_srr_adjust_data(struct qla_tgt_cmd *cmd,
	uint32_t srr_rel_offs, int *xmit_type)
{
	int res = 0, rel_offs;

	rel_offs = srr_rel_offs - cmd->offset;
	ql_dbg(ql_dbg_tgt_mgt, cmd->vha, 0xf027, "srr_rel_offs=%d, rel_offs=%d",
	    srr_rel_offs, rel_offs);

	*xmit_type = QLA_TGT_XMIT_ALL;

	if (rel_offs < 0) {
		ql_dbg(ql_dbg_tgt_mgt, cmd->vha, 0xf062,
		    "qla_target(%d): SRR rel_offs (%d) < 0",
		    cmd->vha->vp_idx, rel_offs);
		res = -1;
	} else if (rel_offs == cmd->bufflen)
		*xmit_type = QLA_TGT_XMIT_STATUS;
	else if (rel_offs > 0)
		res = qlt_set_data_offset(cmd, rel_offs);

	return res;
}
/* No locks, thread context */
static void qlt_handle_srr(struct scsi_qla_host *vha,
	struct qla_tgt_srr_ctio *sctio, struct qla_tgt_srr_imm *imm)
{
	struct imm_ntfy_from_isp *ntfy =
	    (struct imm_ntfy_from_isp *)&imm->imm_ntfy;
	struct qla_hw_data *ha = vha->hw;
	struct qla_tgt_cmd *cmd = sctio->cmd;
	struct se_cmd *se_cmd = &cmd->se_cmd;
	unsigned long flags;
	int xmit_type = 0, resp = 0;
	uint32_t offset;
	uint16_t srr_ui;

	offset = le32_to_cpu(ntfy->u.isp24.srr_rel_offs);
	srr_ui = ntfy->u.isp24.srr_ui;

	ql_dbg(ql_dbg_tgt_mgt, vha, 0xf028, "SRR cmd %p, srr_ui %x\n",
	    cmd, srr_ui);

	switch (srr_ui) {
	case SRR_IU_STATUS:
		spin_lock_irqsave(&ha->hardware_lock, flags);
		qlt_send_notify_ack(vha, ntfy,
		    0, 0, 0, NOTIFY_ACK_SRR_FLAGS_ACCEPT, 0, 0);
		spin_unlock_irqrestore(&ha->hardware_lock, flags);
		xmit_type = QLA_TGT_XMIT_STATUS;
		resp = 1;
		break;
	case SRR_IU_DATA_IN:
		if (!cmd->sg || !cmd->sg_cnt) {
			ql_dbg(ql_dbg_tgt_mgt, vha, 0xf063,
			    "Unable to process SRR_IU_DATA_IN due to"
			    " missing cmd->sg, state: %d\n", cmd->state);
			goto out_reject;
		}
		if (se_cmd->scsi_status != 0) {
			ql_dbg(ql_dbg_tgt, vha, 0xe02a,
			    "Rejecting SRR_IU_DATA_IN with non GOOD "
			    "scsi_status\n");
			goto out_reject;
		}
		cmd->bufflen = se_cmd->data_length;

		if (qlt_has_data(cmd)) {
			if (qlt_srr_adjust_data(cmd, offset, &xmit_type) != 0)
				goto out_reject;
			spin_lock_irqsave(&ha->hardware_lock, flags);
			qlt_send_notify_ack(vha, ntfy,
			    0, 0, 0, NOTIFY_ACK_SRR_FLAGS_ACCEPT, 0, 0);
			spin_unlock_irqrestore(&ha->hardware_lock, flags);
			resp = 1;
		} else {
			ql_dbg(ql_dbg_tgt_mgt, vha, 0xf064,
			    "qla_target(%d): SRR for in data for cmd "
			    "without them (tag %d, SCSI status %d), "
			    "reject", vha->vp_idx, cmd->tag,
			    cmd->se_cmd.scsi_status);
			goto out_reject;
		}
		break;
	case SRR_IU_DATA_OUT:
		if (!cmd->sg || !cmd->sg_cnt) {
			ql_dbg(ql_dbg_tgt_mgt, vha, 0xf065,
			    "Unable to process SRR_IU_DATA_OUT due to"
			    " missing cmd->sg\n");
			goto out_reject;
		}
		if (se_cmd->scsi_status != 0) {
			ql_dbg(ql_dbg_tgt, vha, 0xe02b,
			    "Rejecting SRR_IU_DATA_OUT"
			    " with non GOOD scsi_status\n");
			goto out_reject;
		}
		cmd->bufflen = se_cmd->data_length;

		if (qlt_has_data(cmd)) {
			if (qlt_srr_adjust_data(cmd, offset, &xmit_type) != 0)
				goto out_reject;
			spin_lock_irqsave(&ha->hardware_lock, flags);
			qlt_send_notify_ack(vha, ntfy,
			    0, 0, 0, NOTIFY_ACK_SRR_FLAGS_ACCEPT, 0, 0);
			spin_unlock_irqrestore(&ha->hardware_lock, flags);
			if (xmit_type & QLA_TGT_XMIT_DATA)
				qlt_rdy_to_xfer(cmd);
		} else {
			ql_dbg(ql_dbg_tgt_mgt, vha, 0xf066,
			    "qla_target(%d): SRR for out data for cmd "
			    "without them (tag %d, SCSI status %d), "
			    "reject", vha->vp_idx, cmd->tag,
			    cmd->se_cmd.scsi_status);
			goto out_reject;
		}
		break;
	default:
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf067,
		    "qla_target(%d): Unknown srr_ui value %x",
		    vha->vp_idx, srr_ui);
		goto out_reject;
	}

	/* Transmit response in case of status and data-in cases */
	if (resp)
		qlt_xmit_response(cmd, xmit_type, se_cmd->scsi_status);

	return;

out_reject:
	spin_lock_irqsave(&ha->hardware_lock, flags);
	qlt_send_notify_ack(vha, ntfy, 0, 0, 0,
	    NOTIFY_ACK_SRR_FLAGS_REJECT,
	    NOTIFY_ACK_SRR_REJECT_REASON_UNABLE_TO_PERFORM,
	    NOTIFY_ACK_SRR_FLAGS_REJECT_EXPL_NO_EXPL);
	if (cmd->state == QLA_TGT_STATE_NEED_DATA) {
		cmd->state = QLA_TGT_STATE_DATA_IN;
	} else
		qlt_send_term_exchange(vha, cmd, &cmd->atio, 1);
	spin_unlock_irqrestore(&ha->hardware_lock, flags);
}
static void qlt_reject_free_srr_imm(struct scsi_qla_host *vha,
	struct qla_tgt_srr_imm *imm, int ha_locked)
{
	struct qla_hw_data *ha = vha->hw;
	unsigned long flags = 0;

	if (!ha_locked)
		spin_lock_irqsave(&ha->hardware_lock, flags);

	qlt_send_notify_ack(vha, (void *)&imm->imm_ntfy, 0, 0, 0,
	    NOTIFY_ACK_SRR_FLAGS_REJECT,
	    NOTIFY_ACK_SRR_REJECT_REASON_UNABLE_TO_PERFORM,
	    NOTIFY_ACK_SRR_FLAGS_REJECT_EXPL_NO_EXPL);

	if (!ha_locked)
		spin_unlock_irqrestore(&ha->hardware_lock, flags);

	kfree(imm);
}
static void qlt_handle_srr_work(struct work_struct *work)
{
	struct qla_tgt *tgt = container_of(work, struct qla_tgt, srr_work);
	struct scsi_qla_host *vha = tgt->vha;
	struct qla_tgt_srr_ctio *sctio;
	unsigned long flags;

	ql_dbg(ql_dbg_tgt_mgt, vha, 0xf029, "Entering SRR work (tgt %p)\n",
	    tgt);

restart:
	spin_lock_irqsave(&tgt->srr_lock, flags);
	list_for_each_entry(sctio, &tgt->srr_ctio_list, srr_list_entry) {
		struct qla_tgt_srr_imm *imm, *i, *ti;
		struct qla_tgt_cmd *cmd;
		struct se_cmd *se_cmd;

		imm = NULL;
		list_for_each_entry_safe(i, ti, &tgt->srr_imm_list,
		    srr_list_entry) {
			if (i->srr_id == sctio->srr_id) {
				list_del(&i->srr_list_entry);
				if (imm) {
					ql_dbg(ql_dbg_tgt_mgt, vha, 0xf068,
					    "qla_target(%d): There must be "
					    "only one IMM SRR per CTIO SRR "
					    "(IMM SRR %p, id %d, CTIO %p\n",
					    vha->vp_idx, i, i->srr_id, sctio);
					qlt_reject_free_srr_imm(tgt->vha, i, 0);
				} else
					imm = i;
			}
		}

		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf02a,
		    "IMM SRR %p, CTIO SRR %p (id %d)\n", imm, sctio,
		    sctio->srr_id);

		if (imm == NULL) {
			ql_dbg(ql_dbg_tgt_mgt, vha, 0xf02b,
			    "Not found matching IMM for SRR CTIO (id %d)\n",
			    sctio->srr_id);
			continue;
		} else
			list_del(&sctio->srr_list_entry);

		spin_unlock_irqrestore(&tgt->srr_lock, flags);

		cmd = sctio->cmd;
		/*
		 * Reset qla_tgt_cmd SRR values and SGL pointer+count to follow
		 * tcm_qla2xxx_write_pending() and tcm_qla2xxx_queue_data_in()
		 * logic.
		 */
		cmd->offset = 0;
		if (cmd->free_sg) {
			kfree(cmd->sg);
			cmd->sg = NULL;
			cmd->free_sg = 0;
		}
		se_cmd = &cmd->se_cmd;

		cmd->sg_cnt = se_cmd->t_data_nents;
		cmd->sg = se_cmd->t_data_sg;

		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf02c,
		    "SRR cmd %p (se_cmd %p, tag %d, op %x), "
		    "sg_cnt=%d, offset=%d", cmd, &cmd->se_cmd, cmd->tag,
		    se_cmd->t_task_cdb ? se_cmd->t_task_cdb[0] : 0,
		    cmd->sg_cnt, cmd->offset);

		qlt_handle_srr(vha, sctio, imm);

		kfree(imm);
		kfree(sctio);
		goto restart;
	}
	spin_unlock_irqrestore(&tgt->srr_lock, flags);
}
/* ha->hardware_lock supposed to be held on entry */
static void qlt_prepare_srr_imm(struct scsi_qla_host *vha,
	struct imm_ntfy_from_isp *iocb)
{
	struct qla_tgt_srr_imm *imm;
	struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
	struct qla_tgt_srr_ctio *sctio;

	tgt->imm_srr_id++;

	ql_dbg(ql_dbg_tgt_mgt, vha, 0xf02d, "qla_target(%d): SRR received\n",
	    vha->vp_idx);

	imm = kzalloc(sizeof(*imm), GFP_ATOMIC);
	if (imm != NULL) {
		memcpy(&imm->imm_ntfy, iocb, sizeof(imm->imm_ntfy));

		/* IRQ is already OFF */
		spin_lock(&tgt->srr_lock);
		imm->srr_id = tgt->imm_srr_id;
		list_add_tail(&imm->srr_list_entry,
		    &tgt->srr_imm_list);
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf02e,
		    "IMM NTFY SRR %p added (id %d, ui %x)\n",
		    imm, imm->srr_id, iocb->u.isp24.srr_ui);
		if (tgt->imm_srr_id == tgt->ctio_srr_id) {
			int found = 0;
			list_for_each_entry(sctio, &tgt->srr_ctio_list,
			    srr_list_entry) {
				if (sctio->srr_id == imm->srr_id) {
					found = 1;
					break;
				}
			}
			if (found) {
				ql_dbg(ql_dbg_tgt_mgt, vha, 0xf02f, "%s",
				    "Scheduling srr work\n");
				schedule_work(&tgt->srr_work);
			} else {
				ql_dbg(ql_dbg_tgt_mgt, vha, 0xf030,
				    "qla_target(%d): imm_srr_id "
				    "== ctio_srr_id (%d), but there is no "
				    "corresponding SRR CTIO, deleting IMM "
				    "SRR %p\n", vha->vp_idx, tgt->ctio_srr_id,
				    imm);
				list_del(&imm->srr_list_entry);

				kfree(imm);

				spin_unlock(&tgt->srr_lock);
				return;
			}
		}
		spin_unlock(&tgt->srr_lock);
	} else {
		struct qla_tgt_srr_ctio *ts;

		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf069,
		    "qla_target(%d): Unable to allocate SRR IMM "
		    "entry, SRR request will be rejected\n", vha->vp_idx);

		/* IRQ is already OFF */
		spin_lock(&tgt->srr_lock);
		list_for_each_entry_safe(sctio, ts, &tgt->srr_ctio_list,
		    srr_list_entry) {
			if (sctio->srr_id == tgt->imm_srr_id) {
				ql_dbg(ql_dbg_tgt_mgt, vha, 0xf031,
				    "CTIO SRR %p deleted (id %d)\n",
				    sctio, sctio->srr_id);
				list_del(&sctio->srr_list_entry);
				qlt_send_term_exchange(vha, sctio->cmd,
				    &sctio->cmd->atio, 1);
				kfree(sctio);
			}
		}
		spin_unlock(&tgt->srr_lock);
		goto out_reject;
	}

	return;

out_reject:
	qlt_send_notify_ack(vha, iocb, 0, 0, 0,
	    NOTIFY_ACK_SRR_FLAGS_REJECT,
	    NOTIFY_ACK_SRR_REJECT_REASON_UNABLE_TO_PERFORM,
	    NOTIFY_ACK_SRR_FLAGS_REJECT_EXPL_NO_EXPL);
}
/*
 * ha->hardware_lock supposed to be held on entry. Might drop it, then reacquire
 */
static void qlt_handle_imm_notify(struct scsi_qla_host *vha,
	struct imm_ntfy_from_isp *iocb)
{
	struct qla_hw_data *ha = vha->hw;
	uint32_t add_flags = 0;
	int send_notify_ack = 1;
	uint16_t status;

	status = le16_to_cpu(iocb->u.isp2x.status);
	switch (status) {
	case IMM_NTFY_LIP_RESET:
	{
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf032,
		    "qla_target(%d): LIP reset (loop %#x), subcode %x\n",
		    vha->vp_idx, le16_to_cpu(iocb->u.isp24.nport_handle),
		    iocb->u.isp24.status_subcode);

		if (qlt_reset(vha, iocb, QLA_TGT_ABORT_ALL) == 0)
			send_notify_ack = 0;
		break;
	}

	case IMM_NTFY_LIP_LINK_REINIT:
	{
		struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf033,
		    "qla_target(%d): LINK REINIT (loop %#x, "
		    "subcode %x)\n", vha->vp_idx,
		    le16_to_cpu(iocb->u.isp24.nport_handle),
		    iocb->u.isp24.status_subcode);
		if (tgt->link_reinit_iocb_pending) {
			qlt_send_notify_ack(vha, &tgt->link_reinit_iocb,
			    0, 0, 0, 0, 0, 0);
		}
		memcpy(&tgt->link_reinit_iocb, iocb, sizeof(*iocb));
		tgt->link_reinit_iocb_pending = 1;
		/*
		 * QLogic requires to wait after LINK REINIT for possible
		 * PDISC or ADISC ELS commands
		 */
		send_notify_ack = 0;
		break;
	}

	case IMM_NTFY_PORT_LOGOUT:
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf034,
		    "qla_target(%d): Port logout (loop "
		    "%#x, subcode %x)\n", vha->vp_idx,
		    le16_to_cpu(iocb->u.isp24.nport_handle),
		    iocb->u.isp24.status_subcode);

		if (qlt_reset(vha, iocb, QLA_TGT_NEXUS_LOSS_SESS) == 0)
			send_notify_ack = 0;
		/* The sessions will be cleared in the callback, if needed */
		break;

	case IMM_NTFY_GLBL_TPRLO:
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf035,
		    "qla_target(%d): Global TPRLO (%x)\n", vha->vp_idx, status);
		if (qlt_reset(vha, iocb, QLA_TGT_NEXUS_LOSS) == 0)
			send_notify_ack = 0;
		/* The sessions will be cleared in the callback, if needed */
		break;

	case IMM_NTFY_PORT_CONFIG:
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf036,
		    "qla_target(%d): Port config changed (%x)\n", vha->vp_idx,
		    status);
		if (qlt_reset(vha, iocb, QLA_TGT_ABORT_ALL) == 0)
			send_notify_ack = 0;
		/* The sessions will be cleared in the callback, if needed */
		break;

	case IMM_NTFY_GLBL_LOGO:
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf06a,
		    "qla_target(%d): Link failure detected\n",
		    vha->vp_idx);
		/* I_T nexus loss */
		if (qlt_reset(vha, iocb, QLA_TGT_NEXUS_LOSS) == 0)
			send_notify_ack = 0;
		break;

	case IMM_NTFY_IOCB_OVERFLOW:
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf06b,
		    "qla_target(%d): Cannot provide requested "
		    "capability (IOCB overflowed the immediate notify "
		    "resource count)\n", vha->vp_idx);
		break;

	case IMM_NTFY_ABORT_TASK:
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf037,
		    "qla_target(%d): Abort Task (S %08x I %#x -> "
		    "L %#x)\n", vha->vp_idx,
		    le16_to_cpu(iocb->u.isp2x.seq_id),
		    GET_TARGET_ID(ha, (struct atio_from_isp *)iocb),
		    le16_to_cpu(iocb->u.isp2x.lun));
		if (qlt_abort_task(vha, iocb) == 0)
			send_notify_ack = 0;
		break;

	case IMM_NTFY_RESOURCE:
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf06c,
		    "qla_target(%d): Out of resources, host %ld\n",
		    vha->vp_idx, vha->host_no);
		break;

	case IMM_NTFY_MSG_RX:
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf038,
		    "qla_target(%d): Immediate notify task %x\n",
		    vha->vp_idx, iocb->u.isp2x.task_flags);
		if (qlt_handle_task_mgmt(vha, iocb) == 0)
			send_notify_ack = 0;
		break;

	case IMM_NTFY_ELS:
		if (qlt_24xx_handle_els(vha, iocb) == 0)
			send_notify_ack = 0;
		break;

	case IMM_NTFY_SRR:
		qlt_prepare_srr_imm(vha, iocb);
		send_notify_ack = 0;
		break;

	default:
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf06d,
		    "qla_target(%d): Received unknown immediate "
		    "notify status %x\n", vha->vp_idx, status);
		break;
	}

	if (send_notify_ack)
		qlt_send_notify_ack(vha, iocb, add_flags, 0, 0, 0, 0, 0);
}
/*
 * ha->hardware_lock supposed to be held on entry. Might drop it, then reacquire.
 * This function sends busy to ISP 2xxx or 24xx.
 */
static void qlt_send_busy(struct scsi_qla_host *vha,
	struct atio_from_isp *atio, uint16_t status)
{
	struct ctio7_to_24xx *ctio24;
	struct qla_hw_data *ha = vha->hw;
	request_t *pkt;
	struct qla_tgt_sess *sess = NULL;

	sess = ha->tgt.tgt_ops->find_sess_by_s_id(vha,
	    atio->u.isp24.fcp_hdr.s_id);
	if (!sess) {
		qlt_send_term_exchange(vha, NULL, atio, 1);
		return;
	}
	/* Sending marker isn't necessary, since we called from ISR */

	pkt = (request_t *)qla2x00_alloc_iocbs(vha, NULL);
	if (!pkt) {
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf06e,
		    "qla_target(%d): %s failed: unable to allocate "
		    "request packet", vha->vp_idx, __func__);
		return;
	}

	pkt->entry_count = 1;
	pkt->handle = QLA_TGT_SKIP_HANDLE | CTIO_COMPLETION_HANDLE_MARK;

	ctio24 = (struct ctio7_to_24xx *)pkt;
	ctio24->entry_type = CTIO_TYPE7;
	ctio24->nport_handle = sess->loop_id;
	ctio24->timeout = __constant_cpu_to_le16(QLA_TGT_TIMEOUT);
	ctio24->vp_index = vha->vp_idx;
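	/*
	 * The FCP header carries the S_ID in big-endian (wire) order, while
	 * the CTIO7 initiator_id field wants the bytes reversed; hence the
	 * [2]/[1]/[0] copy below.
	 */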
	ctio24->initiator_id[0] = atio->u.isp24.fcp_hdr.s_id[2];
	ctio24->initiator_id[1] = atio->u.isp24.fcp_hdr.s_id[1];
	ctio24->initiator_id[2] = atio->u.isp24.fcp_hdr.s_id[0];
	ctio24->exchange_addr = atio->u.isp24.exchange_addr;
	ctio24->u.status1.flags = (atio->u.isp24.attr << 9) |
	    __constant_cpu_to_le16(
		CTIO7_FLAGS_STATUS_MODE_1 | CTIO7_FLAGS_SEND_STATUS |
		CTIO7_FLAGS_DONT_RET_CTIO);
	/*
	 * CTIO from fw w/o se_cmd doesn't provide enough info to retry it,
	 * if the explicit confirmation is used.
	 */
	ctio24->u.status1.ox_id = swab16(atio->u.isp24.fcp_hdr.ox_id);
	ctio24->u.status1.scsi_status = cpu_to_le16(status);
	ctio24->u.status1.residual = get_unaligned((uint32_t *)
	    &atio->u.isp24.fcp_cmnd.add_cdb[
	    atio->u.isp24.fcp_cmnd.add_cdb_len]);
	if (ctio24->u.status1.residual != 0)
		ctio24->u.status1.scsi_status |= SS_RESIDUAL_UNDER;

	qla2x00_start_iocbs(vha, vha->req);
}
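
/*
 * qlt_24xx_atio_pkt() is the IRQ-context entry point for entries pulled off
 * the ATIO ring: ATIO_TYPE7 entries become SCSI commands or task management
 * requests, while IMMED_NOTIFY entries are routed to qlt_handle_imm_notify().
 */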
/* ha->hardware_lock supposed to be held on entry */
/* called via callback from qla2xxx */
static void qlt_24xx_atio_pkt(struct scsi_qla_host *vha,
	struct atio_from_isp *atio)
{
	struct qla_hw_data *ha = vha->hw;
	struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
	int rc;

	if (unlikely(tgt == NULL)) {
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf039,
		    "ATIO pkt, but no tgt (ha %p)", ha);
		return;
	}
	ql_dbg(ql_dbg_tgt, vha, 0xe02c,
	    "qla_target(%d): ATIO pkt %p: type %02x count %02x",
	    vha->vp_idx, atio, atio->u.raw.entry_type,
	    atio->u.raw.entry_count);

	/*
	 * In tgt_stop mode we also should allow all requests to pass.
	 * Otherwise, some commands can get stuck.
	 */
	tgt->irq_cmd_count++;

	switch (atio->u.raw.entry_type) {
	case ATIO_TYPE7:
		ql_dbg(ql_dbg_tgt, vha, 0xe02d,
		    "ATIO_TYPE7 instance %d, lun %Lx, read/write %d/%d, cdb %x, add_cdb_len %x, data_length %04x, s_id %02x%02x%02x\n",
		    vha->vp_idx, atio->u.isp24.fcp_cmnd.lun,
		    atio->u.isp24.fcp_cmnd.rddata,
		    atio->u.isp24.fcp_cmnd.wrdata,
		    atio->u.isp24.fcp_cmnd.cdb[0],
		    atio->u.isp24.fcp_cmnd.add_cdb_len,
		    be32_to_cpu(get_unaligned((uint32_t *)
			&atio->u.isp24.fcp_cmnd.add_cdb[
			atio->u.isp24.fcp_cmnd.add_cdb_len])),
		    atio->u.isp24.fcp_hdr.s_id[0],
		    atio->u.isp24.fcp_hdr.s_id[1],
		    atio->u.isp24.fcp_hdr.s_id[2]);

		if (unlikely(atio->u.isp24.exchange_addr ==
		    ATIO_EXCHANGE_ADDRESS_UNKNOWN)) {
			ql_dbg(ql_dbg_tgt, vha, 0xe058,
			    "qla_target(%d): ATIO_TYPE7 "
			    "received with UNKNOWN exchange address, "
			    "sending QUEUE_FULL\n", vha->vp_idx);
			qlt_send_busy(vha, atio, SAM_STAT_TASK_SET_FULL);
			break;
		}
		if (likely(atio->u.isp24.fcp_cmnd.task_mgmt_flags == 0))
			rc = qlt_handle_cmd_for_atio(vha, atio);
		else
			rc = qlt_handle_task_mgmt(vha, atio);
		if (unlikely(rc != 0)) {
			if (rc == -ESRCH) {
#if 1 /* With TERM EXCHANGE some FC cards refuse to boot */
				qlt_send_busy(vha, atio, SAM_STAT_BUSY);
#else
				qlt_send_term_exchange(vha, NULL, atio, 1);
#endif
			} else {
				if (tgt->tgt_stop) {
					ql_dbg(ql_dbg_tgt, vha, 0xe059,
					    "qla_target: Unable to send "
					    "command to target for req, "
					    "ignoring.\n");
				} else {
					ql_dbg(ql_dbg_tgt, vha, 0xe05a,
					    "qla_target(%d): Unable to send "
					    "command to target, sending BUSY "
					    "status.\n", vha->vp_idx);
					qlt_send_busy(vha, atio, SAM_STAT_BUSY);
				}
			}
		}
		break;

	case IMMED_NOTIFY_TYPE:
	{
		if (unlikely(atio->u.isp2x.entry_status != 0)) {
			ql_dbg(ql_dbg_tgt, vha, 0xe05b,
			    "qla_target(%d): Received ATIO packet %x "
			    "with error status %x\n", vha->vp_idx,
			    atio->u.raw.entry_type,
			    atio->u.isp2x.entry_status);
			break;
		}
		ql_dbg(ql_dbg_tgt, vha, 0xe02e, "%s", "IMMED_NOTIFY ATIO");
		qlt_handle_imm_notify(vha, (struct imm_ntfy_from_isp *)atio);
		break;
	}

	default:
		ql_dbg(ql_dbg_tgt, vha, 0xe05c,
		    "qla_target(%d): Received unknown ATIO atio "
		    "type %x\n", vha->vp_idx, atio->u.raw.entry_type);
		break;
	}

	tgt->irq_cmd_count--;
}
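
/*
 * qlt_response_pkt() handles target-mode entries that arrive on the normal
 * response ring: CTIO completions, immediate notifies, notify acks and
 * ABTS traffic.
 */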
/* ha->hardware_lock supposed to be held on entry */
/* called via callback from qla2xxx */
static void qlt_response_pkt(struct scsi_qla_host *vha, response_t *pkt)
{
	struct qla_hw_data *ha = vha->hw;
	struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;

	if (unlikely(tgt == NULL)) {
		ql_dbg(ql_dbg_tgt, vha, 0xe05d,
		    "qla_target(%d): Response pkt %x received, but no "
		    "tgt (ha %p)\n", vha->vp_idx, pkt->entry_type, ha);
		return;
	}

	ql_dbg(ql_dbg_tgt, vha, 0xe02f,
	    "qla_target(%d): response pkt %p: T %02x C %02x S %02x "
	    "handle %#x\n", vha->vp_idx, pkt, pkt->entry_type,
	    pkt->entry_count, pkt->entry_status, pkt->handle);

	/*
	 * In tgt_stop mode we also should allow all requests to pass.
	 * Otherwise, some commands can get stuck.
	 */
	tgt->irq_cmd_count++;

	switch (pkt->entry_type) {
	case CTIO_CRC2:
	case CTIO_TYPE7:
	{
		struct ctio7_from_24xx *entry = (struct ctio7_from_24xx *)pkt;
		ql_dbg(ql_dbg_tgt, vha, 0xe030,
			"CTIO[0x%x] 12/CTIO7 7A/CRC2: instance %d\n",
			entry->entry_type, vha->vp_idx);
		qlt_do_ctio_completion(vha, entry->handle,
		    le16_to_cpu(entry->status)|(pkt->entry_status << 16),
		    entry);
		break;
	}

	case ACCEPT_TGT_IO_TYPE:
	{
		struct atio_from_isp *atio = (struct atio_from_isp *)pkt;
		int rc;
		ql_dbg(ql_dbg_tgt, vha, 0xe031,
		    "ACCEPT_TGT_IO instance %d status %04x "
		    "lun %04x read/write %d data_length %04x "
		    "target_id %02x rx_id %04x\n ", vha->vp_idx,
		    le16_to_cpu(atio->u.isp2x.status),
		    le16_to_cpu(atio->u.isp2x.lun),
		    atio->u.isp2x.execution_codes,
		    le32_to_cpu(atio->u.isp2x.data_length), GET_TARGET_ID(ha,
		    atio), atio->u.isp2x.rx_id);
		if (atio->u.isp2x.status !=
		    __constant_cpu_to_le16(ATIO_CDB_VALID)) {
			ql_dbg(ql_dbg_tgt, vha, 0xe05e,
			    "qla_target(%d): ATIO with error "
			    "status %x received\n", vha->vp_idx,
			    le16_to_cpu(atio->u.isp2x.status));
			break;
		}
		ql_dbg(ql_dbg_tgt, vha, 0xe032,
		    "FCP CDB: 0x%02x, sizeof(cdb): %lu",
		    atio->u.isp2x.cdb[0], (unsigned long
		    int)sizeof(atio->u.isp2x.cdb));

		rc = qlt_handle_cmd_for_atio(vha, atio);
		if (unlikely(rc != 0)) {
			if (rc == -ESRCH) {
#if 1 /* With TERM EXCHANGE some FC cards refuse to boot */
				qlt_send_busy(vha, atio, 0);
#else
				qlt_send_term_exchange(vha, NULL, atio, 1);
#endif
			} else {
				if (tgt->tgt_stop) {
					ql_dbg(ql_dbg_tgt, vha, 0xe05f,
					    "qla_target: Unable to send "
					    "command to target, sending TERM "
					    "EXCHANGE for rsp\n");
					qlt_send_term_exchange(vha, NULL,
					    atio, 1);
				} else {
					ql_dbg(ql_dbg_tgt, vha, 0xe060,
					    "qla_target(%d): Unable to send "
					    "command to target, sending BUSY "
					    "status\n", vha->vp_idx);
					qlt_send_busy(vha, atio, 0);
				}
			}
		}
		break;
	}

	case CONTINUE_TGT_IO_TYPE:
	{
		struct ctio_to_2xxx *entry = (struct ctio_to_2xxx *)pkt;
		ql_dbg(ql_dbg_tgt, vha, 0xe033,
		    "CONTINUE_TGT_IO: instance %d\n", vha->vp_idx);
		qlt_do_ctio_completion(vha, entry->handle,
		    le16_to_cpu(entry->status)|(pkt->entry_status << 16),
		    entry);
		break;
	}

	case CTIO_A64_TYPE:
	{
		struct ctio_to_2xxx *entry = (struct ctio_to_2xxx *)pkt;
		ql_dbg(ql_dbg_tgt, vha, 0xe034, "CTIO_A64: instance %d\n",
		    vha->vp_idx);
		qlt_do_ctio_completion(vha, entry->handle,
		    le16_to_cpu(entry->status)|(pkt->entry_status << 16),
		    entry);
		break;
	}

	case IMMED_NOTIFY_TYPE:
		ql_dbg(ql_dbg_tgt, vha, 0xe035, "%s", "IMMED_NOTIFY\n");
		qlt_handle_imm_notify(vha, (struct imm_ntfy_from_isp *)pkt);
		break;

	case NOTIFY_ACK_TYPE:
		if (tgt->notify_ack_expected > 0) {
			struct nack_to_isp *entry = (struct nack_to_isp *)pkt;
			ql_dbg(ql_dbg_tgt, vha, 0xe036,
			    "NOTIFY_ACK seq %08x status %x\n",
			    le16_to_cpu(entry->u.isp2x.seq_id),
			    le16_to_cpu(entry->u.isp2x.status));
			tgt->notify_ack_expected--;
			if (entry->u.isp2x.status !=
			    __constant_cpu_to_le16(NOTIFY_ACK_SUCCESS)) {
				ql_dbg(ql_dbg_tgt, vha, 0xe061,
				    "qla_target(%d): NOTIFY_ACK "
				    "failed %x\n", vha->vp_idx,
				    le16_to_cpu(entry->u.isp2x.status));
			}
		} else {
			ql_dbg(ql_dbg_tgt, vha, 0xe062,
			    "qla_target(%d): Unexpected NOTIFY_ACK received\n",
			    vha->vp_idx);
		}
		break;

	case ABTS_RECV_24XX:
		ql_dbg(ql_dbg_tgt, vha, 0xe037,
		    "ABTS_RECV_24XX: instance %d\n", vha->vp_idx);
		qlt_24xx_handle_abts(vha, (struct abts_recv_from_24xx *)pkt);
		break;

	case ABTS_RESP_24XX:
		if (tgt->abts_resp_expected > 0) {
			struct abts_resp_from_24xx_fw *entry =
				(struct abts_resp_from_24xx_fw *)pkt;
			ql_dbg(ql_dbg_tgt, vha, 0xe038,
			    "ABTS_RESP_24XX: compl_status %x\n",
			    entry->compl_status);
			tgt->abts_resp_expected--;
			if (le16_to_cpu(entry->compl_status) !=
			    ABTS_RESP_COMPL_SUCCESS) {
				if ((entry->error_subcode1 == 0x1E) &&
				    (entry->error_subcode2 == 0)) {
					/*
					 * We've got a race here: aborted
					 * exchange not terminated, i.e.
					 * response for the aborted command was
					 * sent between the abort request was
					 * received and processed.
					 * Unfortunately, the firmware has a
					 * silly requirement that all aborted
					 * exchanges must be explicitly
					 * terminated, otherwise it refuses to
					 * send responses for the abort
					 * requests. So, we have to
					 * (re)terminate the exchange and retry
					 * the abort response.
					 */
					qlt_24xx_retry_term_exchange(vha,
					    entry);
				} else {
					ql_dbg(ql_dbg_tgt, vha, 0xe063,
					    "qla_target(%d): ABTS_RESP_24XX "
					    "failed %x (subcode %x:%x)",
					    vha->vp_idx, entry->compl_status,
					    entry->error_subcode1,
					    entry->error_subcode2);
				}
			}
		} else {
			ql_dbg(ql_dbg_tgt, vha, 0xe064,
			    "qla_target(%d): Unexpected ABTS_RESP_24XX "
			    "received\n", vha->vp_idx);
		}
		break;

	default:
		ql_dbg(ql_dbg_tgt, vha, 0xe065,
		    "qla_target(%d): Received unknown response pkt "
		    "type %x\n", vha->vp_idx, pkt->entry_type);
		break;
	}

	tgt->irq_cmd_count--;
}
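
/*
 * qlt_async_event() lets target mode react to mailbox async events (resets,
 * loop up/down, port updates); most are only logged, but a LOOP_UP also
 * flushes any pending link reinit notify ack.
 */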
/*
 * ha->hardware_lock supposed to be held on entry. Might drop it, then reacquire.
 */
void qlt_async_event(uint16_t code, struct scsi_qla_host *vha,
	uint16_t *mailbox)
{
	struct qla_hw_data *ha = vha->hw;
	struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
	int login_code;

	ql_dbg(ql_dbg_tgt, vha, 0xe039,
	    "scsi(%ld): ha state %d init_done %d oper_mode %d topo %d\n",
	    vha->host_no, atomic_read(&vha->loop_state), vha->flags.init_done,
	    ha->operating_mode, ha->current_topology);

	if (!ha->tgt.tgt_ops)
		return;

	if (unlikely(tgt == NULL)) {
		ql_dbg(ql_dbg_tgt, vha, 0xe03a,
		    "ASYNC EVENT %#x, but no tgt (ha %p)\n", code, ha);
		return;
	}

	if (((code == MBA_POINT_TO_POINT) || (code == MBA_CHG_IN_CONNECTION)) &&
	    IS_QLA2100(ha))
		return;
	/*
	 * In tgt_stop mode we also should allow all requests to pass.
	 * Otherwise, some commands can get stuck.
	 */
	tgt->irq_cmd_count++;

	switch (code) {
	case MBA_RESET:			/* Reset */
	case MBA_SYSTEM_ERR:		/* System Error */
	case MBA_REQ_TRANSFER_ERR:	/* Request Transfer Error */
	case MBA_RSP_TRANSFER_ERR:	/* Response Transfer Error */
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf03a,
		    "qla_target(%d): System error async event %#x "
		    "occurred", vha->vp_idx, code);
		break;
	case MBA_WAKEUP_THRES:		/* Request Queue Wake-up. */
		set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
		break;

	case MBA_LOOP_UP:
	{
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf03b,
		    "qla_target(%d): Async LOOP_UP occurred "
		    "(m[0]=%x, m[1]=%x, m[2]=%x, m[3]=%x)", vha->vp_idx,
		    le16_to_cpu(mailbox[0]), le16_to_cpu(mailbox[1]),
		    le16_to_cpu(mailbox[2]), le16_to_cpu(mailbox[3]));
		if (tgt->link_reinit_iocb_pending) {
			qlt_send_notify_ack(vha, (void *)&tgt->link_reinit_iocb,
			    0, 0, 0, 0, 0, 0);
			tgt->link_reinit_iocb_pending = 0;
		}
		break;
	}

	case MBA_LIP_OCCURRED:
	case MBA_LOOP_DOWN:
	case MBA_LIP_RESET:
	case MBA_RSCN_UPDATE:
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf03c,
		    "qla_target(%d): Async event %#x occurred "
		    "(m[0]=%x, m[1]=%x, m[2]=%x, m[3]=%x)", vha->vp_idx, code,
		    le16_to_cpu(mailbox[0]), le16_to_cpu(mailbox[1]),
		    le16_to_cpu(mailbox[2]), le16_to_cpu(mailbox[3]));
		break;

	case MBA_PORT_UPDATE:
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf03d,
		    "qla_target(%d): Port update async event %#x "
		    "occurred: updating the ports database (m[0]=%x, m[1]=%x, "
		    "m[2]=%x, m[3]=%x)", vha->vp_idx, code,
		    le16_to_cpu(mailbox[0]), le16_to_cpu(mailbox[1]),
		    le16_to_cpu(mailbox[2]), le16_to_cpu(mailbox[3]));

		login_code = le16_to_cpu(mailbox[2]);
		if (login_code == 0x4)
			ql_dbg(ql_dbg_tgt_mgt, vha, 0xf03e,
			    "Async MB 2: Got PLOGI Complete\n");
		else if (login_code == 0x7)
			ql_dbg(ql_dbg_tgt_mgt, vha, 0xf03f,
			    "Async MB 2: Port Logged Out\n");
		break;

	default:
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf040,
		    "qla_target(%d): Async event %#x occurred: "
		    "ignore (m[0]=%x, m[1]=%x, m[2]=%x, m[3]=%x)", vha->vp_idx,
		    code, le16_to_cpu(mailbox[0]), le16_to_cpu(mailbox[1]),
		    le16_to_cpu(mailbox[2]), le16_to_cpu(mailbox[3]));
		break;
	}

	tgt->irq_cmd_count--;
}
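
/*
 * Helper that allocates a temporary fc_port_t just to query the firmware
 * port database for the given loop_id; the caller owns (and must kfree)
 * the returned port.
 */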
static fc_port_t *qlt_get_port_database(struct scsi_qla_host *vha,
	uint16_t loop_id)
{
	fc_port_t *fcport;
	int rc;

	fcport = kzalloc(sizeof(*fcport), GFP_KERNEL);
	if (!fcport) {
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf06f,
		    "qla_target(%d): Allocation of tmp FC port failed",
		    vha->vp_idx);
		return NULL;
	}

	ql_dbg(ql_dbg_tgt_mgt, vha, 0xf041, "loop_id %d", loop_id);

	fcport->loop_id = loop_id;

	rc = qla2x00_get_port_database(vha, fcport, 0);
	if (rc != QLA_SUCCESS) {
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf070,
		    "qla_target(%d): Failed to retrieve fcport "
		    "information -- get_port_database() returned %x "
		    "(loop_id=0x%04x)", vha->vp_idx, rc, loop_id);
		kfree(fcport);
		return NULL;
	}

	return fcport;
}
/* Must be called under tgt_mutex */
static struct qla_tgt_sess *qlt_make_local_sess(struct scsi_qla_host *vha,
	uint8_t *s_id)
{
	struct qla_tgt_sess *sess = NULL;
	fc_port_t *fcport = NULL;
	int rc, global_resets;
	uint16_t loop_id = 0;

retry:
	global_resets =
	    atomic_read(&vha->vha_tgt.qla_tgt->tgt_global_resets_count);

	rc = qla24xx_get_loop_id(vha, s_id, &loop_id);
	if (rc != 0) {
		if ((s_id[0] == 0xFF) &&
		    (s_id[1] == 0xFC)) {
			/*
			 * This is Domain Controller, so it should be
			 * OK to drop SCSI commands from it.
			 */
			ql_dbg(ql_dbg_tgt_mgt, vha, 0xf042,
			    "Unable to find initiator with S_ID %x:%x:%x",
			    s_id[0], s_id[1], s_id[2]);
		} else
			ql_dbg(ql_dbg_tgt_mgt, vha, 0xf071,
			    "qla_target(%d): Unable to find "
			    "initiator with S_ID %x:%x:%x",
			    vha->vp_idx, s_id[0], s_id[1],
			    s_id[2]);
		return NULL;
	}

	fcport = qlt_get_port_database(vha, loop_id);
	if (!fcport)
		return NULL;

	if (global_resets !=
	    atomic_read(&vha->vha_tgt.qla_tgt->tgt_global_resets_count)) {
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf043,
		    "qla_target(%d): global reset during session discovery "
		    "(counter was %d, new %d), retrying", vha->vp_idx,
		    global_resets,
		    atomic_read(&vha->vha_tgt.
			qla_tgt->tgt_global_resets_count));
		goto retry;
	}

	sess = qlt_create_sess(vha, fcport, true);

	kfree(fcport);
	return sess;
}
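
/*
 * qlt_abort_work() runs from the session work list in process context: it
 * resolves (or creates) the session for a deferred ABTS and then replays
 * __qlt_24xx_handle_abts(), rejecting the ABTS if that fails.
 */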
static void qlt_abort_work(struct qla_tgt *tgt,
	struct qla_tgt_sess_work_param *prm)
{
	struct scsi_qla_host *vha = tgt->vha;
	struct qla_hw_data *ha = vha->hw;
	struct qla_tgt_sess *sess = NULL;
	unsigned long flags;
	uint32_t be_s_id;
	uint8_t s_id[3];
	int rc;

	spin_lock_irqsave(&ha->hardware_lock, flags);

	if (tgt->tgt_stop)
		goto out_term;
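
	/*
	 * The ABTS frame stores the S_ID in little-endian order; swap it back
	 * to wire order before looking up the session.
	 */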
	s_id[0] = prm->abts.fcp_hdr_le.s_id[2];
	s_id[1] = prm->abts.fcp_hdr_le.s_id[1];
	s_id[2] = prm->abts.fcp_hdr_le.s_id[0];

	sess = ha->tgt.tgt_ops->find_sess_by_s_id(vha,
	    (unsigned char *)&be_s_id);
	if (!sess) {
		spin_unlock_irqrestore(&ha->hardware_lock, flags);

		mutex_lock(&vha->vha_tgt.tgt_mutex);
		sess = qlt_make_local_sess(vha, s_id);
		/* sess has got an extra creation ref */
		mutex_unlock(&vha->vha_tgt.tgt_mutex);

		spin_lock_irqsave(&ha->hardware_lock, flags);
		if (!sess)
			goto out_term;
	} else {
		kref_get(&sess->se_sess->sess_kref);
	}

	if (tgt->tgt_stop)
		goto out_term;

	rc = __qlt_24xx_handle_abts(vha, &prm->abts, sess);
	if (rc != 0)
		goto out_term;

	ha->tgt.tgt_ops->put_sess(sess);
	spin_unlock_irqrestore(&ha->hardware_lock, flags);
	return;

out_term:
	qlt_24xx_send_abts_resp(vha, &prm->abts, FCP_TMF_REJECTED, false);
	if (sess)
		ha->tgt.tgt_ops->put_sess(sess);
	spin_unlock_irqrestore(&ha->hardware_lock, flags);
}
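
/*
 * qlt_tmr_work() is the deferred task-management counterpart of
 * qlt_abort_work(): it looks up or creates the session, then reissues the
 * TMR via qlt_issue_task_mgmt(), terminating the exchange on failure.
 */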
static void qlt_tmr_work(struct qla_tgt *tgt,
	struct qla_tgt_sess_work_param *prm)
{
	struct atio_from_isp *a = &prm->tm_iocb2;
	struct scsi_qla_host *vha = tgt->vha;
	struct qla_hw_data *ha = vha->hw;
	struct qla_tgt_sess *sess = NULL;
	unsigned long flags;
	uint8_t *s_id = NULL; /* to hide compiler warnings */
	int rc;
	uint32_t lun, unpacked_lun;
	int lun_size, fn;
	void *iocb;

	spin_lock_irqsave(&ha->hardware_lock, flags);

	if (tgt->tgt_stop)
		goto out_term;

	s_id = prm->tm_iocb2.u.isp24.fcp_hdr.s_id;
	sess = ha->tgt.tgt_ops->find_sess_by_s_id(vha, s_id);
	if (!sess) {
		spin_unlock_irqrestore(&ha->hardware_lock, flags);

		mutex_lock(&vha->vha_tgt.tgt_mutex);
		sess = qlt_make_local_sess(vha, s_id);
		/* sess has got an extra creation ref */
		mutex_unlock(&vha->vha_tgt.tgt_mutex);

		spin_lock_irqsave(&ha->hardware_lock, flags);
		if (!sess)
			goto out_term;
	} else {
		kref_get(&sess->se_sess->sess_kref);
	}

	iocb = a;
	lun = a->u.isp24.fcp_cmnd.lun;
	lun_size = sizeof(lun);
	fn = a->u.isp24.fcp_cmnd.task_mgmt_flags;
	unpacked_lun = scsilun_to_int((struct scsi_lun *)&lun);

	rc = qlt_issue_task_mgmt(sess, unpacked_lun, fn, iocb, 0);
	if (rc != 0)
		goto out_term;

	ha->tgt.tgt_ops->put_sess(sess);
	spin_unlock_irqrestore(&ha->hardware_lock, flags);
	return;

out_term:
	qlt_send_term_exchange(vha, NULL, &prm->tm_iocb2, 1);
	if (sess)
		ha->tgt.tgt_ops->put_sess(sess);
	spin_unlock_irqrestore(&ha->hardware_lock, flags);
}
static void qlt_sess_work_fn(struct work_struct *work)
{
	struct qla_tgt *tgt = container_of(work, struct qla_tgt, sess_work);
	struct scsi_qla_host *vha = tgt->vha;
	unsigned long flags;

	ql_dbg(ql_dbg_tgt_mgt, vha, 0xf000, "Sess work (tgt %p)", tgt);

	spin_lock_irqsave(&tgt->sess_work_lock, flags);
	while (!list_empty(&tgt->sess_works_list)) {
		struct qla_tgt_sess_work_param *prm = list_entry(
		    tgt->sess_works_list.next, typeof(*prm),
		    sess_works_list_entry);

		/*
		 * This work can be scheduled on several CPUs at time, so we
		 * must delete the entry to eliminate double processing
		 */
		list_del(&prm->sess_works_list_entry);

		spin_unlock_irqrestore(&tgt->sess_work_lock, flags);

		switch (prm->type) {
		case QLA_TGT_SESS_WORK_ABORT:
			qlt_abort_work(tgt, prm);
			break;
		case QLA_TGT_SESS_WORK_TM:
			qlt_tmr_work(tgt, prm);
			break;
		default:
			BUG_ON(1);
			break;
		}

		spin_lock_irqsave(&tgt->sess_work_lock, flags);

		kfree(prm);
	}
	spin_unlock_irqrestore(&tgt->sess_work_lock, flags);
}
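
/*
 * qlt_add_target() allocates and wires up the per-host struct qla_tgt
 * (session lists, SRR lists, work items) and publishes it in
 * base_vha->vha_tgt.qla_tgt.
 */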
/* Must be called under tgt_host_action_mutex */
int qlt_add_target(struct qla_hw_data *ha, struct scsi_qla_host *base_vha)
{
	struct qla_tgt *tgt;

	if (!QLA_TGT_MODE_ENABLED())
		return 0;

	if (!IS_TGT_MODE_CAPABLE(ha)) {
		ql_log(ql_log_warn, base_vha, 0xe070,
		    "This adapter does not support target mode.\n");
		return 0;
	}

	ql_dbg(ql_dbg_tgt, base_vha, 0xe03b,
	    "Registering target for host %ld(%p).\n", base_vha->host_no, ha);

	BUG_ON(base_vha->vha_tgt.qla_tgt != NULL);

	tgt = kzalloc(sizeof(struct qla_tgt), GFP_KERNEL);
	if (!tgt) {
		ql_dbg(ql_dbg_tgt, base_vha, 0xe066,
		    "Unable to allocate struct qla_tgt\n");
		return -ENOMEM;
	}

	if (!(base_vha->host->hostt->supported_mode & MODE_TARGET))
		base_vha->host->hostt->supported_mode |= MODE_TARGET;

	tgt->ha = ha;
	tgt->vha = base_vha;
	init_waitqueue_head(&tgt->waitQ);
	INIT_LIST_HEAD(&tgt->sess_list);
	INIT_LIST_HEAD(&tgt->del_sess_list);
	INIT_DELAYED_WORK(&tgt->sess_del_work,
		(void (*)(struct work_struct *))qlt_del_sess_work_fn);
	spin_lock_init(&tgt->sess_work_lock);
	INIT_WORK(&tgt->sess_work, qlt_sess_work_fn);
	INIT_LIST_HEAD(&tgt->sess_works_list);
	spin_lock_init(&tgt->srr_lock);
	INIT_LIST_HEAD(&tgt->srr_ctio_list);
	INIT_LIST_HEAD(&tgt->srr_imm_list);
	INIT_WORK(&tgt->srr_work, qlt_handle_srr_work);
	atomic_set(&tgt->tgt_global_resets_count, 0);

	base_vha->vha_tgt.qla_tgt = tgt;

	ql_dbg(ql_dbg_tgt, base_vha, 0xe067,
		"qla_target(%d): using 64 Bit PCI addressing",
		base_vha->vp_idx);
	tgt->tgt_enable_64bit_addr = 1;
	tgt->sg_tablesize = QLA_TGT_MAX_SG_24XX(base_vha->req->length - 3);
	tgt->datasegs_per_cmd = QLA_TGT_DATASEGS_PER_CMD_24XX;
	tgt->datasegs_per_cont = QLA_TGT_DATASEGS_PER_CONT_24XX;

	if (base_vha->fc_vport)
		return 0;

	mutex_lock(&qla_tgt_mutex);
	list_add_tail(&tgt->tgt_list_entry, &qla_tgt_glist);
	mutex_unlock(&qla_tgt_mutex);

	return 0;
}
/* Must be called under tgt_host_action_mutex */
int qlt_remove_target(struct qla_hw_data *ha, struct scsi_qla_host *vha)
{
	if (!vha->vha_tgt.qla_tgt)
		return 0;

	if (vha->fc_vport) {
		qlt_release(vha->vha_tgt.qla_tgt);
		return 0;
	}
	mutex_lock(&qla_tgt_mutex);
	list_del(&vha->vha_tgt.qla_tgt->tgt_list_entry);
	mutex_unlock(&qla_tgt_mutex);

	ql_dbg(ql_dbg_tgt, vha, 0xe03c, "Unregistering target for host %ld(%p)",
	    vha->host_no, ha);
	qlt_release(vha->vha_tgt.qla_tgt);

	return 0;
}
static void qlt_lport_dump(struct scsi_qla_host *vha, u64 wwpn,
	unsigned char *b)
{
	int i;

	pr_debug("qla2xxx HW vha->node_name: ");
	for (i = 0; i < WWN_SIZE; i++)
		pr_debug("%02x ", vha->node_name[i]);
	pr_debug("\n");
	pr_debug("qla2xxx HW vha->port_name: ");
	for (i = 0; i < WWN_SIZE; i++)
		pr_debug("%02x ", vha->port_name[i]);
	pr_debug("\n");
	pr_debug("qla2xxx passed configfs WWPN: ");
	put_unaligned_be64(wwpn, b);
	for (i = 0; i < WWN_SIZE; i++)
		pr_debug("%02x ", b[i]);
	pr_debug("\n");
}
/**
 * qla_tgt_lport_register - register lport with external module
 *
 * @qla_tgt_ops: Pointer for tcm_qla2xxx qla_tgt_ops
 * @wwpn: Passed FC target WWPN
 * @callback: lport initialization callback for tcm_qla2xxx code
 * @target_lport_ptr: pointer for tcm_qla2xxx specific lport data
 */
int qlt_lport_register(void *target_lport_ptr, u64 phys_wwpn,
		       u64 npiv_wwpn, u64 npiv_wwnn,
		       int (*callback)(struct scsi_qla_host *, void *, u64, u64))
{
	struct qla_tgt *tgt;
	struct scsi_qla_host *vha;
	struct qla_hw_data *ha;
	struct Scsi_Host *host;
	unsigned long flags;
	int rc;
	u8 b[WWN_SIZE];

	mutex_lock(&qla_tgt_mutex);
	list_for_each_entry(tgt, &qla_tgt_glist, tgt_list_entry) {
		vha = tgt->vha;
		ha = vha->hw;

		host = vha->host;
		if (!host)
			continue;

		if (!(host->hostt->supported_mode & MODE_TARGET))
			continue;

		spin_lock_irqsave(&ha->hardware_lock, flags);
		if ((!npiv_wwpn || !npiv_wwnn) && host->active_mode & MODE_TARGET) {
			pr_debug("MODE_TARGET already active on qla2xxx(%d)\n",
			    host->host_no);
			spin_unlock_irqrestore(&ha->hardware_lock, flags);
			continue;
		}
		if (tgt->tgt_stop) {
			pr_debug("MODE_TARGET in shutdown on qla2xxx(%d)\n",
			    host->host_no);
			spin_unlock_irqrestore(&ha->hardware_lock, flags);
			continue;
		}
		spin_unlock_irqrestore(&ha->hardware_lock, flags);

		if (!scsi_host_get(host)) {
			ql_dbg(ql_dbg_tgt, vha, 0xe068,
			    "Unable to scsi_host_get() for"
			    " qla2xxx scsi_host\n");
			continue;
		}
		qlt_lport_dump(vha, phys_wwpn, b);

		if (memcmp(vha->port_name, b, WWN_SIZE)) {
			scsi_host_put(host);
			continue;
		}
		rc = (*callback)(vha, target_lport_ptr, npiv_wwpn, npiv_wwnn);
		if (rc != 0)
			scsi_host_put(host);

		mutex_unlock(&qla_tgt_mutex);
		return rc;
	}
	mutex_unlock(&qla_tgt_mutex);

	return -ENODEV;
}
EXPORT_SYMBOL(qlt_lport_register);
/**
 * qla_tgt_lport_deregister - Deregister lport
 *
 * @vha: Registered scsi_qla_host pointer
 */
void qlt_lport_deregister(struct scsi_qla_host *vha)
{
	struct qla_hw_data *ha = vha->hw;
	struct Scsi_Host *sh = vha->host;
	/*
	 * Clear the target_lport_ptr qla_target_template pointer in qla_hw_data
	 */
	vha->vha_tgt.target_lport_ptr = NULL;
	ha->tgt.tgt_ops = NULL;
	/*
	 * Release the Scsi_Host reference for the underlying qla2xxx host
	 */
	scsi_host_put(sh);
}
EXPORT_SYMBOL(qlt_lport_deregister);
/* Must be called under HW lock */
void qlt_set_mode(struct scsi_qla_host *vha)
{
	struct qla_hw_data *ha = vha->hw;

	switch (ql2x_ini_mode) {
	case QLA2XXX_INI_MODE_DISABLED:
	case QLA2XXX_INI_MODE_EXCLUSIVE:
		vha->host->active_mode = MODE_TARGET;
		break;
	case QLA2XXX_INI_MODE_ENABLED:
		vha->host->active_mode |= MODE_TARGET;
		break;
	default:
		break;
	}

	if (ha->tgt.ini_mode_force_reverse)
		qla_reverse_ini_mode(vha);
}
/* Must be called under HW lock */
void qlt_clear_mode(struct scsi_qla_host *vha)
{
	struct qla_hw_data *ha = vha->hw;

	switch (ql2x_ini_mode) {
	case QLA2XXX_INI_MODE_DISABLED:
		vha->host->active_mode = MODE_UNKNOWN;
		break;
	case QLA2XXX_INI_MODE_EXCLUSIVE:
		vha->host->active_mode = MODE_INITIATOR;
		break;
	case QLA2XXX_INI_MODE_ENABLED:
		vha->host->active_mode &= ~MODE_TARGET;
		break;
	default:
		break;
	}

	if (ha->tgt.ini_mode_force_reverse)
		qla_reverse_ini_mode(vha);
}
/*
 * qla_tgt_enable_vha - NO LOCK HELD
 *
 * host_reset, bring up w/ Target Mode Enabled
 */
void
qlt_enable_vha(struct scsi_qla_host *vha)
{
	struct qla_hw_data *ha = vha->hw;
	struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
	unsigned long flags;
	scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);

	if (!tgt) {
		ql_dbg(ql_dbg_tgt, vha, 0xe069,
		    "Unable to locate qla_tgt pointer from"
		    " struct qla_hw_data\n");
		dump_stack();
		return;
	}

	spin_lock_irqsave(&ha->hardware_lock, flags);
	tgt->tgt_stopped = 0;
	qlt_set_mode(vha);
	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	if (vha->vp_idx) {
		qla24xx_disable_vp(vha);
		qla24xx_enable_vp(vha);
	} else {
		set_bit(ISP_ABORT_NEEDED, &base_vha->dpc_flags);
		qla2xxx_wake_dpc(base_vha);
		qla2x00_wait_for_hba_online(base_vha);
	}
}
EXPORT_SYMBOL(qlt_enable_vha);
/*
 * qla_tgt_disable_vha - NO LOCK HELD
 *
 * Disable Target Mode and reset the adapter
 */
void
qlt_disable_vha(struct scsi_qla_host *vha)
{
	struct qla_hw_data *ha = vha->hw;
	struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
	unsigned long flags;

	if (!tgt) {
		ql_dbg(ql_dbg_tgt, vha, 0xe06a,
		    "Unable to locate qla_tgt pointer from"
		    " struct qla_hw_data\n");
		dump_stack();
		return;
	}

	spin_lock_irqsave(&ha->hardware_lock, flags);
	qlt_clear_mode(vha);
	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
	qla2xxx_wake_dpc(vha);
	qla2x00_wait_for_hba_online(vha);
}
/*
 * Called from qla_init.c:qla24xx_vport_create() context to setup
 * the target mode specific struct scsi_qla_host and struct qla_hw_data
 * members
 */
void
qlt_vport_create(struct scsi_qla_host *vha, struct qla_hw_data *ha)
{
	if (!qla_tgt_mode_enabled(vha))
		return;

	vha->vha_tgt.qla_tgt = NULL;

	mutex_init(&vha->vha_tgt.tgt_mutex);
	mutex_init(&vha->vha_tgt.tgt_host_action_mutex);

	qlt_clear_mode(vha);

	/*
	 * NOTE: Currently the value is kept the same for <24xx and
	 * >=24xx ISPs. If it is necessary to change it,
	 * the check should be added for specific ISPs,
	 * assigning the value appropriately.
	 */
	ha->tgt.atio_q_length = ATIO_ENTRY_CNT_24XX;

	qlt_add_target(ha, vha);
}
void
qlt_rff_id(struct scsi_qla_host *vha, struct ct_sns_req *ct_req)
{
	/*
	 * FC-4 Feature bit 0 indicates target functionality to the name server.
	 */
	if (qla_tgt_mode_enabled(vha)) {
		if (qla_ini_mode_enabled(vha))
			ct_req->req.rff_id.fc4_feature = BIT_0 | BIT_1;
		else
			ct_req->req.rff_id.fc4_feature = BIT_0;
	} else if (qla_ini_mode_enabled(vha)) {
		ct_req->req.rff_id.fc4_feature = BIT_1;
	}
}
/*
 * qlt_init_atio_q_entries() - Initializes ATIO queue entries.
 * @vha: SCSI driver HA context
 *
 * Beginning of ATIO ring has initialization control block already built
 * by nvram config routine.
 */
void
qlt_init_atio_q_entries(struct scsi_qla_host *vha)
{
	struct qla_hw_data *ha = vha->hw;
	uint16_t cnt;
	struct atio_from_isp *pkt = (struct atio_from_isp *)ha->tgt.atio_ring;

	if (!qla_tgt_mode_enabled(vha))
		return;

	for (cnt = 0; cnt < ha->tgt.atio_q_length; cnt++) {
		pkt->u.raw.signature = ATIO_PROCESSED;
		pkt++;
	}
}
/*
 * qlt_24xx_process_atio_queue() - Process ATIO queue entries.
 * @vha: SCSI driver HA context
 */
void
qlt_24xx_process_atio_queue(struct scsi_qla_host *vha)
{
	struct qla_hw_data *ha = vha->hw;
	struct atio_from_isp *pkt;
	int cnt, i;

	if (!vha->flags.online)
		return;

	while (ha->tgt.atio_ring_ptr->signature != ATIO_PROCESSED) {
		pkt = (struct atio_from_isp *)ha->tgt.atio_ring_ptr;
		cnt = pkt->u.raw.entry_count;

		qlt_24xx_atio_pkt_all_vps(vha, (struct atio_from_isp *)pkt);
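
		/*
		 * Each ATIO may span several ring entries (entry_count):
		 * advance the ring pointer/index once per entry, wrapping at
		 * atio_q_length, and mark every consumed slot processed.
		 */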
		for (i = 0; i < cnt; i++) {
			ha->tgt.atio_ring_index++;
			if (ha->tgt.atio_ring_index == ha->tgt.atio_q_length) {
				ha->tgt.atio_ring_index = 0;
				ha->tgt.atio_ring_ptr = ha->tgt.atio_ring;
			} else
				ha->tgt.atio_ring_ptr++;

			pkt->u.raw.signature = ATIO_PROCESSED;
			pkt = (struct atio_from_isp *)ha->tgt.atio_ring_ptr;
		}
		wmb();
	}

	/* Adjust ring index */
	WRT_REG_DWORD(ISP_ATIO_Q_OUT(vha), ha->tgt.atio_ring_index);
}
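
/*
 * qlt_24xx_config_rings() resets the ATIO queue in/out pointers during chip
 * initialization and, on MSI-X capable adapters, tells the firmware which
 * vector services the ATIO queue.
 */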
void
qlt_24xx_config_rings(struct scsi_qla_host *vha)
{
	struct qla_hw_data *ha = vha->hw;
	if (!QLA_TGT_MODE_ENABLED())
		return;

	WRT_REG_DWORD(ISP_ATIO_Q_IN(vha), 0);
	WRT_REG_DWORD(ISP_ATIO_Q_OUT(vha), 0);
	RD_REG_DWORD(ISP_ATIO_Q_OUT(vha));

	if (IS_ATIO_MSIX_CAPABLE(ha)) {
		struct qla_msix_entry *msix = &ha->msix_entries[2];
		struct init_cb_24xx *icb = (struct init_cb_24xx *)ha->init_cb;

		icb->msix_atio = cpu_to_le16(msix->entry);
		ql_dbg(ql_dbg_init, vha, 0xf072,
		    "Registering ICB vector 0x%x for atio que.\n",
		    msix->entry);
	}
}
void
qlt_24xx_config_nvram_stage1(struct scsi_qla_host *vha, struct nvram_24xx *nv)
{
	struct qla_hw_data *ha = vha->hw;

	if (qla_tgt_mode_enabled(vha)) {
		if (!ha->tgt.saved_set) {
			/* We save only once */
			ha->tgt.saved_exchange_count = nv->exchange_count;
			ha->tgt.saved_firmware_options_1 =
			    nv->firmware_options_1;
			ha->tgt.saved_firmware_options_2 =
			    nv->firmware_options_2;
			ha->tgt.saved_firmware_options_3 =
			    nv->firmware_options_3;
			ha->tgt.saved_set = 1;
		}

		nv->exchange_count = __constant_cpu_to_le16(0xFFFF);

		/* Enable target mode */
		nv->firmware_options_1 |= __constant_cpu_to_le32(BIT_4);

		/* Disable ini mode, if requested */
		if (!qla_ini_mode_enabled(vha))
			nv->firmware_options_1 |= __constant_cpu_to_le32(BIT_5);

		/* Disable Full Login after LIP */
		nv->firmware_options_1 &= __constant_cpu_to_le32(~BIT_13);
		/* Enable initial LIP */
		nv->firmware_options_1 &= __constant_cpu_to_le32(~BIT_9);
		/* Enable FC tapes support */
		nv->firmware_options_2 |= __constant_cpu_to_le32(BIT_12);
		/* Disable Full Login after LIP */
		nv->host_p &= __constant_cpu_to_le32(~BIT_10);
		/* Enable target PRLI control */
		nv->firmware_options_2 |= __constant_cpu_to_le32(BIT_14);
	} else {
		if (ha->tgt.saved_set) {
			nv->exchange_count = ha->tgt.saved_exchange_count;
			nv->firmware_options_1 =
			    ha->tgt.saved_firmware_options_1;
			nv->firmware_options_2 =
			    ha->tgt.saved_firmware_options_2;
			nv->firmware_options_3 =
			    ha->tgt.saved_firmware_options_3;
		}
		return;
	}

	/* out-of-order frames reassembly */
	nv->firmware_options_3 |= BIT_6|BIT_9;

	if (ha->tgt.enable_class_2) {
		if (vha->flags.init_done)
			fc_host_supported_classes(vha->host) =
			    FC_COS_CLASS2 | FC_COS_CLASS3;

		nv->firmware_options_2 |= __constant_cpu_to_le32(BIT_8);
	} else {
		if (vha->flags.init_done)
			fc_host_supported_classes(vha->host) = FC_COS_CLASS3;

		nv->firmware_options_2 &= ~__constant_cpu_to_le32(BIT_8);
	}
}
void
qlt_24xx_config_nvram_stage2(struct scsi_qla_host *vha,
	struct init_cb_24xx *icb)
{
	struct qla_hw_data *ha = vha->hw;

	if (ha->tgt.node_name_set) {
		memcpy(icb->node_name, ha->tgt.tgt_node_name, WWN_SIZE);
		icb->firmware_options_1 |= __constant_cpu_to_le32(BIT_14);
	}
}
void
qlt_81xx_config_nvram_stage1(struct scsi_qla_host *vha, struct nvram_81xx *nv)
{
	struct qla_hw_data *ha = vha->hw;

	if (!QLA_TGT_MODE_ENABLED())
		return;

	if (qla_tgt_mode_enabled(vha)) {
		if (!ha->tgt.saved_set) {
			/* We save only once */
			ha->tgt.saved_exchange_count = nv->exchange_count;
			ha->tgt.saved_firmware_options_1 =
			    nv->firmware_options_1;
			ha->tgt.saved_firmware_options_2 =
			    nv->firmware_options_2;
			ha->tgt.saved_firmware_options_3 =
			    nv->firmware_options_3;
			ha->tgt.saved_set = 1;
		}

		nv->exchange_count = __constant_cpu_to_le16(0xFFFF);

		/* Enable target mode */
		nv->firmware_options_1 |= __constant_cpu_to_le32(BIT_4);

		/* Disable ini mode, if requested */
		if (!qla_ini_mode_enabled(vha))
			nv->firmware_options_1 |=
			    __constant_cpu_to_le32(BIT_5);

		/* Disable Full Login after LIP */
		nv->firmware_options_1 &= __constant_cpu_to_le32(~BIT_13);
		/* Enable initial LIP */
		nv->firmware_options_1 &= __constant_cpu_to_le32(~BIT_9);
		/* Enable FC tapes support */
		nv->firmware_options_2 |= __constant_cpu_to_le32(BIT_12);
		/* Disable Full Login after LIP */
		nv->host_p &= __constant_cpu_to_le32(~BIT_10);
		/* Enable target PRLI control */
		nv->firmware_options_2 |= __constant_cpu_to_le32(BIT_14);
	} else {
		if (ha->tgt.saved_set) {
			nv->exchange_count = ha->tgt.saved_exchange_count;
			nv->firmware_options_1 =
			    ha->tgt.saved_firmware_options_1;
			nv->firmware_options_2 =
			    ha->tgt.saved_firmware_options_2;
			nv->firmware_options_3 =
			    ha->tgt.saved_firmware_options_3;
		}
		return;
	}

	/* out-of-order frames reassembly */
	nv->firmware_options_3 |= BIT_6|BIT_9;

	if (ha->tgt.enable_class_2) {
		if (vha->flags.init_done)
			fc_host_supported_classes(vha->host) =
			    FC_COS_CLASS2 | FC_COS_CLASS3;

		nv->firmware_options_2 |= __constant_cpu_to_le32(BIT_8);
	} else {
		if (vha->flags.init_done)
			fc_host_supported_classes(vha->host) = FC_COS_CLASS3;

		nv->firmware_options_2 &= ~__constant_cpu_to_le32(BIT_8);
	}
}
void
qlt_81xx_config_nvram_stage2(struct scsi_qla_host *vha,
	struct init_cb_81xx *icb)
{
	struct qla_hw_data *ha = vha->hw;

	if (!QLA_TGT_MODE_ENABLED())
		return;

	if (ha->tgt.node_name_set) {
		memcpy(icb->node_name, ha->tgt.tgt_node_name, WWN_SIZE);
		icb->firmware_options_1 |= __constant_cpu_to_le32(BIT_14);
	}
}
void
qlt_83xx_iospace_config(struct qla_hw_data *ha)
{
	if (!QLA_TGT_MODE_ENABLED())
		return;

	ha->msix_count += 1; /* For ATIO Q */
}
int
qlt_24xx_process_response_error(struct scsi_qla_host *vha,
	struct sts_entry_24xx *pkt)
{
	switch (pkt->entry_type) {
	case ABTS_RECV_24XX:
	case ABTS_RESP_24XX:
	case CTIO_TYPE7:
	case NOTIFY_ACK_TYPE:
		return 1;
	default:
		return 0;
	}
}
void
qlt_modify_vp_config(struct scsi_qla_host *vha,
	struct vp_config_entry_24xx *vpmod)
{
	if (qla_tgt_mode_enabled(vha))
		vpmod->options_idx1 &= ~BIT_5;
	/* Disable ini mode, if requested */
	if (!qla_ini_mode_enabled(vha))
		vpmod->options_idx1 &= ~BIT_4;
}
void
qlt_probe_one_stage1(struct scsi_qla_host *base_vha, struct qla_hw_data *ha)
{
	if (!QLA_TGT_MODE_ENABLED())
		return;

	if (ha->mqenable || IS_QLA83XX(ha)) {
		ISP_ATIO_Q_IN(base_vha) = &ha->mqiobase->isp25mq.atio_q_in;
		ISP_ATIO_Q_OUT(base_vha) = &ha->mqiobase->isp25mq.atio_q_out;
	} else {
		ISP_ATIO_Q_IN(base_vha) = &ha->iobase->isp24.atio_q_in;
		ISP_ATIO_Q_OUT(base_vha) = &ha->iobase->isp24.atio_q_out;
	}

	mutex_init(&base_vha->vha_tgt.tgt_mutex);
	mutex_init(&base_vha->vha_tgt.tgt_host_action_mutex);
	qlt_clear_mode(base_vha);
}
irqreturn_t
qla83xx_msix_atio_q(int irq, void *dev_id)
{
	struct rsp_que *rsp;
	scsi_qla_host_t	*vha;
	struct qla_hw_data *ha;
	unsigned long flags;

	rsp = (struct rsp_que *) dev_id;
	ha = rsp->hw;
	vha = pci_get_drvdata(ha->pdev);

	spin_lock_irqsave(&ha->hardware_lock, flags);

	qlt_24xx_process_atio_queue(vha);
	qla24xx_process_response_queue(vha, rsp);

	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	return IRQ_HANDLED;
}
int
qlt_mem_alloc(struct qla_hw_data *ha)
{
	if (!QLA_TGT_MODE_ENABLED())
		return 0;

	ha->tgt.tgt_vp_map = kzalloc(sizeof(struct qla_tgt_vp_map) *
	    MAX_MULTI_ID_FABRIC, GFP_KERNEL);
	if (!ha->tgt.tgt_vp_map)
		return -ENOMEM;

	ha->tgt.atio_ring = dma_alloc_coherent(&ha->pdev->dev,
	    (ha->tgt.atio_q_length + 1) * sizeof(struct atio_from_isp),
	    &ha->tgt.atio_dma, GFP_KERNEL);
	if (!ha->tgt.atio_ring) {
		kfree(ha->tgt.tgt_vp_map);
		return -ENOMEM;
	}
	return 0;
}
void
qlt_mem_free(struct qla_hw_data *ha)
{
	if (!QLA_TGT_MODE_ENABLED())
		return;

	if (ha->tgt.atio_ring) {
		dma_free_coherent(&ha->pdev->dev, (ha->tgt.atio_q_length + 1) *
		    sizeof(struct atio_from_isp), ha->tgt.atio_ring,
		    ha->tgt.atio_dma);
	}
	kfree(ha->tgt.tgt_vp_map);
}
/* vport_slock to be held by the caller */
void
qlt_update_vp_map(struct scsi_qla_host *vha, int cmd)
{
	if (!QLA_TGT_MODE_ENABLED())
		return;

	switch (cmd) {
	case SET_VP_IDX:
		vha->hw->tgt.tgt_vp_map[vha->vp_idx].vha = vha;
		break;
	case SET_AL_PA:
		vha->hw->tgt.tgt_vp_map[vha->d_id.b.al_pa].idx = vha->vp_idx;
		break;
	case RESET_VP_IDX:
		vha->hw->tgt.tgt_vp_map[vha->vp_idx].vha = NULL;
		break;
	case RESET_AL_PA:
		vha->hw->tgt.tgt_vp_map[vha->d_id.b.al_pa].idx = 0;
		break;
	}
}
static int __init qlt_parse_ini_mode(void)
{
	if (strcasecmp(qlini_mode, QLA2XXX_INI_MODE_STR_EXCLUSIVE) == 0)
		ql2x_ini_mode = QLA2XXX_INI_MODE_EXCLUSIVE;
	else if (strcasecmp(qlini_mode, QLA2XXX_INI_MODE_STR_DISABLED) == 0)
		ql2x_ini_mode = QLA2XXX_INI_MODE_DISABLED;
	else if (strcasecmp(qlini_mode, QLA2XXX_INI_MODE_STR_ENABLED) == 0)
		ql2x_ini_mode = QLA2XXX_INI_MODE_ENABLED;
	else
		return false;

	return true;
}
int __init qlt_init(void)
{
	int ret;

	if (!qlt_parse_ini_mode()) {
		ql_log(ql_log_fatal, NULL, 0xe06b,
		    "qlt_parse_ini_mode() failed\n");
		return -EINVAL;
	}

	if (!QLA_TGT_MODE_ENABLED())
		return 0;

	qla_tgt_mgmt_cmd_cachep = kmem_cache_create("qla_tgt_mgmt_cmd_cachep",
	    sizeof(struct qla_tgt_mgmt_cmd), __alignof__(struct
	    qla_tgt_mgmt_cmd), 0, NULL);
	if (!qla_tgt_mgmt_cmd_cachep) {
		ql_log(ql_log_fatal, NULL, 0xe06d,
		    "kmem_cache_create for qla_tgt_mgmt_cmd_cachep failed\n");
		return -ENOMEM;
	}

	qla_tgt_mgmt_cmd_mempool = mempool_create(25, mempool_alloc_slab,
	    mempool_free_slab, qla_tgt_mgmt_cmd_cachep);
	if (!qla_tgt_mgmt_cmd_mempool) {
		ql_log(ql_log_fatal, NULL, 0xe06e,
		    "mempool_create for qla_tgt_mgmt_cmd_mempool failed\n");
		ret = -ENOMEM;
		goto out_mgmt_cmd_cachep;
	}

	qla_tgt_wq = alloc_workqueue("qla_tgt_wq", 0, 0);
	if (!qla_tgt_wq) {
		ql_log(ql_log_fatal, NULL, 0xe06f,
		    "alloc_workqueue for qla_tgt_wq failed\n");
		ret = -ENOMEM;
		goto out_cmd_mempool;
	}
	/*
	 * Return 1 to signal that initiator-mode is being disabled
	 */
	return (ql2x_ini_mode == QLA2XXX_INI_MODE_DISABLED) ? 1 : 0;

out_cmd_mempool:
	mempool_destroy(qla_tgt_mgmt_cmd_mempool);
out_mgmt_cmd_cachep:
	kmem_cache_destroy(qla_tgt_mgmt_cmd_cachep);
	return ret;
}
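
/*
 * Module unload path: tear down the workqueue, mempool and slab cache
 * created by qlt_init() (only when target mode was compiled in/enabled).
 */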
void qlt_exit(void)
{
	if (!QLA_TGT_MODE_ENABLED())
		return;

	destroy_workqueue(qla_tgt_wq);
	mempool_destroy(qla_tgt_mgmt_cmd_mempool);
	kmem_cache_destroy(qla_tgt_mgmt_cmd_cachep);
}