drivers/scsi/qla2xxx/qla_target.c (mirror_ubuntu-hirsute-kernel, at commit "scsi: qla2xxx: Drop superfluous INIT_WORK of del_work")
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3 * qla_target.c SCSI LLD infrastructure for QLogic 22xx/23xx/24xx/25xx
4 *
5 * based on qla2x00t.c code:
6 *
7 * Copyright (C) 2004 - 2010 Vladislav Bolkhovitin <vst@vlnb.net>
8 * Copyright (C) 2004 - 2005 Leonid Stoljar
9 * Copyright (C) 2006 Nathaniel Clark <nate@misrule.us>
10 * Copyright (C) 2006 - 2010 ID7 Ltd.
11 *
12 * Forward port and refactoring to modern qla2xxx and target/configfs
13 *
14 * Copyright (C) 2010-2013 Nicholas A. Bellinger <nab@kernel.org>
15 */
16
17 #include <linux/module.h>
18 #include <linux/init.h>
19 #include <linux/types.h>
20 #include <linux/blkdev.h>
21 #include <linux/interrupt.h>
22 #include <linux/pci.h>
23 #include <linux/delay.h>
24 #include <linux/list.h>
25 #include <linux/workqueue.h>
26 #include <asm/unaligned.h>
27 #include <scsi/scsi.h>
28 #include <scsi/scsi_host.h>
29 #include <scsi/scsi_tcq.h>
30 #include <target/target_core_base.h>
31 #include <target/target_core_fabric.h>
32
33 #include "qla_def.h"
34 #include "qla_target.h"
35
36 static int ql2xtgt_tape_enable;
37 module_param(ql2xtgt_tape_enable, int, S_IRUGO|S_IWUSR);
38 MODULE_PARM_DESC(ql2xtgt_tape_enable,
39 "Enables Sequence level error recovery (aka FC Tape). Default is 0 - no SLER. 1 - Enable SLER.");
40
41 static char *qlini_mode = QLA2XXX_INI_MODE_STR_ENABLED;
42 module_param(qlini_mode, charp, S_IRUGO);
43 MODULE_PARM_DESC(qlini_mode,
44 "Determines when initiator mode will be enabled. Possible values: "
45 "\"exclusive\" - initiator mode will be enabled on load, "
46 "disabled on enabling target mode, and enabled again on "
47 "disabling target mode; "
48 "\"disabled\" - initiator mode will never be enabled; "
49 "\"dual\" - initiator mode will be enabled, and target mode can be "
50 "activated when ready; "
51 "\"enabled\" (default) - initiator mode will always stay enabled.");
52
53 static int ql_dm_tgt_ex_pct = 0;
54 module_param(ql_dm_tgt_ex_pct, int, S_IRUGO|S_IWUSR);
55 MODULE_PARM_DESC(ql_dm_tgt_ex_pct,
56 "For Dual Mode (qlini_mode=dual), this parameter determines "
57 "the percentage of exchanges/cmds FW will allocate resources "
58 "for Target mode.");
59
60 int ql2xuctrlirq = 1;
61 module_param(ql2xuctrlirq, int, 0644);
62 MODULE_PARM_DESC(ql2xuctrlirq,
63 "Allow user to control IRQ placement via smp_affinity. "
64 "Valid with qlini_mode=disabled. "
65 "1 (default): enable");
66
67 int ql2x_ini_mode = QLA2XXX_INI_MODE_EXCLUSIVE;
68
69 static int qla_sam_status = SAM_STAT_BUSY;
70 static int tc_sam_status = SAM_STAT_TASK_SET_FULL; /* target core */
71
72 /*
73 * From scsi/fc/fc_fcp.h
74 */
75 enum fcp_resp_rsp_codes {
76 FCP_TMF_CMPL = 0,
77 FCP_DATA_LEN_INVALID = 1,
78 FCP_CMND_FIELDS_INVALID = 2,
79 FCP_DATA_PARAM_MISMATCH = 3,
80 FCP_TMF_REJECTED = 4,
81 FCP_TMF_FAILED = 5,
82 FCP_TMF_INVALID_LUN = 9,
83 };
84
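/*
 * Editor's note (not part of the original source): these codes end up in
 * mcmd->fc_tm_rsp and select between a BA_ACC (FCP_TMF_CMPL) and a BA_RJT
 * payload in qlt_build_abts_resp_iocb() and qlt_24xx_send_abts_resp() below.
 */
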
85 /*
86 * fc_pri_ta from scsi/fc/fc_fcp.h
87 */
88 #define FCP_PTA_SIMPLE 0 /* simple task attribute */
89 #define FCP_PTA_HEADQ 1 /* head of queue task attribute */
90 #define FCP_PTA_ORDERED 2 /* ordered task attribute */
91 #define FCP_PTA_ACA 4 /* auto. contingent allegiance */
92 #define FCP_PTA_MASK 7 /* mask for task attribute field */
93 #define FCP_PRI_SHIFT 3 /* priority field starts in bit 3 */
94 #define FCP_PRI_RESVD_MASK 0x80 /* reserved bits in priority field */
95
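/*
 * Illustrative sketch (editor's addition, not in the original source): a
 * consumer of these defines masks the task-attribute byte and maps it to
 * the SAM task attribute tags from target_core_base.h, roughly:
 *
 *     switch (task_attr & FCP_PTA_MASK) {
 *     case FCP_PTA_SIMPLE:  return TCM_SIMPLE_TAG;
 *     case FCP_PTA_HEADQ:   return TCM_HEAD_TAG;
 *     case FCP_PTA_ORDERED: return TCM_ORDERED_TAG;
 *     case FCP_PTA_ACA:     return TCM_ACA_TAG;
 *     }
 */
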
96 /*
97 * This driver calls qla2x00_alloc_iocbs() and qla2x00_issue_marker(), which
98 * must be called under HW lock and could unlock/lock it inside.
99 * It isn't an issue, since in the current implementation, at the time when
100 * those functions are called:
101 *
102 * - Either the context is IRQ and only the IRQ handler can modify HW data,
103 * including rings related fields,
104 *
105 * - Or access to target mode variables from struct qla_tgt doesn't
106 * cross those functions' boundaries, except tgt_stop, which is
107 * additionally protected by irq_cmd_count.
108 */
109 /* Predefs for callbacks handed to qla2xxx LLD */
110 static void qlt_24xx_atio_pkt(struct scsi_qla_host *ha,
111 struct atio_from_isp *pkt, uint8_t);
112 static void qlt_response_pkt(struct scsi_qla_host *ha, struct rsp_que *rsp,
113 response_t *pkt);
114 static int qlt_issue_task_mgmt(struct fc_port *sess, u64 lun,
115 int fn, void *iocb, int flags);
116 static void qlt_send_term_exchange(struct qla_qpair *, struct qla_tgt_cmd
117 *cmd, struct atio_from_isp *atio, int ha_locked, int ul_abort);
118 static void qlt_alloc_qfull_cmd(struct scsi_qla_host *vha,
119 struct atio_from_isp *atio, uint16_t status, int qfull);
120 static void qlt_disable_vha(struct scsi_qla_host *vha);
121 static void qlt_clear_tgt_db(struct qla_tgt *tgt);
122 static void qlt_send_notify_ack(struct qla_qpair *qpair,
123 struct imm_ntfy_from_isp *ntfy,
124 uint32_t add_flags, uint16_t resp_code, int resp_code_valid,
125 uint16_t srr_flags, uint16_t srr_reject_code, uint8_t srr_explan);
126 static void qlt_send_term_imm_notif(struct scsi_qla_host *vha,
127 struct imm_ntfy_from_isp *imm, int ha_locked);
128 static struct fc_port *qlt_create_sess(struct scsi_qla_host *vha,
129 fc_port_t *fcport, bool local);
130 void qlt_unreg_sess(struct fc_port *sess);
131 static void qlt_24xx_handle_abts(struct scsi_qla_host *,
132 struct abts_recv_from_24xx *);
133 static void qlt_send_busy(struct qla_qpair *, struct atio_from_isp *,
134 uint16_t);
135 static int qlt_check_reserve_free_req(struct qla_qpair *qpair, uint32_t);
136 static inline uint32_t qlt_make_handle(struct qla_qpair *);
137
138 /*
139 * Global Variables
140 */
141 static struct kmem_cache *qla_tgt_mgmt_cmd_cachep;
142 struct kmem_cache *qla_tgt_plogi_cachep;
143 static mempool_t *qla_tgt_mgmt_cmd_mempool;
144 static struct workqueue_struct *qla_tgt_wq;
145 static DEFINE_MUTEX(qla_tgt_mutex);
146 static LIST_HEAD(qla_tgt_glist);
147
148 static const char *prot_op_str(u32 prot_op)
149 {
150 switch (prot_op) {
151 case TARGET_PROT_NORMAL: return "NORMAL";
152 case TARGET_PROT_DIN_INSERT: return "DIN_INSERT";
153 case TARGET_PROT_DOUT_INSERT: return "DOUT_INSERT";
154 case TARGET_PROT_DIN_STRIP: return "DIN_STRIP";
155 case TARGET_PROT_DOUT_STRIP: return "DOUT_STRIP";
156 case TARGET_PROT_DIN_PASS: return "DIN_PASS";
157 case TARGET_PROT_DOUT_PASS: return "DOUT_PASS";
158 default: return "UNKNOWN";
159 }
160 }
161
162 /* This API intentionally takes dest as a parameter, rather than returning
163 * an int value, to avoid the caller forgetting to issue wmb() after the store */
164 void qlt_do_generation_tick(struct scsi_qla_host *vha, int *dest)
165 {
166 scsi_qla_host_t *base_vha = pci_get_drvdata(vha->hw->pdev);
167 *dest = atomic_inc_return(&base_vha->generation_tick);
168 /* memory barrier */
169 wmb();
170 }
171
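/*
 * Illustrative usage (editor's sketch, not in the original source): callers
 * snapshot the generation at an event of interest and later reject stale
 * requests by comparing against it, as qlt_create_sess() and
 * qlt_fc_port_deleted() do below:
 *
 *     qlt_do_generation_tick(vha, &sess->generation);
 *     ...
 *     if (max_gen - sess->generation < 0)
 *             return;
 *
 * where a negative difference marks the deletion request as stale.
 */
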
172 /* Might release hw lock, then reacquire!! */
173 static inline int qlt_issue_marker(struct scsi_qla_host *vha, int vha_locked)
174 {
175 /* Send marker if required */
176 if (unlikely(vha->marker_needed != 0)) {
177 int rc = qla2x00_issue_marker(vha, vha_locked);
178
179 if (rc != QLA_SUCCESS) {
180 ql_dbg(ql_dbg_tgt, vha, 0xe03d,
181 "qla_target(%d): issue_marker() failed\n",
182 vha->vp_idx);
183 }
184 return rc;
185 }
186 return QLA_SUCCESS;
187 }
188
189 static inline
190 struct scsi_qla_host *qlt_find_host_by_d_id(struct scsi_qla_host *vha,
191 be_id_t d_id)
192 {
193 struct scsi_qla_host *host;
194 uint32_t key;
195
196 if (vha->d_id.b.area == d_id.area &&
197 vha->d_id.b.domain == d_id.domain &&
198 vha->d_id.b.al_pa == d_id.al_pa)
199 return vha;
200
201 key = be_to_port_id(d_id).b24;
202
203 host = btree_lookup32(&vha->hw->tgt.host_map, key);
204 if (!host)
205 ql_dbg(ql_dbg_tgt_mgt + ql_dbg_verbose, vha, 0xf005,
206 "Unable to find host %06x\n", key);
207
208 return host;
209 }
210
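/*
 * Editor's note (an assumption, not in the original source): the 32-bit
 * btree key is the 24-bit FC port ID; be_to_port_id(d_id).b24 packs
 * domain/area/al_pa into the low 24 bits, so for d_id 01:02:03 the lookup
 * is effectively:
 *
 *     key = (0x01 << 16) | (0x02 << 8) | 0x03;
 *     host = btree_lookup32(&vha->hw->tgt.host_map, key);
 */
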
211 static inline
212 struct scsi_qla_host *qlt_find_host_by_vp_idx(struct scsi_qla_host *vha,
213 uint16_t vp_idx)
214 {
215 struct qla_hw_data *ha = vha->hw;
216
217 if (vha->vp_idx == vp_idx)
218 return vha;
219
220 BUG_ON(ha->tgt.tgt_vp_map == NULL);
221 if (likely(test_bit(vp_idx, ha->vp_idx_map)))
222 return ha->tgt.tgt_vp_map[vp_idx].vha;
223
224 return NULL;
225 }
226
227 static inline void qlt_incr_num_pend_cmds(struct scsi_qla_host *vha)
228 {
229 unsigned long flags;
230
231 spin_lock_irqsave(&vha->hw->tgt.q_full_lock, flags);
232
233 vha->hw->tgt.num_pend_cmds++;
234 if (vha->hw->tgt.num_pend_cmds > vha->qla_stats.stat_max_pend_cmds)
235 vha->qla_stats.stat_max_pend_cmds =
236 vha->hw->tgt.num_pend_cmds;
237 spin_unlock_irqrestore(&vha->hw->tgt.q_full_lock, flags);
238 }
239 static inline void qlt_decr_num_pend_cmds(struct scsi_qla_host *vha)
240 {
241 unsigned long flags;
242
243 spin_lock_irqsave(&vha->hw->tgt.q_full_lock, flags);
244 vha->hw->tgt.num_pend_cmds--;
245 spin_unlock_irqrestore(&vha->hw->tgt.q_full_lock, flags);
246 }
247
248
249 static void qlt_queue_unknown_atio(scsi_qla_host_t *vha,
250 struct atio_from_isp *atio, uint8_t ha_locked)
251 {
252 struct qla_tgt_sess_op *u;
253 struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
254 unsigned long flags;
255
256 if (tgt->tgt_stop) {
257 ql_dbg(ql_dbg_async, vha, 0x502c,
258 "qla_target(%d): dropping unknown ATIO_TYPE7, because tgt is being stopped",
259 vha->vp_idx);
260 goto out_term;
261 }
262
263 u = kzalloc(sizeof(*u), GFP_ATOMIC);
264 if (u == NULL)
265 goto out_term;
266
267 u->vha = vha;
268 memcpy(&u->atio, atio, sizeof(*atio));
269 INIT_LIST_HEAD(&u->cmd_list);
270
271 spin_lock_irqsave(&vha->cmd_list_lock, flags);
272 list_add_tail(&u->cmd_list, &vha->unknown_atio_list);
273 spin_unlock_irqrestore(&vha->cmd_list_lock, flags);
274
275 schedule_delayed_work(&vha->unknown_atio_work, 1);
276
277 out:
278 return;
279
280 out_term:
281 qlt_send_term_exchange(vha->hw->base_qpair, NULL, atio, ha_locked, 0);
282 goto out;
283 }
284
285 static void qlt_try_to_dequeue_unknown_atios(struct scsi_qla_host *vha,
286 uint8_t ha_locked)
287 {
288 struct qla_tgt_sess_op *u, *t;
289 scsi_qla_host_t *host;
290 struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
291 unsigned long flags;
292 uint8_t queued = 0;
293
294 list_for_each_entry_safe(u, t, &vha->unknown_atio_list, cmd_list) {
295 if (u->aborted) {
296 ql_dbg(ql_dbg_async, vha, 0x502e,
297 "Freeing unknown %s %p, because of Abort\n",
298 "ATIO_TYPE7", u);
299 qlt_send_term_exchange(vha->hw->base_qpair, NULL,
300 &u->atio, ha_locked, 0);
301 goto abort;
302 }
303
304 host = qlt_find_host_by_d_id(vha, u->atio.u.isp24.fcp_hdr.d_id);
305 if (host != NULL) {
306 ql_dbg(ql_dbg_async + ql_dbg_verbose, vha, 0x502f,
307 "Requeuing unknown ATIO_TYPE7 %p\n", u);
308 qlt_24xx_atio_pkt(host, &u->atio, ha_locked);
309 } else if (tgt->tgt_stop) {
310 ql_dbg(ql_dbg_async + ql_dbg_verbose, vha, 0x503a,
311 "Freeing unknown %s %p, because tgt is being stopped\n",
312 "ATIO_TYPE7", u);
313 qlt_send_term_exchange(vha->hw->base_qpair, NULL,
314 &u->atio, ha_locked, 0);
315 } else {
316 ql_dbg(ql_dbg_async + ql_dbg_verbose, vha, 0x503d,
317 "Reschedule u %p, vha %p, host %p\n", u, vha, host);
318 if (!queued) {
319 queued = 1;
320 schedule_delayed_work(&vha->unknown_atio_work,
321 1);
322 }
323 continue;
324 }
325
326 abort:
327 spin_lock_irqsave(&vha->cmd_list_lock, flags);
328 list_del(&u->cmd_list);
329 spin_unlock_irqrestore(&vha->cmd_list_lock, flags);
330 kfree(u);
331 }
332 }
333
334 void qlt_unknown_atio_work_fn(struct work_struct *work)
335 {
336 struct scsi_qla_host *vha = container_of(to_delayed_work(work),
337 struct scsi_qla_host, unknown_atio_work);
338
339 qlt_try_to_dequeue_unknown_atios(vha, 0);
340 }
341
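/*
 * Editor's note (an assumption; the init lives outside this file):
 * vha->unknown_atio_work is a delayed_work set up at host creation time,
 * e.g.:
 *
 *     INIT_DELAYED_WORK(&vha->unknown_atio_work, qlt_unknown_atio_work_fn);
 *
 * so the schedule_delayed_work(&vha->unknown_atio_work, 1) calls above
 * retry qlt_try_to_dequeue_unknown_atios() one jiffy later.
 */
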
342 static bool qlt_24xx_atio_pkt_all_vps(struct scsi_qla_host *vha,
343 struct atio_from_isp *atio, uint8_t ha_locked)
344 {
345 ql_dbg(ql_dbg_tgt, vha, 0xe072,
346 "%s: qla_target(%d): type %x ox_id %04x\n",
347 __func__, vha->vp_idx, atio->u.raw.entry_type,
348 be16_to_cpu(atio->u.isp24.fcp_hdr.ox_id));
349
350 switch (atio->u.raw.entry_type) {
351 case ATIO_TYPE7:
352 {
353 struct scsi_qla_host *host = qlt_find_host_by_d_id(vha,
354 atio->u.isp24.fcp_hdr.d_id);
355 if (unlikely(NULL == host)) {
356 ql_dbg(ql_dbg_tgt, vha, 0xe03e,
357 "qla_target(%d): Received ATIO_TYPE7 "
358 "with unknown d_id %x:%x:%x\n", vha->vp_idx,
359 atio->u.isp24.fcp_hdr.d_id.domain,
360 atio->u.isp24.fcp_hdr.d_id.area,
361 atio->u.isp24.fcp_hdr.d_id.al_pa);
362
363
364 qlt_queue_unknown_atio(vha, atio, ha_locked);
365 break;
366 }
367 if (unlikely(!list_empty(&vha->unknown_atio_list)))
368 qlt_try_to_dequeue_unknown_atios(vha, ha_locked);
369
370 qlt_24xx_atio_pkt(host, atio, ha_locked);
371 break;
372 }
373
374 case IMMED_NOTIFY_TYPE:
375 {
376 struct scsi_qla_host *host = vha;
377 struct imm_ntfy_from_isp *entry =
378 (struct imm_ntfy_from_isp *)atio;
379
380 qlt_issue_marker(vha, ha_locked);
381
382 if ((entry->u.isp24.vp_index != 0xFF) &&
383 (entry->u.isp24.nport_handle != 0xFFFF)) {
384 host = qlt_find_host_by_vp_idx(vha,
385 entry->u.isp24.vp_index);
386 if (unlikely(!host)) {
387 ql_dbg(ql_dbg_tgt, vha, 0xe03f,
388 "qla_target(%d): Received "
389 "ATIO (IMMED_NOTIFY_TYPE) "
390 "with unknown vp_index %d\n",
391 vha->vp_idx, entry->u.isp24.vp_index);
392 break;
393 }
394 }
395 qlt_24xx_atio_pkt(host, atio, ha_locked);
396 break;
397 }
398
399 case VP_RPT_ID_IOCB_TYPE:
400 qla24xx_report_id_acquisition(vha,
401 (struct vp_rpt_id_entry_24xx *)atio);
402 break;
403
404 case ABTS_RECV_24XX:
405 {
406 struct abts_recv_from_24xx *entry =
407 (struct abts_recv_from_24xx *)atio;
408 struct scsi_qla_host *host = qlt_find_host_by_vp_idx(vha,
409 entry->vp_index);
410 unsigned long flags;
411
412 if (unlikely(!host)) {
413 ql_dbg(ql_dbg_tgt, vha, 0xe00a,
414 "qla_target(%d): Response pkt (ABTS_RECV_24XX) "
415 "received, with unknown vp_index %d\n",
416 vha->vp_idx, entry->vp_index);
417 break;
418 }
419 if (!ha_locked)
420 spin_lock_irqsave(&host->hw->hardware_lock, flags);
421 qlt_24xx_handle_abts(host, (struct abts_recv_from_24xx *)atio);
422 if (!ha_locked)
423 spin_unlock_irqrestore(&host->hw->hardware_lock, flags);
424 break;
425 }
426
427 /* case PUREX_IOCB_TYPE: ql2xmvasynctoatio */
428
429 default:
430 ql_dbg(ql_dbg_tgt, vha, 0xe040,
431 "qla_target(%d): Received unknown ATIO atio "
432 "type %x\n", vha->vp_idx, atio->u.raw.entry_type);
433 break;
434 }
435
436 return false;
437 }
438
439 void qlt_response_pkt_all_vps(struct scsi_qla_host *vha,
440 struct rsp_que *rsp, response_t *pkt)
441 {
442 switch (pkt->entry_type) {
443 case CTIO_CRC2:
444 ql_dbg(ql_dbg_tgt, vha, 0xe073,
445 "qla_target(%d):%s: CRC2 Response pkt\n",
446 vha->vp_idx, __func__);
447 /* fall through */
448 case CTIO_TYPE7:
449 {
450 struct ctio7_from_24xx *entry = (struct ctio7_from_24xx *)pkt;
451 struct scsi_qla_host *host = qlt_find_host_by_vp_idx(vha,
452 entry->vp_index);
453 if (unlikely(!host)) {
454 ql_dbg(ql_dbg_tgt, vha, 0xe041,
455 "qla_target(%d): Response pkt (CTIO_TYPE7) "
456 "received, with unknown vp_index %d\n",
457 vha->vp_idx, entry->vp_index);
458 break;
459 }
460 qlt_response_pkt(host, rsp, pkt);
461 break;
462 }
463
464 case IMMED_NOTIFY_TYPE:
465 {
466 struct scsi_qla_host *host;
467 struct imm_ntfy_from_isp *entry =
468 (struct imm_ntfy_from_isp *)pkt;
469
470 host = qlt_find_host_by_vp_idx(vha, entry->u.isp24.vp_index);
471 if (unlikely(!host)) {
472 ql_dbg(ql_dbg_tgt, vha, 0xe042,
473 "qla_target(%d): Response pkt (IMMED_NOTIFY_TYPE) "
474 "received, with unknown vp_index %d\n",
475 vha->vp_idx, entry->u.isp24.vp_index);
476 break;
477 }
478 qlt_response_pkt(host, rsp, pkt);
479 break;
480 }
481
482 case NOTIFY_ACK_TYPE:
483 {
484 struct scsi_qla_host *host = vha;
485 struct nack_to_isp *entry = (struct nack_to_isp *)pkt;
486
487 if (0xFF != entry->u.isp24.vp_index) {
488 host = qlt_find_host_by_vp_idx(vha,
489 entry->u.isp24.vp_index);
490 if (unlikely(!host)) {
491 ql_dbg(ql_dbg_tgt, vha, 0xe043,
492 "qla_target(%d): Response "
493 "pkt (NOTIFY_ACK_TYPE) "
494 "received, with unknown "
495 "vp_index %d\n", vha->vp_idx,
496 entry->u.isp24.vp_index);
497 break;
498 }
499 }
500 qlt_response_pkt(host, rsp, pkt);
501 break;
502 }
503
504 case ABTS_RECV_24XX:
505 {
506 struct abts_recv_from_24xx *entry =
507 (struct abts_recv_from_24xx *)pkt;
508 struct scsi_qla_host *host = qlt_find_host_by_vp_idx(vha,
509 entry->vp_index);
510 if (unlikely(!host)) {
511 ql_dbg(ql_dbg_tgt, vha, 0xe044,
512 "qla_target(%d): Response pkt "
513 "(ABTS_RECV_24XX) received, with unknown "
514 "vp_index %d\n", vha->vp_idx, entry->vp_index);
515 break;
516 }
517 qlt_response_pkt(host, rsp, pkt);
518 break;
519 }
520
521 case ABTS_RESP_24XX:
522 {
523 struct abts_resp_to_24xx *entry =
524 (struct abts_resp_to_24xx *)pkt;
525 struct scsi_qla_host *host = qlt_find_host_by_vp_idx(vha,
526 entry->vp_index);
527 if (unlikely(!host)) {
528 ql_dbg(ql_dbg_tgt, vha, 0xe045,
529 "qla_target(%d): Response pkt "
530 "(ABTS_RECV_24XX) received, with unknown "
531 "vp_index %d\n", vha->vp_idx, entry->vp_index);
532 break;
533 }
534 qlt_response_pkt(host, rsp, pkt);
535 break;
536 }
537 default:
538 qlt_response_pkt(vha, rsp, pkt);
539 break;
540 }
541
542 }
543
544 /*
545 * All qlt_plogi_ack_t operations are protected by hardware_lock
546 */
547 static int qla24xx_post_nack_work(struct scsi_qla_host *vha, fc_port_t *fcport,
548 struct imm_ntfy_from_isp *ntfy, int type)
549 {
550 struct qla_work_evt *e;
551
552 e = qla2x00_alloc_work(vha, QLA_EVT_NACK);
553 if (!e)
554 return QLA_FUNCTION_FAILED;
555
556 e->u.nack.fcport = fcport;
557 e->u.nack.type = type;
558 memcpy(e->u.nack.iocb, ntfy, sizeof(struct imm_ntfy_from_isp));
559 return qla2x00_post_work(vha, e);
560 }
561
562 static void qla2x00_async_nack_sp_done(srb_t *sp, int res)
563 {
564 struct scsi_qla_host *vha = sp->vha;
565 unsigned long flags;
566
567 ql_dbg(ql_dbg_disc, vha, 0x20f2,
568 "Async done-%s res %x %8phC type %d\n",
569 sp->name, res, sp->fcport->port_name, sp->type);
570
571 spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags);
572 sp->fcport->flags &= ~FCF_ASYNC_SENT;
573 sp->fcport->chip_reset = vha->hw->base_qpair->chip_reset;
574
575 switch (sp->type) {
576 case SRB_NACK_PLOGI:
577 sp->fcport->login_gen++;
578 sp->fcport->fw_login_state = DSC_LS_PLOGI_COMP;
579 sp->fcport->logout_on_delete = 1;
580 sp->fcport->plogi_nack_done_deadline = jiffies + HZ;
581 sp->fcport->send_els_logo = 0;
582 break;
583
584 case SRB_NACK_PRLI:
585 sp->fcport->fw_login_state = DSC_LS_PRLI_COMP;
586 sp->fcport->deleted = 0;
587 sp->fcport->send_els_logo = 0;
588
589 if (!sp->fcport->login_succ &&
590 !IS_SW_RESV_ADDR(sp->fcport->d_id)) {
591 sp->fcport->login_succ = 1;
592
593 vha->fcport_count++;
594 spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
595 qla24xx_sched_upd_fcport(sp->fcport);
596 spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags);
597 } else {
598 sp->fcport->login_retry = 0;
599 sp->fcport->disc_state = DSC_LOGIN_COMPLETE;
600 sp->fcport->deleted = 0;
601 sp->fcport->logout_on_delete = 1;
602 }
603 break;
604
605 case SRB_NACK_LOGO:
606 sp->fcport->login_gen++;
607 sp->fcport->fw_login_state = DSC_LS_PORT_UNAVAIL;
608 qlt_logo_completion_handler(sp->fcport, MBS_COMMAND_COMPLETE);
609 break;
610 }
611 spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
612
613 sp->free(sp);
614 }
615
616 int qla24xx_async_notify_ack(scsi_qla_host_t *vha, fc_port_t *fcport,
617 struct imm_ntfy_from_isp *ntfy, int type)
618 {
619 int rval = QLA_FUNCTION_FAILED;
620 srb_t *sp;
621 char *c = NULL;
622
623 fcport->flags |= FCF_ASYNC_SENT;
624 switch (type) {
625 case SRB_NACK_PLOGI:
626 fcport->fw_login_state = DSC_LS_PLOGI_PEND;
627 c = "PLOGI";
628 break;
629 case SRB_NACK_PRLI:
630 fcport->fw_login_state = DSC_LS_PRLI_PEND;
631 fcport->deleted = 0;
632 c = "PRLI";
633 break;
634 case SRB_NACK_LOGO:
635 fcport->fw_login_state = DSC_LS_LOGO_PEND;
636 c = "LOGO";
637 break;
638 }
639
640 sp = qla2x00_get_sp(vha, fcport, GFP_ATOMIC);
641 if (!sp)
642 goto done;
643
644 sp->type = type;
645 sp->name = "nack";
646
647 sp->u.iocb_cmd.timeout = qla2x00_async_iocb_timeout;
648 qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha)+2);
649
650 sp->u.iocb_cmd.u.nack.ntfy = ntfy;
651 sp->done = qla2x00_async_nack_sp_done;
652
653 ql_dbg(ql_dbg_disc, vha, 0x20f4,
654 "Async-%s %8phC hndl %x %s\n",
655 sp->name, fcport->port_name, sp->handle, c);
656
657 rval = qla2x00_start_sp(sp);
658 if (rval != QLA_SUCCESS)
659 goto done_free_sp;
660
661 return rval;
662
663 done_free_sp:
664 sp->free(sp);
665 done:
666 fcport->flags &= ~FCF_ASYNC_SENT;
667 return rval;
668 }
669
670 void qla24xx_do_nack_work(struct scsi_qla_host *vha, struct qla_work_evt *e)
671 {
672 fc_port_t *t;
673
674 switch (e->u.nack.type) {
675 case SRB_NACK_PRLI:
676 t = e->u.nack.fcport;
677 flush_work(&t->del_work);
678 flush_work(&t->free_work);
679 mutex_lock(&vha->vha_tgt.tgt_mutex);
680 t = qlt_create_sess(vha, e->u.nack.fcport, 0);
681 mutex_unlock(&vha->vha_tgt.tgt_mutex);
682 if (t) {
683 ql_log(ql_log_info, vha, 0xd034,
684 "%s create sess success %p", __func__, t);
685 /* create sess has an extra kref */
686 vha->hw->tgt.tgt_ops->put_sess(e->u.nack.fcport);
687 }
688 break;
689 }
690 qla24xx_async_notify_ack(vha, e->u.nack.fcport,
691 (struct imm_ntfy_from_isp *)e->u.nack.iocb, e->u.nack.type);
692 }
693
694 void qla24xx_delete_sess_fn(struct work_struct *work)
695 {
696 fc_port_t *fcport = container_of(work, struct fc_port, del_work);
697 struct qla_hw_data *ha = fcport->vha->hw;
698
699 if (fcport->se_sess) {
700 ha->tgt.tgt_ops->shutdown_sess(fcport);
701 ha->tgt.tgt_ops->put_sess(fcport);
702 } else {
703 qlt_unreg_sess(fcport);
704 }
705 }
706
707 /*
708 * Called from qla2x00_reg_remote_port()
709 */
710 void qlt_fc_port_added(struct scsi_qla_host *vha, fc_port_t *fcport)
711 {
712 struct qla_hw_data *ha = vha->hw;
713 struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
714 struct fc_port *sess = fcport;
715 unsigned long flags;
716
717 if (!vha->hw->tgt.tgt_ops)
718 return;
719
720 spin_lock_irqsave(&ha->tgt.sess_lock, flags);
721 if (tgt->tgt_stop) {
722 spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
723 return;
724 }
725
726 if (fcport->disc_state == DSC_DELETE_PEND) {
727 spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
728 return;
729 }
730
731 if (!sess->se_sess) {
732 spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
733
734 mutex_lock(&vha->vha_tgt.tgt_mutex);
735 sess = qlt_create_sess(vha, fcport, false);
736 mutex_unlock(&vha->vha_tgt.tgt_mutex);
737
738 spin_lock_irqsave(&ha->tgt.sess_lock, flags);
739 } else {
740 if (fcport->fw_login_state == DSC_LS_PRLI_COMP) {
741 spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
742 return;
743 }
744
745 if (!kref_get_unless_zero(&sess->sess_kref)) {
746 ql_dbg(ql_dbg_disc, vha, 0x2107,
747 "%s: kref_get fail sess %8phC \n",
748 __func__, sess->port_name);
749 spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
750 return;
751 }
752
753 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf04c,
754 "qla_target(%u): %ssession for port %8phC "
755 "(loop ID %d) reappeared\n", vha->vp_idx,
756 sess->local ? "local " : "", sess->port_name, sess->loop_id);
757
758 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf007,
759 "Reappeared sess %p\n", sess);
760
761 ha->tgt.tgt_ops->update_sess(sess, fcport->d_id,
762 fcport->loop_id,
763 (fcport->flags & FCF_CONF_COMP_SUPPORTED));
764 }
765
766 if (sess && sess->local) {
767 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf04d,
768 "qla_target(%u): local session for "
769 "port %8phC (loop ID %d) became global\n", vha->vp_idx,
770 fcport->port_name, sess->loop_id);
771 sess->local = 0;
772 }
773 spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
774
775 ha->tgt.tgt_ops->put_sess(sess);
776 }
777
778 /*
779 * This is a zero-based ref-counting solution, since hardware_lock
780 * guarantees that ref_count is not modified concurrently.
781 * Upon successful return, the content of iocb is undefined.
782 */
783 static struct qlt_plogi_ack_t *
784 qlt_plogi_ack_find_add(struct scsi_qla_host *vha, port_id_t *id,
785 struct imm_ntfy_from_isp *iocb)
786 {
787 struct qlt_plogi_ack_t *pla;
788
789 lockdep_assert_held(&vha->hw->hardware_lock);
790
791 list_for_each_entry(pla, &vha->plogi_ack_list, list) {
792 if (pla->id.b24 == id->b24) {
793 ql_dbg(ql_dbg_disc + ql_dbg_verbose, vha, 0x210d,
794 "%s %d %8phC Term INOT due to new INOT",
795 __func__, __LINE__,
796 pla->iocb.u.isp24.port_name);
797 qlt_send_term_imm_notif(vha, &pla->iocb, 1);
798 memcpy(&pla->iocb, iocb, sizeof(pla->iocb));
799 return pla;
800 }
801 }
802
803 pla = kmem_cache_zalloc(qla_tgt_plogi_cachep, GFP_ATOMIC);
804 if (!pla) {
805 ql_dbg(ql_dbg_async, vha, 0x5088,
806 "qla_target(%d): Allocation of plogi_ack failed\n",
807 vha->vp_idx);
808 return NULL;
809 }
810
811 memcpy(&pla->iocb, iocb, sizeof(pla->iocb));
812 pla->id = *id;
813 list_add_tail(&pla->list, &vha->plogi_ack_list);
814
815 return pla;
816 }
817
818 void qlt_plogi_ack_unref(struct scsi_qla_host *vha,
819 struct qlt_plogi_ack_t *pla)
820 {
821 struct imm_ntfy_from_isp *iocb = &pla->iocb;
822 port_id_t port_id;
823 uint16_t loop_id;
824 fc_port_t *fcport = pla->fcport;
825
826 BUG_ON(!pla->ref_count);
827 pla->ref_count--;
828
829 if (pla->ref_count)
830 return;
831
832 ql_dbg(ql_dbg_disc, vha, 0x5089,
833 "Sending PLOGI ACK to wwn %8phC s_id %02x:%02x:%02x loop_id %#04x"
834 " exch %#x ox_id %#x\n", iocb->u.isp24.port_name,
835 iocb->u.isp24.port_id[2], iocb->u.isp24.port_id[1],
836 iocb->u.isp24.port_id[0],
837 le16_to_cpu(iocb->u.isp24.nport_handle),
838 iocb->u.isp24.exchange_address, iocb->ox_id);
839
840 port_id.b.domain = iocb->u.isp24.port_id[2];
841 port_id.b.area = iocb->u.isp24.port_id[1];
842 port_id.b.al_pa = iocb->u.isp24.port_id[0];
843 port_id.b.rsvd_1 = 0;
844
845 loop_id = le16_to_cpu(iocb->u.isp24.nport_handle);
846
847 fcport->loop_id = loop_id;
848 fcport->d_id = port_id;
849 if (iocb->u.isp24.status_subcode == ELS_PLOGI)
850 qla24xx_post_nack_work(vha, fcport, iocb, SRB_NACK_PLOGI);
851 else
852 qla24xx_post_nack_work(vha, fcport, iocb, SRB_NACK_PRLI);
853
854 list_for_each_entry(fcport, &vha->vp_fcports, list) {
855 if (fcport->plogi_link[QLT_PLOGI_LINK_SAME_WWN] == pla)
856 fcport->plogi_link[QLT_PLOGI_LINK_SAME_WWN] = NULL;
857 if (fcport->plogi_link[QLT_PLOGI_LINK_CONFLICT] == pla)
858 fcport->plogi_link[QLT_PLOGI_LINK_CONFLICT] = NULL;
859 }
860
861 list_del(&pla->list);
862 kmem_cache_free(qla_tgt_plogi_cachep, pla);
863 }
864
865 void
866 qlt_plogi_ack_link(struct scsi_qla_host *vha, struct qlt_plogi_ack_t *pla,
867 struct fc_port *sess, enum qlt_plogi_link_t link)
868 {
869 struct imm_ntfy_from_isp *iocb = &pla->iocb;
870 /* Inc ref_count first because link might already be pointing at pla */
871 pla->ref_count++;
872
873 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf097,
874 "Linking sess %p [%d] wwn %8phC with PLOGI ACK to wwn %8phC"
875 " s_id %02x:%02x:%02x, ref=%d pla %p link %d\n",
876 sess, link, sess->port_name,
877 iocb->u.isp24.port_name, iocb->u.isp24.port_id[2],
878 iocb->u.isp24.port_id[1], iocb->u.isp24.port_id[0],
879 pla->ref_count, pla, link);
880
881 if (link == QLT_PLOGI_LINK_CONFLICT) {
882 switch (sess->disc_state) {
883 case DSC_DELETED:
884 case DSC_DELETE_PEND:
885 pla->ref_count--;
886 return;
887 default:
888 break;
889 }
890 }
891
892 if (sess->plogi_link[link])
893 qlt_plogi_ack_unref(vha, sess->plogi_link[link]);
894
895 if (link == QLT_PLOGI_LINK_SAME_WWN)
896 pla->fcport = sess;
897
898 sess->plogi_link[link] = pla;
899 }
900
901 typedef struct {
902 /* These fields must be initialized by the caller */
903 port_id_t id;
904 /*
905 * Number of cmds dropped while we were waiting for the
906 * initiator to ack LOGO. Initialize to 1 if the LOGO is
907 * triggered by a command, otherwise to 0.
908 */
909 int cmd_count;
910
911 /* These fields are used by callee */
912 struct list_head list;
913 } qlt_port_logo_t;
914
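/*
 * Illustrative usage (mirrors qlt_free_session_done() below): callers fill
 * in the first two fields on the stack and pass the struct to
 * qlt_send_first_logo(), which links it into vha->logo_list for the
 * duration of the LOGO:
 *
 *     qlt_port_logo_t logo;
 *
 *     logo.id = sess->d_id;
 *     logo.cmd_count = 0;
 *     qlt_send_first_logo(vha, &logo);
 */
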
915 static void
916 qlt_send_first_logo(struct scsi_qla_host *vha, qlt_port_logo_t *logo)
917 {
918 qlt_port_logo_t *tmp;
919 int res;
920
921 mutex_lock(&vha->vha_tgt.tgt_mutex);
922
923 list_for_each_entry(tmp, &vha->logo_list, list) {
924 if (tmp->id.b24 == logo->id.b24) {
925 tmp->cmd_count += logo->cmd_count;
926 mutex_unlock(&vha->vha_tgt.tgt_mutex);
927 return;
928 }
929 }
930
931 list_add_tail(&logo->list, &vha->logo_list);
932
933 mutex_unlock(&vha->vha_tgt.tgt_mutex);
934
935 res = qla24xx_els_dcmd_iocb(vha, ELS_DCMD_LOGO, logo->id);
936
937 mutex_lock(&vha->vha_tgt.tgt_mutex);
938 list_del(&logo->list);
939 mutex_unlock(&vha->vha_tgt.tgt_mutex);
940
941 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf098,
942 "Finished LOGO to %02x:%02x:%02x, dropped %d cmds, res = %#x\n",
943 logo->id.b.domain, logo->id.b.area, logo->id.b.al_pa,
944 logo->cmd_count, res);
945 }
946
947 void qlt_free_session_done(struct work_struct *work)
948 {
949 struct fc_port *sess = container_of(work, struct fc_port,
950 free_work);
951 struct qla_tgt *tgt = sess->tgt;
952 struct scsi_qla_host *vha = sess->vha;
953 struct qla_hw_data *ha = vha->hw;
954 unsigned long flags;
955 bool logout_started = false;
956 scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);
957 struct qlt_plogi_ack_t *own =
958 sess->plogi_link[QLT_PLOGI_LINK_SAME_WWN];
959
960 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf084,
961 "%s: se_sess %p / sess %p from port %8phC loop_id %#04x"
962 " s_id %02x:%02x:%02x logout %d keep %d els_logo %d\n",
963 __func__, sess->se_sess, sess, sess->port_name, sess->loop_id,
964 sess->d_id.b.domain, sess->d_id.b.area, sess->d_id.b.al_pa,
965 sess->logout_on_delete, sess->keep_nport_handle,
966 sess->send_els_logo);
967
968 if (!IS_SW_RESV_ADDR(sess->d_id)) {
969 qla2x00_mark_device_lost(vha, sess, 0, 0);
970
971 if (sess->send_els_logo) {
972 qlt_port_logo_t logo;
973
974 logo.id = sess->d_id;
975 logo.cmd_count = 0;
976 if (!own)
977 qlt_send_first_logo(vha, &logo);
978 sess->send_els_logo = 0;
979 }
980
981 if (sess->logout_on_delete && sess->loop_id != FC_NO_LOOP_ID) {
982 int rc;
983
984 if (!own ||
985 (own &&
986 (own->iocb.u.isp24.status_subcode == ELS_PLOGI))) {
987 rc = qla2x00_post_async_logout_work(vha, sess,
988 NULL);
989 if (rc != QLA_SUCCESS)
990 ql_log(ql_log_warn, vha, 0xf085,
991 "Schedule logo failed sess %p rc %d\n",
992 sess, rc);
993 else
994 logout_started = true;
995 } else if (own && (own->iocb.u.isp24.status_subcode ==
996 ELS_PRLI) && ha->flags.rida_fmt2) {
997 rc = qla2x00_post_async_prlo_work(vha, sess,
998 NULL);
999 if (rc != QLA_SUCCESS)
1000 ql_log(ql_log_warn, vha, 0xf085,
1001 "Schedule PRLO failed sess %p rc %d\n",
1002 sess, rc);
1003 else
1004 logout_started = true;
1005 }
1006 } /* if sess->logout_on_delete */
1007
1008 if (sess->nvme_flag & NVME_FLAG_REGISTERED &&
1009 !(sess->nvme_flag & NVME_FLAG_DELETING)) {
1010 sess->nvme_flag |= NVME_FLAG_DELETING;
1011 qla_nvme_unregister_remote_port(sess);
1012 }
1013 }
1014
1015 /*
1016 * Release the target session for FC Nexus from fabric module code.
1017 */
1018 if (sess->se_sess != NULL)
1019 ha->tgt.tgt_ops->free_session(sess);
1020
1021 if (logout_started) {
1022 bool traced = false;
1023 u16 cnt = 0;
1024
1025 while (!READ_ONCE(sess->logout_completed)) {
1026 if (!traced) {
1027 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf086,
1028 "%s: waiting for sess %p logout\n",
1029 __func__, sess);
1030 traced = true;
1031 }
1032 msleep(100);
1033 cnt++;
1034 if (cnt > 200)
1035 break;
1036 }
1037
1038 ql_dbg(ql_dbg_disc, vha, 0xf087,
1039 "%s: sess %p logout completed\n", __func__, sess);
1040 }
1041
1042 if (sess->logo_ack_needed) {
1043 sess->logo_ack_needed = 0;
1044 qla24xx_async_notify_ack(vha, sess,
1045 (struct imm_ntfy_from_isp *)sess->iocb, SRB_NACK_LOGO);
1046 }
1047
1048 spin_lock_irqsave(&ha->tgt.sess_lock, flags);
1049 if (sess->se_sess) {
1050 sess->se_sess = NULL;
1051 if (tgt && !IS_SW_RESV_ADDR(sess->d_id))
1052 tgt->sess_count--;
1053 }
1054
1055 sess->disc_state = DSC_DELETED;
1056 sess->fw_login_state = DSC_LS_PORT_UNAVAIL;
1057 sess->deleted = QLA_SESS_DELETED;
1058
1059 if (sess->login_succ && !IS_SW_RESV_ADDR(sess->d_id)) {
1060 vha->fcport_count--;
1061 sess->login_succ = 0;
1062 }
1063
1064 qla2x00_clear_loop_id(sess);
1065
1066 if (sess->conflict) {
1067 sess->conflict->login_pause = 0;
1068 sess->conflict = NULL;
1069 if (!test_bit(UNLOADING, &vha->dpc_flags))
1070 set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
1071 }
1072
1073 {
1074 struct qlt_plogi_ack_t *con =
1075 sess->plogi_link[QLT_PLOGI_LINK_CONFLICT];
1076 struct imm_ntfy_from_isp *iocb;
1077
1078 own = sess->plogi_link[QLT_PLOGI_LINK_SAME_WWN];
1079
1080 if (con) {
1081 iocb = &con->iocb;
1082 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf099,
1083 "se_sess %p / sess %p port %8phC is gone,"
1084 " %s (ref=%d), releasing PLOGI for %8phC (ref=%d)\n",
1085 sess->se_sess, sess, sess->port_name,
1086 own ? "releasing own PLOGI" : "no own PLOGI pending",
1087 own ? own->ref_count : -1,
1088 iocb->u.isp24.port_name, con->ref_count);
1089 qlt_plogi_ack_unref(vha, con);
1090 sess->plogi_link[QLT_PLOGI_LINK_CONFLICT] = NULL;
1091 } else {
1092 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf09a,
1093 "se_sess %p / sess %p port %8phC is gone, %s (ref=%d)\n",
1094 sess->se_sess, sess, sess->port_name,
1095 own ? "releasing own PLOGI" :
1096 "no own PLOGI pending",
1097 own ? own->ref_count : -1);
1098 }
1099
1100 if (own) {
1101 sess->fw_login_state = DSC_LS_PLOGI_PEND;
1102 qlt_plogi_ack_unref(vha, own);
1103 sess->plogi_link[QLT_PLOGI_LINK_SAME_WWN] = NULL;
1104 }
1105 }
1106
1107 sess->explicit_logout = 0;
1108 spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
1109 sess->free_pending = 0;
1110
1111 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf001,
1112 "Unregistration of sess %p %8phC finished fcp_cnt %d\n",
1113 sess, sess->port_name, vha->fcport_count);
1114
1115 if (tgt && (tgt->sess_count == 0))
1116 wake_up_all(&tgt->waitQ);
1117
1118 if (!test_bit(PFLG_DRIVER_REMOVING, &base_vha->pci_flags) &&
1119 !(vha->vp_idx && test_bit(VPORT_DELETE, &vha->dpc_flags)) &&
1120 (!tgt || !tgt->tgt_stop) && !LOOP_TRANSITION(vha)) {
1121 switch (vha->host->active_mode) {
1122 case MODE_INITIATOR:
1123 case MODE_DUAL:
1124 set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
1125 qla2xxx_wake_dpc(vha);
1126 break;
1127 case MODE_TARGET:
1128 default:
1129 /* no-op */
1130 break;
1131 }
1132 }
1133
1134 if (vha->fcport_count == 0)
1135 wake_up_all(&vha->fcport_waitQ);
1136 }
1137
1138 /* ha->tgt.sess_lock is supposed to be held on entry */
1139 void qlt_unreg_sess(struct fc_port *sess)
1140 {
1141 struct scsi_qla_host *vha = sess->vha;
1142 unsigned long flags;
1143
1144 ql_dbg(ql_dbg_disc, sess->vha, 0x210a,
1145 "%s sess %p for deletion %8phC\n",
1146 __func__, sess, sess->port_name);
1147
1148 spin_lock_irqsave(&sess->vha->work_lock, flags);
1149 if (sess->free_pending) {
1150 spin_unlock_irqrestore(&sess->vha->work_lock, flags);
1151 return;
1152 }
1153 sess->free_pending = 1;
1154 spin_unlock_irqrestore(&sess->vha->work_lock, flags);
1155
1156 if (sess->se_sess)
1157 vha->hw->tgt.tgt_ops->clear_nacl_from_fcport_map(sess);
1158
1159 sess->deleted = QLA_SESS_DELETION_IN_PROGRESS;
1160 sess->disc_state = DSC_DELETE_PEND;
1161 sess->last_rscn_gen = sess->rscn_gen;
1162 sess->last_login_gen = sess->login_gen;
1163
1164 queue_work(sess->vha->hw->wq, &sess->free_work);
1165 }
1166 EXPORT_SYMBOL(qlt_unreg_sess);
1167
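/*
 * Editor's note (not in the original source): the free_pending test-and-set
 * under vha->work_lock above makes qlt_unreg_sess() idempotent; a second
 * caller sees free_pending == 1 and returns without queueing free_work
 * again. qlt_free_session_done() clears the flag when it finishes.
 */
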
1168 static int qlt_reset(struct scsi_qla_host *vha, void *iocb, int mcmd)
1169 {
1170 struct qla_hw_data *ha = vha->hw;
1171 struct fc_port *sess = NULL;
1172 uint16_t loop_id;
1173 int res = 0;
1174 struct imm_ntfy_from_isp *n = (struct imm_ntfy_from_isp *)iocb;
1175 unsigned long flags;
1176
1177 loop_id = le16_to_cpu(n->u.isp24.nport_handle);
1178 if (loop_id == 0xFFFF) {
1179 /* Global event */
1180 atomic_inc(&vha->vha_tgt.qla_tgt->tgt_global_resets_count);
1181 spin_lock_irqsave(&ha->tgt.sess_lock, flags);
1182 qlt_clear_tgt_db(vha->vha_tgt.qla_tgt);
1183 spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
1184 } else {
1185 spin_lock_irqsave(&ha->tgt.sess_lock, flags);
1186 sess = ha->tgt.tgt_ops->find_sess_by_loop_id(vha, loop_id);
1187 spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
1188 }
1189
1190 ql_dbg(ql_dbg_tgt, vha, 0xe000,
1191 "Using sess for qla_tgt_reset: %p\n", sess);
1192 if (!sess) {
1193 res = -ESRCH;
1194 return res;
1195 }
1196
1197 ql_dbg(ql_dbg_tgt, vha, 0xe047,
1198 "scsi(%ld): resetting (session %p from port %8phC mcmd %x, "
1199 "loop_id %d)\n", vha->host_no, sess, sess->port_name,
1200 mcmd, loop_id);
1201
1202 return qlt_issue_task_mgmt(sess, 0, mcmd, iocb, QLA24XX_MGMT_SEND_NACK);
1203 }
1204
1205 static void qla24xx_chk_fcp_state(struct fc_port *sess)
1206 {
1207 if (sess->chip_reset != sess->vha->hw->base_qpair->chip_reset) {
1208 sess->logout_on_delete = 0;
1209 sess->logo_ack_needed = 0;
1210 sess->fw_login_state = DSC_LS_PORT_UNAVAIL;
1211 }
1212 }
1213
1214 void qlt_schedule_sess_for_deletion(struct fc_port *sess)
1215 {
1216 struct qla_tgt *tgt = sess->tgt;
1217 unsigned long flags;
1218 u16 sec;
1219
1220 switch (sess->disc_state) {
1221 case DSC_DELETE_PEND:
1222 return;
1223 case DSC_DELETED:
1224 if (tgt && tgt->tgt_stop && (tgt->sess_count == 0))
1225 wake_up_all(&tgt->waitQ);
1226 if (sess->vha->fcport_count == 0)
1227 wake_up_all(&sess->vha->fcport_waitQ);
1228
1229 if (!sess->plogi_link[QLT_PLOGI_LINK_SAME_WWN] &&
1230 !sess->plogi_link[QLT_PLOGI_LINK_CONFLICT])
1231 return;
1232 break;
1233 case DSC_UPD_FCPORT:
1234 /*
1235 * This port is not done reporting to upper layer.
1236 * let it finish
1237 */
1238 sess->next_disc_state = DSC_DELETE_PEND;
1239 sec = jiffies_to_msecs(jiffies -
1240 sess->jiffies_at_registration)/1000;
1241 if (sess->sec_since_registration < sec && sec && !(sec % 5)) {
1242 sess->sec_since_registration = sec;
1243 ql_dbg(ql_dbg_disc, sess->vha, 0xffff,
1244 "%s %8phC : Slow Rport registration(%d Sec)\n",
1245 __func__, sess->port_name, sec);
1246 }
1247 return;
1248 default:
1249 break;
1250 }
1251
1252 spin_lock_irqsave(&sess->vha->work_lock, flags);
1253 if (sess->deleted == QLA_SESS_DELETION_IN_PROGRESS) {
1254 spin_unlock_irqrestore(&sess->vha->work_lock, flags);
1255 return;
1256 }
1257 sess->deleted = QLA_SESS_DELETION_IN_PROGRESS;
1258 spin_unlock_irqrestore(&sess->vha->work_lock, flags);
1259
1260 sess->disc_state = DSC_DELETE_PEND;
1261
1262 qla24xx_chk_fcp_state(sess);
1263
1264 ql_dbg(ql_dbg_tgt, sess->vha, 0xe001,
1265 "Scheduling sess %p for deletion %8phC\n",
1266 sess, sess->port_name);
1267
1268 WARN_ON(!queue_work(sess->vha->hw->wq, &sess->del_work));
1269 }
1270
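/*
 * Editor's note (not in the original source): queue_work() returns false
 * when the work item is already pending, so the WARN_ON() above flags a
 * double queueing of del_work; the QLA_SESS_DELETION_IN_PROGRESS check
 * under work_lock is what normally prevents reaching this point twice.
 */
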
1271 static void qlt_clear_tgt_db(struct qla_tgt *tgt)
1272 {
1273 struct fc_port *sess;
1274 scsi_qla_host_t *vha = tgt->vha;
1275
1276 list_for_each_entry(sess, &vha->vp_fcports, list) {
1277 if (sess->se_sess)
1278 qlt_schedule_sess_for_deletion(sess);
1279 }
1280
1281 /* At this point tgt could already be dead */
1282 }
1283
1284 static int qla24xx_get_loop_id(struct scsi_qla_host *vha, be_id_t s_id,
1285 uint16_t *loop_id)
1286 {
1287 struct qla_hw_data *ha = vha->hw;
1288 dma_addr_t gid_list_dma;
1289 struct gid_list_info *gid_list, *gid;
1290 int res, rc, i;
1291 uint16_t entries;
1292
1293 gid_list = dma_alloc_coherent(&ha->pdev->dev, qla2x00_gid_list_size(ha),
1294 &gid_list_dma, GFP_KERNEL);
1295 if (!gid_list) {
1296 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf044,
1297 "qla_target(%d): DMA Alloc failed of %u\n",
1298 vha->vp_idx, qla2x00_gid_list_size(ha));
1299 return -ENOMEM;
1300 }
1301
1302 /* Get list of logged in devices */
1303 rc = qla24xx_gidlist_wait(vha, gid_list, gid_list_dma, &entries);
1304 if (rc != QLA_SUCCESS) {
1305 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf045,
1306 "qla_target(%d): get_id_list() failed: %x\n",
1307 vha->vp_idx, rc);
1308 res = -EBUSY;
1309 goto out_free_id_list;
1310 }
1311
1312 gid = gid_list;
1313 res = -ENOENT;
1314 for (i = 0; i < entries; i++) {
1315 if (gid->al_pa == s_id.al_pa &&
1316 gid->area == s_id.area &&
1317 gid->domain == s_id.domain) {
1318 *loop_id = le16_to_cpu(gid->loop_id);
1319 res = 0;
1320 break;
1321 }
1322 gid = (void *)gid + ha->gid_list_info_size;
1323 }
1324
1325 out_free_id_list:
1326 dma_free_coherent(&ha->pdev->dev, qla2x00_gid_list_size(ha),
1327 gid_list, gid_list_dma);
1328 return res;
1329 }
1330
1331 /*
1332 * Adds an extra ref to allow the hw lock to be dropped after adding sess
1333 * to the list. Caller must put it.
1334 */
1335 static struct fc_port *qlt_create_sess(
1336 struct scsi_qla_host *vha,
1337 fc_port_t *fcport,
1338 bool local)
1339 {
1340 struct qla_hw_data *ha = vha->hw;
1341 struct fc_port *sess = fcport;
1342 unsigned long flags;
1343
1344 if (vha->vha_tgt.qla_tgt->tgt_stop)
1345 return NULL;
1346
1347 if (fcport->se_sess) {
1348 if (!kref_get_unless_zero(&sess->sess_kref)) {
1349 ql_dbg(ql_dbg_disc, vha, 0x20f6,
1350 "%s: kref_get_unless_zero failed for %8phC\n",
1351 __func__, sess->port_name);
1352 return NULL;
1353 }
1354 return fcport;
1355 }
1356 sess->tgt = vha->vha_tgt.qla_tgt;
1357 sess->local = local;
1358
1359 /*
1360 * Under normal circumstances we want to logout from firmware when
1361 * session eventually ends and release corresponding nport handle.
1362 * In the exception cases (e.g. when new PLOGI is waiting) corresponding
1363 * code will adjust these flags as necessary.
1364 */
1365 sess->logout_on_delete = 1;
1366 sess->keep_nport_handle = 0;
1367 sess->logout_completed = 0;
1368
1369 if (ha->tgt.tgt_ops->check_initiator_node_acl(vha,
1370 &fcport->port_name[0], sess) < 0) {
1371 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf015,
1372 "(%d) %8phC check_initiator_node_acl failed\n",
1373 vha->vp_idx, fcport->port_name);
1374 return NULL;
1375 } else {
1376 kref_init(&fcport->sess_kref);
1377 /*
1378 * Take an extra reference to ->sess_kref here to handle
1379 * fc_port access across ->tgt.sess_lock reacquire.
1380 */
1381 if (!kref_get_unless_zero(&sess->sess_kref)) {
1382 ql_dbg(ql_dbg_disc, vha, 0x20f7,
1383 "%s: kref_get_unless_zero failed for %8phC\n",
1384 __func__, sess->port_name);
1385 return NULL;
1386 }
1387
1388 spin_lock_irqsave(&ha->tgt.sess_lock, flags);
1389 if (!IS_SW_RESV_ADDR(sess->d_id))
1390 vha->vha_tgt.qla_tgt->sess_count++;
1391
1392 qlt_do_generation_tick(vha, &sess->generation);
1393 spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
1394 }
1395
1396 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf006,
1397 "Adding sess %p se_sess %p to tgt %p sess_count %d\n",
1398 sess, sess->se_sess, vha->vha_tgt.qla_tgt,
1399 vha->vha_tgt.qla_tgt->sess_count);
1400
1401 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf04b,
1402 "qla_target(%d): %ssession for wwn %8phC (loop_id %d, "
1403 "s_id %x:%x:%x, confirmed completion %ssupported) added\n",
1404 vha->vp_idx, local ? "local " : "", fcport->port_name,
1405 fcport->loop_id, sess->d_id.b.domain, sess->d_id.b.area,
1406 sess->d_id.b.al_pa, sess->conf_compl_supported ? "" : "not ");
1407
1408 return sess;
1409 }
1410
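/*
 * Illustrative usage (mirrors qla24xx_do_nack_work() above): the session is
 * returned with an extra kref that the caller must drop once it is done:
 *
 *     mutex_lock(&vha->vha_tgt.tgt_mutex);
 *     sess = qlt_create_sess(vha, fcport, false);
 *     mutex_unlock(&vha->vha_tgt.tgt_mutex);
 *     if (sess)
 *             ha->tgt.tgt_ops->put_sess(sess);
 */
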
1411 /*
1412 * max_gen - specifies the maximum session generation
1413 * at which this deletion request is still valid
1414 */
1415 void
1416 qlt_fc_port_deleted(struct scsi_qla_host *vha, fc_port_t *fcport, int max_gen)
1417 {
1418 struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
1419 struct fc_port *sess = fcport;
1420 unsigned long flags;
1421
1422 if (!vha->hw->tgt.tgt_ops)
1423 return;
1424
1425 if (!tgt)
1426 return;
1427
1428 spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags);
1429 if (tgt->tgt_stop) {
1430 spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
1431 return;
1432 }
1433 if (!sess->se_sess) {
1434 spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
1435 return;
1436 }
1437
1438 if (max_gen - sess->generation < 0) {
1439 spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
1440 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf092,
1441 "Ignoring stale deletion request for se_sess %p / sess %p"
1442 " for port %8phC, req_gen %d, sess_gen %d\n",
1443 sess->se_sess, sess, sess->port_name, max_gen,
1444 sess->generation);
1445 return;
1446 }
1447
1448 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf008, "qla_tgt_fc_port_deleted %p", sess);
1449
1450 sess->local = 1;
1451 spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
1452 qlt_schedule_sess_for_deletion(sess);
1453 }
1454
1455 static inline int test_tgt_sess_count(struct qla_tgt *tgt)
1456 {
1457 struct qla_hw_data *ha = tgt->ha;
1458 unsigned long flags;
1459 int res;
1460 /*
1461 * We need to protect against a race where tgt is freed before or
1462 * inside wake_up()
1463 */
1464 spin_lock_irqsave(&ha->tgt.sess_lock, flags);
1465 ql_dbg(ql_dbg_tgt, tgt->vha, 0xe002,
1466 "tgt %p, sess_count=%d\n",
1467 tgt, tgt->sess_count);
1468 res = (tgt->sess_count == 0);
1469 spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
1470
1471 return res;
1472 }
1473
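/*
 * Editor's note (not in the original source): this predicate feeds the
 * wait_event_timeout(tgt->waitQ, ...) calls in qlt_stop_phase1() below;
 * the matching wake_up_all(&tgt->waitQ) runs when sess_count drops to
 * zero, e.g. at the end of qlt_free_session_done().
 */
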
1474 /* Called by tcm_qla2xxx configfs code */
1475 int qlt_stop_phase1(struct qla_tgt *tgt)
1476 {
1477 struct scsi_qla_host *vha = tgt->vha;
1478 struct qla_hw_data *ha = tgt->ha;
1479 unsigned long flags;
1480
1481 mutex_lock(&ha->optrom_mutex);
1482 mutex_lock(&qla_tgt_mutex);
1483
1484 if (tgt->tgt_stop || tgt->tgt_stopped) {
1485 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf04e,
1486 "Already in tgt->tgt_stop or tgt_stopped state\n");
1487 mutex_unlock(&qla_tgt_mutex);
1488 mutex_unlock(&ha->optrom_mutex);
1489 return -EPERM;
1490 }
1491
1492 ql_dbg(ql_dbg_tgt_mgt, vha, 0xe003, "Stopping target for host %ld(%p)\n",
1493 vha->host_no, vha);
1494 /*
1495 * Mutex needed to sync with qla_tgt_fc_port_[added,deleted].
1496 * Lock is needed, because we can still get an incoming packet.
1497 */
1498 mutex_lock(&vha->vha_tgt.tgt_mutex);
1499 tgt->tgt_stop = 1;
1500 qlt_clear_tgt_db(tgt);
1501 mutex_unlock(&vha->vha_tgt.tgt_mutex);
1502 mutex_unlock(&qla_tgt_mutex);
1503
1504 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf009,
1505 "Waiting for sess works (tgt %p)", tgt);
1506 spin_lock_irqsave(&tgt->sess_work_lock, flags);
1507 while (!list_empty(&tgt->sess_works_list)) {
1508 spin_unlock_irqrestore(&tgt->sess_work_lock, flags);
1509 flush_scheduled_work();
1510 spin_lock_irqsave(&tgt->sess_work_lock, flags);
1511 }
1512 spin_unlock_irqrestore(&tgt->sess_work_lock, flags);
1513
1514 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf00a,
1515 "Waiting for tgt %p: sess_count=%d\n", tgt, tgt->sess_count);
1516
1517 wait_event_timeout(tgt->waitQ, test_tgt_sess_count(tgt), 10*HZ);
1518
1519 /* Big hammer */
1520 if (!ha->flags.host_shutting_down &&
1521 (qla_tgt_mode_enabled(vha) || qla_dual_mode_enabled(vha)))
1522 qlt_disable_vha(vha);
1523
1524 /* Wait for sessions to clear out (just in case) */
1525 wait_event_timeout(tgt->waitQ, test_tgt_sess_count(tgt), 10*HZ);
1526 mutex_unlock(&ha->optrom_mutex);
1527
1528 return 0;
1529 }
1530 EXPORT_SYMBOL(qlt_stop_phase1);
1531
1532 /* Called by tcm_qla2xxx configfs code */
1533 void qlt_stop_phase2(struct qla_tgt *tgt)
1534 {
1535 scsi_qla_host_t *vha = tgt->vha;
1536
1537 if (tgt->tgt_stopped) {
1538 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf04f,
1539 "Already in tgt->tgt_stopped state\n");
1540 dump_stack();
1541 return;
1542 }
1543 if (!tgt->tgt_stop) {
1544 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf00b,
1545 "%s: phase1 stop is not completed\n", __func__);
1546 dump_stack();
1547 return;
1548 }
1549
1550 mutex_lock(&vha->vha_tgt.tgt_mutex);
1551 tgt->tgt_stop = 0;
1552 tgt->tgt_stopped = 1;
1553 mutex_unlock(&vha->vha_tgt.tgt_mutex);
1554
1555 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf00c, "Stop of tgt %p finished\n",
1556 tgt);
1557
1558 switch (vha->qlini_mode) {
1559 case QLA2XXX_INI_MODE_EXCLUSIVE:
1560 vha->flags.online = 1;
1561 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
1562 break;
1563 default:
1564 break;
1565 }
1566 }
1567 EXPORT_SYMBOL(qlt_stop_phase2);
1568
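/*
 * Editor's note (not in the original source): tcm_qla2xxx tears a target
 * down in two phases: qlt_stop_phase1() drains sessions and may disable
 * the vha, then qlt_stop_phase2() moves tgt_stop to tgt_stopped. If the
 * configfs code has not done so, qlt_release() below runs both phases
 * itself before freeing the tgt.
 */
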
1569 /* Called from qlt_remove_target() -> qla2x00_remove_one() */
1570 static void qlt_release(struct qla_tgt *tgt)
1571 {
1572 scsi_qla_host_t *vha = tgt->vha;
1573 void *node;
1574 u64 key = 0;
1575 u16 i;
1576 struct qla_qpair_hint *h;
1577 struct qla_hw_data *ha = vha->hw;
1578
1579 if (!tgt->tgt_stop && !tgt->tgt_stopped)
1580 qlt_stop_phase1(tgt);
1581
1582 if (!tgt->tgt_stopped)
1583 qlt_stop_phase2(tgt);
1584
1585 for (i = 0; i < vha->hw->max_qpairs + 1; i++) {
1586 unsigned long flags;
1587
1588 h = &tgt->qphints[i];
1589 if (h->qpair) {
1590 spin_lock_irqsave(h->qpair->qp_lock_ptr, flags);
1591 list_del(&h->hint_elem);
1592 spin_unlock_irqrestore(h->qpair->qp_lock_ptr, flags);
1593 h->qpair = NULL;
1594 }
1595 }
1596 kfree(tgt->qphints);
1597 mutex_lock(&qla_tgt_mutex);
1598 list_del(&vha->vha_tgt.qla_tgt->tgt_list_entry);
1599 mutex_unlock(&qla_tgt_mutex);
1600
1601 btree_for_each_safe64(&tgt->lun_qpair_map, key, node)
1602 btree_remove64(&tgt->lun_qpair_map, key);
1603
1604 btree_destroy64(&tgt->lun_qpair_map);
1605
1606 if (vha->vp_idx)
1607 if (ha->tgt.tgt_ops &&
1608 ha->tgt.tgt_ops->remove_target &&
1609 vha->vha_tgt.target_lport_ptr)
1610 ha->tgt.tgt_ops->remove_target(vha);
1611
1612 vha->vha_tgt.qla_tgt = NULL;
1613
1614 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf00d,
1615 "Release of tgt %p finished\n", tgt);
1616
1617 kfree(tgt);
1618 }
1619
1620 /* ha->hardware_lock is supposed to be held on entry */
1621 static int qlt_sched_sess_work(struct qla_tgt *tgt, int type,
1622 const void *param, unsigned int param_size)
1623 {
1624 struct qla_tgt_sess_work_param *prm;
1625 unsigned long flags;
1626
1627 prm = kzalloc(sizeof(*prm), GFP_ATOMIC);
1628 if (!prm) {
1629 ql_dbg(ql_dbg_tgt_mgt, tgt->vha, 0xf050,
1630 "qla_target(%d): Unable to create session "
1631 "work, command will be refused", 0);
1632 return -ENOMEM;
1633 }
1634
1635 ql_dbg(ql_dbg_tgt_mgt, tgt->vha, 0xf00e,
1636 "Scheduling work (type %d, prm %p)"
1637 " to find session for param %p (size %d, tgt %p)\n",
1638 type, prm, param, param_size, tgt);
1639
1640 prm->type = type;
1641 memcpy(&prm->tm_iocb, param, param_size);
1642
1643 spin_lock_irqsave(&tgt->sess_work_lock, flags);
1644 list_add_tail(&prm->sess_works_list_entry, &tgt->sess_works_list);
1645 spin_unlock_irqrestore(&tgt->sess_work_lock, flags);
1646
1647 schedule_work(&tgt->sess_work);
1648
1649 return 0;
1650 }
1651
1652 /*
1653 * ha->hardware_lock is supposed to be held on entry. Might drop it, then reacquire
1654 */
1655 static void qlt_send_notify_ack(struct qla_qpair *qpair,
1656 struct imm_ntfy_from_isp *ntfy,
1657 uint32_t add_flags, uint16_t resp_code, int resp_code_valid,
1658 uint16_t srr_flags, uint16_t srr_reject_code, uint8_t srr_explan)
1659 {
1660 struct scsi_qla_host *vha = qpair->vha;
1661 struct qla_hw_data *ha = vha->hw;
1662 request_t *pkt;
1663 struct nack_to_isp *nack;
1664
1665 if (!ha->flags.fw_started)
1666 return;
1667
1668 ql_dbg(ql_dbg_tgt, vha, 0xe004, "Sending NOTIFY_ACK (ha=%p)\n", ha);
1669
1670 pkt = (request_t *)__qla2x00_alloc_iocbs(qpair, NULL);
1671 if (!pkt) {
1672 ql_dbg(ql_dbg_tgt, vha, 0xe049,
1673 "qla_target(%d): %s failed: unable to allocate "
1674 "request packet\n", vha->vp_idx, __func__);
1675 return;
1676 }
1677
1678 if (vha->vha_tgt.qla_tgt != NULL)
1679 vha->vha_tgt.qla_tgt->notify_ack_expected++;
1680
1681 pkt->entry_type = NOTIFY_ACK_TYPE;
1682 pkt->entry_count = 1;
1683
1684 nack = (struct nack_to_isp *)pkt;
1685 nack->ox_id = ntfy->ox_id;
1686
1687 nack->u.isp24.handle = QLA_TGT_SKIP_HANDLE;
1688 nack->u.isp24.nport_handle = ntfy->u.isp24.nport_handle;
1689 if (le16_to_cpu(ntfy->u.isp24.status) == IMM_NTFY_ELS) {
1690 nack->u.isp24.flags = ntfy->u.isp24.flags &
1691 cpu_to_le32(NOTIFY24XX_FLAGS_PUREX_IOCB);
1692 }
1693 nack->u.isp24.srr_rx_id = ntfy->u.isp24.srr_rx_id;
1694 nack->u.isp24.status = ntfy->u.isp24.status;
1695 nack->u.isp24.status_subcode = ntfy->u.isp24.status_subcode;
1696 nack->u.isp24.fw_handle = ntfy->u.isp24.fw_handle;
1697 nack->u.isp24.exchange_address = ntfy->u.isp24.exchange_address;
1698 nack->u.isp24.srr_rel_offs = ntfy->u.isp24.srr_rel_offs;
1699 nack->u.isp24.srr_ui = ntfy->u.isp24.srr_ui;
1700 nack->u.isp24.srr_flags = cpu_to_le16(srr_flags);
1701 nack->u.isp24.srr_reject_code = srr_reject_code;
1702 nack->u.isp24.srr_reject_code_expl = srr_explan;
1703 nack->u.isp24.vp_index = ntfy->u.isp24.vp_index;
1704
1705 ql_dbg(ql_dbg_tgt, vha, 0xe005,
1706 "qla_target(%d): Sending 24xx Notify Ack %d\n",
1707 vha->vp_idx, nack->u.isp24.status);
1708
1709 /* Memory Barrier */
1710 wmb();
1711 qla2x00_start_iocbs(vha, qpair->req);
1712 }
1713
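/*
 * Editor's note (not in the original source): the wmb() before
 * qla2x00_start_iocbs() orders the IOCB stores into the request ring ahead
 * of the ring-pointer update that hands the entry to the firmware; the
 * same wmb()-then-start pattern recurs in the ABTS and CTIO senders below.
 */
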
1714 static int qlt_build_abts_resp_iocb(struct qla_tgt_mgmt_cmd *mcmd)
1715 {
1716 struct scsi_qla_host *vha = mcmd->vha;
1717 struct qla_hw_data *ha = vha->hw;
1718 struct abts_resp_to_24xx *resp;
1719 uint32_t f_ctl, h;
1720 uint8_t *p;
1721 int rc;
1722 struct abts_recv_from_24xx *abts = &mcmd->orig_iocb.abts;
1723 struct qla_qpair *qpair = mcmd->qpair;
1724
1725 ql_dbg(ql_dbg_tgt, vha, 0xe006,
1726 "Sending task mgmt ABTS response (ha=%p, status=%x)\n",
1727 ha, mcmd->fc_tm_rsp);
1728
1729 rc = qlt_check_reserve_free_req(qpair, 1);
1730 if (rc) {
1731 ql_dbg(ql_dbg_tgt, vha, 0xe04a,
1732 "qla_target(%d): %s failed: unable to allocate request packet\n",
1733 vha->vp_idx, __func__);
1734 return -EAGAIN;
1735 }
1736
1737 resp = (struct abts_resp_to_24xx *)qpair->req->ring_ptr;
1738 memset(resp, 0, sizeof(*resp));
1739
1740 h = qlt_make_handle(qpair);
1741 if (unlikely(h == QLA_TGT_NULL_HANDLE)) {
1742 /*
1743 * CTIO type 7 from the firmware doesn't provide a way to
1744 * know the initiator's LOOP ID, hence we can't find
1745 * the session and, therefore, the command.
1746 */
1747 return -EAGAIN;
1748 } else {
1749 qpair->req->outstanding_cmds[h] = (srb_t *)mcmd;
1750 }
1751
1752 resp->handle = MAKE_HANDLE(qpair->req->id, h);
1753 resp->entry_type = ABTS_RESP_24XX;
1754 resp->entry_count = 1;
1755 resp->nport_handle = abts->nport_handle;
1756 resp->vp_index = vha->vp_idx;
1757 resp->sof_type = abts->sof_type;
1758 resp->exchange_address = abts->exchange_address;
1759 resp->fcp_hdr_le = abts->fcp_hdr_le;
1760 f_ctl = cpu_to_le32(F_CTL_EXCH_CONTEXT_RESP |
1761 F_CTL_LAST_SEQ | F_CTL_END_SEQ |
1762 F_CTL_SEQ_INITIATIVE);
1763 p = (uint8_t *)&f_ctl;
1764 resp->fcp_hdr_le.f_ctl[0] = *p++;
1765 resp->fcp_hdr_le.f_ctl[1] = *p++;
1766 resp->fcp_hdr_le.f_ctl[2] = *p;
1767
1768 resp->fcp_hdr_le.d_id = abts->fcp_hdr_le.s_id;
1769 resp->fcp_hdr_le.s_id = abts->fcp_hdr_le.d_id;
1770
1771 resp->exchange_addr_to_abort = abts->exchange_addr_to_abort;
1772 if (mcmd->fc_tm_rsp == FCP_TMF_CMPL) {
1773 resp->fcp_hdr_le.r_ctl = R_CTL_BASIC_LINK_SERV | R_CTL_B_ACC;
1774 resp->payload.ba_acct.seq_id_valid = SEQ_ID_INVALID;
1775 resp->payload.ba_acct.low_seq_cnt = 0x0000;
1776 resp->payload.ba_acct.high_seq_cnt = 0xFFFF;
1777 resp->payload.ba_acct.ox_id = abts->fcp_hdr_le.ox_id;
1778 resp->payload.ba_acct.rx_id = abts->fcp_hdr_le.rx_id;
1779 } else {
1780 resp->fcp_hdr_le.r_ctl = R_CTL_BASIC_LINK_SERV | R_CTL_B_RJT;
1781 resp->payload.ba_rjt.reason_code =
1782 BA_RJT_REASON_CODE_UNABLE_TO_PERFORM;
1783 /* Other bytes are zero */
1784 }
1785
1786 vha->vha_tgt.qla_tgt->abts_resp_expected++;
1787
1788 /* Memory Barrier */
1789 wmb();
1790 if (qpair->reqq_start_iocbs)
1791 qpair->reqq_start_iocbs(qpair);
1792 else
1793 qla2x00_start_iocbs(vha, qpair->req);
1794
1795 return rc;
1796 }
1797
1798 /*
1799 * ha->hardware_lock is supposed to be held on entry. Might drop it, then reacquire
1800 */
1801 static void qlt_24xx_send_abts_resp(struct qla_qpair *qpair,
1802 struct abts_recv_from_24xx *abts, uint32_t status,
1803 bool ids_reversed)
1804 {
1805 struct scsi_qla_host *vha = qpair->vha;
1806 struct qla_hw_data *ha = vha->hw;
1807 struct abts_resp_to_24xx *resp;
1808 uint32_t f_ctl;
1809 uint8_t *p;
1810
1811 ql_dbg(ql_dbg_tgt, vha, 0xe006,
1812 "Sending task mgmt ABTS response (ha=%p, atio=%p, status=%x\n",
1813 ha, abts, status);
1814
1815 resp = (struct abts_resp_to_24xx *)qla2x00_alloc_iocbs_ready(qpair,
1816 NULL);
1817 if (!resp) {
1818 ql_dbg(ql_dbg_tgt, vha, 0xe04a,
1819 "qla_target(%d): %s failed: unable to allocate "
1820 "request packet", vha->vp_idx, __func__);
1821 return;
1822 }
1823
1824 resp->entry_type = ABTS_RESP_24XX;
1825 resp->handle = QLA_TGT_SKIP_HANDLE;
1826 resp->entry_count = 1;
1827 resp->nport_handle = abts->nport_handle;
1828 resp->vp_index = vha->vp_idx;
1829 resp->sof_type = abts->sof_type;
1830 resp->exchange_address = abts->exchange_address;
1831 resp->fcp_hdr_le = abts->fcp_hdr_le;
1832 f_ctl = cpu_to_le32(F_CTL_EXCH_CONTEXT_RESP |
1833 F_CTL_LAST_SEQ | F_CTL_END_SEQ |
1834 F_CTL_SEQ_INITIATIVE);
1835 p = (uint8_t *)&f_ctl;
1836 resp->fcp_hdr_le.f_ctl[0] = *p++;
1837 resp->fcp_hdr_le.f_ctl[1] = *p++;
1838 resp->fcp_hdr_le.f_ctl[2] = *p;
1839 if (ids_reversed) {
1840 resp->fcp_hdr_le.d_id = abts->fcp_hdr_le.d_id;
1841 resp->fcp_hdr_le.s_id = abts->fcp_hdr_le.s_id;
1842 } else {
1843 resp->fcp_hdr_le.d_id = abts->fcp_hdr_le.s_id;
1844 resp->fcp_hdr_le.s_id = abts->fcp_hdr_le.d_id;
1845 }
1846 resp->exchange_addr_to_abort = abts->exchange_addr_to_abort;
1847 if (status == FCP_TMF_CMPL) {
1848 resp->fcp_hdr_le.r_ctl = R_CTL_BASIC_LINK_SERV | R_CTL_B_ACC;
1849 resp->payload.ba_acct.seq_id_valid = SEQ_ID_INVALID;
1850 resp->payload.ba_acct.low_seq_cnt = 0x0000;
1851 resp->payload.ba_acct.high_seq_cnt = 0xFFFF;
1852 resp->payload.ba_acct.ox_id = abts->fcp_hdr_le.ox_id;
1853 resp->payload.ba_acct.rx_id = abts->fcp_hdr_le.rx_id;
1854 } else {
1855 resp->fcp_hdr_le.r_ctl = R_CTL_BASIC_LINK_SERV | R_CTL_B_RJT;
1856 resp->payload.ba_rjt.reason_code =
1857 BA_RJT_REASON_CODE_UNABLE_TO_PERFORM;
1858 /* Other bytes are zero */
1859 }
1860
1861 vha->vha_tgt.qla_tgt->abts_resp_expected++;
1862
1863 /* Memory Barrier */
1864 wmb();
1865 if (qpair->reqq_start_iocbs)
1866 qpair->reqq_start_iocbs(qpair);
1867 else
1868 qla2x00_start_iocbs(vha, qpair->req);
1869 }
1870
1871 /*
1872 * ha->hardware_lock supposed to be held on entry. Might drop it, then reacquire
1873 */
1874 static void qlt_24xx_retry_term_exchange(struct scsi_qla_host *vha,
1875 struct qla_qpair *qpair, response_t *pkt, struct qla_tgt_mgmt_cmd *mcmd)
1876 {
1877 struct ctio7_to_24xx *ctio;
1878 u16 tmp;
1879 struct abts_recv_from_24xx *entry;
1880
1881 ctio = (struct ctio7_to_24xx *)qla2x00_alloc_iocbs_ready(qpair, NULL);
1882 if (ctio == NULL) {
1883 ql_dbg(ql_dbg_tgt, vha, 0xe04b,
1884 "qla_target(%d): %s failed: unable to allocate "
1885 "request packet\n", vha->vp_idx, __func__);
1886 return;
1887 }
1888
1889 if (mcmd)
1890 /* abts from remote port */
1891 entry = &mcmd->orig_iocb.abts;
1892 else
1893 /* abts from this driver. */
1894 entry = (struct abts_recv_from_24xx *)pkt;
1895
1896 /*
1897 * On entry we have the firmware's response to the ABTS response we
1898 * generated earlier, so its ID fields are already reversed.
1899 */
1900
1901 ctio->entry_type = CTIO_TYPE7;
1902 ctio->entry_count = 1;
1903 ctio->nport_handle = entry->nport_handle;
1904 ctio->handle = QLA_TGT_SKIP_HANDLE | CTIO_COMPLETION_HANDLE_MARK;
1905 ctio->timeout = cpu_to_le16(QLA_TGT_TIMEOUT);
1906 ctio->vp_index = vha->vp_idx;
1907 ctio->exchange_addr = entry->exchange_addr_to_abort;
1908 tmp = (CTIO7_FLAGS_STATUS_MODE_1 | CTIO7_FLAGS_TERMINATE);
1909
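/*
 * Editorial note (assumption, inferred from the "attr << 9" shifts used
 * throughout this file): bits 9-11 of the CTIO7 flags word carry the FCP
 * task attribute. When no valid attribute was captured with the aborted
 * I/O, the retry path below ORs in attribute value 0x4 on odd retry counts.
 */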
1910 if (mcmd) {
1911 ctio->initiator_id = entry->fcp_hdr_le.s_id;
1912
1913 if (mcmd->flags & QLA24XX_MGMT_ABORT_IO_ATTR_VALID)
1914 tmp |= (mcmd->abort_io_attr << 9);
1915 else if (qpair->retry_term_cnt & 1)
1916 tmp |= (0x4 << 9);
1917 } else {
1918 ctio->initiator_id = entry->fcp_hdr_le.d_id;
1919
1920 if (qpair->retry_term_cnt & 1)
1921 tmp |= (0x4 << 9);
1922 }
1923 ctio->u.status1.flags = cpu_to_le16(tmp);
1924 ctio->u.status1.ox_id = entry->fcp_hdr_le.ox_id;
1925
1926 ql_dbg(ql_dbg_tgt, vha, 0xe007,
1927 "Sending retry TERM EXCH CTIO7 flags %04xh oxid %04xh attr valid %x\n",
1928 le16_to_cpu(ctio->u.status1.flags),
1929 le16_to_cpu(ctio->u.status1.ox_id),
1930 (mcmd && mcmd->flags & QLA24XX_MGMT_ABORT_IO_ATTR_VALID) ? 1 : 0);
1931
1932 /* Memory Barrier */
1933 wmb();
1934 if (qpair->reqq_start_iocbs)
1935 qpair->reqq_start_iocbs(qpair);
1936 else
1937 qla2x00_start_iocbs(vha, qpair->req);
1938
1939 if (mcmd)
1940 qlt_build_abts_resp_iocb(mcmd);
1941 else
1942 qlt_24xx_send_abts_resp(qpair,
1943 (struct abts_recv_from_24xx *)entry, FCP_TMF_CMPL, true);
1944
1945 }
1946
1947 /* drop cmds for the given lun
1948 * XXX only looks for cmds on the port through which the lun reset was received
1949 * XXX does not go through the lists of other ports (which may have cmds
1950 * for the same lun)
1951 */
1952 static void abort_cmds_for_lun(struct scsi_qla_host *vha, u64 lun, be_id_t s_id)
1953 {
1954 struct qla_tgt_sess_op *op;
1955 struct qla_tgt_cmd *cmd;
1956 uint32_t key;
1957 unsigned long flags;
1958
1959 key = sid_to_key(s_id);
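/*
 * Editorial note (assuming the sid_to_key() helper from qla_target.h):
 * the 24-bit FC port ID is packed as (domain << 16) | (area << 8) | al_pa,
 * e.g. s_id 01:02:03 becomes key 0x010203, so the three list walks below
 * can match commands with a single integer compare.
 */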
1960 spin_lock_irqsave(&vha->cmd_list_lock, flags);
1961 list_for_each_entry(op, &vha->qla_sess_op_cmd_list, cmd_list) {
1962 uint32_t op_key;
1963 u64 op_lun;
1964
1965 op_key = sid_to_key(op->atio.u.isp24.fcp_hdr.s_id);
1966 op_lun = scsilun_to_int(
1967 (struct scsi_lun *)&op->atio.u.isp24.fcp_cmnd.lun);
1968 if (op_key == key && op_lun == lun)
1969 op->aborted = true;
1970 }
1971
1972 list_for_each_entry(op, &vha->unknown_atio_list, cmd_list) {
1973 uint32_t op_key;
1974 u64 op_lun;
1975
1976 op_key = sid_to_key(op->atio.u.isp24.fcp_hdr.s_id);
1977 op_lun = scsilun_to_int(
1978 (struct scsi_lun *)&op->atio.u.isp24.fcp_cmnd.lun);
1979 if (op_key == key && op_lun == lun)
1980 op->aborted = true;
1981 }
1982
1983 list_for_each_entry(cmd, &vha->qla_cmd_list, cmd_list) {
1984 uint32_t cmd_key;
1985 u64 cmd_lun;
1986
1987 cmd_key = sid_to_key(cmd->atio.u.isp24.fcp_hdr.s_id);
1988 cmd_lun = scsilun_to_int(
1989 (struct scsi_lun *)&cmd->atio.u.isp24.fcp_cmnd.lun);
1990 if (cmd_key == key && cmd_lun == lun)
1991 cmd->aborted = 1;
1992 }
1993 spin_unlock_irqrestore(&vha->cmd_list_lock, flags);
1994 }
1995
1996 static struct qla_qpair_hint *qlt_find_qphint(struct scsi_qla_host *vha,
1997 uint64_t unpacked_lun)
1998 {
1999 struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
2000 struct qla_qpair_hint *h = NULL;
2001
2002 if (vha->flags.qpairs_available) {
2003 h = btree_lookup64(&tgt->lun_qpair_map, unpacked_lun);
2004 if (!h)
2005 h = &tgt->qphints[0];
2006 } else {
2007 h = &tgt->qphints[0];
2008 }
2009
2010 return h;
2011 }
2012
2013 static void qlt_do_tmr_work(struct work_struct *work)
2014 {
2015 struct qla_tgt_mgmt_cmd *mcmd =
2016 container_of(work, struct qla_tgt_mgmt_cmd, work);
2017 struct qla_hw_data *ha = mcmd->vha->hw;
2018 int rc = EIO;
2019 uint32_t tag;
2020 unsigned long flags;
2021
2022 switch (mcmd->tmr_func) {
2023 case QLA_TGT_ABTS:
2024 tag = mcmd->orig_iocb.abts.exchange_addr_to_abort;
2025 break;
2026 default:
2027 tag = 0;
2028 break;
2029 }
2030
2031 rc = ha->tgt.tgt_ops->handle_tmr(mcmd, mcmd->unpacked_lun,
2032 mcmd->tmr_func, tag);
2033
2034 if (rc != 0) {
2035 spin_lock_irqsave(mcmd->qpair->qp_lock_ptr, flags);
2036 switch (mcmd->tmr_func) {
2037 case QLA_TGT_ABTS:
2038 mcmd->fc_tm_rsp = FCP_TMF_REJECTED;
2039 qlt_build_abts_resp_iocb(mcmd);
2040 break;
2041 case QLA_TGT_LUN_RESET:
2042 case QLA_TGT_CLEAR_TS:
2043 case QLA_TGT_ABORT_TS:
2044 case QLA_TGT_CLEAR_ACA:
2045 case QLA_TGT_TARGET_RESET:
2046 qlt_send_busy(mcmd->qpair, &mcmd->orig_iocb.atio,
2047 qla_sam_status);
2048 break;
2049
2050 case QLA_TGT_ABORT_ALL:
2051 case QLA_TGT_NEXUS_LOSS_SESS:
2052 case QLA_TGT_NEXUS_LOSS:
2053 qlt_send_notify_ack(mcmd->qpair,
2054 &mcmd->orig_iocb.imm_ntfy, 0, 0, 0, 0, 0, 0);
2055 break;
2056 }
2057 spin_unlock_irqrestore(mcmd->qpair->qp_lock_ptr, flags);
2058
2059 ql_dbg(ql_dbg_tgt_mgt, mcmd->vha, 0xf052,
2060 "qla_target(%d): tgt_ops->handle_tmr() failed: %d\n",
2061 mcmd->vha->vp_idx, rc);
2062 mempool_free(mcmd, qla_tgt_mgmt_cmd_mempool);
2063 }
2064 }
2065
2066 /* ha->hardware_lock supposed to be held on entry */
2067 static int __qlt_24xx_handle_abts(struct scsi_qla_host *vha,
2068 struct abts_recv_from_24xx *abts, struct fc_port *sess)
2069 {
2070 struct qla_hw_data *ha = vha->hw;
2071 struct qla_tgt_mgmt_cmd *mcmd;
2072 struct qla_qpair_hint *h = &vha->vha_tgt.qla_tgt->qphints[0];
2073
2074 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf00f,
2075 "qla_target(%d): task abort (tag=%d)\n",
2076 vha->vp_idx, abts->exchange_addr_to_abort);
2077
2078 mcmd = mempool_alloc(qla_tgt_mgmt_cmd_mempool, GFP_ATOMIC);
2079 if (mcmd == NULL) {
2080 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf051,
2081 "qla_target(%d): %s: Allocation of ABORT cmd failed",
2082 vha->vp_idx, __func__);
2083 return -ENOMEM;
2084 }
2085 memset(mcmd, 0, sizeof(*mcmd));
2086 mcmd->cmd_type = TYPE_TGT_TMCMD;
2087 mcmd->sess = sess;
2088 memcpy(&mcmd->orig_iocb.abts, abts, sizeof(mcmd->orig_iocb.abts));
2089 mcmd->reset_count = ha->base_qpair->chip_reset;
2090 mcmd->tmr_func = QLA_TGT_ABTS;
2091 mcmd->qpair = h->qpair;
2092 mcmd->vha = vha;
2093
2094 /*
2095 * LUN is looked up by target-core internally based on the passed
2096 * abts->exchange_addr_to_abort tag.
2097 */
2098 mcmd->se_cmd.cpuid = h->cpuid;
2099
2100 if (ha->tgt.tgt_ops->find_cmd_by_tag) {
2101 struct qla_tgt_cmd *abort_cmd;
2102
2103 abort_cmd = ha->tgt.tgt_ops->find_cmd_by_tag(sess,
2104 abts->exchange_addr_to_abort);
2105 if (abort_cmd && abort_cmd->qpair) {
2106 mcmd->qpair = abort_cmd->qpair;
2107 mcmd->se_cmd.cpuid = abort_cmd->se_cmd.cpuid;
2108 mcmd->abort_io_attr = abort_cmd->atio.u.isp24.attr;
2109 mcmd->flags = QLA24XX_MGMT_ABORT_IO_ATTR_VALID;
2110 }
2111 }
2112
2113 INIT_WORK(&mcmd->work, qlt_do_tmr_work);
2114 queue_work_on(mcmd->se_cmd.cpuid, qla_tgt_wq, &mcmd->work);
2115
2116 return 0;
2117 }
2118
2119 /*
2120 * ha->hardware_lock supposed to be held on entry. Might drop it, then reacquire
2121 */
2122 static void qlt_24xx_handle_abts(struct scsi_qla_host *vha,
2123 struct abts_recv_from_24xx *abts)
2124 {
2125 struct qla_hw_data *ha = vha->hw;
2126 struct fc_port *sess;
2127 uint32_t tag = abts->exchange_addr_to_abort;
2128 be_id_t s_id;
2129 int rc;
2130 unsigned long flags;
2131
2132 if (le32_to_cpu(abts->fcp_hdr_le.parameter) & ABTS_PARAM_ABORT_SEQ) {
2133 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf053,
2134 "qla_target(%d): ABTS: Abort Sequence not "
2135 "supported\n", vha->vp_idx);
2136 qlt_24xx_send_abts_resp(ha->base_qpair, abts, FCP_TMF_REJECTED,
2137 false);
2138 return;
2139 }
2140
2141 if (tag == ATIO_EXCHANGE_ADDRESS_UNKNOWN) {
2142 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf010,
2143 "qla_target(%d): ABTS: Unknown Exchange "
2144 "Address received\n", vha->vp_idx);
2145 qlt_24xx_send_abts_resp(ha->base_qpair, abts, FCP_TMF_REJECTED,
2146 false);
2147 return;
2148 }
2149
2150 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf011,
2151 "qla_target(%d): task abort (s_id=%x:%x:%x, "
2152 "tag=%d, param=%x)\n", vha->vp_idx, abts->fcp_hdr_le.s_id.domain,
2153 abts->fcp_hdr_le.s_id.area, abts->fcp_hdr_le.s_id.al_pa, tag,
2154 le32_to_cpu(abts->fcp_hdr_le.parameter));
2155
2156 s_id = le_id_to_be(abts->fcp_hdr_le.s_id);
2157
2158 spin_lock_irqsave(&ha->tgt.sess_lock, flags);
2159 sess = ha->tgt.tgt_ops->find_sess_by_s_id(vha, s_id);
2160 if (!sess) {
2161 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf012,
2162 "qla_target(%d): task abort for non-existent session\n",
2163 vha->vp_idx);
2164 spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
2165
2166 qlt_24xx_send_abts_resp(ha->base_qpair, abts, FCP_TMF_REJECTED,
2167 false);
2168 return;
2169 }
2170 spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
2171
2172
2173 if (sess->deleted) {
2174 qlt_24xx_send_abts_resp(ha->base_qpair, abts, FCP_TMF_REJECTED,
2175 false);
2176 return;
2177 }
2178
2179 rc = __qlt_24xx_handle_abts(vha, abts, sess);
2180 if (rc != 0) {
2181 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf054,
2182 "qla_target(%d): __qlt_24xx_handle_abts() failed: %d\n",
2183 vha->vp_idx, rc);
2184 qlt_24xx_send_abts_resp(ha->base_qpair, abts, FCP_TMF_REJECTED,
2185 false);
2186 return;
2187 }
2188 }
2189
2190 /*
2191 * ha->hardware_lock supposed to be held on entry. Might drop it, then reacquire
2192 */
2193 static void qlt_24xx_send_task_mgmt_ctio(struct qla_qpair *qpair,
2194 struct qla_tgt_mgmt_cmd *mcmd, uint32_t resp_code)
2195 {
2196 struct scsi_qla_host *ha = mcmd->vha;
2197 struct atio_from_isp *atio = &mcmd->orig_iocb.atio;
2198 struct ctio7_to_24xx *ctio;
2199 uint16_t temp;
2200
2201 ql_dbg(ql_dbg_tgt, ha, 0xe008,
2202 "Sending task mgmt CTIO7 (ha=%p, atio=%p, resp_code=%x\n",
2203 ha, atio, resp_code);
2204
2205
2206 ctio = (struct ctio7_to_24xx *)__qla2x00_alloc_iocbs(qpair, NULL);
2207 if (ctio == NULL) {
2208 ql_dbg(ql_dbg_tgt, ha, 0xe04c,
2209 "qla_target(%d): %s failed: unable to allocate "
2210 "request packet\n", ha->vp_idx, __func__);
2211 return;
2212 }
2213
2214 ctio->entry_type = CTIO_TYPE7;
2215 ctio->entry_count = 1;
2216 ctio->handle = QLA_TGT_SKIP_HANDLE | CTIO_COMPLETION_HANDLE_MARK;
2217 ctio->nport_handle = mcmd->sess->loop_id;
2218 ctio->timeout = cpu_to_le16(QLA_TGT_TIMEOUT);
2219 ctio->vp_index = ha->vp_idx;
2220 ctio->initiator_id = be_id_to_le(atio->u.isp24.fcp_hdr.s_id);
2221 ctio->exchange_addr = atio->u.isp24.exchange_addr;
2222 temp = (atio->u.isp24.attr << 9) |
2223 CTIO7_FLAGS_STATUS_MODE_1 | CTIO7_FLAGS_SEND_STATUS;
2224 ctio->u.status1.flags = cpu_to_le16(temp);
2225 temp = be16_to_cpu(atio->u.isp24.fcp_hdr.ox_id);
2226 ctio->u.status1.ox_id = cpu_to_le16(temp);
2227 ctio->u.status1.scsi_status =
2228 cpu_to_le16(SS_RESPONSE_INFO_LEN_VALID);
2229 ctio->u.status1.response_len = cpu_to_le16(8);
2230 ctio->u.status1.sense_data[0] = resp_code;
2231
2232 /* Memory Barrier */
2233 wmb();
2234 if (qpair->reqq_start_iocbs)
2235 qpair->reqq_start_iocbs(qpair);
2236 else
2237 qla2x00_start_iocbs(ha, qpair->req);
2238 }
2239
2240 void qlt_free_mcmd(struct qla_tgt_mgmt_cmd *mcmd)
2241 {
2242 mempool_free(mcmd, qla_tgt_mgmt_cmd_mempool);
2243 }
2244 EXPORT_SYMBOL(qlt_free_mcmd);
2245
2246 /*
2247 * ha->hardware_lock supposed to be held on entry. Might drop it, then
2248 * reacquire
2249 */
2250 void qlt_send_resp_ctio(struct qla_qpair *qpair, struct qla_tgt_cmd *cmd,
2251 uint8_t scsi_status, uint8_t sense_key, uint8_t asc, uint8_t ascq)
2252 {
2253 struct atio_from_isp *atio = &cmd->atio;
2254 struct ctio7_to_24xx *ctio;
2255 uint16_t temp;
2256 struct scsi_qla_host *vha = cmd->vha;
2257
2258 ql_dbg(ql_dbg_tgt_dif, vha, 0x3066,
2259 "Sending response CTIO7 (vha=%p, atio=%p, scsi_status=%02x, "
2260 "sense_key=%02x, asc=%02x, ascq=%02x",
2261 vha, atio, scsi_status, sense_key, asc, ascq);
2262
2263 ctio = (struct ctio7_to_24xx *)qla2x00_alloc_iocbs(vha, NULL);
2264 if (!ctio) {
2265 ql_dbg(ql_dbg_async, vha, 0x3067,
2266 "qla2x00t(%ld): %s failed: unable to allocate request packet",
2267 vha->host_no, __func__);
2268 goto out;
2269 }
2270
2271 ctio->entry_type = CTIO_TYPE7;
2272 ctio->entry_count = 1;
2273 ctio->handle = QLA_TGT_SKIP_HANDLE;
2274 ctio->nport_handle = cmd->sess->loop_id;
2275 ctio->timeout = cpu_to_le16(QLA_TGT_TIMEOUT);
2276 ctio->vp_index = vha->vp_idx;
2277 ctio->initiator_id = be_id_to_le(atio->u.isp24.fcp_hdr.s_id);
2278 ctio->exchange_addr = atio->u.isp24.exchange_addr;
2279 temp = (atio->u.isp24.attr << 9) |
2280 CTIO7_FLAGS_STATUS_MODE_1 | CTIO7_FLAGS_SEND_STATUS;
2281 ctio->u.status1.flags = cpu_to_le16(temp);
2282 temp = be16_to_cpu(atio->u.isp24.fcp_hdr.ox_id);
2283 ctio->u.status1.ox_id = cpu_to_le16(temp);
2284 ctio->u.status1.scsi_status =
2285 cpu_to_le16(SS_RESPONSE_INFO_LEN_VALID | scsi_status);
2286 ctio->u.status1.response_len = cpu_to_le16(18);
2287 ctio->u.status1.residual = cpu_to_le32(get_datalen_for_atio(atio));
2288
2289 if (ctio->u.status1.residual != 0)
2290 ctio->u.status1.scsi_status |=
2291 cpu_to_le16(SS_RESIDUAL_UNDER);
2292
2293 /* Fixed format sense data. */
2294 ctio->u.status1.sense_data[0] = 0x70;
2295 ctio->u.status1.sense_data[2] = sense_key;
2296 /* Additional sense length */
2297 ctio->u.status1.sense_data[7] = 0xa;
2298 /* ASC and ASCQ */
2299 ctio->u.status1.sense_data[12] = asc;
2300 ctio->u.status1.sense_data[13] = ascq;
2301
2302 /* Memory Barrier */
2303 wmb();
2304
2305 if (qpair->reqq_start_iocbs)
2306 qpair->reqq_start_iocbs(qpair);
2307 else
2308 qla2x00_start_iocbs(vha, qpair->req);
2309
2310 out:
2311 return;
2312 }
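/*
 * Usage sketch (editorial, mirroring qlt_handle_dif_error() later in this
 * file): a guard-tag miscompare is reported back to the initiator as
 *
 *	qlt_send_resp_ctio(qpair, cmd, SAM_STAT_CHECK_CONDITION,
 *	    ABORTED_COMMAND, 0x10, 0x1);
 *
 * i.e. CHECK CONDITION with fixed-format sense ABORTED COMMAND,
 * ASC 0x10 (DIF error), ASCQ 0x1 (guard check failed).
 */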
2313
2314 /* callback from target fabric module code */
2315 void qlt_xmit_tm_rsp(struct qla_tgt_mgmt_cmd *mcmd)
2316 {
2317 struct scsi_qla_host *vha = mcmd->sess->vha;
2318 struct qla_hw_data *ha = vha->hw;
2319 unsigned long flags;
2320 struct qla_qpair *qpair = mcmd->qpair;
2321 bool free_mcmd = true;
2322
2323 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf013,
2324 "TM response mcmd (%p) status %#x state %#x",
2325 mcmd, mcmd->fc_tm_rsp, mcmd->flags);
2326
2327 spin_lock_irqsave(qpair->qp_lock_ptr, flags);
2328
2329 if (!vha->flags.online || mcmd->reset_count != qpair->chip_reset) {
2330 /*
2331 * Either the port is not online or this request was from
2332 * previous life, just abort the processing.
2333 */
2334 ql_dbg(ql_dbg_async, vha, 0xe100,
2335 "RESET-TMR online/active/old-count/new-count = %d/%d/%d/%d.\n",
2336 vha->flags.online, qla2x00_reset_active(vha),
2337 mcmd->reset_count, qpair->chip_reset);
2338 ha->tgt.tgt_ops->free_mcmd(mcmd);
2339 spin_unlock_irqrestore(qpair->qp_lock_ptr, flags);
2340 return;
2341 }
2342
2343 if (mcmd->flags == QLA24XX_MGMT_SEND_NACK) {
2344 switch (mcmd->orig_iocb.imm_ntfy.u.isp24.status_subcode) {
2345 case ELS_LOGO:
2346 case ELS_PRLO:
2347 case ELS_TPRLO:
2348 ql_dbg(ql_dbg_disc, vha, 0x2106,
2349 "TM response logo %8phC status %#x state %#x",
2350 mcmd->sess->port_name, mcmd->fc_tm_rsp,
2351 mcmd->flags);
2352 qlt_schedule_sess_for_deletion(mcmd->sess);
2353 break;
2354 default:
2355 qlt_send_notify_ack(vha->hw->base_qpair,
2356 &mcmd->orig_iocb.imm_ntfy, 0, 0, 0, 0, 0, 0);
2357 break;
2358 }
2359 } else {
2360 if (mcmd->orig_iocb.atio.u.raw.entry_type == ABTS_RECV_24XX) {
2361 qlt_build_abts_resp_iocb(mcmd);
2362 free_mcmd = false;
2363 } else
2364 qlt_24xx_send_task_mgmt_ctio(qpair, mcmd,
2365 mcmd->fc_tm_rsp);
2366 }
2367 /*
2368 * Make the callback for ->free_mcmd() to queue_work() and invoke
2369 * target_put_sess_cmd() to drop cmd_kref to 1. The final
2370 * target_put_sess_cmd() call will be made from TFO->check_stop_free()
2371 * -> tcm_qla2xxx_check_stop_free() to release the TMR associated se_cmd
2372 * descriptor after TFO->queue_tm_rsp() -> tcm_qla2xxx_queue_tm_rsp() ->
2373 * qlt_xmit_tm_rsp() returns here..
2374 */
2375 if (free_mcmd)
2376 ha->tgt.tgt_ops->free_mcmd(mcmd);
2377
2378 spin_unlock_irqrestore(qpair->qp_lock_ptr, flags);
2379 }
2380 EXPORT_SYMBOL(qlt_xmit_tm_rsp);
2381
2382 /* No locks */
2383 static int qlt_pci_map_calc_cnt(struct qla_tgt_prm *prm)
2384 {
2385 struct qla_tgt_cmd *cmd = prm->cmd;
2386
2387 BUG_ON(cmd->sg_cnt == 0);
2388
2389 prm->sg = (struct scatterlist *)cmd->sg;
2390 prm->seg_cnt = dma_map_sg(&cmd->qpair->pdev->dev, cmd->sg,
2391 cmd->sg_cnt, cmd->dma_data_direction);
2392 if (unlikely(prm->seg_cnt == 0))
2393 goto out_err;
2394
2395 prm->cmd->sg_mapped = 1;
2396
2397 if (cmd->se_cmd.prot_op == TARGET_PROT_NORMAL) {
2398 /*
2399 * If there are more sg entries than fit into the CTIO itself,
2400 * we need to allocate continuation entries.
2401 */
2402 if (prm->seg_cnt > QLA_TGT_DATASEGS_PER_CMD_24XX)
2403 prm->req_cnt += DIV_ROUND_UP(prm->seg_cnt -
2404 QLA_TGT_DATASEGS_PER_CMD_24XX,
2405 QLA_TGT_DATASEGS_PER_CONT_24XX);
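/*
 * Worked example (editorial; segment counts assumed from qla_target.h,
 * QLA_TGT_DATASEGS_PER_CMD_24XX == 1, QLA_TGT_DATASEGS_PER_CONT_24XX == 5):
 * a 12-segment transfer keeps 1 dsd in the CTIO7 itself and needs
 * DIV_ROUND_UP(12 - 1, 5) == 3 continuation IOCBs, so req_cnt becomes
 * 1 + 3 == 4.
 */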
2406 } else {
2407 /* DIF */
2408 if ((cmd->se_cmd.prot_op == TARGET_PROT_DIN_INSERT) ||
2409 (cmd->se_cmd.prot_op == TARGET_PROT_DOUT_STRIP)) {
2410 prm->seg_cnt = DIV_ROUND_UP(cmd->bufflen, cmd->blk_sz);
2411 prm->tot_dsds = prm->seg_cnt;
2412 } else
2413 prm->tot_dsds = prm->seg_cnt;
2414
2415 if (cmd->prot_sg_cnt) {
2416 prm->prot_sg = cmd->prot_sg;
2417 prm->prot_seg_cnt = dma_map_sg(&cmd->qpair->pdev->dev,
2418 cmd->prot_sg, cmd->prot_sg_cnt,
2419 cmd->dma_data_direction);
2420 if (unlikely(prm->prot_seg_cnt == 0))
2421 goto out_err;
2422
2423 if ((cmd->se_cmd.prot_op == TARGET_PROT_DIN_INSERT) ||
2424 (cmd->se_cmd.prot_op == TARGET_PROT_DOUT_STRIP)) {
2425 /* DIF bundling is not supported here */
2426 prm->prot_seg_cnt = DIV_ROUND_UP(cmd->bufflen,
2427 cmd->blk_sz);
2428 prm->tot_dsds += prm->prot_seg_cnt;
2429 } else
2430 prm->tot_dsds += prm->prot_seg_cnt;
2431 }
2432 }
2433
2434 return 0;
2435
2436 out_err:
2437 ql_dbg_qp(ql_dbg_tgt, prm->cmd->qpair, 0xe04d,
2438 "qla_target(%d): PCI mapping failed: sg_cnt=%d",
2439 0, prm->cmd->sg_cnt);
2440 return -1;
2441 }
2442
2443 static void qlt_unmap_sg(struct scsi_qla_host *vha, struct qla_tgt_cmd *cmd)
2444 {
2445 struct qla_hw_data *ha;
2446 struct qla_qpair *qpair;
2447
2448 if (!cmd->sg_mapped)
2449 return;
2450
2451 qpair = cmd->qpair;
2452
2453 dma_unmap_sg(&qpair->pdev->dev, cmd->sg, cmd->sg_cnt,
2454 cmd->dma_data_direction);
2455 cmd->sg_mapped = 0;
2456
2457 if (cmd->prot_sg_cnt)
2458 dma_unmap_sg(&qpair->pdev->dev, cmd->prot_sg, cmd->prot_sg_cnt,
2459 cmd->dma_data_direction);
2460
2461 if (!cmd->ctx)
2462 return;
2463 ha = vha->hw;
2464 if (cmd->ctx_dsd_alloced)
2465 qla2x00_clean_dsd_pool(ha, cmd->ctx);
2466
2467 dma_pool_free(ha->dl_dma_pool, cmd->ctx, cmd->ctx->crc_ctx_dma);
2468 }
2469
2470 static int qlt_check_reserve_free_req(struct qla_qpair *qpair,
2471 uint32_t req_cnt)
2472 {
2473 uint32_t cnt;
2474 struct req_que *req = qpair->req;
2475
2476 if (req->cnt < (req_cnt + 2)) {
2477 cnt = (uint16_t)(qpair->use_shadow_reg ? *req->out_ptr :
2478 RD_REG_DWORD_RELAXED(req->req_q_out));
2479
2480 if (req->ring_index < cnt)
2481 req->cnt = cnt - req->ring_index;
2482 else
2483 req->cnt = req->length - (req->ring_index - cnt);
2484
2485 if (unlikely(req->cnt < (req_cnt + 2)))
2486 return -EAGAIN;
2487 }
2488
2489 req->cnt -= req_cnt;
2490
2491 return 0;
2492 }
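/*
 * Illustrative ring arithmetic for the function above (numbers are
 * hypothetical): with req->length == 2048, ring_index == 2000 and a
 * hardware out-pointer cnt == 100, the free count is
 * length - (ring_index - cnt) == 2048 - 1900 == 148; with cnt == 2040
 * (so ring_index < cnt) it is simply 2040 - 2000 == 40. The "+ 2" keeps
 * a small guard band so the producer never fully catches the consumer.
 */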
2493
2494 /*
2495 * ha->hardware_lock supposed to be held on entry. Might drop it, then reacquire
2496 */
2497 static inline void *qlt_get_req_pkt(struct req_que *req)
2498 {
2499 /* Adjust ring index. */
2500 req->ring_index++;
2501 if (req->ring_index == req->length) {
2502 req->ring_index = 0;
2503 req->ring_ptr = req->ring;
2504 } else {
2505 req->ring_ptr++;
2506 }
2507 return (cont_entry_t *)req->ring_ptr;
2508 }
2509
2510 /* ha->hardware_lock supposed to be held on entry */
2511 static inline uint32_t qlt_make_handle(struct qla_qpair *qpair)
2512 {
2513 uint32_t h;
2514 int index;
2515 uint8_t found = 0;
2516 struct req_que *req = qpair->req;
2517
2518 h = req->current_outstanding_cmd;
2519
2520 for (index = 1; index < req->num_outstanding_cmds; index++) {
2521 h++;
2522 if (h == req->num_outstanding_cmds)
2523 h = 1;
2524
2525 if (h == QLA_TGT_SKIP_HANDLE)
2526 continue;
2527
2528 if (!req->outstanding_cmds[h]) {
2529 found = 1;
2530 break;
2531 }
2532 }
2533
2534 if (found) {
2535 req->current_outstanding_cmd = h;
2536 } else {
2537 ql_dbg(ql_dbg_io, qpair->vha, 0x305b,
2538 "qla_target(%d): Ran out of empty cmd slots\n",
2539 qpair->vha->vp_idx);
2540 h = QLA_TGT_NULL_HANDLE;
2541 }
2542
2543 return h;
2544 }
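/*
 * Editorial note: the scan above starts at current_outstanding_cmd + 1 and
 * wraps back to 1 (not 0) at num_outstanding_cmds, and QLA_TGT_SKIP_HANDLE
 * is skipped explicitly, so both reserved marker values are never handed
 * out as real handles.
 */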
2545
2546 /* ha->hardware_lock supposed to be held on entry */
2547 static int qlt_24xx_build_ctio_pkt(struct qla_qpair *qpair,
2548 struct qla_tgt_prm *prm)
2549 {
2550 uint32_t h;
2551 struct ctio7_to_24xx *pkt;
2552 struct atio_from_isp *atio = &prm->cmd->atio;
2553 uint16_t temp;
2554
2555 pkt = (struct ctio7_to_24xx *)qpair->req->ring_ptr;
2556 prm->pkt = pkt;
2557 memset(pkt, 0, sizeof(*pkt));
2558
2559 pkt->entry_type = CTIO_TYPE7;
2560 pkt->entry_count = (uint8_t)prm->req_cnt;
2561 pkt->vp_index = prm->cmd->vp_idx;
2562
2563 h = qlt_make_handle(qpair);
2564 if (unlikely(h == QLA_TGT_NULL_HANDLE)) {
2565 /*
2566 * CTIO type 7 from the firmware doesn't provide a way to
2567 * know the initiator's LOOP ID, hence we can't find
2568 * the session and, therefore, the command.
2569 */
2570 return -EAGAIN;
2571 } else
2572 qpair->req->outstanding_cmds[h] = (srb_t *)prm->cmd;
2573
2574 pkt->handle = MAKE_HANDLE(qpair->req->id, h);
2575 pkt->handle |= CTIO_COMPLETION_HANDLE_MARK;
2576 pkt->nport_handle = cpu_to_le16(prm->cmd->loop_id);
2577 pkt->timeout = cpu_to_le16(QLA_TGT_TIMEOUT);
2578 pkt->initiator_id = be_id_to_le(atio->u.isp24.fcp_hdr.s_id);
2579 pkt->exchange_addr = atio->u.isp24.exchange_addr;
2580 temp = atio->u.isp24.attr << 9;
2581 pkt->u.status0.flags |= cpu_to_le16(temp);
2582 temp = be16_to_cpu(atio->u.isp24.fcp_hdr.ox_id);
2583 pkt->u.status0.ox_id = cpu_to_le16(temp);
2584 pkt->u.status0.relative_offset = cpu_to_le32(prm->cmd->offset);
2585
2586 return 0;
2587 }
2588
2589 /*
2590 * ha->hardware_lock supposed to be held on entry. We have already made sure
2591 * that there is sufficient amount of request entries to not drop it.
2592 */
2593 static void qlt_load_cont_data_segments(struct qla_tgt_prm *prm)
2594 {
2595 int cnt;
2596 struct dsd64 *cur_dsd;
2597
2598 /* Build continuation packets */
2599 while (prm->seg_cnt > 0) {
2600 cont_a64_entry_t *cont_pkt64 =
2601 (cont_a64_entry_t *)qlt_get_req_pkt(
2602 prm->cmd->qpair->req);
2603
2604 /*
2605 * Make sure that none of cont_pkt64's
2606 * 64-bit-specific fields are used for 32-bit
2607 * addressing. Cast to (cont_entry_t *) for
2608 * that.
2609 */
2610
2611 memset(cont_pkt64, 0, sizeof(*cont_pkt64));
2612
2613 cont_pkt64->entry_count = 1;
2614 cont_pkt64->sys_define = 0;
2615
2616 cont_pkt64->entry_type = CONTINUE_A64_TYPE;
2617 cur_dsd = cont_pkt64->dsd;
2618
2619 /* Load continuation entry data segments */
2620 for (cnt = 0;
2621 cnt < QLA_TGT_DATASEGS_PER_CONT_24XX && prm->seg_cnt;
2622 cnt++, prm->seg_cnt--) {
2623 append_dsd64(&cur_dsd, prm->sg);
2624 prm->sg = sg_next(prm->sg);
2625 }
2626 }
2627 }
2628
2629 /*
2630 * ha->hardware_lock supposed to be held on entry. We have already made sure
2631 * that there is sufficient amount of request entries to not drop it.
2632 */
2633 static void qlt_load_data_segments(struct qla_tgt_prm *prm)
2634 {
2635 int cnt;
2636 struct dsd64 *cur_dsd;
2637 struct ctio7_to_24xx *pkt24 = (struct ctio7_to_24xx *)prm->pkt;
2638
2639 pkt24->u.status0.transfer_length = cpu_to_le32(prm->cmd->bufflen);
2640
2641 /* Setup packet address segment pointer */
2642 cur_dsd = &pkt24->u.status0.dsd;
2643
2644 /* Set total data segment count */
2645 if (prm->seg_cnt)
2646 pkt24->dseg_count = cpu_to_le16(prm->seg_cnt);
2647
2648 if (prm->seg_cnt == 0) {
2649 /* No data transfer */
2650 cur_dsd->address = 0;
2651 cur_dsd->length = 0;
2652 return;
2653 }
2654
2655 /* If scatter gather */
2656
2657 /* Load command entry data segments */
2658 for (cnt = 0;
2659 (cnt < QLA_TGT_DATASEGS_PER_CMD_24XX) && prm->seg_cnt;
2660 cnt++, prm->seg_cnt--) {
2661 append_dsd64(&cur_dsd, prm->sg);
2662 prm->sg = sg_next(prm->sg);
2663 }
2664
2665 qlt_load_cont_data_segments(prm);
2666 }
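/*
 * Layout sketch for the two functions above (editorial; segment counts
 * assumed from qla_target.h): with 12 mapped segments, the CTIO7 carries
 * the first QLA_TGT_DATASEGS_PER_CMD_24XX dsd(s) and the remaining 11 are
 * spread over continuation IOCBs of QLA_TGT_DATASEGS_PER_CONT_24XX each,
 * i.e. 5 + 5 + 1 across three continuation packets.
 */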
2667
2668 static inline int qlt_has_data(struct qla_tgt_cmd *cmd)
2669 {
2670 return cmd->bufflen > 0;
2671 }
2672
2673 static void qlt_print_dif_err(struct qla_tgt_prm *prm)
2674 {
2675 struct qla_tgt_cmd *cmd;
2676 struct scsi_qla_host *vha;
2677
2678 /* asc 0x10=dif error */
2679 if (prm->sense_buffer && (prm->sense_buffer[12] == 0x10)) {
2680 cmd = prm->cmd;
2681 vha = cmd->vha;
2682 /* ASCQ */
2683 switch (prm->sense_buffer[13]) {
2684 case 1:
2685 ql_dbg(ql_dbg_tgt_dif, vha, 0xe00b,
2686 "BE detected Guard TAG ERR: lba[0x%llx|%lld] len[0x%x] "
2687 "se_cmd=%p tag[%x]",
2688 cmd->lba, cmd->lba, cmd->num_blks, &cmd->se_cmd,
2689 cmd->atio.u.isp24.exchange_addr);
2690 break;
2691 case 2:
2692 ql_dbg(ql_dbg_tgt_dif, vha, 0xe00c,
2693 "BE detected APP TAG ERR: lba[0x%llx|%lld] len[0x%x] "
2694 "se_cmd=%p tag[%x]",
2695 cmd->lba, cmd->lba, cmd->num_blks, &cmd->se_cmd,
2696 cmd->atio.u.isp24.exchange_addr);
2697 break;
2698 case 3:
2699 ql_dbg(ql_dbg_tgt_dif, vha, 0xe00f,
2700 "BE detected REF TAG ERR: lba[0x%llx|%lld] len[0x%x] "
2701 "se_cmd=%p tag[%x]",
2702 cmd->lba, cmd->lba, cmd->num_blks, &cmd->se_cmd,
2703 cmd->atio.u.isp24.exchange_addr);
2704 break;
2705 default:
2706 ql_dbg(ql_dbg_tgt_dif, vha, 0xe010,
2707 "BE detected Dif ERR: lba[%llx|%lld] len[%x] "
2708 "se_cmd=%p tag[%x]",
2709 cmd->lba, cmd->lba, cmd->num_blks, &cmd->se_cmd,
2710 cmd->atio.u.isp24.exchange_addr);
2711 break;
2712 }
2713 ql_dump_buffer(ql_dbg_tgt_dif, vha, 0xe011, cmd->cdb, 16);
2714 }
2715 }
2716
2717 /*
2718 * Called without ha->hardware_lock held
2719 */
2720 static int qlt_pre_xmit_response(struct qla_tgt_cmd *cmd,
2721 struct qla_tgt_prm *prm, int xmit_type, uint8_t scsi_status,
2722 uint32_t *full_req_cnt)
2723 {
2724 struct se_cmd *se_cmd = &cmd->se_cmd;
2725 struct qla_qpair *qpair = cmd->qpair;
2726
2727 prm->cmd = cmd;
2728 prm->tgt = cmd->tgt;
2729 prm->pkt = NULL;
2730 prm->rq_result = scsi_status;
2731 prm->sense_buffer = &cmd->sense_buffer[0];
2732 prm->sense_buffer_len = TRANSPORT_SENSE_BUFFER;
2733 prm->sg = NULL;
2734 prm->seg_cnt = -1;
2735 prm->req_cnt = 1;
2736 prm->residual = 0;
2737 prm->add_status_pkt = 0;
2738 prm->prot_sg = NULL;
2739 prm->prot_seg_cnt = 0;
2740 prm->tot_dsds = 0;
2741
2742 if ((xmit_type & QLA_TGT_XMIT_DATA) && qlt_has_data(cmd)) {
2743 if (qlt_pci_map_calc_cnt(prm) != 0)
2744 return -EAGAIN;
2745 }
2746
2747 *full_req_cnt = prm->req_cnt;
2748
2749 if (se_cmd->se_cmd_flags & SCF_UNDERFLOW_BIT) {
2750 prm->residual = se_cmd->residual_count;
2751 ql_dbg_qp(ql_dbg_io + ql_dbg_verbose, qpair, 0x305c,
2752 "Residual underflow: %d (tag %lld, op %x, bufflen %d, rq_result %x)\n",
2753 prm->residual, se_cmd->tag,
2754 se_cmd->t_task_cdb ? se_cmd->t_task_cdb[0] : 0,
2755 cmd->bufflen, prm->rq_result);
2756 prm->rq_result |= SS_RESIDUAL_UNDER;
2757 } else if (se_cmd->se_cmd_flags & SCF_OVERFLOW_BIT) {
2758 prm->residual = se_cmd->residual_count;
2759 ql_dbg_qp(ql_dbg_io, qpair, 0x305d,
2760 "Residual overflow: %d (tag %lld, op %x, bufflen %d, rq_result %x)\n",
2761 prm->residual, se_cmd->tag, se_cmd->t_task_cdb ?
2762 se_cmd->t_task_cdb[0] : 0, cmd->bufflen, prm->rq_result);
2763 prm->rq_result |= SS_RESIDUAL_OVER;
2764 }
2765
2766 if (xmit_type & QLA_TGT_XMIT_STATUS) {
2767 /*
2768 * If QLA_TGT_XMIT_DATA is not set, add_status_pkt will be
2769 * ignored in *xmit_response() below
2770 */
2771 if (qlt_has_data(cmd)) {
2772 if (QLA_TGT_SENSE_VALID(prm->sense_buffer) ||
2773 (IS_FWI2_CAPABLE(cmd->vha->hw) &&
2774 (prm->rq_result != 0))) {
2775 prm->add_status_pkt = 1;
2776 (*full_req_cnt)++;
2777 }
2778 }
2779 }
2780
2781 return 0;
2782 }
2783
2784 static inline int qlt_need_explicit_conf(struct qla_tgt_cmd *cmd,
2785 int sending_sense)
2786 {
2787 if (cmd->qpair->enable_class_2)
2788 return 0;
2789
2790 if (sending_sense)
2791 return cmd->conf_compl_supported;
2792 else
2793 return cmd->qpair->enable_explicit_conf &&
2794 cmd->conf_compl_supported;
2795 }
2796
2797 static void qlt_24xx_init_ctio_to_isp(struct ctio7_to_24xx *ctio,
2798 struct qla_tgt_prm *prm)
2799 {
2800 prm->sense_buffer_len = min_t(uint32_t, prm->sense_buffer_len,
2801 (uint32_t)sizeof(ctio->u.status1.sense_data));
2802 ctio->u.status0.flags |= cpu_to_le16(CTIO7_FLAGS_SEND_STATUS);
2803 if (qlt_need_explicit_conf(prm->cmd, 0)) {
2804 ctio->u.status0.flags |= cpu_to_le16(
2805 CTIO7_FLAGS_EXPLICIT_CONFORM |
2806 CTIO7_FLAGS_CONFORM_REQ);
2807 }
2808 ctio->u.status0.residual = cpu_to_le32(prm->residual);
2809 ctio->u.status0.scsi_status = cpu_to_le16(prm->rq_result);
2810 if (QLA_TGT_SENSE_VALID(prm->sense_buffer)) {
2811 int i;
2812
2813 if (qlt_need_explicit_conf(prm->cmd, 1)) {
2814 if ((prm->rq_result & SS_SCSI_STATUS_BYTE) != 0) {
2815 ql_dbg_qp(ql_dbg_tgt, prm->cmd->qpair, 0xe017,
2816 "Skipping EXPLICIT_CONFORM and "
2817 "CTIO7_FLAGS_CONFORM_REQ for FCP READ w/ "
2818 "non GOOD status\n");
2819 goto skip_explict_conf;
2820 }
2821 ctio->u.status1.flags |= cpu_to_le16(
2822 CTIO7_FLAGS_EXPLICIT_CONFORM |
2823 CTIO7_FLAGS_CONFORM_REQ);
2824 }
2825 skip_explict_conf:
2826 ctio->u.status1.flags &=
2827 ~cpu_to_le16(CTIO7_FLAGS_STATUS_MODE_0);
2828 ctio->u.status1.flags |=
2829 cpu_to_le16(CTIO7_FLAGS_STATUS_MODE_1);
2830 ctio->u.status1.scsi_status |=
2831 cpu_to_le16(SS_SENSE_LEN_VALID);
2832 ctio->u.status1.sense_length =
2833 cpu_to_le16(prm->sense_buffer_len);
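/*
 * Editorial note: the firmware appears to expect the sense buffer as
 * big-endian 32-bit words, so the loop below byte-swaps it one word at a
 * time (sense_buffer_len / 4 words; any trailing odd bytes are dropped).
 */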
2834 for (i = 0; i < prm->sense_buffer_len/4; i++)
2835 ((uint32_t *)ctio->u.status1.sense_data)[i] =
2836 cpu_to_be32(((uint32_t *)prm->sense_buffer)[i]);
2837
2838 qlt_print_dif_err(prm);
2839
2840 } else {
2841 ctio->u.status1.flags &=
2842 ~cpu_to_le16(CTIO7_FLAGS_STATUS_MODE_0);
2843 ctio->u.status1.flags |=
2844 cpu_to_le16(CTIO7_FLAGS_STATUS_MODE_1);
2845 ctio->u.status1.sense_length = 0;
2846 memset(ctio->u.status1.sense_data, 0,
2847 sizeof(ctio->u.status1.sense_data));
2848 }
2849
2850 /* Sense with len > 24, is it possible ??? */
2851 }
2852
2853 static inline int
2854 qlt_hba_err_chk_enabled(struct se_cmd *se_cmd)
2855 {
2856 switch (se_cmd->prot_op) {
2857 case TARGET_PROT_DOUT_INSERT:
2858 case TARGET_PROT_DIN_STRIP:
2859 if (ql2xenablehba_err_chk >= 1)
2860 return 1;
2861 break;
2862 case TARGET_PROT_DOUT_PASS:
2863 case TARGET_PROT_DIN_PASS:
2864 if (ql2xenablehba_err_chk >= 2)
2865 return 1;
2866 break;
2867 case TARGET_PROT_DIN_INSERT:
2868 case TARGET_PROT_DOUT_STRIP:
2869 return 1;
2870 default:
2871 break;
2872 }
2873 return 0;
2874 }
2875
2876 static inline int
2877 qla_tgt_ref_mask_check(struct se_cmd *se_cmd)
2878 {
2879 switch (se_cmd->prot_op) {
2880 case TARGET_PROT_DIN_INSERT:
2881 case TARGET_PROT_DOUT_INSERT:
2882 case TARGET_PROT_DIN_STRIP:
2883 case TARGET_PROT_DOUT_STRIP:
2884 case TARGET_PROT_DIN_PASS:
2885 case TARGET_PROT_DOUT_PASS:
2886 return 1;
2887 default:
2888 return 0;
2889 }
2890 return 0;
2891 }
2892
2893 /*
2894 * qla_tgt_set_dif_tags - Extract Ref and App tags from SCSI command
2895 */
2896 static void
2897 qla_tgt_set_dif_tags(struct qla_tgt_cmd *cmd, struct crc_context *ctx,
2898 uint16_t *pfw_prot_opts)
2899 {
2900 struct se_cmd *se_cmd = &cmd->se_cmd;
2901 uint32_t lba = 0xffffffff & se_cmd->t_task_lba;
2902 scsi_qla_host_t *vha = cmd->tgt->vha;
2903 struct qla_hw_data *ha = vha->hw;
2904 uint32_t t32 = 0;
2905
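/*
 * Editorial note: only the low 32 bits of the LBA seed the initial
 * reference tag for the Type 1/2 cases below, e.g. t_task_lba
 * 0x100000004ULL yields ref_tag 0x00000004.
 */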
2906 /*
2907 * Wait until the Mode Sense/Select cmd, mode page 0Ah, subpage 2,
2908 * has been implemented by TCM before the app tag is available.
2909 * Look for modesense_handlers[]
2910 */
2911 ctx->app_tag = 0;
2912 ctx->app_tag_mask[0] = 0x0;
2913 ctx->app_tag_mask[1] = 0x0;
2914
2915 if (IS_PI_UNINIT_CAPABLE(ha)) {
2916 if ((se_cmd->prot_type == TARGET_DIF_TYPE1_PROT) ||
2917 (se_cmd->prot_type == TARGET_DIF_TYPE2_PROT))
2918 *pfw_prot_opts |= PO_DIS_VALD_APP_ESC;
2919 else if (se_cmd->prot_type == TARGET_DIF_TYPE3_PROT)
2920 *pfw_prot_opts |= PO_DIS_VALD_APP_REF_ESC;
2921 }
2922
2923 t32 = ha->tgt.tgt_ops->get_dif_tags(cmd, pfw_prot_opts);
2924
2925 switch (se_cmd->prot_type) {
2926 case TARGET_DIF_TYPE0_PROT:
2927 /*
2928 * No check for ql2xenablehba_err_chk, as it
2929 * would be an I/O error if hba tag generation
2930 * is not done.
2931 */
2932 ctx->ref_tag = cpu_to_le32(lba);
2933 /* enable ALL bytes of the ref tag */
2934 ctx->ref_tag_mask[0] = 0xff;
2935 ctx->ref_tag_mask[1] = 0xff;
2936 ctx->ref_tag_mask[2] = 0xff;
2937 ctx->ref_tag_mask[3] = 0xff;
2938 break;
2939 case TARGET_DIF_TYPE1_PROT:
2940 /*
2941 * For TYPE 1 protection: 16 bit GUARD tag, 32 bit
2942 * REF tag, and 16 bit app tag.
2943 */
2944 ctx->ref_tag = cpu_to_le32(lba);
2945 if (!qla_tgt_ref_mask_check(se_cmd) ||
2946 !(ha->tgt.tgt_ops->chk_dif_tags(t32))) {
2947 *pfw_prot_opts |= PO_DIS_REF_TAG_VALD;
2948 break;
2949 }
2950 /* enable ALL bytes of the ref tag */
2951 ctx->ref_tag_mask[0] = 0xff;
2952 ctx->ref_tag_mask[1] = 0xff;
2953 ctx->ref_tag_mask[2] = 0xff;
2954 ctx->ref_tag_mask[3] = 0xff;
2955 break;
2956 case TARGET_DIF_TYPE2_PROT:
2957 /*
2958 * For TYPE 2 protection: 16 bit GUARD + 32 bit REF
2959 * tag has to match LBA in CDB + N
2960 */
2961 ctx->ref_tag = cpu_to_le32(lba);
2962 if (!qla_tgt_ref_mask_check(se_cmd) ||
2963 !(ha->tgt.tgt_ops->chk_dif_tags(t32))) {
2964 *pfw_prot_opts |= PO_DIS_REF_TAG_VALD;
2965 break;
2966 }
2967 /* enable ALL bytes of the ref tag */
2968 ctx->ref_tag_mask[0] = 0xff;
2969 ctx->ref_tag_mask[1] = 0xff;
2970 ctx->ref_tag_mask[2] = 0xff;
2971 ctx->ref_tag_mask[3] = 0xff;
2972 break;
2973 case TARGET_DIF_TYPE3_PROT:
2974 /* For TYPE 3 protection: 16 bit GUARD only */
2975 *pfw_prot_opts |= PO_DIS_REF_TAG_VALD;
2976 ctx->ref_tag_mask[0] = ctx->ref_tag_mask[1] =
2977 ctx->ref_tag_mask[2] = ctx->ref_tag_mask[3] = 0x00;
2978 break;
2979 }
2980 }
2981
2982 static inline int
2983 qlt_build_ctio_crc2_pkt(struct qla_qpair *qpair, struct qla_tgt_prm *prm)
2984 {
2985 struct dsd64 *cur_dsd;
2986 uint32_t transfer_length = 0;
2987 uint32_t data_bytes;
2988 uint32_t dif_bytes;
2989 uint8_t bundling = 1;
2990 struct crc_context *crc_ctx_pkt = NULL;
2991 struct qla_hw_data *ha;
2992 struct ctio_crc2_to_fw *pkt;
2993 dma_addr_t crc_ctx_dma;
2994 uint16_t fw_prot_opts = 0;
2995 struct qla_tgt_cmd *cmd = prm->cmd;
2996 struct se_cmd *se_cmd = &cmd->se_cmd;
2997 uint32_t h;
2998 struct atio_from_isp *atio = &prm->cmd->atio;
2999 struct qla_tc_param tc;
3000 uint16_t t16;
3001 scsi_qla_host_t *vha = cmd->vha;
3002
3003 ha = vha->hw;
3004
3005 pkt = (struct ctio_crc2_to_fw *)qpair->req->ring_ptr;
3006 prm->pkt = pkt;
3007 memset(pkt, 0, sizeof(*pkt));
3008
3009 ql_dbg_qp(ql_dbg_tgt, cmd->qpair, 0xe071,
3010 "qla_target(%d):%s: se_cmd[%p] CRC2 prot_op[0x%x] cmd prot sg:cnt[%p:%x] lba[%llu]\n",
3011 cmd->vp_idx, __func__, se_cmd, se_cmd->prot_op,
3012 prm->prot_sg, prm->prot_seg_cnt, se_cmd->t_task_lba);
3013
3014 if ((se_cmd->prot_op == TARGET_PROT_DIN_INSERT) ||
3015 (se_cmd->prot_op == TARGET_PROT_DOUT_STRIP))
3016 bundling = 0;
3017
3018 /* Compute dif len and adjust data len to include protection */
3019 data_bytes = cmd->bufflen;
3020 dif_bytes = (data_bytes / cmd->blk_sz) * 8;
3021
3022 switch (se_cmd->prot_op) {
3023 case TARGET_PROT_DIN_INSERT:
3024 case TARGET_PROT_DOUT_STRIP:
3025 transfer_length = data_bytes;
3026 if (cmd->prot_sg_cnt)
3027 data_bytes += dif_bytes;
3028 break;
3029 case TARGET_PROT_DIN_STRIP:
3030 case TARGET_PROT_DOUT_INSERT:
3031 case TARGET_PROT_DIN_PASS:
3032 case TARGET_PROT_DOUT_PASS:
3033 transfer_length = data_bytes + dif_bytes;
3034 break;
3035 default:
3036 BUG();
3037 break;
3038 }
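/*
 * Worked example (editorial): a 4 KiB I/O with blk_sz 512 carries
 * dif_bytes = (4096 / 512) * 8 == 64 bytes of protection information.
 * In the DIN_STRIP / DOUT_INSERT and both PASS cases above the PI travels
 * on the wire, so transfer_length == 4096 + 64 == 4160; in the
 * DIN_INSERT / DOUT_STRIP cases the PI exists only on the memory side and
 * the FC transfer_length stays 4096.
 */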
3039
3040 if (!qlt_hba_err_chk_enabled(se_cmd))
3041 fw_prot_opts |= 0x10; /* Disable Guard tag checking */
3042 /* HBA error checking enabled */
3043 else if (IS_PI_UNINIT_CAPABLE(ha)) {
3044 if ((se_cmd->prot_type == TARGET_DIF_TYPE1_PROT) ||
3045 (se_cmd->prot_type == TARGET_DIF_TYPE2_PROT))
3046 fw_prot_opts |= PO_DIS_VALD_APP_ESC;
3047 else if (se_cmd->prot_type == TARGET_DIF_TYPE3_PROT)
3048 fw_prot_opts |= PO_DIS_VALD_APP_REF_ESC;
3049 }
3050
3051 switch (se_cmd->prot_op) {
3052 case TARGET_PROT_DIN_INSERT:
3053 case TARGET_PROT_DOUT_INSERT:
3054 fw_prot_opts |= PO_MODE_DIF_INSERT;
3055 break;
3056 case TARGET_PROT_DIN_STRIP:
3057 case TARGET_PROT_DOUT_STRIP:
3058 fw_prot_opts |= PO_MODE_DIF_REMOVE;
3059 break;
3060 case TARGET_PROT_DIN_PASS:
3061 case TARGET_PROT_DOUT_PASS:
3062 fw_prot_opts |= PO_MODE_DIF_PASS;
3063 /* FUTURE: does tcm require T10CRC<->IPCKSUM conversion? */
3064 break;
3065 default:/* Normal Request */
3066 fw_prot_opts |= PO_MODE_DIF_PASS;
3067 break;
3068 }
3069
3070 /* ---- PKT ---- */
3071 /* Update entry type to indicate Command Type CRC_2 IOCB */
3072 pkt->entry_type = CTIO_CRC2;
3073 pkt->entry_count = 1;
3074 pkt->vp_index = cmd->vp_idx;
3075
3076 h = qlt_make_handle(qpair);
3077 if (unlikely(h == QLA_TGT_NULL_HANDLE)) {
3078 /*
3079 * CTIO type 7 from the firmware doesn't provide a way to
3080 * know the initiator's LOOP ID, hence we can't find
3081 * the session and, therefore, the command.
3082 */
3083 return -EAGAIN;
3084 } else
3085 qpair->req->outstanding_cmds[h] = (srb_t *)prm->cmd;
3086
3087 pkt->handle = MAKE_HANDLE(qpair->req->id, h);
3088 pkt->handle |= CTIO_COMPLETION_HANDLE_MARK;
3089 pkt->nport_handle = cpu_to_le16(prm->cmd->loop_id);
3090 pkt->timeout = cpu_to_le16(QLA_TGT_TIMEOUT);
3091 pkt->initiator_id = be_id_to_le(atio->u.isp24.fcp_hdr.s_id);
3092 pkt->exchange_addr = atio->u.isp24.exchange_addr;
3093
3094 /* silence compile warning */
3095 t16 = be16_to_cpu(atio->u.isp24.fcp_hdr.ox_id);
3096 pkt->ox_id = cpu_to_le16(t16);
3097
3098 t16 = (atio->u.isp24.attr << 9);
3099 pkt->flags |= cpu_to_le16(t16);
3100 pkt->relative_offset = cpu_to_le32(prm->cmd->offset);
3101
3102 /* Set transfer direction */
3103 if (cmd->dma_data_direction == DMA_TO_DEVICE)
3104 pkt->flags = cpu_to_le16(CTIO7_FLAGS_DATA_IN);
3105 else if (cmd->dma_data_direction == DMA_FROM_DEVICE)
3106 pkt->flags = cpu_to_le16(CTIO7_FLAGS_DATA_OUT);
3107
3108 pkt->dseg_count = prm->tot_dsds;
3109 /* Fibre channel byte count */
3110 pkt->transfer_length = cpu_to_le32(transfer_length);
3111
3112 /* ----- CRC context -------- */
3113
3114 /* Allocate CRC context from global pool */
3115 crc_ctx_pkt = cmd->ctx =
3116 dma_pool_zalloc(ha->dl_dma_pool, GFP_ATOMIC, &crc_ctx_dma);
3117
3118 if (!crc_ctx_pkt)
3119 goto crc_queuing_error;
3120
3121 crc_ctx_pkt->crc_ctx_dma = crc_ctx_dma;
3122 INIT_LIST_HEAD(&crc_ctx_pkt->dsd_list);
3123
3124 /* Set handle */
3125 crc_ctx_pkt->handle = pkt->handle;
3126
3127 qla_tgt_set_dif_tags(cmd, crc_ctx_pkt, &fw_prot_opts);
3128
3129 put_unaligned_le64(crc_ctx_dma, &pkt->crc_context_address);
3130 pkt->crc_context_len = CRC_CONTEXT_LEN_FW;
3131
3132 if (!bundling) {
3133 cur_dsd = &crc_ctx_pkt->u.nobundling.data_dsd[0];
3134 } else {
3135 /*
3136 * Configure bundling if we need to fetch interleaving
3137 * protection PCI accesses
3138 */
3139 fw_prot_opts |= PO_ENABLE_DIF_BUNDLING;
3140 crc_ctx_pkt->u.bundling.dif_byte_count = cpu_to_le32(dif_bytes);
3141 crc_ctx_pkt->u.bundling.dseg_count =
3142 cpu_to_le16(prm->tot_dsds - prm->prot_seg_cnt);
3143 cur_dsd = &crc_ctx_pkt->u.bundling.data_dsd[0];
3144 }
3145
3146 /* Finish the common fields of CRC pkt */
3147 crc_ctx_pkt->blk_size = cpu_to_le16(cmd->blk_sz);
3148 crc_ctx_pkt->prot_opts = cpu_to_le16(fw_prot_opts);
3149 crc_ctx_pkt->byte_count = cpu_to_le32(data_bytes);
3150 crc_ctx_pkt->guard_seed = cpu_to_le16(0);
3151
3152 memset((uint8_t *)&tc, 0, sizeof(tc));
3153 tc.vha = vha;
3154 tc.blk_sz = cmd->blk_sz;
3155 tc.bufflen = cmd->bufflen;
3156 tc.sg = cmd->sg;
3157 tc.prot_sg = cmd->prot_sg;
3158 tc.ctx = crc_ctx_pkt;
3159 tc.ctx_dsd_alloced = &cmd->ctx_dsd_alloced;
3160
3161 /* Walks data segments */
3162 pkt->flags |= cpu_to_le16(CTIO7_FLAGS_DSD_PTR);
3163
3164 if (!bundling && prm->prot_seg_cnt) {
3165 if (qla24xx_walk_and_build_sglist_no_difb(ha, NULL, cur_dsd,
3166 prm->tot_dsds, &tc))
3167 goto crc_queuing_error;
3168 } else if (qla24xx_walk_and_build_sglist(ha, NULL, cur_dsd,
3169 (prm->tot_dsds - prm->prot_seg_cnt), &tc))
3170 goto crc_queuing_error;
3171
3172 if (bundling && prm->prot_seg_cnt) {
3173 /* Walks dif segments */
3174 pkt->add_flags |= CTIO_CRC2_AF_DIF_DSD_ENA;
3175
3176 cur_dsd = &crc_ctx_pkt->u.bundling.dif_dsd;
3177 if (qla24xx_walk_and_build_prot_sglist(ha, NULL, cur_dsd,
3178 prm->prot_seg_cnt, cmd))
3179 goto crc_queuing_error;
3180 }
3181 return QLA_SUCCESS;
3182
3183 crc_queuing_error:
3184 /* Cleanup will be performed by the caller */
3185 qpair->req->outstanding_cmds[h] = NULL;
3186
3187 return QLA_FUNCTION_FAILED;
3188 }
3189
3190 /*
3191 * Callback to set up the response for xmit_type QLA_TGT_XMIT_DATA and/or
3192 * QLA_TGT_XMIT_STATUS for >= 24xx silicon
3193 */
3194 int qlt_xmit_response(struct qla_tgt_cmd *cmd, int xmit_type,
3195 uint8_t scsi_status)
3196 {
3197 struct scsi_qla_host *vha = cmd->vha;
3198 struct qla_qpair *qpair = cmd->qpair;
3199 struct ctio7_to_24xx *pkt;
3200 struct qla_tgt_prm prm;
3201 uint32_t full_req_cnt = 0;
3202 unsigned long flags = 0;
3203 int res;
3204
3205 if (!qpair->fw_started || (cmd->reset_count != qpair->chip_reset) ||
3206 (cmd->sess && cmd->sess->deleted)) {
3207 cmd->state = QLA_TGT_STATE_PROCESSED;
3208 res = 0;
3209 goto free;
3210 }
3211
3212 ql_dbg_qp(ql_dbg_tgt, qpair, 0xe018,
3213 "is_send_status=%d, cmd->bufflen=%d, cmd->sg_cnt=%d, cmd->dma_data_direction=%d se_cmd[%p] qp %d\n",
3214 (xmit_type & QLA_TGT_XMIT_STATUS) ?
3215 1 : 0, cmd->bufflen, cmd->sg_cnt, cmd->dma_data_direction,
3216 &cmd->se_cmd, qpair->id);
3217
3218 res = qlt_pre_xmit_response(cmd, &prm, xmit_type, scsi_status,
3219 &full_req_cnt);
3220 if (unlikely(res != 0))
3221 goto free;
3222
3223 spin_lock_irqsave(qpair->qp_lock_ptr, flags);
3224
3225 if (xmit_type == QLA_TGT_XMIT_STATUS)
3226 qpair->tgt_counters.core_qla_snd_status++;
3227 else
3228 qpair->tgt_counters.core_qla_que_buf++;
3229
3230 if (!qpair->fw_started || cmd->reset_count != qpair->chip_reset) {
3231 /*
3232 * Either the port is not online or this request was from
3233 * previous life, just abort the processing.
3234 */
3235 cmd->state = QLA_TGT_STATE_PROCESSED;
3236 ql_dbg_qp(ql_dbg_async, qpair, 0xe101,
3237 "RESET-RSP online/active/old-count/new-count = %d/%d/%d/%d.\n",
3238 vha->flags.online, qla2x00_reset_active(vha),
3239 cmd->reset_count, qpair->chip_reset);
3240 spin_unlock_irqrestore(qpair->qp_lock_ptr, flags);
3241 res = 0;
3242 goto free;
3243 }
3244
3245 /* Does the F/W have IOCBs for this request? */
3246 res = qlt_check_reserve_free_req(qpair, full_req_cnt);
3247 if (unlikely(res))
3248 goto out_unmap_unlock;
3249
3250 if (cmd->se_cmd.prot_op && (xmit_type & QLA_TGT_XMIT_DATA))
3251 res = qlt_build_ctio_crc2_pkt(qpair, &prm);
3252 else
3253 res = qlt_24xx_build_ctio_pkt(qpair, &prm);
3254 if (unlikely(res != 0)) {
3255 qpair->req->cnt += full_req_cnt;
3256 goto out_unmap_unlock;
3257 }
3258
3259 pkt = (struct ctio7_to_24xx *)prm.pkt;
3260
3261 if (qlt_has_data(cmd) && (xmit_type & QLA_TGT_XMIT_DATA)) {
3262 pkt->u.status0.flags |=
3263 cpu_to_le16(CTIO7_FLAGS_DATA_IN |
3264 CTIO7_FLAGS_STATUS_MODE_0);
3265
3266 if (cmd->se_cmd.prot_op == TARGET_PROT_NORMAL)
3267 qlt_load_data_segments(&prm);
3268
3269 if (prm.add_status_pkt == 0) {
3270 if (xmit_type & QLA_TGT_XMIT_STATUS) {
3271 pkt->u.status0.scsi_status =
3272 cpu_to_le16(prm.rq_result);
3273 pkt->u.status0.residual =
3274 cpu_to_le32(prm.residual);
3275 pkt->u.status0.flags |= cpu_to_le16(
3276 CTIO7_FLAGS_SEND_STATUS);
3277 if (qlt_need_explicit_conf(cmd, 0)) {
3278 pkt->u.status0.flags |=
3279 cpu_to_le16(
3280 CTIO7_FLAGS_EXPLICIT_CONFORM |
3281 CTIO7_FLAGS_CONFORM_REQ);
3282 }
3283 }
3284
3285 } else {
3286 /*
3287 * We have already made sure that there is sufficient
3288 * amount of request entries to not drop HW lock in
3289 * req_pkt().
3290 */
3291 struct ctio7_to_24xx *ctio =
3292 (struct ctio7_to_24xx *)qlt_get_req_pkt(
3293 qpair->req);
3294
3295 ql_dbg_qp(ql_dbg_tgt, qpair, 0x305e,
3296 "Building additional status packet 0x%p.\n",
3297 ctio);
3298
3299 /*
3300 * T10Dif: ctio_crc2_to_fw overlays on top of
3301 * ctio7_to_24xx
3302 */
3303 memcpy(ctio, pkt, sizeof(*ctio));
3304 /* reset back to CTIO7 */
3305 ctio->entry_count = 1;
3306 ctio->entry_type = CTIO_TYPE7;
3307 ctio->dseg_count = 0;
3308 ctio->u.status1.flags &= ~cpu_to_le16(
3309 CTIO7_FLAGS_DATA_IN);
3310
3311 /* Real finish is ctio_m1's finish */
3312 pkt->handle |= CTIO_INTERMEDIATE_HANDLE_MARK;
3313 pkt->u.status0.flags |= cpu_to_le16(
3314 CTIO7_FLAGS_DONT_RET_CTIO);
3315
3316 /* qlt_24xx_init_ctio_to_isp() will correct
3317 * all the necessary fields that are part of CTIO7;
3318 * no residual CTIO-CRC2 data should remain.
3319 */
3320 qlt_24xx_init_ctio_to_isp((struct ctio7_to_24xx *)ctio,
3321 &prm);
3322 }
3323 } else
3324 qlt_24xx_init_ctio_to_isp(pkt, &prm);
3325
3326
3327 cmd->state = QLA_TGT_STATE_PROCESSED; /* Mid-level is done processing */
3328 cmd->cmd_sent_to_fw = 1;
3329 cmd->ctio_flags = le16_to_cpu(pkt->u.status0.flags);
3330
3331 /* Memory Barrier */
3332 wmb();
3333 if (qpair->reqq_start_iocbs)
3334 qpair->reqq_start_iocbs(qpair);
3335 else
3336 qla2x00_start_iocbs(vha, qpair->req);
3337 spin_unlock_irqrestore(qpair->qp_lock_ptr, flags);
3338
3339 return 0;
3340
3341 out_unmap_unlock:
3342 qlt_unmap_sg(vha, cmd);
3343 spin_unlock_irqrestore(qpair->qp_lock_ptr, flags);
3344
3345 free:
3346 vha->hw->tgt.tgt_ops->free_cmd(cmd);
3347 return res;
3348 }
3349 EXPORT_SYMBOL(qlt_xmit_response);
3350
3351 int qlt_rdy_to_xfer(struct qla_tgt_cmd *cmd)
3352 {
3353 struct ctio7_to_24xx *pkt;
3354 struct scsi_qla_host *vha = cmd->vha;
3355 struct qla_tgt *tgt = cmd->tgt;
3356 struct qla_tgt_prm prm;
3357 unsigned long flags = 0;
3358 int res = 0;
3359 struct qla_qpair *qpair = cmd->qpair;
3360
3361 memset(&prm, 0, sizeof(prm));
3362 prm.cmd = cmd;
3363 prm.tgt = tgt;
3364 prm.sg = NULL;
3365 prm.req_cnt = 1;
3366
3367 /* Calculate number of entries and segments required */
3368 if (qlt_pci_map_calc_cnt(&prm) != 0)
3369 return -EAGAIN;
3370
3371 if (!qpair->fw_started || (cmd->reset_count != qpair->chip_reset) ||
3372 (cmd->sess && cmd->sess->deleted)) {
3373 /*
3374 * Either the port is not online or this request was from
3375 * previous life, just abort the processing.
3376 */
3377 cmd->aborted = 1;
3378 cmd->write_data_transferred = 0;
3379 cmd->state = QLA_TGT_STATE_DATA_IN;
3380 vha->hw->tgt.tgt_ops->handle_data(cmd);
3381 ql_dbg_qp(ql_dbg_async, qpair, 0xe102,
3382 "RESET-XFR online/active/old-count/new-count = %d/%d/%d/%d.\n",
3383 vha->flags.online, qla2x00_reset_active(vha),
3384 cmd->reset_count, qpair->chip_reset);
3385 return 0;
3386 }
3387
3388 spin_lock_irqsave(qpair->qp_lock_ptr, flags);
3389 /* Does the F/W have IOCBs for this request? */
3390 res = qlt_check_reserve_free_req(qpair, prm.req_cnt);
3391 if (res != 0)
3392 goto out_unlock_free_unmap;
3393 if (cmd->se_cmd.prot_op)
3394 res = qlt_build_ctio_crc2_pkt(qpair, &prm);
3395 else
3396 res = qlt_24xx_build_ctio_pkt(qpair, &prm);
3397
3398 if (unlikely(res != 0)) {
3399 qpair->req->cnt += prm.req_cnt;
3400 goto out_unlock_free_unmap;
3401 }
3402
3403 pkt = (struct ctio7_to_24xx *)prm.pkt;
3404 pkt->u.status0.flags |= cpu_to_le16(CTIO7_FLAGS_DATA_OUT |
3405 CTIO7_FLAGS_STATUS_MODE_0);
3406
3407 if (cmd->se_cmd.prot_op == TARGET_PROT_NORMAL)
3408 qlt_load_data_segments(&prm);
3409
3410 cmd->state = QLA_TGT_STATE_NEED_DATA;
3411 cmd->cmd_sent_to_fw = 1;
3412 cmd->ctio_flags = le16_to_cpu(pkt->u.status0.flags);
3413
3414 /* Memory Barrier */
3415 wmb();
3416 if (qpair->reqq_start_iocbs)
3417 qpair->reqq_start_iocbs(qpair);
3418 else
3419 qla2x00_start_iocbs(vha, qpair->req);
3420 spin_unlock_irqrestore(qpair->qp_lock_ptr, flags);
3421
3422 return res;
3423
3424 out_unlock_free_unmap:
3425 qlt_unmap_sg(vha, cmd);
3426 spin_unlock_irqrestore(qpair->qp_lock_ptr, flags);
3427
3428 return res;
3429 }
3430 EXPORT_SYMBOL(qlt_rdy_to_xfer);
3431
3432
3433 /*
3434 * it is assumed either hardware_lock or qpair lock is held.
3435 */
3436 static void
3437 qlt_handle_dif_error(struct qla_qpair *qpair, struct qla_tgt_cmd *cmd,
3438 struct ctio_crc_from_fw *sts)
3439 {
3440 uint8_t *ap = &sts->actual_dif[0];
3441 uint8_t *ep = &sts->expected_dif[0];
3442 uint64_t lba = cmd->se_cmd.t_task_lba;
3443 uint8_t scsi_status, sense_key, asc, ascq;
3444 unsigned long flags;
3445 struct scsi_qla_host *vha = cmd->vha;
3446
3447 cmd->trc_flags |= TRC_DIF_ERR;
3448
3449 cmd->a_guard = be16_to_cpu(*(uint16_t *)(ap + 0));
3450 cmd->a_app_tag = be16_to_cpu(*(uint16_t *)(ap + 2));
3451 cmd->a_ref_tag = be32_to_cpu(*(uint32_t *)(ap + 4));
3452
3453 cmd->e_guard = be16_to_cpu(*(uint16_t *)(ep + 0));
3454 cmd->e_app_tag = be16_to_cpu(*(uint16_t *)(ep + 2));
3455 cmd->e_ref_tag = be32_to_cpu(*(uint32_t *)(ep + 4));
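/*
 * The 8-byte T10 PI tuple decoded above is laid out big-endian as
 * bytes 0-1: guard (CRC16), bytes 2-3: application tag, and bytes 4-7:
 * reference tag, hence the +0/+2/+4 offsets.
 */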
3456
3457 ql_dbg(ql_dbg_tgt_dif, vha, 0xf075,
3458 "%s: aborted %d state %d\n", __func__, cmd->aborted, cmd->state);
3459
3460 scsi_status = sense_key = asc = ascq = 0;
3461
3462 /* check appl tag */
3463 if (cmd->e_app_tag != cmd->a_app_tag) {
3464 ql_dbg(ql_dbg_tgt_dif, vha, 0xe00d,
3465 "App Tag ERR: cdb[%x] lba[%llx %llx] blks[%x] [Actual|Expected] Ref[%x|%x], App[%x|%x], Guard [%x|%x] cmd=%p ox_id[%04x]",
3466 cmd->cdb[0], lba, (lba+cmd->num_blks), cmd->num_blks,
3467 cmd->a_ref_tag, cmd->e_ref_tag, cmd->a_app_tag,
3468 cmd->e_app_tag, cmd->a_guard, cmd->e_guard, cmd,
3469 cmd->atio.u.isp24.fcp_hdr.ox_id);
3470
3471 cmd->dif_err_code = DIF_ERR_APP;
3472 scsi_status = SAM_STAT_CHECK_CONDITION;
3473 sense_key = ABORTED_COMMAND;
3474 asc = 0x10;
3475 ascq = 0x2;
3476 }
3477
3478 /* check ref tag */
3479 if (cmd->e_ref_tag != cmd->a_ref_tag) {
3480 ql_dbg(ql_dbg_tgt_dif, vha, 0xe00e,
3481 "Ref Tag ERR: cdb[%x] lba[%llx %llx] blks[%x] [Actual|Expected] Ref[%x|%x], App[%x|%x], Guard[%x|%x] cmd=%p ox_id[%04x] ",
3482 cmd->cdb[0], lba, (lba+cmd->num_blks), cmd->num_blks,
3483 cmd->a_ref_tag, cmd->e_ref_tag, cmd->a_app_tag,
3484 cmd->e_app_tag, cmd->a_guard, cmd->e_guard, cmd,
3485 cmd->atio.u.isp24.fcp_hdr.ox_id);
3486
3487 cmd->dif_err_code = DIF_ERR_REF;
3488 scsi_status = SAM_STAT_CHECK_CONDITION;
3489 sense_key = ABORTED_COMMAND;
3490 asc = 0x10;
3491 ascq = 0x3;
3492 goto out;
3493 }
3494
3495 /* check guard */
3496 if (cmd->e_guard != cmd->a_guard) {
3497 ql_dbg(ql_dbg_tgt_dif, vha, 0xe012,
3498 "Guard ERR: cdb[%x] lba[%llx %llx] blks[%x] [Actual|Expected] Ref[%x|%x], App[%x|%x], Guard [%x|%x] cmd=%p ox_id[%04x]",
3499 cmd->cdb[0], lba, (lba+cmd->num_blks), cmd->num_blks,
3500 cmd->a_ref_tag, cmd->e_ref_tag, cmd->a_app_tag,
3501 cmd->e_app_tag, cmd->a_guard, cmd->e_guard, cmd,
3502 cmd->atio.u.isp24.fcp_hdr.ox_id);
3503
3504 cmd->dif_err_code = DIF_ERR_GRD;
3505 scsi_status = SAM_STAT_CHECK_CONDITION;
3506 sense_key = ABORTED_COMMAND;
3507 asc = 0x10;
3508 ascq = 0x1;
3509 }
3510 out:
3511 switch (cmd->state) {
3512 case QLA_TGT_STATE_NEED_DATA:
3513 /* handle_data will load DIF error code */
3514 cmd->state = QLA_TGT_STATE_DATA_IN;
3515 vha->hw->tgt.tgt_ops->handle_data(cmd);
3516 break;
3517 default:
3518 spin_lock_irqsave(&cmd->cmd_lock, flags);
3519 if (cmd->aborted) {
3520 spin_unlock_irqrestore(&cmd->cmd_lock, flags);
3521 vha->hw->tgt.tgt_ops->free_cmd(cmd);
3522 break;
3523 }
3524 spin_unlock_irqrestore(&cmd->cmd_lock, flags);
3525
3526 qlt_send_resp_ctio(qpair, cmd, scsi_status, sense_key, asc,
3527 ascq);
3528 /* assume scsi status gets out on the wire.
3529 * Will not wait for completion.
3530 */
3531 vha->hw->tgt.tgt_ops->free_cmd(cmd);
3532 break;
3533 }
3534 }
3535
3536 /* If hardware_lock held on entry, might drop it, then reacquire */
3537 /* This function sends the appropriate CTIO to ISP 2xxx or 24xx */
3538 static int __qlt_send_term_imm_notif(struct scsi_qla_host *vha,
3539 struct imm_ntfy_from_isp *ntfy)
3540 {
3541 struct nack_to_isp *nack;
3542 struct qla_hw_data *ha = vha->hw;
3543 request_t *pkt;
3544 int ret = 0;
3545
3546 ql_dbg(ql_dbg_tgt_tmr, vha, 0xe01c,
3547 "Sending TERM ELS CTIO (ha=%p)\n", ha);
3548
3549 pkt = (request_t *)qla2x00_alloc_iocbs(vha, NULL);
3550 if (pkt == NULL) {
3551 ql_dbg(ql_dbg_tgt, vha, 0xe080,
3552 "qla_target(%d): %s failed: unable to allocate "
3553 "request packet\n", vha->vp_idx, __func__);
3554 return -ENOMEM;
3555 }
3556
3557 pkt->entry_type = NOTIFY_ACK_TYPE;
3558 pkt->entry_count = 1;
3559 pkt->handle = QLA_TGT_SKIP_HANDLE;
3560
3561 nack = (struct nack_to_isp *)pkt;
3562 nack->ox_id = ntfy->ox_id;
3563
3564 nack->u.isp24.nport_handle = ntfy->u.isp24.nport_handle;
3565 if (le16_to_cpu(ntfy->u.isp24.status) == IMM_NTFY_ELS) {
3566 nack->u.isp24.flags = ntfy->u.isp24.flags &
3567 __constant_cpu_to_le32(NOTIFY24XX_FLAGS_PUREX_IOCB);
3568 }
3569
3570 /* terminate */
3571 nack->u.isp24.flags |=
3572 __constant_cpu_to_le16(NOTIFY_ACK_FLAGS_TERMINATE);
3573
3574 nack->u.isp24.srr_rx_id = ntfy->u.isp24.srr_rx_id;
3575 nack->u.isp24.status = ntfy->u.isp24.status;
3576 nack->u.isp24.status_subcode = ntfy->u.isp24.status_subcode;
3577 nack->u.isp24.fw_handle = ntfy->u.isp24.fw_handle;
3578 nack->u.isp24.exchange_address = ntfy->u.isp24.exchange_address;
3579 nack->u.isp24.srr_rel_offs = ntfy->u.isp24.srr_rel_offs;
3580 nack->u.isp24.srr_ui = ntfy->u.isp24.srr_ui;
3581 nack->u.isp24.vp_index = ntfy->u.isp24.vp_index;
3582
3583 qla2x00_start_iocbs(vha, vha->req);
3584 return ret;
3585 }
3586
3587 static void qlt_send_term_imm_notif(struct scsi_qla_host *vha,
3588 struct imm_ntfy_from_isp *imm, int ha_locked)
3589 {
3590 int rc;
3591
3592 WARN_ON_ONCE(!ha_locked);
3593 rc = __qlt_send_term_imm_notif(vha, imm);
3594 pr_debug("rc = %d\n", rc);
3595 }
3596
3597 /*
3598 * If hardware_lock held on entry, might drop it, then reacquire
3599 * This function sends the appropriate CTIO to ISP 2xxx or 24xx
3600 */
3601 static int __qlt_send_term_exchange(struct qla_qpair *qpair,
3602 struct qla_tgt_cmd *cmd,
3603 struct atio_from_isp *atio)
3604 {
3605 struct scsi_qla_host *vha = qpair->vha;
3606 struct ctio7_to_24xx *ctio24;
3607 struct qla_hw_data *ha = vha->hw;
3608 request_t *pkt;
3609 int ret = 0;
3610 uint16_t temp;
3611
3612 ql_dbg(ql_dbg_tgt, vha, 0xe009, "Sending TERM EXCH CTIO (ha=%p)\n", ha);
3613
3614 if (cmd)
3615 vha = cmd->vha;
3616
3617 pkt = (request_t *)qla2x00_alloc_iocbs_ready(qpair, NULL);
3618 if (pkt == NULL) {
3619 ql_dbg(ql_dbg_tgt, vha, 0xe050,
3620 "qla_target(%d): %s failed: unable to allocate "
3621 "request packet\n", vha->vp_idx, __func__);
3622 return -ENOMEM;
3623 }
3624
3625 if (cmd != NULL) {
3626 if (cmd->state < QLA_TGT_STATE_PROCESSED) {
3627 ql_dbg(ql_dbg_tgt, vha, 0xe051,
3628 "qla_target(%d): Terminating cmd %p with "
3629 "incorrect state %d\n", vha->vp_idx, cmd,
3630 cmd->state);
3631 } else
3632 ret = 1;
3633 }
3634
3635 qpair->tgt_counters.num_term_xchg_sent++;
3636 pkt->entry_count = 1;
3637 pkt->handle = QLA_TGT_SKIP_HANDLE | CTIO_COMPLETION_HANDLE_MARK;
3638
3639 ctio24 = (struct ctio7_to_24xx *)pkt;
3640 ctio24->entry_type = CTIO_TYPE7;
3641 ctio24->nport_handle = CTIO7_NHANDLE_UNRECOGNIZED;
3642 ctio24->timeout = cpu_to_le16(QLA_TGT_TIMEOUT);
3643 ctio24->vp_index = vha->vp_idx;
3644 ctio24->initiator_id = be_id_to_le(atio->u.isp24.fcp_hdr.s_id);
3645 ctio24->exchange_addr = atio->u.isp24.exchange_addr;
3646 temp = (atio->u.isp24.attr << 9) | CTIO7_FLAGS_STATUS_MODE_1 |
3647 CTIO7_FLAGS_TERMINATE;
3648 ctio24->u.status1.flags = cpu_to_le16(temp);
3649 temp = be16_to_cpu(atio->u.isp24.fcp_hdr.ox_id);
3650 ctio24->u.status1.ox_id = cpu_to_le16(temp);
3651
3652 /* Memory Barrier */
3653 wmb();
3654 if (qpair->reqq_start_iocbs)
3655 qpair->reqq_start_iocbs(qpair);
3656 else
3657 qla2x00_start_iocbs(vha, qpair->req);
3658 return ret;
3659 }
3660
3661 static void qlt_send_term_exchange(struct qla_qpair *qpair,
3662 struct qla_tgt_cmd *cmd, struct atio_from_isp *atio, int ha_locked,
3663 int ul_abort)
3664 {
3665 struct scsi_qla_host *vha;
3666 unsigned long flags = 0;
3667 int rc;
3668
3669 /* cmd->vha may differ from qpair->vha in NPIV configurations */
3670 if (cmd)
3671 vha = cmd->vha;
3672 else
3673 vha = qpair->vha;
3674
3675 if (ha_locked) {
3676 rc = __qlt_send_term_exchange(qpair, cmd, atio);
3677 if (rc == -ENOMEM)
3678 qlt_alloc_qfull_cmd(vha, atio, 0, 0);
3679 goto done;
3680 }
3681 spin_lock_irqsave(qpair->qp_lock_ptr, flags);
3682 rc = __qlt_send_term_exchange(qpair, cmd, atio);
3683 if (rc == -ENOMEM)
3684 qlt_alloc_qfull_cmd(vha, atio, 0, 0);
3685
3686 done:
3687 if (cmd && !ul_abort && !cmd->aborted) {
3688 if (cmd->sg_mapped)
3689 qlt_unmap_sg(vha, cmd);
3690 vha->hw->tgt.tgt_ops->free_cmd(cmd);
3691 }
3692
3693 if (!ha_locked)
3694 spin_unlock_irqrestore(qpair->qp_lock_ptr, flags);
3695
3696 return;
3697 }
3698
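/*
 * Recompute the exchange-leak threshold from the current FW exchange
 * count and drop any commands still parked on the qfull list. Those
 * were never sent to TCM, so qlt_free_cmd() is called on them directly.
 */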
3699 static void qlt_init_term_exchange(struct scsi_qla_host *vha)
3700 {
3701 struct list_head free_list;
3702 struct qla_tgt_cmd *cmd, *tcmd;
3703
3704 vha->hw->tgt.leak_exchg_thresh_hold =
3705 (vha->hw->cur_fw_xcb_count/100) * LEAK_EXCHG_THRESH_HOLD_PERCENT;
3706
3707 cmd = tcmd = NULL;
3708 if (!list_empty(&vha->hw->tgt.q_full_list)) {
3709 INIT_LIST_HEAD(&free_list);
3710 list_splice_init(&vha->hw->tgt.q_full_list, &free_list);
3711
3712 list_for_each_entry_safe(cmd, tcmd, &free_list, cmd_list) {
3713 list_del(&cmd->cmd_list);
3714 /* This cmd was never sent to TCM. There is no need
3715 * to schedule free or call free_cmd
3716 */
3717 qlt_free_cmd(cmd);
3718 vha->hw->tgt.num_qfull_cmds_alloc--;
3719 }
3720 }
3721 vha->hw->tgt.num_qfull_cmds_dropped = 0;
3722 }
3723
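/*
 * Each dropped qfull command leaves an exchange the FW still considers
 * outstanding. Once the number of drops crosses the leak threshold,
 * schedule a chip reset (via the DPC thread) to reclaim them.
 */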
3724 static void qlt_chk_exch_leak_thresh_hold(struct scsi_qla_host *vha)
3725 {
3726 uint32_t total_leaked;
3727
3728 total_leaked = vha->hw->tgt.num_qfull_cmds_dropped;
3729
3730 if (vha->hw->tgt.leak_exchg_thresh_hold &&
3731 (total_leaked > vha->hw->tgt.leak_exchg_thresh_hold)) {
3732
3733 ql_dbg(ql_dbg_tgt, vha, 0xe079,
3734 "Chip reset due to exchange starvation: %d/%d.\n",
3735 total_leaked, vha->hw->cur_fw_xcb_count);
3736
3737 if (IS_P3P_TYPE(vha->hw))
3738 set_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags);
3739 else
3740 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
3741 qla2xxx_wake_dpc(vha);
3742 }
3743
3744 }
3745
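/*
 * Abort handler invoked from the target core. Returns 0 after marking
 * the cmd aborted and sending a terminate exchange, or EIO if the cmd
 * was already aborted (seeing this path twice is normal, see below).
 */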
3746 int qlt_abort_cmd(struct qla_tgt_cmd *cmd)
3747 {
3748 struct qla_tgt *tgt = cmd->tgt;
3749 struct scsi_qla_host *vha = tgt->vha;
3750 struct se_cmd *se_cmd = &cmd->se_cmd;
3751 unsigned long flags;
3752
3753 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf014,
3754 "qla_target(%d): terminating exchange for aborted cmd=%p "
3755 "(se_cmd=%p, tag=%llu)", vha->vp_idx, cmd, &cmd->se_cmd,
3756 se_cmd->tag);
3757
3758 spin_lock_irqsave(&cmd->cmd_lock, flags);
3759 if (cmd->aborted) {
3760 spin_unlock_irqrestore(&cmd->cmd_lock, flags);
3761 /*
3762 * It's normal to see 2 calls in this path:
3763 * 1) XFER Rdy completion + CMD_T_ABORT
3764 * 2) TCM TMR - drain_state_list
3765 */
3766 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf016,
3767 "multiple abort. %p transport_state %x, t_state %x, "
3768 "se_cmd_flags %x\n", cmd, cmd->se_cmd.transport_state,
3769 cmd->se_cmd.t_state, cmd->se_cmd.se_cmd_flags);
3770 return EIO;
3771 }
3772 cmd->aborted = 1;
3773 cmd->trc_flags |= TRC_ABORT;
3774 spin_unlock_irqrestore(&cmd->cmd_lock, flags);
3775
3776 qlt_send_term_exchange(cmd->qpair, cmd, &cmd->atio, 0, 1);
3777 return 0;
3778 }
3779 EXPORT_SYMBOL(qlt_abort_cmd);
3780
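/*
 * Final teardown of a qla_tgt_cmd: drop any residual SG mapping,
 * adjust the pending-command count and return the tag to the session's
 * tag pool. Must not be called while the cmd is still on a workqueue
 * (BUG_ON(cmd->cmd_in_wq)).
 */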
3781 void qlt_free_cmd(struct qla_tgt_cmd *cmd)
3782 {
3783 struct fc_port *sess = cmd->sess;
3784
3785 ql_dbg(ql_dbg_tgt, cmd->vha, 0xe074,
3786 "%s: se_cmd[%p] ox_id %04x\n",
3787 __func__, &cmd->se_cmd,
3788 be16_to_cpu(cmd->atio.u.isp24.fcp_hdr.ox_id));
3789
3790 BUG_ON(cmd->cmd_in_wq);
3791
3792 if (cmd->sg_mapped)
3793 qlt_unmap_sg(cmd->vha, cmd);
3794
3795 if (!cmd->q_full)
3796 qlt_decr_num_pend_cmds(cmd->vha);
3797
3798 BUG_ON(cmd->sg_mapped);
3799 cmd->jiffies_at_free = get_jiffies_64();
3800 if (unlikely(cmd->free_sg))
3801 kfree(cmd->sg);
3802
3803 if (!sess || !sess->se_sess) {
3804 WARN_ON(1);
3805 return;
3806 }
3808 target_free_tag(sess->se_sess, &cmd->se_cmd);
3809 }
3810 EXPORT_SYMBOL(qlt_free_cmd);
3811
3812 /*
3813 * ha->hardware_lock supposed to be held on entry. Might drop it, then reacquire
3814 */
3815 static int qlt_term_ctio_exchange(struct qla_qpair *qpair, void *ctio,
3816 struct qla_tgt_cmd *cmd, uint32_t status)
3817 {
3818 int term = 0;
3819 struct scsi_qla_host *vha = qpair->vha;
3820
3821 if (cmd->se_cmd.prot_op)
3822 ql_dbg(ql_dbg_tgt_dif, vha, 0xe013,
3823 "Term DIF cmd: lba[0x%llx|%lld] len[0x%x] "
3824 "se_cmd=%p tag[%x] op %#x/%s",
3825 cmd->lba, cmd->lba,
3826 cmd->num_blks, &cmd->se_cmd,
3827 cmd->atio.u.isp24.exchange_addr,
3828 cmd->se_cmd.prot_op,
3829 prot_op_str(cmd->se_cmd.prot_op));
3830
3831 if (ctio != NULL) {
3832 struct ctio7_from_24xx *c = (struct ctio7_from_24xx *)ctio;
3833
3834 term = !(c->flags &
3835 cpu_to_le16(OF_TERM_EXCH));
3836 } else
3837 term = 1;
3838
3839 if (term)
3840 qlt_send_term_exchange(qpair, cmd, &cmd->atio, 1, 0);
3841
3842 return term;
3843 }
3844
3845
3846 /* ha->hardware_lock supposed to be held on entry */
3847 static void *qlt_ctio_to_cmd(struct scsi_qla_host *vha,
3848 struct rsp_que *rsp, uint32_t handle, void *ctio)
3849 {
3850 void *cmd = NULL;
3851 struct req_que *req;
3852 int qid = GET_QID(handle);
3853 uint32_t h = handle & ~QLA_TGT_HANDLE_MASK;
3854
3855 if (unlikely(h == QLA_TGT_SKIP_HANDLE))
3856 return NULL;
3857
3858 if (qid == rsp->req->id) {
3859 req = rsp->req;
3860 } else if (vha->hw->req_q_map[qid]) {
3861 ql_dbg(ql_dbg_tgt_mgt, vha, 0x1000a,
3862 "qla_target(%d): CTIO completion with different QID %d handle %x\n",
3863 vha->vp_idx, rsp->id, handle);
3864 req = vha->hw->req_q_map[qid];
3865 } else {
3866 return NULL;
3867 }
3868
3869 h &= QLA_CMD_HANDLE_MASK;
3870
3871 if (h != QLA_TGT_NULL_HANDLE) {
3872 if (unlikely(h >= req->num_outstanding_cmds)) {
3873 ql_dbg(ql_dbg_tgt, vha, 0xe052,
3874 "qla_target(%d): Wrong handle %x received\n",
3875 vha->vp_idx, handle);
3876 return NULL;
3877 }
3878
3879 cmd = (void *) req->outstanding_cmds[h];
3880 if (unlikely(cmd == NULL)) {
3881 ql_dbg(ql_dbg_async, vha, 0xe053,
3882 "qla_target(%d): Suspicious: unable to find the command with handle %x req->id %d rsp->id %d\n",
3883 vha->vp_idx, handle, req->id, rsp->id);
3884 return NULL;
3885 }
3886 req->outstanding_cmds[h] = NULL;
3887 } else if (ctio != NULL) {
3888 /* We can't get loop ID from CTIO7 */
3889 ql_dbg(ql_dbg_tgt, vha, 0xe054,
3890 "qla_target(%d): Wrong CTIO received: QLA24xx doesn't "
3891 "support NULL handles\n", vha->vp_idx);
3892 return NULL;
3893 }
3894
3895 return cmd;
3896 }
3897
3898 /*
3899 * ha->hardware_lock supposed to be held on entry. Might drop it, then reacquire
3900 */
3901 static void qlt_do_ctio_completion(struct scsi_qla_host *vha,
3902 struct rsp_que *rsp, uint32_t handle, uint32_t status, void *ctio)
3903 {
3904 struct qla_hw_data *ha = vha->hw;
3905 struct se_cmd *se_cmd;
3906 struct qla_tgt_cmd *cmd;
3907 struct qla_qpair *qpair = rsp->qpair;
3908
3909 if (handle & CTIO_INTERMEDIATE_HANDLE_MARK) {
3910 /* That could happen only in case of an error/reset/abort */
3911 if (status != CTIO_SUCCESS) {
3912 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf01d,
3913 "Intermediate CTIO received"
3914 " (status %x)\n", status);
3915 }
3916 return;
3917 }
3918
3919 cmd = qlt_ctio_to_cmd(vha, rsp, handle, ctio);
3920 if (cmd == NULL)
3921 return;
3922
3923 se_cmd = &cmd->se_cmd;
3924 cmd->cmd_sent_to_fw = 0;
3925
3926 qlt_unmap_sg(vha, cmd);
3927
3928 if (unlikely(status != CTIO_SUCCESS)) {
3929 switch (status & 0xFFFF) {
3930 case CTIO_INVALID_RX_ID:
3931 if (printk_ratelimit())
3932 dev_info(&vha->hw->pdev->dev,
3933 "qla_target(%d): CTIO with INVALID_RX_ID ATIO attr %x CTIO Flags %x|%x\n",
3934 vha->vp_idx, cmd->atio.u.isp24.attr,
3935 ((cmd->ctio_flags >> 9) & 0xf),
3936 cmd->ctio_flags);
3937
3938 break;
3939 case CTIO_LIP_RESET:
3940 case CTIO_TARGET_RESET:
3941 case CTIO_ABORTED:
3942 /* driver request abort via Terminate exchange */
3943 case CTIO_TIMEOUT:
3944 /* They are OK */
3945 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf058,
3946 "qla_target(%d): CTIO with "
3947 "status %#x received, state %x, se_cmd %p, "
3948 "(LIP_RESET=e, ABORTED=2, TARGET_RESET=17, "
3949 "TIMEOUT=b, INVALID_RX_ID=8)\n", vha->vp_idx,
3950 status, cmd->state, se_cmd);
3951 break;
3952
3953 case CTIO_PORT_LOGGED_OUT:
3954 case CTIO_PORT_UNAVAILABLE:
3955 {
3956 int logged_out =
3957 (status & 0xFFFF) == CTIO_PORT_LOGGED_OUT;
3958
3959 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf059,
3960 "qla_target(%d): CTIO with %s status %x "
3961 "received (state %x, se_cmd %p)\n", vha->vp_idx,
3962 logged_out ? "PORT LOGGED OUT" : "PORT UNAVAILABLE",
3963 status, cmd->state, se_cmd);
3964
3965 if (logged_out && cmd->sess) {
3966 /*
3967 * Session is already logged out, but we need
3968 * to notify initiator, who's not aware of this
3969 */
3970 cmd->sess->send_els_logo = 1;
3971 ql_dbg(ql_dbg_disc, vha, 0x20f8,
3972 "%s %d %8phC post del sess\n",
3973 __func__, __LINE__, cmd->sess->port_name);
3974
3975 qlt_schedule_sess_for_deletion(cmd->sess);
3976 }
3977 break;
3978 }
3979 case CTIO_DIF_ERROR: {
3980 struct ctio_crc_from_fw *crc =
3981 (struct ctio_crc_from_fw *)ctio;
3982 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf073,
3983 "qla_target(%d): CTIO with DIF_ERROR status %x "
3984 "received (state %x, ulp_cmd %p) actual_dif[0x%llx] "
3985 "expect_dif[0x%llx]\n",
3986 vha->vp_idx, status, cmd->state, se_cmd,
3987 *((u64 *)&crc->actual_dif[0]),
3988 *((u64 *)&crc->expected_dif[0]));
3989
3990 qlt_handle_dif_error(qpair, cmd, ctio);
3991 return;
3992 }
3993 default:
3994 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf05b,
3995 "qla_target(%d): CTIO with error status 0x%x received (state %x, se_cmd %p\n",
3996 vha->vp_idx, status, cmd->state, se_cmd);
3997 break;
3998 }
3999
4000
4001 /* "cmd->aborted" means the cmd was already
4002 * aborted/terminated, so there is no need to
4003 * terminate it again. The exchange was already
4004 * cleaned up/freed at the FW level; only
4005 * driver-level cleanup remains.
4006 */
4007 if ((cmd->state != QLA_TGT_STATE_NEED_DATA) &&
4008 (!cmd->aborted)) {
4009 cmd->trc_flags |= TRC_CTIO_ERR;
4010 if (qlt_term_ctio_exchange(qpair, ctio, cmd, status))
4011 return;
4012 }
4013 }
4014
4015 if (cmd->state == QLA_TGT_STATE_PROCESSED) {
4016 cmd->trc_flags |= TRC_CTIO_DONE;
4017 } else if (cmd->state == QLA_TGT_STATE_NEED_DATA) {
4018 cmd->state = QLA_TGT_STATE_DATA_IN;
4019
4020 if (status == CTIO_SUCCESS)
4021 cmd->write_data_transferred = 1;
4022
4023 ha->tgt.tgt_ops->handle_data(cmd);
4024 return;
4025 } else if (cmd->aborted) {
4026 cmd->trc_flags |= TRC_CTIO_ABORTED;
4027 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf01e,
4028 "Aborted command %p (tag %lld) finished\n", cmd, se_cmd->tag);
4029 } else {
4030 cmd->trc_flags |= TRC_CTIO_STRANGE;
4031 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf05c,
4032 "qla_target(%d): A command in state (%d) should "
4033 "not return a CTIO complete\n", vha->vp_idx, cmd->state);
4034 }
4035
4036 if (unlikely(status != CTIO_SUCCESS) &&
4037 !cmd->aborted) {
4038 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf01f, "Finishing failed CTIO\n");
4039 dump_stack();
4040 }
4041
4042 ha->tgt.tgt_ops->free_cmd(cmd);
4043 }
4044
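/*
 * Map the FCP task attribute from the ATIO onto the corresponding TCM
 * task attribute; unknown codes fall back to ORDERED.
 */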
4045 static inline int qlt_get_fcp_task_attr(struct scsi_qla_host *vha,
4046 uint8_t task_codes)
4047 {
4048 int fcp_task_attr;
4049
4050 switch (task_codes) {
4051 case ATIO_SIMPLE_QUEUE:
4052 fcp_task_attr = TCM_SIMPLE_TAG;
4053 break;
4054 case ATIO_HEAD_OF_QUEUE:
4055 fcp_task_attr = TCM_HEAD_TAG;
4056 break;
4057 case ATIO_ORDERED_QUEUE:
4058 fcp_task_attr = TCM_ORDERED_TAG;
4059 break;
4060 case ATIO_ACA_QUEUE:
4061 fcp_task_attr = TCM_ACA_TAG;
4062 break;
4063 case ATIO_UNTAGGED:
4064 fcp_task_attr = TCM_SIMPLE_TAG;
4065 break;
4066 default:
4067 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf05d,
4068 "qla_target: unknown task code %x, use ORDERED instead\n",
4069 task_codes);
4070 fcp_task_attr = TCM_ORDERED_TAG;
4071 break;
4072 }
4073
4074 return fcp_task_attr;
4075 }
4076
4077 /*
4078 * Process context for I/O path into tcm_qla2xxx code
4079 */
4080 static void __qlt_do_work(struct qla_tgt_cmd *cmd)
4081 {
4082 scsi_qla_host_t *vha = cmd->vha;
4083 struct qla_hw_data *ha = vha->hw;
4084 struct fc_port *sess = cmd->sess;
4085 struct atio_from_isp *atio = &cmd->atio;
4086 unsigned char *cdb;
4087 unsigned long flags;
4088 uint32_t data_length;
4089 int ret, fcp_task_attr, data_dir, bidi = 0;
4090 struct qla_qpair *qpair = cmd->qpair;
4091
4092 cmd->cmd_in_wq = 0;
4093 cmd->trc_flags |= TRC_DO_WORK;
4094
4095 if (cmd->aborted) {
4096 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf082,
4097 "cmd with tag %u is aborted\n",
4098 cmd->atio.u.isp24.exchange_addr);
4099 goto out_term;
4100 }
4101
4102 spin_lock_init(&cmd->cmd_lock);
4103 cdb = &atio->u.isp24.fcp_cmnd.cdb[0];
4104 cmd->se_cmd.tag = atio->u.isp24.exchange_addr;
4105
4106 if (atio->u.isp24.fcp_cmnd.rddata &&
4107 atio->u.isp24.fcp_cmnd.wrdata) {
4108 bidi = 1;
4109 data_dir = DMA_TO_DEVICE;
4110 } else if (atio->u.isp24.fcp_cmnd.rddata)
4111 data_dir = DMA_FROM_DEVICE;
4112 else if (atio->u.isp24.fcp_cmnd.wrdata)
4113 data_dir = DMA_TO_DEVICE;
4114 else
4115 data_dir = DMA_NONE;
4116
4117 fcp_task_attr = qlt_get_fcp_task_attr(vha,
4118 atio->u.isp24.fcp_cmnd.task_attr);
4119 data_length = get_datalen_for_atio(atio);
4120
4121 ret = ha->tgt.tgt_ops->handle_cmd(vha, cmd, cdb, data_length,
4122 fcp_task_attr, data_dir, bidi);
4123 if (ret != 0)
4124 goto out_term;
4125 /*
4126 * Drop extra session reference from qlt_handle_cmd_for_atio().
4127 */
4128 ha->tgt.tgt_ops->put_sess(sess);
4129 return;
4130
4131 out_term:
4132 ql_dbg(ql_dbg_io, vha, 0x3060, "Terminating work cmd %p", cmd);
4133 /*
4134 * cmd has not been sent to the target yet, so pass NULL as the second
4135 * argument to qlt_send_term_exchange() and free the memory here.
4136 */
4137 cmd->trc_flags |= TRC_DO_WORK_ERR;
4138 spin_lock_irqsave(qpair->qp_lock_ptr, flags);
4139 qlt_send_term_exchange(qpair, NULL, &cmd->atio, 1, 0);
4140
4141 qlt_decr_num_pend_cmds(vha);
4142 target_free_tag(sess->se_sess, &cmd->se_cmd);
4143 spin_unlock_irqrestore(qpair->qp_lock_ptr, flags);
4144
4145 ha->tgt.tgt_ops->put_sess(sess);
4146 }
4147
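/* Workqueue entry point: unlink the cmd from the vha's cmd list, then run it. */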
4148 static void qlt_do_work(struct work_struct *work)
4149 {
4150 struct qla_tgt_cmd *cmd = container_of(work, struct qla_tgt_cmd, work);
4151 scsi_qla_host_t *vha = cmd->vha;
4152 unsigned long flags;
4153
4154 spin_lock_irqsave(&vha->cmd_list_lock, flags);
4155 list_del(&cmd->cmd_list);
4156 spin_unlock_irqrestore(&vha->cmd_list_lock, flags);
4157
4158 __qlt_do_work(cmd);
4159 }
4160
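/*
 * Called when the user changes the number of active qpairs: empty the
 * LUN-to-qpair btree and zero every qpair's lun_cnt so the mapping is
 * rebuilt lazily by qlt_assign_qpair().
 */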
4161 void qlt_clr_qp_table(struct scsi_qla_host *vha)
4162 {
4163 unsigned long flags;
4164 struct qla_hw_data *ha = vha->hw;
4165 struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
4166 void *node;
4167 u64 key = 0;
4168
4169 ql_log(ql_log_info, vha, 0x706c,
4170 "User update Number of Active Qpairs %d\n",
4171 ha->tgt.num_act_qpairs);
4172
4173 spin_lock_irqsave(&ha->tgt.atio_lock, flags);
4174
4175 btree_for_each_safe64(&tgt->lun_qpair_map, key, node)
4176 btree_remove64(&tgt->lun_qpair_map, key);
4177
4178 ha->base_qpair->lun_cnt = 0;
4179 for (key = 0; key < ha->max_qpairs; key++)
4180 if (ha->queue_pair_map[key])
4181 ha->queue_pair_map[key]->lun_cnt = 0;
4182
4183 spin_unlock_irqrestore(&ha->tgt.atio_lock, flags);
4184 }
4185
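/*
 * Choose the qpair a cmd will be processed on. The mapping is sticky
 * per LUN: the first cmd for a LUN claims the least-loaded qpair (by
 * lun_cnt) and caches the hint in lun_qpair_map; subsequent cmds for
 * that LUN reuse it. Without qpairs everything uses qphints[0].
 */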
4186 static void qlt_assign_qpair(struct scsi_qla_host *vha,
4187 struct qla_tgt_cmd *cmd)
4188 {
4189 struct qla_qpair *qpair, *qp;
4190 struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
4191 struct qla_qpair_hint *h;
4192
4193 if (vha->flags.qpairs_available) {
4194 h = btree_lookup64(&tgt->lun_qpair_map, cmd->unpacked_lun);
4195 if (unlikely(!h)) {
4196 /* spread the lun-to-qpair ratio evenly */
4197 int lcnt = 0, rc;
4198 struct scsi_qla_host *base_vha =
4199 pci_get_drvdata(vha->hw->pdev);
4200
4201 qpair = vha->hw->base_qpair;
4202 if (qpair->lun_cnt == 0) {
4203 qpair->lun_cnt++;
4204 h = qla_qpair_to_hint(tgt, qpair);
4205 BUG_ON(!h);
4206 rc = btree_insert64(&tgt->lun_qpair_map,
4207 cmd->unpacked_lun, h, GFP_ATOMIC);
4208 if (rc) {
4209 qpair->lun_cnt--;
4210 ql_log(ql_log_info, vha, 0xd037,
4211 "Unable to insert lun %llx into lun_qpair_map\n",
4212 cmd->unpacked_lun);
4213 }
4214 goto out;
4215 } else {
4216 lcnt = qpair->lun_cnt;
4217 }
4218
4219 h = NULL;
4220 list_for_each_entry(qp, &base_vha->qp_list,
4221 qp_list_elem) {
4222 if (qp->lun_cnt == 0) {
4223 qp->lun_cnt++;
4224 h = qla_qpair_to_hint(tgt, qp);
4225 BUG_ON(!h);
4226 rc = btree_insert64(&tgt->lun_qpair_map,
4227 cmd->unpacked_lun, h, GFP_ATOMIC);
4228 if (rc) {
4229 qp->lun_cnt--;
4230 ql_log(ql_log_info, vha, 0xd038,
4231 "Unable to insert lun %llx into lun_qpair_map\n",
4232 cmd->unpacked_lun);
4233 }
4234 qpair = qp;
4235 goto out;
4236 } else {
4237 if (qp->lun_cnt < lcnt) {
4238 lcnt = qp->lun_cnt;
4239 qpair = qp;
4240 continue;
4241 }
4242 }
4243 }
4244 BUG_ON(!qpair);
4245 qpair->lun_cnt++;
4246 h = qla_qpair_to_hint(tgt, qpair);
4247 BUG_ON(!h);
4248 rc = btree_insert64(&tgt->lun_qpair_map,
4249 cmd->unpacked_lun, h, GFP_ATOMIC);
4250 if (rc) {
4251 qpair->lun_cnt--;
4252 ql_log(ql_log_info, vha, 0xd039,
4253 "Unable to insert lun %llx into lun_qpair_map\n",
4254 cmd->unpacked_lun);
4255 }
4256 }
4257 } else {
4258 h = &tgt->qphints[0];
4259 }
4260 out:
4261 cmd->qpair = h->qpair;
4262 cmd->se_cmd.cpuid = h->cpuid;
4263 }
4264
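/*
 * Allocate and initialize a qla_tgt_cmd from the session's tag pool,
 * copying in the ATIO and assigning a qpair. Returns NULL when the
 * pool is exhausted.
 */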
4265 static struct qla_tgt_cmd *qlt_get_tag(scsi_qla_host_t *vha,
4266 struct fc_port *sess,
4267 struct atio_from_isp *atio)
4268 {
4269 struct se_session *se_sess = sess->se_sess;
4270 struct qla_tgt_cmd *cmd;
4271 int tag, cpu;
4272
4273 tag = sbitmap_queue_get(&se_sess->sess_tag_pool, &cpu);
4274 if (tag < 0)
4275 return NULL;
4276
4277 cmd = &((struct qla_tgt_cmd *)se_sess->sess_cmd_map)[tag];
4278 memset(cmd, 0, sizeof(struct qla_tgt_cmd));
4279 cmd->cmd_type = TYPE_TGT_CMD;
4280 memcpy(&cmd->atio, atio, sizeof(*atio));
4281 cmd->state = QLA_TGT_STATE_NEW;
4282 cmd->tgt = vha->vha_tgt.qla_tgt;
4283 qlt_incr_num_pend_cmds(vha);
4284 cmd->vha = vha;
4285 cmd->se_cmd.map_tag = tag;
4286 cmd->se_cmd.map_cpu = cpu;
4287 cmd->sess = sess;
4288 cmd->loop_id = sess->loop_id;
4289 cmd->conf_compl_supported = sess->conf_compl_supported;
4290
4291 cmd->trc_flags = 0;
4292 cmd->jiffies_at_alloc = get_jiffies_64();
4293
4294 cmd->unpacked_lun = scsilun_to_int(
4295 (struct scsi_lun *)&atio->u.isp24.fcp_cmnd.lun);
4296 qlt_assign_qpair(vha, cmd);
4297 cmd->reset_count = vha->hw->base_qpair->chip_reset;
4298 cmd->vp_idx = vha->vp_idx;
4299
4300 return cmd;
4301 }
4302
4303 /* ha->hardware_lock supposed to be held on entry */
4304 static int qlt_handle_cmd_for_atio(struct scsi_qla_host *vha,
4305 struct atio_from_isp *atio)
4306 {
4307 struct qla_hw_data *ha = vha->hw;
4308 struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
4309 struct fc_port *sess;
4310 struct qla_tgt_cmd *cmd;
4311 unsigned long flags;
4312 port_id_t id;
4313
4314 if (unlikely(tgt->tgt_stop)) {
4315 ql_dbg(ql_dbg_io, vha, 0x3061,
4316 "New command while device %p is shutting down\n", tgt);
4317 return -ENODEV;
4318 }
4319
4320 id = be_to_port_id(atio->u.isp24.fcp_hdr.s_id);
4321 if (IS_SW_RESV_ADDR(id))
4322 return -EBUSY;
4323
4324 sess = ha->tgt.tgt_ops->find_sess_by_s_id(vha, atio->u.isp24.fcp_hdr.s_id);
4325 if (unlikely(!sess))
4326 return -EFAULT;
4327
4328 /* Another WWN used to have our s_id. Our PLOGI scheduled its
4329 * session deletion, but it's still pending on the sess_del_work wq */
4330 if (sess->deleted) {
4331 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf002,
4332 "New command while old session %p is being deleted\n",
4333 sess);
4334 return -EFAULT;
4335 }
4336
4337 /*
4338 * Do kref_get() before returning + dropping qla_hw_data->hardware_lock.
4339 */
4340 if (!kref_get_unless_zero(&sess->sess_kref)) {
4341 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf004,
4342 "%s: kref_get fail, %8phC oxid %x\n",
4343 __func__, sess->port_name,
4344 be16_to_cpu(atio->u.isp24.fcp_hdr.ox_id));
4345 return -EFAULT;
4346 }
4347
4348 cmd = qlt_get_tag(vha, sess, atio);
4349 if (!cmd) {
4350 ql_dbg(ql_dbg_io, vha, 0x3062,
4351 "qla_target(%d): Allocation of cmd failed\n", vha->vp_idx);
4352 ha->tgt.tgt_ops->put_sess(sess);
4353 return -EBUSY;
4354 }
4355
4356 cmd->cmd_in_wq = 1;
4357 cmd->trc_flags |= TRC_NEW_CMD;
4358
4359 spin_lock_irqsave(&vha->cmd_list_lock, flags);
4360 list_add_tail(&cmd->cmd_list, &vha->qla_cmd_list);
4361 spin_unlock_irqrestore(&vha->cmd_list_lock, flags);
4362
4363 INIT_WORK(&cmd->work, qlt_do_work);
4364 if (vha->flags.qpairs_available) {
4365 queue_work_on(cmd->se_cmd.cpuid, qla_tgt_wq, &cmd->work);
4366 } else if (ha->msix_count) {
4367 if (cmd->atio.u.isp24.fcp_cmnd.rddata)
4368 queue_work_on(smp_processor_id(), qla_tgt_wq,
4369 &cmd->work);
4370 else
4371 queue_work_on(cmd->se_cmd.cpuid, qla_tgt_wq,
4372 &cmd->work);
4373 } else {
4374 queue_work(qla_tgt_wq, &cmd->work);
4375 }
4376
4377 return 0;
4378 }
4379
4380 /* ha->hardware_lock supposed to be held on entry */
4381 static int qlt_issue_task_mgmt(struct fc_port *sess, u64 lun,
4382 int fn, void *iocb, int flags)
4383 {
4384 struct scsi_qla_host *vha = sess->vha;
4385 struct qla_hw_data *ha = vha->hw;
4386 struct qla_tgt_mgmt_cmd *mcmd;
4387 struct atio_from_isp *a = (struct atio_from_isp *)iocb;
4388 struct qla_qpair_hint *h = &vha->vha_tgt.qla_tgt->qphints[0];
4389
4390 mcmd = mempool_alloc(qla_tgt_mgmt_cmd_mempool, GFP_ATOMIC);
4391 if (!mcmd) {
4392 ql_dbg(ql_dbg_tgt_tmr, vha, 0x10009,
4393 "qla_target(%d): Allocation of management "
4394 "command failed, some commands and their data could "
4395 "leak\n", vha->vp_idx);
4396 return -ENOMEM;
4397 }
4398 memset(mcmd, 0, sizeof(*mcmd));
4399 mcmd->sess = sess;
4400
4401 if (iocb) {
4402 memcpy(&mcmd->orig_iocb.imm_ntfy, iocb,
4403 sizeof(mcmd->orig_iocb.imm_ntfy));
4404 }
4405 mcmd->tmr_func = fn;
4406 mcmd->flags = flags;
4407 mcmd->reset_count = ha->base_qpair->chip_reset;
4408 mcmd->qpair = h->qpair;
4409 mcmd->vha = vha;
4410 mcmd->se_cmd.cpuid = h->cpuid;
4411 mcmd->unpacked_lun = lun;
4412
4413 switch (fn) {
4414 case QLA_TGT_LUN_RESET:
4415 case QLA_TGT_CLEAR_TS:
4416 case QLA_TGT_ABORT_TS:
4417 abort_cmds_for_lun(vha, lun, a->u.isp24.fcp_hdr.s_id);
4418 /* fall through */
4419 case QLA_TGT_CLEAR_ACA:
4420 h = qlt_find_qphint(vha, mcmd->unpacked_lun);
4421 mcmd->qpair = h->qpair;
4422 mcmd->se_cmd.cpuid = h->cpuid;
4423 break;
4424
4425 case QLA_TGT_TARGET_RESET:
4426 case QLA_TGT_NEXUS_LOSS_SESS:
4427 case QLA_TGT_NEXUS_LOSS:
4428 case QLA_TGT_ABORT_ALL:
4429 default:
4430 /* no-op */
4431 break;
4432 }
4433
4434 INIT_WORK(&mcmd->work, qlt_do_tmr_work);
4435 queue_work_on(mcmd->se_cmd.cpuid, qla_tgt_wq,
4436 &mcmd->work);
4437
4438 return 0;
4439 }
4440
4441 /* ha->hardware_lock supposed to be held on entry */
4442 static int qlt_handle_task_mgmt(struct scsi_qla_host *vha, void *iocb)
4443 {
4444 struct atio_from_isp *a = (struct atio_from_isp *)iocb;
4445 struct qla_hw_data *ha = vha->hw;
4446 struct fc_port *sess;
4447 u64 unpacked_lun;
4448 int fn;
4449 unsigned long flags;
4450
4451 fn = a->u.isp24.fcp_cmnd.task_mgmt_flags;
4452
4453 spin_lock_irqsave(&ha->tgt.sess_lock, flags);
4454 sess = ha->tgt.tgt_ops->find_sess_by_s_id(vha,
4455 a->u.isp24.fcp_hdr.s_id);
4456 spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
4457
4458 unpacked_lun =
4459 scsilun_to_int((struct scsi_lun *)&a->u.isp24.fcp_cmnd.lun);
4460
4461 if (sess == NULL || sess->deleted)
4462 return -EFAULT;
4463
4464 return qlt_issue_task_mgmt(sess, unpacked_lun, fn, iocb, 0);
4465 }
4466
4467 /* ha->hardware_lock supposed to be held on entry */
4468 static int __qlt_abort_task(struct scsi_qla_host *vha,
4469 struct imm_ntfy_from_isp *iocb, struct fc_port *sess)
4470 {
4471 struct atio_from_isp *a = (struct atio_from_isp *)iocb;
4472 struct qla_hw_data *ha = vha->hw;
4473 struct qla_tgt_mgmt_cmd *mcmd;
4474 u64 unpacked_lun;
4475 int rc;
4476
4477 mcmd = mempool_alloc(qla_tgt_mgmt_cmd_mempool, GFP_ATOMIC);
4478 if (mcmd == NULL) {
4479 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf05f,
4480 "qla_target(%d): %s: Allocation of ABORT cmd failed\n",
4481 vha->vp_idx, __func__);
4482 return -ENOMEM;
4483 }
4484 memset(mcmd, 0, sizeof(*mcmd));
4485
4486 mcmd->sess = sess;
4487 memcpy(&mcmd->orig_iocb.imm_ntfy, iocb,
4488 sizeof(mcmd->orig_iocb.imm_ntfy));
4489
4490 unpacked_lun =
4491 scsilun_to_int((struct scsi_lun *)&a->u.isp24.fcp_cmnd.lun);
4492 mcmd->reset_count = ha->base_qpair->chip_reset;
4493 mcmd->tmr_func = QLA_TGT_2G_ABORT_TASK;
4494 mcmd->qpair = ha->base_qpair;
4495
4496 rc = ha->tgt.tgt_ops->handle_tmr(mcmd, unpacked_lun, mcmd->tmr_func,
4497 le16_to_cpu(iocb->u.isp2x.seq_id));
4498 if (rc != 0) {
4499 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf060,
4500 "qla_target(%d): tgt_ops->handle_tmr() failed: %d\n",
4501 vha->vp_idx, rc);
4502 mempool_free(mcmd, qla_tgt_mgmt_cmd_mempool);
4503 return -EFAULT;
4504 }
4505
4506 return 0;
4507 }
4508
4509 /* ha->hardware_lock supposed to be held on entry */
4510 static int qlt_abort_task(struct scsi_qla_host *vha,
4511 struct imm_ntfy_from_isp *iocb)
4512 {
4513 struct qla_hw_data *ha = vha->hw;
4514 struct fc_port *sess;
4515 int loop_id;
4516 unsigned long flags;
4517
4518 loop_id = GET_TARGET_ID(ha, (struct atio_from_isp *)iocb);
4519
4520 spin_lock_irqsave(&ha->tgt.sess_lock, flags);
4521 sess = ha->tgt.tgt_ops->find_sess_by_loop_id(vha, loop_id);
4522 spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
4523
4524 if (sess == NULL) {
4525 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf025,
4526 "qla_target(%d): task abort for non-existent "
4527 "session\n", vha->vp_idx);
4528 return qlt_sched_sess_work(vha->vha_tgt.qla_tgt,
4529 QLA_TGT_SESS_WORK_ABORT, iocb, sizeof(*iocb));
4530 }
4531
4532 return __qlt_abort_task(vha, iocb, sess);
4533 }
4534
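/*
 * Completion callback for an explicit LOGO (rc is the mailbox status);
 * a failure is only logged.
 */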
4535 void qlt_logo_completion_handler(fc_port_t *fcport, int rc)
4536 {
4537 if (rc != MBS_COMMAND_COMPLETE) {
4538 ql_dbg(ql_dbg_tgt_mgt, fcport->vha, 0xf093,
4539 "%s: se_sess %p / sess %p from"
4540 " port %8phC loop_id %#04x s_id %02x:%02x:%02x"
4541 " LOGO failed: %#x\n",
4542 __func__,
4543 fcport->se_sess,
4544 fcport,
4545 fcport->port_name, fcport->loop_id,
4546 fcport->d_id.b.domain, fcport->d_id.b.area,
4547 fcport->d_id.b.al_pa, rc);
4548 }
4549
4550 fcport->logout_completed = 1;
4551 }
4552
4553 /*
4554 * ha->hardware_lock supposed to be held on entry (to protect tgt->sess_list)
4555 *
4556 * Schedules sessions with matching port_id/loop_id but different wwn for
4557 * deletion. Returns existing session with matching wwn if present.
4558 * Null otherwise.
4559 */
4560 struct fc_port *
4561 qlt_find_sess_invalidate_other(scsi_qla_host_t *vha, uint64_t wwn,
4562 port_id_t port_id, uint16_t loop_id, struct fc_port **conflict_sess)
4563 {
4564 struct fc_port *sess = NULL, *other_sess;
4565 uint64_t other_wwn;
4566
4567 *conflict_sess = NULL;
4568
4569 list_for_each_entry(other_sess, &vha->vp_fcports, list) {
4570
4571 other_wwn = wwn_to_u64(other_sess->port_name);
4572
4573 if (wwn == other_wwn) {
4574 WARN_ON(sess);
4575 sess = other_sess;
4576 continue;
4577 }
4578
4579 /* find other sess with nport_id collision */
4580 if (port_id.b24 == other_sess->d_id.b24) {
4581 if (loop_id != other_sess->loop_id) {
4582 ql_dbg(ql_dbg_tgt_tmr, vha, 0x1000c,
4583 "Invalidating sess %p loop_id %d wwn %llx.\n",
4584 other_sess, other_sess->loop_id, other_wwn);
4585
4586 /*
4587 * logout_on_delete is set by default, but another
4588 * session that has the same s_id/loop_id combo
4589 * might have cleared it when it requested this
4590 * session's deletion, so don't touch it
4591 */
4592 qlt_schedule_sess_for_deletion(other_sess);
4593 } else {
4594 /*
4595 * Another WWN used to have our s_id/loop_id.
4596 * Kill the session, but don't free the loop_id
4597 */
4598 ql_dbg(ql_dbg_tgt_tmr, vha, 0xf01b,
4599 "Invalidating sess %p loop_id %d wwn %llx.\n",
4600 other_sess, other_sess->loop_id, other_wwn);
4601
4602 other_sess->keep_nport_handle = 1;
4603 if (other_sess->disc_state != DSC_DELETED)
4604 *conflict_sess = other_sess;
4605 qlt_schedule_sess_for_deletion(other_sess);
4606 }
4607 continue;
4608 }
4609
4610 /* find other sess with nport handle collision */
4611 if ((loop_id == other_sess->loop_id) &&
4612 (loop_id != FC_NO_LOOP_ID)) {
4613 ql_dbg(ql_dbg_tgt_tmr, vha, 0x1000d,
4614 "Invalidating sess %p loop_id %d wwn %llx.\n",
4615 other_sess, other_sess->loop_id, other_wwn);
4616
4617 /* Same loop_id but different s_id.
4618 * OK to kill and log out */
4619 qlt_schedule_sess_for_deletion(other_sess);
4620 }
4621 }
4622
4623 return sess;
4624 }
4625
4626 /* Abort any commands for this s_id waiting on qla_tgt_wq workqueue */
4627 static int abort_cmds_for_s_id(struct scsi_qla_host *vha, port_id_t *s_id)
4628 {
4629 struct qla_tgt_sess_op *op;
4630 struct qla_tgt_cmd *cmd;
4631 uint32_t key;
4632 int count = 0;
4633 unsigned long flags;
4634
4635 key = (((u32)s_id->b.domain << 16) |
4636 ((u32)s_id->b.area << 8) |
4637 ((u32)s_id->b.al_pa));
4638
4639 spin_lock_irqsave(&vha->cmd_list_lock, flags);
4640 list_for_each_entry(op, &vha->qla_sess_op_cmd_list, cmd_list) {
4641 uint32_t op_key = sid_to_key(op->atio.u.isp24.fcp_hdr.s_id);
4642
4643 if (op_key == key) {
4644 op->aborted = true;
4645 count++;
4646 }
4647 }
4648
4649 list_for_each_entry(op, &vha->unknown_atio_list, cmd_list) {
4650 uint32_t op_key = sid_to_key(op->atio.u.isp24.fcp_hdr.s_id);
4651
4652 if (op_key == key) {
4653 op->aborted = true;
4654 count++;
4655 }
4656 }
4657
4658 list_for_each_entry(cmd, &vha->qla_cmd_list, cmd_list) {
4659 uint32_t cmd_key = sid_to_key(cmd->atio.u.isp24.fcp_hdr.s_id);
4660
4661 if (cmd_key == key) {
4662 cmd->aborted = 1;
4663 count++;
4664 }
4665 }
4666 spin_unlock_irqrestore(&vha->cmd_list_lock, flags);
4667
4668 return count;
4669 }
4670
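/*
 * Handle an incoming PLOGI/PRLI: abort stale cmds from this s_id,
 * invalidate any conflicting sessions, record a PLOGI ACK (pla) and
 * either post creation of a new session or schedule deletion of the
 * old one so the login can be ACKed once that completes.
 */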
4671 static int qlt_handle_login(struct scsi_qla_host *vha,
4672 struct imm_ntfy_from_isp *iocb)
4673 {
4674 struct fc_port *sess = NULL, *conflict_sess = NULL;
4675 uint64_t wwn;
4676 port_id_t port_id;
4677 uint16_t loop_id, wd3_lo;
4678 int res = 0;
4679 struct qlt_plogi_ack_t *pla;
4680 unsigned long flags;
4681
4682 lockdep_assert_held(&vha->hw->hardware_lock);
4683
4684 wwn = wwn_to_u64(iocb->u.isp24.port_name);
4685
4686 port_id.b.domain = iocb->u.isp24.port_id[2];
4687 port_id.b.area = iocb->u.isp24.port_id[1];
4688 port_id.b.al_pa = iocb->u.isp24.port_id[0];
4689 port_id.b.rsvd_1 = 0;
4690
4691 loop_id = le16_to_cpu(iocb->u.isp24.nport_handle);
4692
4693 /* Mark all stale commands sitting in qla_tgt_wq for deletion */
4694 abort_cmds_for_s_id(vha, &port_id);
4695
4696 if (wwn) {
4697 spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags);
4698 sess = qlt_find_sess_invalidate_other(vha, wwn,
4699 port_id, loop_id, &conflict_sess);
4700 spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
4701 } else {
4702 ql_dbg(ql_dbg_disc, vha, 0xffff,
4703 "%s %d Term INOT due to WWN=0 lid=%d, NportID %06X ",
4704 __func__, __LINE__, loop_id, port_id.b24);
4705 qlt_send_term_imm_notif(vha, iocb, 1);
4706 goto out;
4707 }
4708
4709 if (IS_SW_RESV_ADDR(port_id)) {
4710 res = 1;
4711 goto out;
4712 }
4713
4714 pla = qlt_plogi_ack_find_add(vha, &port_id, iocb);
4715 if (!pla) {
4716 ql_dbg(ql_dbg_disc + ql_dbg_verbose, vha, 0xffff,
4717 "%s %d %8phC Term INOT due to mem alloc fail",
4718 __func__, __LINE__,
4719 iocb->u.isp24.port_name);
4720 qlt_send_term_imm_notif(vha, iocb, 1);
4721 goto out;
4722 }
4723
4724 if (conflict_sess) {
4725 conflict_sess->login_gen++;
4726 qlt_plogi_ack_link(vha, pla, conflict_sess,
4727 QLT_PLOGI_LINK_CONFLICT);
4728 }
4729
4730 if (!sess) {
4731 pla->ref_count++;
4732 ql_dbg(ql_dbg_disc, vha, 0xffff,
4733 "%s %d %8phC post new sess\n",
4734 __func__, __LINE__, iocb->u.isp24.port_name);
4735 if (iocb->u.isp24.status_subcode == ELS_PLOGI)
4736 qla24xx_post_newsess_work(vha, &port_id,
4737 iocb->u.isp24.port_name,
4738 iocb->u.isp24.u.plogi.node_name,
4739 pla, FC4_TYPE_UNKNOWN);
4740 else
4741 qla24xx_post_newsess_work(vha, &port_id,
4742 iocb->u.isp24.port_name, NULL,
4743 pla, FC4_TYPE_UNKNOWN);
4744
4745 goto out;
4746 }
4747
4748 if (sess->disc_state == DSC_UPD_FCPORT) {
4749 u16 sec;
4750
4751 /*
4752 * Remote port registration is still going on from
4753 * previous login. Allow it to finish before we
4754 * accept the new login.
4755 */
4756 sess->next_disc_state = DSC_DELETE_PEND;
4757 sec = jiffies_to_msecs(jiffies -
4758 sess->jiffies_at_registration) / 1000;
4759 if (sess->sec_since_registration < sec && sec &&
4760 !(sec % 5)) {
4761 sess->sec_since_registration = sec;
4762 ql_dbg(ql_dbg_disc, vha, 0xffff,
4763 "%s %8phC - Slow Rport registration (%d Sec)\n",
4764 __func__, sess->port_name, sec);
4765 }
4766
4767 if (!conflict_sess) {
4768 list_del(&pla->list);
4769 kmem_cache_free(qla_tgt_plogi_cachep, pla);
4770 }
4771
4772 qlt_send_term_imm_notif(vha, iocb, 1);
4773 goto out;
4774 }
4775
4776 qlt_plogi_ack_link(vha, pla, sess, QLT_PLOGI_LINK_SAME_WWN);
4777 sess->d_id = port_id;
4778 sess->login_gen++;
4779
4780 if (iocb->u.isp24.status_subcode == ELS_PRLI) {
4781 sess->fw_login_state = DSC_LS_PRLI_PEND;
4782 sess->local = 0;
4783 sess->loop_id = loop_id;
4786 wd3_lo = le16_to_cpu(iocb->u.isp24.u.prli.wd3_lo);
4787
4788 if (wd3_lo & BIT_7)
4789 sess->conf_compl_supported = 1;
4790
4791 if ((wd3_lo & BIT_4) == 0)
4792 sess->port_type = FCT_INITIATOR;
4793 else
4794 sess->port_type = FCT_TARGET;
4795
4796 } else
4797 sess->fw_login_state = DSC_LS_PLOGI_PEND;
4798
4799
4800 ql_dbg(ql_dbg_disc, vha, 0x20f9,
4801 "%s %d %8phC DS %d\n",
4802 __func__, __LINE__, sess->port_name, sess->disc_state);
4803
4804 switch (sess->disc_state) {
4805 case DSC_DELETED:
4806 qlt_plogi_ack_unref(vha, pla);
4807 break;
4808
4809 default:
4810 /*
4811 * Under normal circumstances we want to release nport handle
4812 * during LOGO process to avoid nport handle leaks inside FW.
4813 * The exception is when LOGO is done while another PLOGI with
4814 * the same nport handle is waiting as might be the case here.
4815 * Note: there is always a possibility of a race where session
4816 * deletion has already started for other reasons (e.g. ACL
4817 * removal) and now PLOGI arrives:
4818 * 1. if PLOGI arrived in FW after nport handle has been freed,
4819 * FW must have assigned this PLOGI a new/same handle and we
4820 * can proceed ACK'ing it as usual when session deletion
4821 * completes.
4822 * 2. if PLOGI arrived in FW before LOGO with LCF_FREE_NPORT
4823 * bit reached it, the handle has now been released. We'll
4824 * get an error when we ACK this PLOGI. Nothing will be sent
4825 * back to initiator. Initiator should eventually retry
4826 * PLOGI and situation will correct itself.
4827 */
4828 sess->keep_nport_handle = ((sess->loop_id == loop_id) &&
4829 (sess->d_id.b24 == port_id.b24));
4830
4831 ql_dbg(ql_dbg_disc, vha, 0x20f9,
4832 "%s %d %8phC post del sess\n",
4833 __func__, __LINE__, sess->port_name);
4834
4835
4836 qlt_schedule_sess_for_deletion(sess);
4837 break;
4838 }
4839 out:
4840 return res;
4841 }
4842
4843 /*
4844 * ha->hardware_lock supposed to be held on entry. Might drop it, then reacquire
4845 */
4846 static int qlt_24xx_handle_els(struct scsi_qla_host *vha,
4847 struct imm_ntfy_from_isp *iocb)
4848 {
4849 struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
4850 struct qla_hw_data *ha = vha->hw;
4851 struct fc_port *sess = NULL, *conflict_sess = NULL;
4852 uint64_t wwn;
4853 port_id_t port_id;
4854 uint16_t loop_id;
4855 uint16_t wd3_lo;
4856 int res = 0;
4857 unsigned long flags;
4858
4859 lockdep_assert_held(&ha->hardware_lock);
4860
4861 wwn = wwn_to_u64(iocb->u.isp24.port_name);
4862
4863 port_id.b.domain = iocb->u.isp24.port_id[2];
4864 port_id.b.area = iocb->u.isp24.port_id[1];
4865 port_id.b.al_pa = iocb->u.isp24.port_id[0];
4866 port_id.b.rsvd_1 = 0;
4867
4868 loop_id = le16_to_cpu(iocb->u.isp24.nport_handle);
4869
4870 ql_dbg(ql_dbg_disc, vha, 0xf026,
4871 "qla_target(%d): Port ID: %02x:%02x:%02x ELS opcode: 0x%02x lid %d %8phC\n",
4872 vha->vp_idx, iocb->u.isp24.port_id[2],
4873 iocb->u.isp24.port_id[1], iocb->u.isp24.port_id[0],
4874 iocb->u.isp24.status_subcode, loop_id,
4875 iocb->u.isp24.port_name);
4876
4877 /* res = 1 means ack at the end of thread
4878 * res = 0 means ack async/later.
4879 */
4880 switch (iocb->u.isp24.status_subcode) {
4881 case ELS_PLOGI:
4882 res = qlt_handle_login(vha, iocb);
4883 break;
4884
4885 case ELS_PRLI:
4886 if (N2N_TOPO(ha)) {
4887 sess = qla2x00_find_fcport_by_wwpn(vha,
4888 iocb->u.isp24.port_name, 1);
4889
4890 if (sess && sess->plogi_link[QLT_PLOGI_LINK_SAME_WWN]) {
4891 ql_dbg(ql_dbg_disc, vha, 0xffff,
4892 "%s %d %8phC Term PRLI due to PLOGI ACK not completed\n",
4893 __func__, __LINE__,
4894 iocb->u.isp24.port_name);
4895 qlt_send_term_imm_notif(vha, iocb, 1);
4896 break;
4897 }
4898
4899 res = qlt_handle_login(vha, iocb);
4900 break;
4901 }
4902
4903 if (IS_SW_RESV_ADDR(port_id)) {
4904 res = 1;
4905 break;
4906 }
4907
4908 wd3_lo = le16_to_cpu(iocb->u.isp24.u.prli.wd3_lo);
4909
4910 if (wwn) {
4911 spin_lock_irqsave(&tgt->ha->tgt.sess_lock, flags);
4912 sess = qlt_find_sess_invalidate_other(vha, wwn, port_id,
4913 loop_id, &conflict_sess);
4914 spin_unlock_irqrestore(&tgt->ha->tgt.sess_lock, flags);
4915 }
4916
4917 if (conflict_sess) {
4918 switch (conflict_sess->disc_state) {
4919 case DSC_DELETED:
4920 case DSC_DELETE_PEND:
4921 break;
4922 default:
4923 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf09b,
4924 "PRLI with conflicting sess %p port %8phC\n",
4925 conflict_sess, conflict_sess->port_name);
4926 conflict_sess->fw_login_state =
4927 DSC_LS_PORT_UNAVAIL;
4928 qlt_send_term_imm_notif(vha, iocb, 1);
4929 res = 0;
4930 break;
4931 }
4932 }
4933
4934 if (sess != NULL) {
4935 bool delete = false;
4936 int sec;
4937
4938 spin_lock_irqsave(&tgt->ha->tgt.sess_lock, flags);
4939 switch (sess->fw_login_state) {
4940 case DSC_LS_PLOGI_PEND:
4941 case DSC_LS_PLOGI_COMP:
4942 case DSC_LS_PRLI_COMP:
4943 break;
4944 default:
4945 delete = true;
4946 break;
4947 }
4948
4949 switch (sess->disc_state) {
4950 case DSC_UPD_FCPORT:
4951 spin_unlock_irqrestore(&tgt->ha->tgt.sess_lock,
4952 flags);
4953
4954 sec = jiffies_to_msecs(jiffies -
4955 sess->jiffies_at_registration)/1000;
4956 if (sess->sec_since_registration < sec && sec &&
4957 !(sec % 5)) {
4958 sess->sec_since_registration = sec;
4959 ql_dbg(ql_dbg_disc, sess->vha, 0xffff,
4960 "%s %8phC : Slow Rport registration(%d Sec)\n",
4961 __func__, sess->port_name, sec);
4962 }
4963 qlt_send_term_imm_notif(vha, iocb, 1);
4964 return 0;
4965
4966 case DSC_LOGIN_PEND:
4967 case DSC_GPDB:
4968 case DSC_LOGIN_COMPLETE:
4969 case DSC_ADISC:
4970 delete = false;
4971 break;
4972 default:
4973 break;
4974 }
4975
4976 if (delete) {
4977 spin_unlock_irqrestore(&tgt->ha->tgt.sess_lock,
4978 flags);
4979 /*
4980 * An impatient initiator sent PRLI before the last
4981 * PLOGI could finish. Force it to retry while the
4982 * previous login completes.
4983 */
4984 ql_log(ql_log_warn, sess->vha, 0xf095,
4985 "sess %p PRLI received, before plogi ack.\n",
4986 sess);
4987 qlt_send_term_imm_notif(vha, iocb, 1);
4988 res = 0;
4989 break;
4990 }
4991
4992 /*
4993 * This shouldn't happen under normal circumstances,
4994 * since we have deleted the old session during PLOGI
4995 */
4996 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf096,
4997 "PRLI (loop_id %#04x) for existing sess %p (loop_id %#04x)\n",
4998 sess->loop_id, sess, iocb->u.isp24.nport_handle);
4999
5000 sess->local = 0;
5001 sess->loop_id = loop_id;
5002 sess->d_id = port_id;
5003 sess->fw_login_state = DSC_LS_PRLI_PEND;
5004
5005 if (wd3_lo & BIT_7)
5006 sess->conf_compl_supported = 1;
5007
5008 if ((wd3_lo & BIT_4) == 0)
5009 sess->port_type = FCT_INITIATOR;
5010 else
5011 sess->port_type = FCT_TARGET;
5012
5013 spin_unlock_irqrestore(&tgt->ha->tgt.sess_lock, flags);
5014 }
5015 res = 1; /* send notify ack */
5016
5017 /* Make session global (not used in fabric mode) */
5018 if (ha->current_topology != ISP_CFG_F) {
5019 if (sess) {
5020 ql_dbg(ql_dbg_disc, vha, 0x20fa,
5021 "%s %d %8phC post nack\n",
5022 __func__, __LINE__, sess->port_name);
5023 qla24xx_post_nack_work(vha, sess, iocb,
5024 SRB_NACK_PRLI);
5025 res = 0;
5026 } else {
5027 set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
5028 set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
5029 qla2xxx_wake_dpc(vha);
5030 }
5031 } else {
5032 if (sess) {
5033 ql_dbg(ql_dbg_disc, vha, 0x20fb,
5034 "%s %d %8phC post nack\n",
5035 __func__, __LINE__, sess->port_name);
5036 qla24xx_post_nack_work(vha, sess, iocb,
5037 SRB_NACK_PRLI);
5038 res = 0;
5039 }
5040 }
5041 break;
5042
5043 case ELS_TPRLO:
5044 if (le16_to_cpu(iocb->u.isp24.flags) &
5045 NOTIFY24XX_FLAGS_GLOBAL_TPRLO) {
5046 loop_id = 0xFFFF;
5047 qlt_reset(vha, iocb, QLA_TGT_NEXUS_LOSS);
5048 res = 1;
5049 break;
5050 }
5051 /* fall through */
5052 case ELS_LOGO:
5053 case ELS_PRLO:
5054 spin_lock_irqsave(&ha->tgt.sess_lock, flags);
5055 sess = qla2x00_find_fcport_by_loopid(vha, loop_id);
5056 spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
5057
5058 if (sess) {
5059 sess->login_gen++;
5060 sess->fw_login_state = DSC_LS_LOGO_PEND;
5061 sess->logo_ack_needed = 1;
5062 memcpy(sess->iocb, iocb, IOCB_SIZE);
5063 }
5064
5065 res = qlt_reset(vha, iocb, QLA_TGT_NEXUS_LOSS_SESS);
5066
5067 ql_dbg(ql_dbg_disc, vha, 0x20fc,
5068 "%s: logo %llx res %d sess %p ",
5069 __func__, wwn, res, sess);
5070 if (res == 0) {
5071 /*
5072 * cmd went to the upper layer; see qlt_xmit_tm_rsp()
5073 * for the LOGO ACK & session deletion
5074 */
5075 BUG_ON(!sess);
5076 res = 0;
5077 } else {
5078 /* cmd did not go to upper layer. */
5079 if (sess) {
5080 qlt_schedule_sess_for_deletion(sess);
5081 res = 0;
5082 }
5083 /* else the LOGO will be acked */
5084 }
5085 break;
5086 case ELS_PDISC:
5087 case ELS_ADISC:
5088 {
5089 struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
5090
5091 if (tgt->link_reinit_iocb_pending) {
5092 qlt_send_notify_ack(ha->base_qpair,
5093 &tgt->link_reinit_iocb, 0, 0, 0, 0, 0, 0);
5094 tgt->link_reinit_iocb_pending = 0;
5095 }
5096
5097 sess = qla2x00_find_fcport_by_wwpn(vha,
5098 iocb->u.isp24.port_name, 1);
5099 if (sess) {
5100 ql_dbg(ql_dbg_disc, vha, 0x20fd,
5101 "sess %p lid %d|%d DS %d LS %d\n",
5102 sess, sess->loop_id, loop_id,
5103 sess->disc_state, sess->fw_login_state);
5104 }
5105
5106 res = 1; /* send notify ack */
5107 break;
5108 }
5109
5110 case ELS_FLOGI: /* should never happen */
5111 default:
5112 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf061,
5113 "qla_target(%d): Unsupported ELS command %x "
5114 "received\n", vha->vp_idx, iocb->u.isp24.status_subcode);
5115 res = qlt_reset(vha, iocb, QLA_TGT_NEXUS_LOSS_SESS);
5116 break;
5117 }
5118
5119 ql_dbg(ql_dbg_disc, vha, 0xf026,
5120 "qla_target(%d): Exit ELS opcode: 0x%02x res %d\n",
5121 vha->vp_idx, iocb->u.isp24.status_subcode, res);
5122
5123 return res;
5124 }
5125
5126 /*
5127 * ha->hardware_lock supposed to be held on entry. Might drop it, then reacquire
5128 */
5129 static void qlt_handle_imm_notify(struct scsi_qla_host *vha,
5130 struct imm_ntfy_from_isp *iocb)
5131 {
5132 struct qla_hw_data *ha = vha->hw;
5133 uint32_t add_flags = 0;
5134 int send_notify_ack = 1;
5135 uint16_t status;
5136
5137 lockdep_assert_held(&ha->hardware_lock);
5138
5139 status = le16_to_cpu(iocb->u.isp2x.status);
5140 switch (status) {
5141 case IMM_NTFY_LIP_RESET:
5142 {
5143 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf032,
5144 "qla_target(%d): LIP reset (loop %#x), subcode %x\n",
5145 vha->vp_idx, le16_to_cpu(iocb->u.isp24.nport_handle),
5146 iocb->u.isp24.status_subcode);
5147
5148 if (qlt_reset(vha, iocb, QLA_TGT_ABORT_ALL) == 0)
5149 send_notify_ack = 0;
5150 break;
5151 }
5152
5153 case IMM_NTFY_LIP_LINK_REINIT:
5154 {
5155 struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
5156
5157 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf033,
5158 "qla_target(%d): LINK REINIT (loop %#x, "
5159 "subcode %x)\n", vha->vp_idx,
5160 le16_to_cpu(iocb->u.isp24.nport_handle),
5161 iocb->u.isp24.status_subcode);
5162 if (tgt->link_reinit_iocb_pending) {
5163 qlt_send_notify_ack(ha->base_qpair,
5164 &tgt->link_reinit_iocb, 0, 0, 0, 0, 0, 0);
5165 }
5166 memcpy(&tgt->link_reinit_iocb, iocb, sizeof(*iocb));
5167 tgt->link_reinit_iocb_pending = 1;
5168 /*
5169 * QLogic requires waiting after LINK REINIT for possible
5170 * PDISC or ADISC ELS commands
5171 */
5172 send_notify_ack = 0;
5173 break;
5174 }
5175
5176 case IMM_NTFY_PORT_LOGOUT:
5177 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf034,
5178 "qla_target(%d): Port logout (loop "
5179 "%#x, subcode %x)\n", vha->vp_idx,
5180 le16_to_cpu(iocb->u.isp24.nport_handle),
5181 iocb->u.isp24.status_subcode);
5182
5183 if (qlt_reset(vha, iocb, QLA_TGT_NEXUS_LOSS_SESS) == 0)
5184 send_notify_ack = 0;
5185 /* The sessions will be cleared in the callback, if needed */
5186 break;
5187
5188 case IMM_NTFY_GLBL_TPRLO:
5189 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf035,
5190 "qla_target(%d): Global TPRLO (%x)\n", vha->vp_idx, status);
5191 if (qlt_reset(vha, iocb, QLA_TGT_NEXUS_LOSS) == 0)
5192 send_notify_ack = 0;
5193 /* The sessions will be cleared in the callback, if needed */
5194 break;
5195
5196 case IMM_NTFY_PORT_CONFIG:
5197 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf036,
5198 "qla_target(%d): Port config changed (%x)\n", vha->vp_idx,
5199 status);
5200 if (qlt_reset(vha, iocb, QLA_TGT_ABORT_ALL) == 0)
5201 send_notify_ack = 0;
5202 /* The sessions will be cleared in the callback, if needed */
5203 break;
5204
5205 case IMM_NTFY_GLBL_LOGO:
5206 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf06a,
5207 "qla_target(%d): Link failure detected\n",
5208 vha->vp_idx);
5209 /* I_T nexus loss */
5210 if (qlt_reset(vha, iocb, QLA_TGT_NEXUS_LOSS) == 0)
5211 send_notify_ack = 0;
5212 break;
5213
5214 case IMM_NTFY_IOCB_OVERFLOW:
5215 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf06b,
5216 "qla_target(%d): Cannot provide requested "
5217 "capability (IOCB overflowed the immediate notify "
5218 "resource count)\n", vha->vp_idx);
5219 break;
5220
5221 case IMM_NTFY_ABORT_TASK:
5222 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf037,
5223 "qla_target(%d): Abort Task (S %08x I %#x -> "
5224 "L %#x)\n", vha->vp_idx,
5225 le16_to_cpu(iocb->u.isp2x.seq_id),
5226 GET_TARGET_ID(ha, (struct atio_from_isp *)iocb),
5227 le16_to_cpu(iocb->u.isp2x.lun));
5228 if (qlt_abort_task(vha, iocb) == 0)
5229 send_notify_ack = 0;
5230 break;
5231
5232 case IMM_NTFY_RESOURCE:
5233 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf06c,
5234 "qla_target(%d): Out of resources, host %ld\n",
5235 vha->vp_idx, vha->host_no);
5236 break;
5237
5238 case IMM_NTFY_MSG_RX:
5239 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf038,
5240 "qla_target(%d): Immediate notify task %x\n",
5241 vha->vp_idx, iocb->u.isp2x.task_flags);
5242 break;
5243
5244 case IMM_NTFY_ELS:
5245 if (qlt_24xx_handle_els(vha, iocb) == 0)
5246 send_notify_ack = 0;
5247 break;
5248 default:
5249 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf06d,
5250 "qla_target(%d): Received unknown immediate "
5251 "notify status %x\n", vha->vp_idx, status);
5252 break;
5253 }
5254
5255 if (send_notify_ack)
5256 qlt_send_notify_ack(ha->base_qpair, iocb, add_flags, 0, 0, 0,
5257 0, 0);
5258 }
5259
5260 /*
5261 * ha->hardware_lock supposed to be held on entry. Might drop it, then reacquire
5262 * This function sends busy to ISP 2xxx or 24xx.
5263 */
5264 static int __qlt_send_busy(struct qla_qpair *qpair,
5265 struct atio_from_isp *atio, uint16_t status)
5266 {
5267 struct scsi_qla_host *vha = qpair->vha;
5268 struct ctio7_to_24xx *ctio24;
5269 struct qla_hw_data *ha = vha->hw;
5270 request_t *pkt;
5271 struct fc_port *sess = NULL;
5272 unsigned long flags;
5273 u16 temp;
5274 port_id_t id;
5275
5276 id = be_to_port_id(atio->u.isp24.fcp_hdr.s_id);
5277
5278 spin_lock_irqsave(&ha->tgt.sess_lock, flags);
5279 sess = qla2x00_find_fcport_by_nportid(vha, &id, 1);
5280 spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
5281 if (!sess) {
5282 qlt_send_term_exchange(qpair, NULL, atio, 1, 0);
5283 return 0;
5284 }
5285 /* Sending a marker isn't necessary, since we're called from the ISR */
5286
5287 pkt = (request_t *)__qla2x00_alloc_iocbs(qpair, NULL);
5288 if (!pkt) {
5289 ql_dbg(ql_dbg_io, vha, 0x3063,
5290 "qla_target(%d): %s failed: unable to allocate "
5291 "request packet", vha->vp_idx, __func__);
5292 return -ENOMEM;
5293 }
5294
5295 qpair->tgt_counters.num_q_full_sent++;
5296 pkt->entry_count = 1;
5297 pkt->handle = QLA_TGT_SKIP_HANDLE | CTIO_COMPLETION_HANDLE_MARK;
5298
5299 ctio24 = (struct ctio7_to_24xx *)pkt;
5300 ctio24->entry_type = CTIO_TYPE7;
5301 ctio24->nport_handle = sess->loop_id;
5302 ctio24->timeout = cpu_to_le16(QLA_TGT_TIMEOUT);
5303 ctio24->vp_index = vha->vp_idx;
5304 ctio24->initiator_id = be_id_to_le(atio->u.isp24.fcp_hdr.s_id);
5305 ctio24->exchange_addr = atio->u.isp24.exchange_addr;
5306 temp = (atio->u.isp24.attr << 9) |
5307 CTIO7_FLAGS_STATUS_MODE_1 | CTIO7_FLAGS_SEND_STATUS |
5308 CTIO7_FLAGS_DONT_RET_CTIO;
5309 ctio24->u.status1.flags = cpu_to_le16(temp);
5310 /*
5311 * CTIO from fw w/o se_cmd doesn't provide enough info to retry it,
5312 * if explicit confirmation is used.
5313 */
5314 ctio24->u.status1.ox_id = swab16(atio->u.isp24.fcp_hdr.ox_id);
5315 ctio24->u.status1.scsi_status = cpu_to_le16(status);
5316
5317 ctio24->u.status1.residual = get_datalen_for_atio(atio);
5318
5319 if (ctio24->u.status1.residual != 0)
5320 ctio24->u.status1.scsi_status |= SS_RESIDUAL_UNDER;
5321
5322 /* Memory Barrier */
5323 wmb();
5324 if (qpair->reqq_start_iocbs)
5325 qpair->reqq_start_iocbs(qpair);
5326 else
5327 qla2x00_start_iocbs(vha, qpair->req);
5328 return 0;
5329 }
5330
5331 /*
5332 * This routine is used to allocate a command for either a QFull condition
5333 * (i.e. reply SAM_STAT_BUSY) or to terminate an exchange that did not go
5334 * out previously.
5335 */
5336 static void
5337 qlt_alloc_qfull_cmd(struct scsi_qla_host *vha,
5338 struct atio_from_isp *atio, uint16_t status, int qfull)
5339 {
5340 struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
5341 struct qla_hw_data *ha = vha->hw;
5342 struct fc_port *sess;
5343 struct se_session *se_sess;
5344 struct qla_tgt_cmd *cmd;
5345 int tag, cpu;
5346 unsigned long flags;
5347
5348 if (unlikely(tgt->tgt_stop)) {
5349 ql_dbg(ql_dbg_io, vha, 0x300a,
5350 "New command while device %p is shutting down\n", tgt);
5351 return;
5352 }
5353
5354 if ((vha->hw->tgt.num_qfull_cmds_alloc + 1) > MAX_QFULL_CMDS_ALLOC) {
5355 vha->hw->tgt.num_qfull_cmds_dropped++;
5356 if (vha->hw->tgt.num_qfull_cmds_dropped >
5357 vha->qla_stats.stat_max_qfull_cmds_dropped)
5358 vha->qla_stats.stat_max_qfull_cmds_dropped =
5359 vha->hw->tgt.num_qfull_cmds_dropped;
5360
5361 ql_dbg(ql_dbg_io, vha, 0x3068,
5362 "qla_target(%d): %s: QFull CMD dropped[%d]\n",
5363 vha->vp_idx, __func__,
5364 vha->hw->tgt.num_qfull_cmds_dropped);
5365
5366 qlt_chk_exch_leak_thresh_hold(vha);
5367 return;
5368 }
5369
5370 sess = ha->tgt.tgt_ops->find_sess_by_s_id
5371 (vha, atio->u.isp24.fcp_hdr.s_id);
5372 if (!sess)
5373 return;
5374
5375 se_sess = sess->se_sess;
5376
5377 tag = sbitmap_queue_get(&se_sess->sess_tag_pool, &cpu);
5378 if (tag < 0) {
5379 ql_dbg(ql_dbg_io, vha, 0x3009,
5380 "qla_target(%d): %s: Allocation of cmd failed\n",
5381 vha->vp_idx, __func__);
5382
5383 vha->hw->tgt.num_qfull_cmds_dropped++;
5384 if (vha->hw->tgt.num_qfull_cmds_dropped >
5385 vha->qla_stats.stat_max_qfull_cmds_dropped)
5386 vha->qla_stats.stat_max_qfull_cmds_dropped =
5387 vha->hw->tgt.num_qfull_cmds_dropped;
5388
5389 qlt_chk_exch_leak_thresh_hold(vha);
5390 return;
5391 }
5392
5393 cmd = &((struct qla_tgt_cmd *)se_sess->sess_cmd_map)[tag];
5394 memset(cmd, 0, sizeof(struct qla_tgt_cmd));
5395
5396 qlt_incr_num_pend_cmds(vha);
5397 INIT_LIST_HEAD(&cmd->cmd_list);
5398 memcpy(&cmd->atio, atio, sizeof(*atio));
5399
5400 cmd->tgt = vha->vha_tgt.qla_tgt;
5401 cmd->vha = vha;
5402 cmd->reset_count = ha->base_qpair->chip_reset;
5403 cmd->q_full = 1;
5404 cmd->qpair = ha->base_qpair;
5405 cmd->se_cmd.map_cpu = cpu;
5406
5407 if (qfull) {
5408 cmd->q_full = 1;
5409 /* NOTE: borrowing the state field to carry the status */
5410 cmd->state = status;
5411 } else
5412 cmd->term_exchg = 1;
5413
5414 spin_lock_irqsave(&vha->hw->tgt.q_full_lock, flags);
5415 list_add_tail(&cmd->cmd_list, &vha->hw->tgt.q_full_list);
5416
5417 vha->hw->tgt.num_qfull_cmds_alloc++;
5418 if (vha->hw->tgt.num_qfull_cmds_alloc >
5419 vha->qla_stats.stat_max_qfull_cmds_alloc)
5420 vha->qla_stats.stat_max_qfull_cmds_alloc =
5421 vha->hw->tgt.num_qfull_cmds_alloc;
5422 spin_unlock_irqrestore(&vha->hw->tgt.q_full_lock, flags);
5423 }
5424
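/*
 * Drain the qfull list: replay the deferred BUSY status or terminate
 * exchange for each parked cmd, then free the ones that were sent.
 * On -ENOMEM the remaining cmds are spliced back onto the qfull list
 * for another retry.
 */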
5425 int
5426 qlt_free_qfull_cmds(struct qla_qpair *qpair)
5427 {
5428 struct scsi_qla_host *vha = qpair->vha;
5429 struct qla_hw_data *ha = vha->hw;
5430 unsigned long flags;
5431 struct qla_tgt_cmd *cmd, *tcmd;
5432 struct list_head free_list, q_full_list;
5433 int rc = 0;
5434
5435 if (list_empty(&ha->tgt.q_full_list))
5436 return 0;
5437
5438 INIT_LIST_HEAD(&free_list);
5439 INIT_LIST_HEAD(&q_full_list);
5440
5441 spin_lock_irqsave(&vha->hw->tgt.q_full_lock, flags);
5442 if (list_empty(&ha->tgt.q_full_list)) {
5443 spin_unlock_irqrestore(&vha->hw->tgt.q_full_lock, flags);
5444 return 0;
5445 }
5446
5447 list_splice_init(&vha->hw->tgt.q_full_list, &q_full_list);
5448 spin_unlock_irqrestore(&vha->hw->tgt.q_full_lock, flags);
5449
5450 spin_lock_irqsave(qpair->qp_lock_ptr, flags);
5451 list_for_each_entry_safe(cmd, tcmd, &q_full_list, cmd_list) {
5452 if (cmd->q_full)
5453 /* cmd->state is a borrowed field to hold status */
5454 rc = __qlt_send_busy(qpair, &cmd->atio, cmd->state);
5455 else if (cmd->term_exchg)
5456 rc = __qlt_send_term_exchange(qpair, NULL, &cmd->atio);
5457
5458 if (rc == -ENOMEM)
5459 break;
5460
5461 if (cmd->q_full)
5462 ql_dbg(ql_dbg_io, vha, 0x3006,
5463 "%s: busy sent for ox_id[%04x]\n", __func__,
5464 be16_to_cpu(cmd->atio.u.isp24.fcp_hdr.ox_id));
5465 else if (cmd->term_exchg)
5466 ql_dbg(ql_dbg_io, vha, 0x3007,
5467 "%s: Term exchg sent for ox_id[%04x]\n", __func__,
5468 be16_to_cpu(cmd->atio.u.isp24.fcp_hdr.ox_id));
5469 else
5470 ql_dbg(ql_dbg_io, vha, 0x3008,
5471 "%s: Unexpected cmd in QFull list %p\n", __func__,
5472 cmd);
5473
5474 list_del(&cmd->cmd_list);
5475 list_add_tail(&cmd->cmd_list, &free_list);
5476
5477 /* piggy back on hardware_lock for protection */
5478 vha->hw->tgt.num_qfull_cmds_alloc--;
5479 }
5480 spin_unlock_irqrestore(qpair->qp_lock_ptr, flags);
5481
5482 cmd = NULL;
5483
5484 list_for_each_entry_safe(cmd, tcmd, &free_list, cmd_list) {
5485 list_del(&cmd->cmd_list);
5486 /* This cmd was never sent to TCM. There is no need
5487 * to schedule free or call free_cmd
5488 */
5489 qlt_free_cmd(cmd);
5490 }
5491
5492 if (!list_empty(&q_full_list)) {
5493 spin_lock_irqsave(&vha->hw->tgt.q_full_lock, flags);
5494 list_splice(&q_full_list, &vha->hw->tgt.q_full_list);
5495 spin_unlock_irqrestore(&vha->hw->tgt.q_full_lock, flags);
5496 }
5497
5498 return rc;
5499 }
5500
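/*
 * Reply BUSY (or another SCSI status) to an ATIO; if no IOCB space is
 * available (-ENOMEM), defer the reply onto the qfull list.
 */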
5501 static void
5502 qlt_send_busy(struct qla_qpair *qpair, struct atio_from_isp *atio,
5503 uint16_t status)
5504 {
5505 int rc = 0;
5506 struct scsi_qla_host *vha = qpair->vha;
5507
5508 rc = __qlt_send_busy(qpair, atio, status);
5509 if (rc == -ENOMEM)
5510 qlt_alloc_qfull_cmd(vha, atio, status, 1);
5511 }
5512
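/*
 * Returns 1 after replying BUSY if the number of pending cmds has hit
 * the queue-full threshold, 0 otherwise.
 */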
5513 static int
5514 qlt_chk_qfull_thresh_hold(struct scsi_qla_host *vha, struct qla_qpair *qpair,
5515 struct atio_from_isp *atio, uint8_t ha_locked)
5516 {
5517 struct qla_hw_data *ha = vha->hw;
5518 unsigned long flags;
5519
5520 if (ha->tgt.num_pend_cmds < Q_FULL_THRESH_HOLD(ha))
5521 return 0;
5522
5523 if (!ha_locked)
5524 spin_lock_irqsave(&ha->hardware_lock, flags);
5525 qlt_send_busy(qpair, atio, qla_sam_status);
5526 if (!ha_locked)
5527 spin_unlock_irqrestore(&ha->hardware_lock, flags);
5528
5529 return 1;
5530 }
5531
5532 /* ha->hardware_lock supposed to be held on entry */
5533 /* called via callback from qla2xxx */
5534 static void qlt_24xx_atio_pkt(struct scsi_qla_host *vha,
5535 struct atio_from_isp *atio, uint8_t ha_locked)
5536 {
5537 struct qla_hw_data *ha = vha->hw;
5538 struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
5539 int rc;
5540 unsigned long flags = 0;
5541
5542 if (unlikely(tgt == NULL)) {
5543 ql_dbg(ql_dbg_tgt, vha, 0x3064,
5544 "ATIO pkt, but no tgt (ha %p)", ha);
5545 return;
5546 }
5547 /*
5548 * In tgt_stop mode we should also allow all requests to pass.
5549 * Otherwise, some commands can get stuck.
5550 */
5551
5552 tgt->atio_irq_cmd_count++;
5553
5554 switch (atio->u.raw.entry_type) {
5555 case ATIO_TYPE7:
5556 if (unlikely(atio->u.isp24.exchange_addr ==
5557 ATIO_EXCHANGE_ADDRESS_UNKNOWN)) {
5558 ql_dbg(ql_dbg_io, vha, 0x3065,
5559 "qla_target(%d): ATIO_TYPE7 "
5560 "received with UNKNOWN exchange address, "
5561 "sending QUEUE_FULL\n", vha->vp_idx);
5562 if (!ha_locked)
5563 spin_lock_irqsave(&ha->hardware_lock, flags);
5564 qlt_send_busy(ha->base_qpair, atio, qla_sam_status);
5565 if (!ha_locked)
5566 spin_unlock_irqrestore(&ha->hardware_lock,
5567 flags);
5568 break;
5569 }
5570
5571 if (likely(atio->u.isp24.fcp_cmnd.task_mgmt_flags == 0)) {
5572 rc = qlt_chk_qfull_thresh_hold(vha, ha->base_qpair,
5573 atio, ha_locked);
5574 if (rc != 0) {
5575 tgt->atio_irq_cmd_count--;
5576 return;
5577 }
5578 rc = qlt_handle_cmd_for_atio(vha, atio);
5579 } else {
5580 rc = qlt_handle_task_mgmt(vha, atio);
5581 }
5582 if (unlikely(rc != 0)) {
5583 if (!ha_locked)
5584 spin_lock_irqsave(&ha->hardware_lock, flags);
5585 switch (rc) {
5586 case -ENODEV:
5587 ql_dbg(ql_dbg_tgt, vha, 0xe05f,
5588 "qla_target: Unable to send command to target\n");
5589 break;
5590 case -EBADF:
5591 ql_dbg(ql_dbg_tgt, vha, 0xe05f,
5592 "qla_target: Unable to send command to target, sending TERM EXCHANGE for rsp\n");
5593 qlt_send_term_exchange(ha->base_qpair, NULL,
5594 atio, 1, 0);
5595 break;
5596 case -EBUSY:
5597 ql_dbg(ql_dbg_tgt, vha, 0xe060,
5598 "qla_target(%d): Unable to send command to target, sending BUSY status\n",
5599 vha->vp_idx);
5600 qlt_send_busy(ha->base_qpair, atio,
5601 tc_sam_status);
5602 break;
5603 default:
5604 ql_dbg(ql_dbg_tgt, vha, 0xe060,
5605 "qla_target(%d): Unable to send command to target, sending BUSY status\n",
5606 vha->vp_idx);
5607 qlt_send_busy(ha->base_qpair, atio,
5608 qla_sam_status);
5609 break;
5610 }
5611 if (!ha_locked)
5612 spin_unlock_irqrestore(&ha->hardware_lock,
5613 flags);
5614 }
5615 break;
5616
5617 case IMMED_NOTIFY_TYPE:
5618 {
5619 if (unlikely(atio->u.isp2x.entry_status != 0)) {
5620 ql_dbg(ql_dbg_tgt, vha, 0xe05b,
5621 "qla_target(%d): Received ATIO packet %x "
5622 "with error status %x\n", vha->vp_idx,
5623 atio->u.raw.entry_type,
5624 atio->u.isp2x.entry_status);
5625 break;
5626 }
5627 ql_dbg(ql_dbg_tgt, vha, 0xe02e, "%s", "IMMED_NOTIFY ATIO");
5628
5629 if (!ha_locked)
5630 spin_lock_irqsave(&ha->hardware_lock, flags);
5631 qlt_handle_imm_notify(vha, (struct imm_ntfy_from_isp *)atio);
5632 if (!ha_locked)
5633 spin_unlock_irqrestore(&ha->hardware_lock, flags);
5634 break;
5635 }
5636
5637 default:
5638 ql_dbg(ql_dbg_tgt, vha, 0xe05c,
5639 "qla_target(%d): Received unknown ATIO atio "
5640 "type %x\n", vha->vp_idx, atio->u.raw.entry_type);
5641 break;
5642 }
5643
5644 tgt->atio_irq_cmd_count--;
5645 }
5646
5647 /*
5648 * qpair lock is assumed to be held
5649 * rc = 0 : send terminate & ABTS response
5650 * rc != 0: do not send terminate & ABTS response
5651 */
5652 static int qlt_chk_unresolv_exchg(struct scsi_qla_host *vha,
5653 struct qla_qpair *qpair, struct abts_resp_from_24xx_fw *entry)
5654 {
5655 struct qla_hw_data *ha = vha->hw;
5656 int rc = 0;
5657
5658 /*
5659 * Detect an unresolved exchange. If the same ABTS is unable
5660 * to terminate an existing command and the same ABTS loops
5661 * between FW & Driver, then force a FW dump. Within a single
5662 * jiffy we should see multiple loops.
5663 */
5664 if (qpair->retry_term_exchg_addr == entry->exchange_addr_to_abort &&
5665 qpair->retry_term_jiff == jiffies) {
5666 /* found existing exchange */
5667 qpair->retry_term_cnt++;
5668 if (qpair->retry_term_cnt >= 5) {
5669 rc = EIO;
5670 qpair->retry_term_cnt = 0;
5671 ql_log(ql_log_warn, vha, 0xffff,
5672 "Unable to send ABTS Respond. Dumping firmware.\n");
5673 ql_dump_buffer(ql_dbg_tgt_mgt + ql_dbg_buffer,
5674 vha, 0xffff, (uint8_t *)entry, sizeof(*entry));
5675
5676 if (qpair == ha->base_qpair)
5677 ha->isp_ops->fw_dump(vha, 1);
5678 else
5679 ha->isp_ops->fw_dump(vha, 0);
5680
5681 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
5682 qla2xxx_wake_dpc(vha);
5683 }
5684 } else if (qpair->retry_term_jiff != jiffies) {
5685 qpair->retry_term_exchg_addr = entry->exchange_addr_to_abort;
5686 qpair->retry_term_cnt = 0;
5687 qpair->retry_term_jiff = jiffies;
5688 }
5689
5690 return rc;
5691 }
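/*
 * The check above is a generic "same event repeating within one jiffy"
 * detector. A self-contained sketch of the pattern, using hypothetical
 * names (illustrative only, compiled out):
 */
#if 0
static bool event_is_looping(u32 *last_key, unsigned long *last_jiff,
			     u32 *cnt, u32 key, u32 limit)
{
	if (*last_key == key && *last_jiff == jiffies) {
		/* Same event observed again within the same jiffy. */
		if (++(*cnt) >= limit) {
			*cnt = 0;
			return true;	/* caller escalates, e.g. fw dump */
		}
	} else if (*last_jiff != jiffies) {
		/* New jiffy window: re-arm the detector for this event. */
		*last_key = key;
		*cnt = 0;
		*last_jiff = jiffies;
	}
	return false;
}
#endif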
5692
5693
5694 static void qlt_handle_abts_completion(struct scsi_qla_host *vha,
5695 struct rsp_que *rsp, response_t *pkt)
5696 {
5697 struct abts_resp_from_24xx_fw *entry =
5698 (struct abts_resp_from_24xx_fw *)pkt;
5699 u32 h = pkt->handle & ~QLA_TGT_HANDLE_MASK;
5700 struct qla_tgt_mgmt_cmd *mcmd;
5701 struct qla_hw_data *ha = vha->hw;
5702
5703 mcmd = qlt_ctio_to_cmd(vha, rsp, pkt->handle, pkt);
5704 if (mcmd == NULL && h != QLA_TGT_SKIP_HANDLE) {
5705 ql_dbg(ql_dbg_async, vha, 0xe064,
5706 "qla_target(%d): ABTS Comp without mcmd\n",
5707 vha->vp_idx);
5708 return;
5709 }
5710
5711 if (mcmd)
5712 vha = mcmd->vha;
5713 vha->vha_tgt.qla_tgt->abts_resp_expected--;
5714
5715 ql_dbg(ql_dbg_tgt, vha, 0xe038,
5716 "ABTS_RESP_24XX: compl_status %x\n",
5717 entry->compl_status);
5718
5719 if (le16_to_cpu(entry->compl_status) != ABTS_RESP_COMPL_SUCCESS) {
5720 if ((entry->error_subcode1 == 0x1E) &&
5721 (entry->error_subcode2 == 0)) {
5722 if (qlt_chk_unresolv_exchg(vha, rsp->qpair, entry)) {
5723 ha->tgt.tgt_ops->free_mcmd(mcmd);
5724 return;
5725 }
5726 qlt_24xx_retry_term_exchange(vha, rsp->qpair,
5727 pkt, mcmd);
5728 } else {
5729 ql_dbg(ql_dbg_tgt, vha, 0xe063,
5730 "qla_target(%d): ABTS_RESP_24XX failed %x (subcode %x:%x)",
5731 vha->vp_idx, entry->compl_status,
5732 entry->error_subcode1,
5733 entry->error_subcode2);
5734 ha->tgt.tgt_ops->free_mcmd(mcmd);
5735 }
5736 } else if (mcmd) {
5737 ha->tgt.tgt_ops->free_mcmd(mcmd);
5738 }
5739 }
5740
5741 /* ha->hardware_lock supposed to be held on entry */
5742 /* called via callback from qla2xxx */
5743 static void qlt_response_pkt(struct scsi_qla_host *vha,
5744 struct rsp_que *rsp, response_t *pkt)
5745 {
5746 struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
5747
5748 if (unlikely(tgt == NULL)) {
5749 ql_dbg(ql_dbg_tgt, vha, 0xe05d,
5750 "qla_target(%d): Response pkt %x received, but no tgt (ha %p)\n",
5751 vha->vp_idx, pkt->entry_type, vha->hw);
5752 return;
5753 }
5754
5755 /*
5756 * In tgt_stop mode we should also allow all requests to pass.
5757 * Otherwise, some commands can get stuck.
5758 */
5759
5760 switch (pkt->entry_type) {
5761 case CTIO_CRC2:
5762 case CTIO_TYPE7:
5763 {
5764 struct ctio7_from_24xx *entry = (struct ctio7_from_24xx *)pkt;
5765
5766 qlt_do_ctio_completion(vha, rsp, entry->handle,
5767 le16_to_cpu(entry->status)|(pkt->entry_status << 16),
5768 entry);
5769 break;
5770 }
5771
5772 case ACCEPT_TGT_IO_TYPE:
5773 {
5774 struct atio_from_isp *atio = (struct atio_from_isp *)pkt;
5775 int rc;
5776
5777 if (atio->u.isp2x.status !=
5778 cpu_to_le16(ATIO_CDB_VALID)) {
5779 ql_dbg(ql_dbg_tgt, vha, 0xe05e,
5780 "qla_target(%d): ATIO with error "
5781 "status %x received\n", vha->vp_idx,
5782 le16_to_cpu(atio->u.isp2x.status));
5783 break;
5784 }
5785
5786 rc = qlt_chk_qfull_thresh_hold(vha, rsp->qpair, atio, 1);
5787 if (rc != 0)
5788 return;
5789
5790 rc = qlt_handle_cmd_for_atio(vha, atio);
5791 if (unlikely(rc != 0)) {
5792 switch (rc) {
5793 case -ENODEV:
5794 ql_dbg(ql_dbg_tgt, vha, 0xe05f,
5795 "qla_target: Unable to send command to target\n");
5796 break;
5797 case -EBADF:
5798 ql_dbg(ql_dbg_tgt, vha, 0xe05f,
5799 "qla_target: Unable to send command to target, sending TERM EXCHANGE for rsp\n");
5800 qlt_send_term_exchange(rsp->qpair, NULL,
5801 atio, 1, 0);
5802 break;
5803 case -EBUSY:
5804 ql_dbg(ql_dbg_tgt, vha, 0xe060,
5805 "qla_target(%d): Unable to send command to target, sending BUSY status\n",
5806 vha->vp_idx);
5807 qlt_send_busy(rsp->qpair, atio,
5808 tc_sam_status);
5809 break;
5810 default:
5811 ql_dbg(ql_dbg_tgt, vha, 0xe060,
5812 "qla_target(%d): Unable to send command to target, sending BUSY status\n",
5813 vha->vp_idx);
5814 qlt_send_busy(rsp->qpair, atio,
5815 qla_sam_status);
5816 break;
5817 }
5818 }
5819 }
5820 break;
5821
5822 case CONTINUE_TGT_IO_TYPE:
5823 {
5824 struct ctio_to_2xxx *entry = (struct ctio_to_2xxx *)pkt;
5825
5826 qlt_do_ctio_completion(vha, rsp, entry->handle,
5827 le16_to_cpu(entry->status)|(pkt->entry_status << 16),
5828 entry);
5829 break;
5830 }
5831
5832 case CTIO_A64_TYPE:
5833 {
5834 struct ctio_to_2xxx *entry = (struct ctio_to_2xxx *)pkt;
5835
5836 qlt_do_ctio_completion(vha, rsp, entry->handle,
5837 le16_to_cpu(entry->status)|(pkt->entry_status << 16),
5838 entry);
5839 break;
5840 }
5841
5842 case IMMED_NOTIFY_TYPE:
5843 ql_dbg(ql_dbg_tgt, vha, 0xe035, "%s", "IMMED_NOTIFY\n");
5844 qlt_handle_imm_notify(vha, (struct imm_ntfy_from_isp *)pkt);
5845 break;
5846
5847 case NOTIFY_ACK_TYPE:
5848 if (tgt->notify_ack_expected > 0) {
5849 struct nack_to_isp *entry = (struct nack_to_isp *)pkt;
5850
5851 ql_dbg(ql_dbg_tgt, vha, 0xe036,
5852 "NOTIFY_ACK seq %08x status %x\n",
5853 le16_to_cpu(entry->u.isp2x.seq_id),
5854 le16_to_cpu(entry->u.isp2x.status));
5855 tgt->notify_ack_expected--;
5856 if (entry->u.isp2x.status !=
5857 cpu_to_le16(NOTIFY_ACK_SUCCESS)) {
5858 ql_dbg(ql_dbg_tgt, vha, 0xe061,
5859 "qla_target(%d): NOTIFY_ACK "
5860 "failed %x\n", vha->vp_idx,
5861 le16_to_cpu(entry->u.isp2x.status));
5862 }
5863 } else {
5864 ql_dbg(ql_dbg_tgt, vha, 0xe062,
5865 "qla_target(%d): Unexpected NOTIFY_ACK received\n",
5866 vha->vp_idx);
5867 }
5868 break;
5869
5870 case ABTS_RECV_24XX:
5871 ql_dbg(ql_dbg_tgt, vha, 0xe037,
5872 "ABTS_RECV_24XX: instance %d\n", vha->vp_idx);
5873 qlt_24xx_handle_abts(vha, (struct abts_recv_from_24xx *)pkt);
5874 break;
5875
5876 case ABTS_RESP_24XX:
5877 if (tgt->abts_resp_expected > 0) {
5878 qlt_handle_abts_completion(vha, rsp, pkt);
5879 } else {
5880 ql_dbg(ql_dbg_tgt, vha, 0xe064,
5881 "qla_target(%d): Unexpected ABTS_RESP_24XX "
5882 "received\n", vha->vp_idx);
5883 }
5884 break;
5885
5886 default:
5887 ql_dbg(ql_dbg_tgt, vha, 0xe065,
5888 "qla_target(%d): Received unknown response pkt "
5889 "type %x\n", vha->vp_idx, pkt->entry_type);
5890 break;
5891 }
5892
5893 }
5894
5895 /*
5896 * ha->hardware_lock supposed to be held on entry. Might drop it, then reacquire
5897 */
5898 void qlt_async_event(uint16_t code, struct scsi_qla_host *vha,
5899 uint16_t *mailbox)
5900 {
5901 struct qla_hw_data *ha = vha->hw;
5902 struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
5903 int login_code;
5904
5905 if (!tgt || tgt->tgt_stop || tgt->tgt_stopped)
5906 return;
5907
5908 if (((code == MBA_POINT_TO_POINT) || (code == MBA_CHG_IN_CONNECTION)) &&
5909 IS_QLA2100(ha))
5910 return;
5911 /*
5912 * In tgt_stop mode we should also allow all requests to pass.
5913 * Otherwise, some commands can get stuck.
5914 */
5915
5916
5917 switch (code) {
5918 case MBA_RESET: /* Reset */
5919 case MBA_SYSTEM_ERR: /* System Error */
5920 case MBA_REQ_TRANSFER_ERR: /* Request Transfer Error */
5921 case MBA_RSP_TRANSFER_ERR: /* Response Transfer Error */
5922 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf03a,
5923 "qla_target(%d): System error async event %#x "
5924 "occurred", vha->vp_idx, code);
5925 break;
5926 case MBA_WAKEUP_THRES: /* Request Queue Wake-up. */
5927 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
5928 break;
5929
5930 case MBA_LOOP_UP:
5931 {
5932 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf03b,
5933 "qla_target(%d): Async LOOP_UP occurred "
5934 "(m[0]=%x, m[1]=%x, m[2]=%x, m[3]=%x)", vha->vp_idx,
5935 le16_to_cpu(mailbox[0]), le16_to_cpu(mailbox[1]),
5936 le16_to_cpu(mailbox[2]), le16_to_cpu(mailbox[3]));
5937 if (tgt->link_reinit_iocb_pending) {
5938 qlt_send_notify_ack(ha->base_qpair,
5939 (void *)&tgt->link_reinit_iocb,
5940 0, 0, 0, 0, 0, 0);
5941 tgt->link_reinit_iocb_pending = 0;
5942 }
5943 break;
5944 }
5945
5946 case MBA_LIP_OCCURRED:
5947 case MBA_LOOP_DOWN:
5948 case MBA_LIP_RESET:
5949 case MBA_RSCN_UPDATE:
5950 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf03c,
5951 "qla_target(%d): Async event %#x occurred "
5952 "(m[0]=%x, m[1]=%x, m[2]=%x, m[3]=%x)", vha->vp_idx, code,
5953 le16_to_cpu(mailbox[0]), le16_to_cpu(mailbox[1]),
5954 le16_to_cpu(mailbox[2]), le16_to_cpu(mailbox[3]));
5955 break;
5956
5957 case MBA_REJECTED_FCP_CMD:
5958 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf017,
5959 "qla_target(%d): Async event LS_REJECT occurred (m[0]=%x, m[1]=%x, m[2]=%x, m[3]=%x)",
5960 vha->vp_idx,
5961 le16_to_cpu(mailbox[0]), le16_to_cpu(mailbox[1]),
5962 le16_to_cpu(mailbox[2]), le16_to_cpu(mailbox[3]));
5963
5964 if (le16_to_cpu(mailbox[3]) == 1) {
5965 /* exchange starvation. */
5966 vha->hw->exch_starvation++;
5967 if (vha->hw->exch_starvation > 5) {
5968 ql_log(ql_log_warn, vha, 0xd03a,
5969 "Exchange starvation-. Resetting RISC\n");
5970
5971 vha->hw->exch_starvation = 0;
5972 if (IS_P3P_TYPE(vha->hw))
5973 set_bit(FCOE_CTX_RESET_NEEDED,
5974 &vha->dpc_flags);
5975 else
5976 set_bit(ISP_ABORT_NEEDED,
5977 &vha->dpc_flags);
5978 qla2xxx_wake_dpc(vha);
5979 }
5980 }
5981 break;
5982
5983 case MBA_PORT_UPDATE:
5984 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf03d,
5985 "qla_target(%d): Port update async event %#x "
5986 "occurred: updating the ports database (m[0]=%x, m[1]=%x, "
5987 "m[2]=%x, m[3]=%x)", vha->vp_idx, code,
5988 le16_to_cpu(mailbox[0]), le16_to_cpu(mailbox[1]),
5989 le16_to_cpu(mailbox[2]), le16_to_cpu(mailbox[3]));
5990
5991 login_code = le16_to_cpu(mailbox[2]);
5992 if (login_code == 0x4) {
5993 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf03e,
5994 "Async MB 2: Got PLOGI Complete\n");
5995 vha->hw->exch_starvation = 0;
5996 } else if (login_code == 0x7)
5997 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf03f,
5998 "Async MB 2: Port Logged Out\n");
5999 break;
6000 default:
6001 break;
6002 }
6003
6004 }
6005
6006 static fc_port_t *qlt_get_port_database(struct scsi_qla_host *vha,
6007 uint16_t loop_id)
6008 {
6009 fc_port_t *fcport, *tfcp, *del;
6010 int rc;
6011 unsigned long flags;
6012 u8 newfcport = 0;
6013
6014 fcport = qla2x00_alloc_fcport(vha, GFP_KERNEL);
6015 if (!fcport) {
6016 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf06f,
6017 "qla_target(%d): Allocation of tmp FC port failed",
6018 vha->vp_idx);
6019 return NULL;
6020 }
6021
6022 fcport->loop_id = loop_id;
6023
6024 rc = qla24xx_gpdb_wait(vha, fcport, 0);
6025 if (rc != QLA_SUCCESS) {
6026 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf070,
6027 "qla_target(%d): Failed to retrieve fcport "
6028 "information -- get_port_database() returned %x "
6029 "(loop_id=0x%04x)", vha->vp_idx, rc, loop_id);
6030 kfree(fcport);
6031 return NULL;
6032 }
6033
6034 del = NULL;
6035 spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags);
6036 tfcp = qla2x00_find_fcport_by_wwpn(vha, fcport->port_name, 1);
6037
6038 if (tfcp) {
6039 tfcp->d_id = fcport->d_id;
6040 tfcp->port_type = fcport->port_type;
6041 tfcp->supported_classes = fcport->supported_classes;
6042 tfcp->flags |= fcport->flags;
6043 tfcp->scan_state = QLA_FCPORT_FOUND;
6044
6045 del = fcport;
6046 fcport = tfcp;
6047 } else {
6048 if (vha->hw->current_topology == ISP_CFG_F)
6049 fcport->flags |= FCF_FABRIC_DEVICE;
6050
6051 list_add_tail(&fcport->list, &vha->vp_fcports);
6052 if (!IS_SW_RESV_ADDR(fcport->d_id))
6053 vha->fcport_count++;
6054 fcport->login_gen++;
6055 fcport->disc_state = DSC_LOGIN_COMPLETE;
6056 fcport->login_succ = 1;
6057 newfcport = 1;
6058 }
6059
6060 fcport->deleted = 0;
6061 spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
6062
6063 switch (vha->host->active_mode) {
6064 case MODE_INITIATOR:
6065 case MODE_DUAL:
6066 if (newfcport) {
6067 if (!IS_IIDMA_CAPABLE(vha->hw) || !vha->hw->flags.gpsc_supported) {
6068 qla24xx_sched_upd_fcport(fcport);
6069 } else {
6070 ql_dbg(ql_dbg_disc, vha, 0x20ff,
6071 "%s %d %8phC post gpsc fcp_cnt %d\n",
6072 __func__, __LINE__, fcport->port_name, vha->fcport_count);
6073 qla24xx_post_gpsc_work(vha, fcport);
6074 }
6075 }
6076 break;
6077
6078 case MODE_TARGET:
6079 default:
6080 break;
6081 }
6082 if (del)
6083 qla2x00_free_fcport(del);
6084
6085 return fcport;
6086 }
6087
6088 /* Must be called under tgt_mutex */
6089 static struct fc_port *qlt_make_local_sess(struct scsi_qla_host *vha,
6090 be_id_t s_id)
6091 {
6092 struct fc_port *sess = NULL;
6093 fc_port_t *fcport = NULL;
6094 int rc, global_resets;
6095 uint16_t loop_id = 0;
6096
6097 if (s_id.domain == 0xFF && s_id.area == 0xFC) {
6098 /*
6099 * This is the Domain Controller, so it should be
6100 * OK to drop SCSI commands from it.
6101 */
6102 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf042,
6103 "Unable to find initiator with S_ID %x:%x:%x",
6104 s_id.domain, s_id.area, s_id.al_pa);
6105 return NULL;
6106 }
6107
6108 mutex_lock(&vha->vha_tgt.tgt_mutex);
6109
6110 retry:
6111 global_resets =
6112 atomic_read(&vha->vha_tgt.qla_tgt->tgt_global_resets_count);
6113
6114 rc = qla24xx_get_loop_id(vha, s_id, &loop_id);
6115 if (rc != 0) {
6116 mutex_unlock(&vha->vha_tgt.tgt_mutex);
6117
6118 ql_log(ql_log_info, vha, 0xf071,
6119 "qla_target(%d): Unable to find "
6120 "initiator with S_ID %x:%x:%x",
6121 vha->vp_idx, s_id.domain, s_id.area, s_id.al_pa);
6122
6123 if (rc == -ENOENT) {
6124 qlt_port_logo_t logo;
6125
6126 logo.id = be_to_port_id(s_id);
6127 logo.cmd_count = 1;
6128 qlt_send_first_logo(vha, &logo);
6129 }
6130
6131 return NULL;
6132 }
6133
6134 fcport = qlt_get_port_database(vha, loop_id);
6135 if (!fcport) {
6136 mutex_unlock(&vha->vha_tgt.tgt_mutex);
6137 return NULL;
6138 }
6139
6140 if (global_resets !=
6141 atomic_read(&vha->vha_tgt.qla_tgt->tgt_global_resets_count)) {
6142 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf043,
6143 "qla_target(%d): global reset during session discovery "
6144 "(counter was %d, new %d), retrying", vha->vp_idx,
6145 global_resets,
6146 atomic_read(&vha->vha_tgt.
6147 qla_tgt->tgt_global_resets_count));
6148 goto retry;
6149 }
6150
6151 sess = qlt_create_sess(vha, fcport, true);
6152
6153 mutex_unlock(&vha->vha_tgt.tgt_mutex);
6154
6155 return sess;
6156 }
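/*
 * Design note: the retry loop above is optimistic concurrency control.
 * tgt_global_resets_count is sampled before the (potentially sleeping)
 * loop-ID and port-database lookups; if a global reset bumped the counter
 * in the meantime, the now-stale discovery result is discarded and the
 * whole lookup is redone instead of holding a lock across the slow path.
 */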
6157
6158 static void qlt_abort_work(struct qla_tgt *tgt,
6159 struct qla_tgt_sess_work_param *prm)
6160 {
6161 struct scsi_qla_host *vha = tgt->vha;
6162 struct qla_hw_data *ha = vha->hw;
6163 struct fc_port *sess = NULL;
6164 unsigned long flags = 0, flags2 = 0;
6165 be_id_t s_id;
6166 int rc;
6167
6168 spin_lock_irqsave(&ha->tgt.sess_lock, flags2);
6169
6170 if (tgt->tgt_stop)
6171 goto out_term2;
6172
6173 s_id = le_id_to_be(prm->abts.fcp_hdr_le.s_id);
6174
6175 sess = ha->tgt.tgt_ops->find_sess_by_s_id(vha, s_id);
6176 if (!sess) {
6177 spin_unlock_irqrestore(&ha->tgt.sess_lock, flags2);
6178
6179 sess = qlt_make_local_sess(vha, s_id);
6180 /* sess has got an extra creation ref */
6181
6182 spin_lock_irqsave(&ha->tgt.sess_lock, flags2);
6183 if (!sess)
6184 goto out_term2;
6185 } else {
6186 if (sess->deleted) {
6187 sess = NULL;
6188 goto out_term2;
6189 }
6190
6191 if (!kref_get_unless_zero(&sess->sess_kref)) {
6192 ql_dbg(ql_dbg_tgt_tmr, vha, 0xf01c,
6193 "%s: kref_get fail %8phC \n",
6194 __func__, sess->port_name);
6195 sess = NULL;
6196 goto out_term2;
6197 }
6198 }
6199
6200 rc = __qlt_24xx_handle_abts(vha, &prm->abts, sess);
6201 spin_unlock_irqrestore(&ha->tgt.sess_lock, flags2);
6202
6203 ha->tgt.tgt_ops->put_sess(sess);
6204
6205 if (rc != 0)
6206 goto out_term;
6207 return;
6208
6209 out_term2:
6210 spin_unlock_irqrestore(&ha->tgt.sess_lock, flags2);
6211
6212 out_term:
6213 spin_lock_irqsave(&ha->hardware_lock, flags);
6214 qlt_24xx_send_abts_resp(ha->base_qpair, &prm->abts,
6215 FCP_TMF_REJECTED, false);
6216 spin_unlock_irqrestore(&ha->hardware_lock, flags);
6217 }
6218
6219 static void qlt_tmr_work(struct qla_tgt *tgt,
6220 struct qla_tgt_sess_work_param *prm)
6221 {
6222 struct atio_from_isp *a = &prm->tm_iocb2;
6223 struct scsi_qla_host *vha = tgt->vha;
6224 struct qla_hw_data *ha = vha->hw;
6225 struct fc_port *sess;
6226 unsigned long flags;
6227 be_id_t s_id;
6228 int rc;
6229 u64 unpacked_lun;
6230 int fn;
6231 void *iocb;
6232
6233 spin_lock_irqsave(&ha->tgt.sess_lock, flags);
6234
6235 if (tgt->tgt_stop)
6236 goto out_term2;
6237
6238 s_id = prm->tm_iocb2.u.isp24.fcp_hdr.s_id;
6239 sess = ha->tgt.tgt_ops->find_sess_by_s_id(vha, s_id);
6240 if (!sess) {
6241 spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
6242
6243 sess = qlt_make_local_sess(vha, s_id);
6244 /* sess has got an extra creation ref */
6245
6246 spin_lock_irqsave(&ha->tgt.sess_lock, flags);
6247 if (!sess)
6248 goto out_term2;
6249 } else {
6250 if (sess->deleted) {
6251 goto out_term2;
6252 }
6253
6254 if (!kref_get_unless_zero(&sess->sess_kref)) {
6255 ql_dbg(ql_dbg_tgt_tmr, vha, 0xf020,
6256 "%s: kref_get fail %8phC\n",
6257 __func__, sess->port_name);
6258 goto out_term2;
6259 }
6260 }
6261
6262 iocb = a;
6263 fn = a->u.isp24.fcp_cmnd.task_mgmt_flags;
6264 unpacked_lun =
6265 scsilun_to_int((struct scsi_lun *)&a->u.isp24.fcp_cmnd.lun);
6266
6267 rc = qlt_issue_task_mgmt(sess, unpacked_lun, fn, iocb, 0);
6268 spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
6269
6270 ha->tgt.tgt_ops->put_sess(sess);
6271
6272 if (rc != 0)
6273 goto out_term;
6274 return;
6275
6276 out_term2:
6277 spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
6278 out_term:
6279 qlt_send_term_exchange(ha->base_qpair, NULL, &prm->tm_iocb2, 1, 0);
6280 }
6281
6282 static void qlt_sess_work_fn(struct work_struct *work)
6283 {
6284 struct qla_tgt *tgt = container_of(work, struct qla_tgt, sess_work);
6285 struct scsi_qla_host *vha = tgt->vha;
6286 unsigned long flags;
6287
6288 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf000, "Sess work (tgt %p)", tgt);
6289
6290 spin_lock_irqsave(&tgt->sess_work_lock, flags);
6291 while (!list_empty(&tgt->sess_works_list)) {
6292 struct qla_tgt_sess_work_param *prm = list_entry(
6293 tgt->sess_works_list.next, typeof(*prm),
6294 sess_works_list_entry);
6295
6296 /*
6297 * This work can be scheduled on several CPUs at a time, so we
6298 * must delete the entry to eliminate double processing.
6299 */
6300 list_del(&prm->sess_works_list_entry);
6301
6302 spin_unlock_irqrestore(&tgt->sess_work_lock, flags);
6303
6304 switch (prm->type) {
6305 case QLA_TGT_SESS_WORK_ABORT:
6306 qlt_abort_work(tgt, prm);
6307 break;
6308 case QLA_TGT_SESS_WORK_TM:
6309 qlt_tmr_work(tgt, prm);
6310 break;
6311 default:
6312 BUG_ON(1);
6313 break;
6314 }
6315
6316 spin_lock_irqsave(&tgt->sess_work_lock, flags);
6317
6318 kfree(prm);
6319 }
6320 spin_unlock_irqrestore(&tgt->sess_work_lock, flags);
6321 }
6322
6323 /* Must be called under tgt_host_action_mutex */
6324 int qlt_add_target(struct qla_hw_data *ha, struct scsi_qla_host *base_vha)
6325 {
6326 struct qla_tgt *tgt;
6327 int rc, i;
6328 struct qla_qpair_hint *h;
6329
6330 if (!QLA_TGT_MODE_ENABLED())
6331 return 0;
6332
6333 if (!IS_TGT_MODE_CAPABLE(ha)) {
6334 ql_log(ql_log_warn, base_vha, 0xe070,
6335 "This adapter does not support target mode.\n");
6336 return 0;
6337 }
6338
6339 ql_dbg(ql_dbg_tgt, base_vha, 0xe03b,
6340 "Registering target for host %ld(%p).\n", base_vha->host_no, ha);
6341
6342 BUG_ON(base_vha->vha_tgt.qla_tgt != NULL);
6343
6344 tgt = kzalloc(sizeof(struct qla_tgt), GFP_KERNEL);
6345 if (!tgt) {
6346 ql_dbg(ql_dbg_tgt, base_vha, 0xe066,
6347 "Unable to allocate struct qla_tgt\n");
6348 return -ENOMEM;
6349 }
6350
6351 tgt->qphints = kcalloc(ha->max_qpairs + 1,
6352 sizeof(struct qla_qpair_hint),
6353 GFP_KERNEL);
6354 if (!tgt->qphints) {
6355 kfree(tgt);
6356 ql_log(ql_log_warn, base_vha, 0x0197,
6357 "Unable to allocate qpair hints.\n");
6358 return -ENOMEM;
6359 }
6360
6361 if (!(base_vha->host->hostt->supported_mode & MODE_TARGET))
6362 base_vha->host->hostt->supported_mode |= MODE_TARGET;
6363
6364 rc = btree_init64(&tgt->lun_qpair_map);
6365 if (rc) {
6366 kfree(tgt->qphints);
6367 kfree(tgt);
6368 ql_log(ql_log_info, base_vha, 0x0198,
6369 "Unable to initialize lun_qpair_map btree\n");
6370 return -EIO;
6371 }
6372 h = &tgt->qphints[0];
6373 h->qpair = ha->base_qpair;
6374 INIT_LIST_HEAD(&h->hint_elem);
6375 h->cpuid = ha->base_qpair->cpuid;
6376 list_add_tail(&h->hint_elem, &ha->base_qpair->hints_list);
6377
6378 for (i = 0; i < ha->max_qpairs; i++) {
6379 unsigned long flags;
6380
6381 struct qla_qpair *qpair = ha->queue_pair_map[i];
6382
6383 h = &tgt->qphints[i + 1];
6384 INIT_LIST_HEAD(&h->hint_elem);
6385 if (qpair) {
6386 h->qpair = qpair;
6387 spin_lock_irqsave(qpair->qp_lock_ptr, flags);
6388 list_add_tail(&h->hint_elem, &qpair->hints_list);
6389 spin_unlock_irqrestore(qpair->qp_lock_ptr, flags);
6390 h->cpuid = qpair->cpuid;
6391 }
6392 }
6393
6394 tgt->ha = ha;
6395 tgt->vha = base_vha;
6396 init_waitqueue_head(&tgt->waitQ);
6397 INIT_LIST_HEAD(&tgt->del_sess_list);
6398 spin_lock_init(&tgt->sess_work_lock);
6399 INIT_WORK(&tgt->sess_work, qlt_sess_work_fn);
6400 INIT_LIST_HEAD(&tgt->sess_works_list);
6401 atomic_set(&tgt->tgt_global_resets_count, 0);
6402
6403 base_vha->vha_tgt.qla_tgt = tgt;
6404
6405 ql_dbg(ql_dbg_tgt, base_vha, 0xe067,
6406 "qla_target(%d): using 64 Bit PCI addressing",
6407 base_vha->vp_idx);
6408 /* 3 is reserved */
6409 tgt->sg_tablesize = QLA_TGT_MAX_SG_24XX(base_vha->req->length - 3);
6410
6411 mutex_lock(&qla_tgt_mutex);
6412 list_add_tail(&tgt->tgt_list_entry, &qla_tgt_glist);
6413 mutex_unlock(&qla_tgt_mutex);
6414
6415 if (ha->tgt.tgt_ops && ha->tgt.tgt_ops->add_target)
6416 ha->tgt.tgt_ops->add_target(base_vha);
6417
6418 return 0;
6419 }
6420
6421 /* Must be called under tgt_host_action_mutex */
6422 int qlt_remove_target(struct qla_hw_data *ha, struct scsi_qla_host *vha)
6423 {
6424 if (!vha->vha_tgt.qla_tgt)
6425 return 0;
6426
6427 if (vha->fc_vport) {
6428 qlt_release(vha->vha_tgt.qla_tgt);
6429 return 0;
6430 }
6431
6432 /* free left over qfull cmds */
6433 qlt_init_term_exchange(vha);
6434
6435 ql_dbg(ql_dbg_tgt, vha, 0xe03c, "Unregistering target for host %ld(%p)",
6436 vha->host_no, ha);
6437 qlt_release(vha->vha_tgt.qla_tgt);
6438
6439 return 0;
6440 }
6441
6442 void qlt_remove_target_resources(struct qla_hw_data *ha)
6443 {
6444 struct scsi_qla_host *node;
6445 u32 key = 0;
6446
6447 btree_for_each_safe32(&ha->tgt.host_map, key, node)
6448 btree_remove32(&ha->tgt.host_map, key);
6449
6450 btree_destroy32(&ha->tgt.host_map);
6451 }
6452
6453 static void qlt_lport_dump(struct scsi_qla_host *vha, u64 wwpn,
6454 unsigned char *b)
6455 {
6456 pr_debug("qla2xxx HW vha->node_name: %8phC\n", vha->node_name);
6457 pr_debug("qla2xxx HW vha->port_name: %8phC\n", vha->port_name);
6458 put_unaligned_be64(wwpn, b);
6459 pr_debug("qla2xxx passed configfs WWPN: %8phC\n", b);
6460 }
6461
6462 /**
6463 * qlt_lport_register - register lport with external module
6464 *
6465 * @target_lport_ptr: pointer for tcm_qla2xxx specific lport data
6466 * @phys_wwpn: physical port WWPN
6467 * @npiv_wwpn: NPIV WWPN
6468 * @npiv_wwnn: NPIV WWNN
6469 * @callback: lport initialization callback for tcm_qla2xxx code
6470 */
6471 int qlt_lport_register(void *target_lport_ptr, u64 phys_wwpn,
6472 u64 npiv_wwpn, u64 npiv_wwnn,
6473 int (*callback)(struct scsi_qla_host *, void *, u64, u64))
6474 {
6475 struct qla_tgt *tgt;
6476 struct scsi_qla_host *vha;
6477 struct qla_hw_data *ha;
6478 struct Scsi_Host *host;
6479 unsigned long flags;
6480 int rc;
6481 u8 b[WWN_SIZE];
6482
6483 mutex_lock(&qla_tgt_mutex);
6484 list_for_each_entry(tgt, &qla_tgt_glist, tgt_list_entry) {
6485 vha = tgt->vha;
6486 ha = vha->hw;
6487
6488 host = vha->host;
6489 if (!host)
6490 continue;
6491
6492 if (!(host->hostt->supported_mode & MODE_TARGET))
6493 continue;
6494
6495 if (vha->qlini_mode == QLA2XXX_INI_MODE_ENABLED)
6496 continue;
6497
6498 spin_lock_irqsave(&ha->hardware_lock, flags);
6499 if ((!npiv_wwpn || !npiv_wwnn) && host->active_mode & MODE_TARGET) {
6500 pr_debug("MODE_TARGET already active on qla2xxx(%d)\n",
6501 host->host_no);
6502 spin_unlock_irqrestore(&ha->hardware_lock, flags);
6503 continue;
6504 }
6505 if (tgt->tgt_stop) {
6506 pr_debug("MODE_TARGET in shutdown on qla2xxx(%d)\n",
6507 host->host_no);
6508 spin_unlock_irqrestore(&ha->hardware_lock, flags);
6509 continue;
6510 }
6511 spin_unlock_irqrestore(&ha->hardware_lock, flags);
6512
6513 if (!scsi_host_get(host)) {
6514 ql_dbg(ql_dbg_tgt, vha, 0xe068,
6515 "Unable to scsi_host_get() for"
6516 " qla2xxx scsi_host\n");
6517 continue;
6518 }
6519 qlt_lport_dump(vha, phys_wwpn, b);
6520
6521 if (memcmp(vha->port_name, b, WWN_SIZE)) {
6522 scsi_host_put(host);
6523 continue;
6524 }
6525 rc = (*callback)(vha, target_lport_ptr, npiv_wwpn, npiv_wwnn);
6526 if (rc != 0)
6527 scsi_host_put(host);
6528
6529 mutex_unlock(&qla_tgt_mutex);
6530 return rc;
6531 }
6532 mutex_unlock(&qla_tgt_mutex);
6533
6534 return -ENODEV;
6535 }
6536 EXPORT_SYMBOL(qlt_lport_register);
6537
6538 /**
6539 * qlt_lport_deregister - Deregister lport
6540 *
6541 * @vha: Registered scsi_qla_host pointer
6542 */
6543 void qlt_lport_deregister(struct scsi_qla_host *vha)
6544 {
6545 struct qla_hw_data *ha = vha->hw;
6546 struct Scsi_Host *sh = vha->host;
6547 /*
6548 * Clear the target_lport_ptr qla_target_template pointer in qla_hw_data
6549 */
6550 vha->vha_tgt.target_lport_ptr = NULL;
6551 ha->tgt.tgt_ops = NULL;
6552 /*
6553 * Release the Scsi_Host reference for the underlying qla2xxx host
6554 */
6555 scsi_host_put(sh);
6556 }
6557 EXPORT_SYMBOL(qlt_lport_deregister);
6558
6559 /* Must be called under HW lock */
6560 void qlt_set_mode(struct scsi_qla_host *vha)
6561 {
6562 switch (vha->qlini_mode) {
6563 case QLA2XXX_INI_MODE_DISABLED:
6564 case QLA2XXX_INI_MODE_EXCLUSIVE:
6565 vha->host->active_mode = MODE_TARGET;
6566 break;
6567 case QLA2XXX_INI_MODE_ENABLED:
6568 vha->host->active_mode = MODE_INITIATOR;
6569 break;
6570 case QLA2XXX_INI_MODE_DUAL:
6571 vha->host->active_mode = MODE_DUAL;
6572 break;
6573 default:
6574 break;
6575 }
6576 }
6577
6578 /* Must be called under HW lock */
6579 static void qlt_clear_mode(struct scsi_qla_host *vha)
6580 {
6581 switch (vha->qlini_mode) {
6582 case QLA2XXX_INI_MODE_DISABLED:
6583 vha->host->active_mode = MODE_UNKNOWN;
6584 break;
6585 case QLA2XXX_INI_MODE_EXCLUSIVE:
6586 vha->host->active_mode = MODE_INITIATOR;
6587 break;
6588 case QLA2XXX_INI_MODE_ENABLED:
6589 case QLA2XXX_INI_MODE_DUAL:
6590 vha->host->active_mode = MODE_INITIATOR;
6591 break;
6592 default:
6593 break;
6594 }
6595 }
6596
6597 /*
6598 * qlt_enable_vha - NO LOCK HELD
6599 *
6600 * host_reset, bring up w/ Target Mode Enabled
6601 */
6602 void
6603 qlt_enable_vha(struct scsi_qla_host *vha)
6604 {
6605 struct qla_hw_data *ha = vha->hw;
6606 struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
6607 unsigned long flags;
6608 scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);
6609
6610 if (!tgt) {
6611 ql_dbg(ql_dbg_tgt, vha, 0xe069,
6612 "Unable to locate qla_tgt pointer from"
6613 " struct qla_hw_data\n");
6614 dump_stack();
6615 return;
6616 }
6617 if (vha->qlini_mode == QLA2XXX_INI_MODE_ENABLED)
6618 return;
6619
6620 if (ha->tgt.num_act_qpairs > ha->max_qpairs)
6621 ha->tgt.num_act_qpairs = ha->max_qpairs;
6622 spin_lock_irqsave(&ha->hardware_lock, flags);
6623 tgt->tgt_stopped = 0;
6624 qlt_set_mode(vha);
6625 spin_unlock_irqrestore(&ha->hardware_lock, flags);
6626
6627 mutex_lock(&ha->optrom_mutex);
6628 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf021,
6629 "%s.\n", __func__);
6630 if (vha->vp_idx) {
6631 qla24xx_disable_vp(vha);
6632 qla24xx_enable_vp(vha);
6633 } else {
6634 set_bit(ISP_ABORT_NEEDED, &base_vha->dpc_flags);
6635 qla2xxx_wake_dpc(base_vha);
6636 WARN_ON_ONCE(qla2x00_wait_for_hba_online(base_vha) !=
6637 QLA_SUCCESS);
6638 }
6639 mutex_unlock(&ha->optrom_mutex);
6640 }
6641 EXPORT_SYMBOL(qlt_enable_vha);
6642
6643 /*
6644 * qlt_disable_vha - NO LOCK HELD
6645 *
6646 * Disable Target Mode and reset the adapter
6647 */
6648 static void qlt_disable_vha(struct scsi_qla_host *vha)
6649 {
6650 struct qla_hw_data *ha = vha->hw;
6651 struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
6652 unsigned long flags;
6653
6654 if (!tgt) {
6655 ql_dbg(ql_dbg_tgt, vha, 0xe06a,
6656 "Unable to locate qla_tgt pointer from"
6657 " struct qla_hw_data\n");
6658 dump_stack();
6659 return;
6660 }
6661
6662 spin_lock_irqsave(&ha->hardware_lock, flags);
6663 qlt_clear_mode(vha);
6664 spin_unlock_irqrestore(&ha->hardware_lock, flags);
6665
6666 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
6667 qla2xxx_wake_dpc(vha);
6668 if (qla2x00_wait_for_hba_online(vha) != QLA_SUCCESS)
6669 ql_dbg(ql_dbg_tgt, vha, 0xe081,
6670 "qla2x00_wait_for_hba_online() failed\n");
6671 }
6672
6673 /*
6674 * Called from qla_init.c:qla24xx_vport_create() context to set up
6675 * the target mode specific struct scsi_qla_host and struct qla_hw_data
6676 * members.
6677 */
6678 void
6679 qlt_vport_create(struct scsi_qla_host *vha, struct qla_hw_data *ha)
6680 {
6681 vha->vha_tgt.qla_tgt = NULL;
6682
6683 mutex_init(&vha->vha_tgt.tgt_mutex);
6684 mutex_init(&vha->vha_tgt.tgt_host_action_mutex);
6685
6686 qlt_clear_mode(vha);
6687
6688 /*
6689 * NOTE: Currently the value is kept the same for <24xx and
6690 * >=24xx ISPs. If it is necessary to change it,
6691 * the check should be added for specific ISPs,
6692 * assigning the value appropriately.
6693 */
6694 ha->tgt.atio_q_length = ATIO_ENTRY_CNT_24XX;
6695
6696 qlt_add_target(ha, vha);
6697 }
6698
6699 u8
6700 qlt_rff_id(struct scsi_qla_host *vha)
6701 {
6702 u8 fc4_feature = 0;
6703 /*
6704 * FC-4 Feature bit 0 indicates target functionality to the name server.
6705 */
6706 if (qla_tgt_mode_enabled(vha)) {
6707 fc4_feature = BIT_0;
6708 } else if (qla_ini_mode_enabled(vha)) {
6709 fc4_feature = BIT_1;
6710 } else if (qla_dual_mode_enabled(vha))
6711 fc4_feature = BIT_0 | BIT_1;
6712
6713 return fc4_feature;
6714 }
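/*
 * Resulting RFF_ID FC-4 Feature values: target-only mode registers 0x1
 * (BIT_0), initiator-only registers 0x2 (BIT_1), and dual mode registers
 * 0x3 (both bits), letting the fabric name server advertise which roles
 * this port supports.
 */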
6715
6716 /*
6717 * qlt_init_atio_q_entries() - Initializes ATIO queue entries.
6718 * @vha: VHA context
6719 *
6720 * The beginning of the ATIO ring has the initialization control block
6721 * already built by the nvram config routine.
6722 */
6725 void
6726 qlt_init_atio_q_entries(struct scsi_qla_host *vha)
6727 {
6728 struct qla_hw_data *ha = vha->hw;
6729 uint16_t cnt;
6730 struct atio_from_isp *pkt = (struct atio_from_isp *)ha->tgt.atio_ring;
6731
6732 if (qla_ini_mode_enabled(vha))
6733 return;
6734
6735 for (cnt = 0; cnt < ha->tgt.atio_q_length; cnt++) {
6736 pkt->u.raw.signature = ATIO_PROCESSED;
6737 pkt++;
6738 }
6739
6740 }
6741
6742 /*
6743 * qlt_24xx_process_atio_queue() - Process ATIO queue entries.
6744 * @vha: SCSI driver VHA context
6745 */
6746 void
6747 qlt_24xx_process_atio_queue(struct scsi_qla_host *vha, uint8_t ha_locked)
6748 {
6749 struct qla_hw_data *ha = vha->hw;
6750 struct atio_from_isp *pkt;
6751 int cnt, i;
6752
6753 if (!ha->flags.fw_started)
6754 return;
6755
6756 while ((ha->tgt.atio_ring_ptr->signature != ATIO_PROCESSED) ||
6757 fcpcmd_is_corrupted(ha->tgt.atio_ring_ptr)) {
6758 pkt = (struct atio_from_isp *)ha->tgt.atio_ring_ptr;
6759 cnt = pkt->u.raw.entry_count;
6760
6761 if (unlikely(fcpcmd_is_corrupted(ha->tgt.atio_ring_ptr))) {
6762 /*
6763 * This packet is corrupted. The header + payload
6764 * cannot be trusted. There is no point in passing
6765 * it further up.
6766 */
6767 ql_log(ql_log_warn, vha, 0xd03c,
6768 "corrupted fcp frame SID[%3phN] OXID[%04x] EXCG[%x] %64phN\n",
6769 &pkt->u.isp24.fcp_hdr.s_id,
6770 be16_to_cpu(pkt->u.isp24.fcp_hdr.ox_id),
6771 le32_to_cpu(pkt->u.isp24.exchange_addr), pkt);
6772
6773 adjust_corrupted_atio(pkt);
6774 qlt_send_term_exchange(ha->base_qpair, NULL, pkt,
6775 ha_locked, 0);
6776 } else {
6777 qlt_24xx_atio_pkt_all_vps(vha,
6778 (struct atio_from_isp *)pkt, ha_locked);
6779 }
6780
6781 for (i = 0; i < cnt; i++) {
6782 ha->tgt.atio_ring_index++;
6783 if (ha->tgt.atio_ring_index == ha->tgt.atio_q_length) {
6784 ha->tgt.atio_ring_index = 0;
6785 ha->tgt.atio_ring_ptr = ha->tgt.atio_ring;
6786 } else
6787 ha->tgt.atio_ring_ptr++;
6788
6789 pkt->u.raw.signature = ATIO_PROCESSED;
6790 pkt = (struct atio_from_isp *)ha->tgt.atio_ring_ptr;
6791 }
6792 wmb();
6793 }
6794
6795 /* Adjust ring index */
6796 WRT_REG_DWORD(ISP_ATIO_Q_OUT(vha), ha->tgt.atio_ring_index);
6797 }
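/*
 * Note on ordering: the wmb() above ensures the ATIO_PROCESSED signature
 * stores reach memory before the ISP_ATIO_Q_OUT register write hands the
 * consumed ring entries back to the firmware.
 */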
6798
6799 void
6800 qlt_24xx_config_rings(struct scsi_qla_host *vha)
6801 {
6802 struct qla_hw_data *ha = vha->hw;
6803 struct qla_msix_entry *msix = &ha->msix_entries[2];
6804 struct init_cb_24xx *icb = (struct init_cb_24xx *)ha->init_cb;
6805
6806 if (!QLA_TGT_MODE_ENABLED())
6807 return;
6808
6809 WRT_REG_DWORD(ISP_ATIO_Q_IN(vha), 0);
6810 WRT_REG_DWORD(ISP_ATIO_Q_OUT(vha), 0);
6811 RD_REG_DWORD(ISP_ATIO_Q_OUT(vha));
6812
6813 if (ha->flags.msix_enabled) {
6814 if (IS_QLA83XX(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha)) {
6815 if (IS_QLA2071(ha)) {
6816 /* 4 ports Baker: Enable Interrupt Handshake */
6817 icb->msix_atio = 0;
6818 icb->firmware_options_2 |= BIT_26;
6819 } else {
6820 icb->msix_atio = cpu_to_le16(msix->entry);
6821 icb->firmware_options_2 &= ~BIT_26;
6822 }
6823 ql_dbg(ql_dbg_init, vha, 0xf072,
6824 "Registering ICB vector 0x%x for atio que.\n",
6825 msix->entry);
6826 }
6827 } else {
6828 /* INTx|MSI */
6829 if (IS_QLA83XX(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha)) {
6830 icb->msix_atio = 0;
6831 icb->firmware_options_2 |= BIT_26;
6832 ql_dbg(ql_dbg_init, vha, 0xf072,
6833 "%s: Use INTx for ATIOQ.\n", __func__);
6834 }
6835 }
6836 }
6837
6838 void
6839 qlt_24xx_config_nvram_stage1(struct scsi_qla_host *vha, struct nvram_24xx *nv)
6840 {
6841 struct qla_hw_data *ha = vha->hw;
6842 u32 tmp;
6843
6844 if (!QLA_TGT_MODE_ENABLED())
6845 return;
6846
6847 if (qla_tgt_mode_enabled(vha) || qla_dual_mode_enabled(vha)) {
6848 if (!ha->tgt.saved_set) {
6849 /* We save only once */
6850 ha->tgt.saved_exchange_count = nv->exchange_count;
6851 ha->tgt.saved_firmware_options_1 =
6852 nv->firmware_options_1;
6853 ha->tgt.saved_firmware_options_2 =
6854 nv->firmware_options_2;
6855 ha->tgt.saved_firmware_options_3 =
6856 nv->firmware_options_3;
6857 ha->tgt.saved_set = 1;
6858 }
6859
6860 if (qla_tgt_mode_enabled(vha))
6861 nv->exchange_count = cpu_to_le16(0xFFFF);
6862 else /* dual */
6863 nv->exchange_count = cpu_to_le16(vha->ql2xexchoffld);
6864
6865 /* Enable target mode */
6866 nv->firmware_options_1 |= cpu_to_le32(BIT_4);
6867
6868 /* Disable ini mode, if requested */
6869 if (qla_tgt_mode_enabled(vha))
6870 nv->firmware_options_1 |= cpu_to_le32(BIT_5);
6871
6872 /* Disable Full Login after LIP */
6873 nv->firmware_options_1 &= cpu_to_le32(~BIT_13);
6874 /* Enable initial LIP */
6875 nv->firmware_options_1 &= cpu_to_le32(~BIT_9);
6876 if (ql2xtgt_tape_enable)
6877 /* Enable FC Tape support */
6878 nv->firmware_options_2 |= cpu_to_le32(BIT_12);
6879 else
6880 /* Disable FC Tape support */
6881 nv->firmware_options_2 &= cpu_to_le32(~BIT_12);
6882
6883 /* Disable Full Login after LIP */
6884 nv->host_p &= cpu_to_le32(~BIT_10);
6885
6886 /*
6887 * clear BIT 15 explicitly as we have seen at least
6888 * a couple of instances where this was set and this
6889 * was causing the firmware to not be initialized.
6890 */
6891 nv->firmware_options_1 &= cpu_to_le32(~BIT_15);
6892 /* Enable target PRLI control */
6893 nv->firmware_options_2 |= cpu_to_le32(BIT_14);
6894
6895 if (IS_QLA25XX(ha)) {
6896 /* Change Loop-prefer to Pt-Pt */
6897 tmp = ~(BIT_4|BIT_5|BIT_6);
6898 nv->firmware_options_2 &= cpu_to_le32(tmp);
6899 tmp = P2P << 4;
6900 nv->firmware_options_2 |= cpu_to_le32(tmp);
6901 }
6902 } else {
6903 if (ha->tgt.saved_set) {
6904 nv->exchange_count = ha->tgt.saved_exchange_count;
6905 nv->firmware_options_1 =
6906 ha->tgt.saved_firmware_options_1;
6907 nv->firmware_options_2 =
6908 ha->tgt.saved_firmware_options_2;
6909 nv->firmware_options_3 =
6910 ha->tgt.saved_firmware_options_3;
6911 }
6912 return;
6913 }
6914
6915 if (ha->base_qpair->enable_class_2) {
6916 if (vha->flags.init_done)
6917 fc_host_supported_classes(vha->host) =
6918 FC_COS_CLASS2 | FC_COS_CLASS3;
6919
6920 nv->firmware_options_2 |= cpu_to_le32(BIT_8);
6921 } else {
6922 if (vha->flags.init_done)
6923 fc_host_supported_classes(vha->host) = FC_COS_CLASS3;
6924
6925 nv->firmware_options_2 &= ~cpu_to_le32(BIT_8);
6926 }
6927 }
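/*
 * The Loop-prefer-to-Pt-Pt change above is a read-modify-write of a 3-bit
 * connection-option field (bits 4..6) of a little-endian word. A
 * hypothetical helper showing the same operation in one place
 * (illustrative only, compiled out):
 */
#if 0
static inline void qlt_set_fw_conn_opts(__le32 *opts, u32 mode)
{
	u32 v = le32_to_cpu(*opts);

	v &= ~(BIT_4 | BIT_5 | BIT_6);	/* clear the old connection mode */
	v |= (mode & 0x7) << 4;		/* e.g. mode == P2P */
	*opts = cpu_to_le32(v);
}
#endif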
6928
6929 void
6930 qlt_24xx_config_nvram_stage2(struct scsi_qla_host *vha,
6931 struct init_cb_24xx *icb)
6932 {
6933 struct qla_hw_data *ha = vha->hw;
6934
6935 if (!QLA_TGT_MODE_ENABLED())
6936 return;
6937
6938 if (ha->tgt.node_name_set) {
6939 memcpy(icb->node_name, ha->tgt.tgt_node_name, WWN_SIZE);
6940 icb->firmware_options_1 |= cpu_to_le32(BIT_14);
6941 }
6942 }
6943
6944 void
6945 qlt_81xx_config_nvram_stage1(struct scsi_qla_host *vha, struct nvram_81xx *nv)
6946 {
6947 struct qla_hw_data *ha = vha->hw;
6948 u32 tmp;
6949
6950 if (!QLA_TGT_MODE_ENABLED())
6951 return;
6952
6953 if (qla_tgt_mode_enabled(vha) || qla_dual_mode_enabled(vha)) {
6954 if (!ha->tgt.saved_set) {
6955 /* We save only once */
6956 ha->tgt.saved_exchange_count = nv->exchange_count;
6957 ha->tgt.saved_firmware_options_1 =
6958 nv->firmware_options_1;
6959 ha->tgt.saved_firmware_options_2 =
6960 nv->firmware_options_2;
6961 ha->tgt.saved_firmware_options_3 =
6962 nv->firmware_options_3;
6963 ha->tgt.saved_set = 1;
6964 }
6965
6966 if (qla_tgt_mode_enabled(vha))
6967 nv->exchange_count = cpu_to_le16(0xFFFF);
6968 else /* dual */
6969 nv->exchange_count = cpu_to_le16(vha->ql2xexchoffld);
6970
6971 /* Enable target mode */
6972 nv->firmware_options_1 |= cpu_to_le32(BIT_4);
6973
6974 /* Disable ini mode, if requested */
6975 if (qla_tgt_mode_enabled(vha))
6976 nv->firmware_options_1 |= cpu_to_le32(BIT_5);
6977 /* Disable Full Login after LIP */
6978 nv->firmware_options_1 &= cpu_to_le32(~BIT_13);
6979 /* Enable initial LIP */
6980 nv->firmware_options_1 &= cpu_to_le32(~BIT_9);
6981 /*
6982 * clear BIT 15 explicitly as we have seen at
6983 * least a couple of instances where this was set
6984 * and this was causing the firmware to not be
6985 * initialized.
6986 */
6987 nv->firmware_options_1 &= cpu_to_le32(~BIT_15);
6988 if (ql2xtgt_tape_enable)
6989 /* Enable FC tape support */
6990 nv->firmware_options_2 |= cpu_to_le32(BIT_12);
6991 else
6992 /* Disable FC tape support */
6993 nv->firmware_options_2 &= cpu_to_le32(~BIT_12);
6994
6995 /* Disable Full Login after LIP */
6996 nv->host_p &= cpu_to_le32(~BIT_10);
6997 /* Enable target PRLI control */
6998 nv->firmware_options_2 |= cpu_to_le32(BIT_14);
6999
7000 /* Change Loop-prefer to Pt-Pt */
7001 tmp = ~(BIT_4|BIT_5|BIT_6);
7002 nv->firmware_options_2 &= cpu_to_le32(tmp);
7003 tmp = P2P << 4;
7004 nv->firmware_options_2 |= cpu_to_le32(tmp);
7005 } else {
7006 if (ha->tgt.saved_set) {
7007 nv->exchange_count = ha->tgt.saved_exchange_count;
7008 nv->firmware_options_1 =
7009 ha->tgt.saved_firmware_options_1;
7010 nv->firmware_options_2 =
7011 ha->tgt.saved_firmware_options_2;
7012 nv->firmware_options_3 =
7013 ha->tgt.saved_firmware_options_3;
7014 }
7015 return;
7016 }
7017
7018 if (ha->base_qpair->enable_class_2) {
7019 if (vha->flags.init_done)
7020 fc_host_supported_classes(vha->host) =
7021 FC_COS_CLASS2 | FC_COS_CLASS3;
7022
7023 nv->firmware_options_2 |= cpu_to_le32(BIT_8);
7024 } else {
7025 if (vha->flags.init_done)
7026 fc_host_supported_classes(vha->host) = FC_COS_CLASS3;
7027
7028 nv->firmware_options_2 &= ~cpu_to_le32(BIT_8);
7029 }
7030 }
7031
7032 void
7033 qlt_81xx_config_nvram_stage2(struct scsi_qla_host *vha,
7034 struct init_cb_81xx *icb)
7035 {
7036 struct qla_hw_data *ha = vha->hw;
7037
7038 if (!QLA_TGT_MODE_ENABLED())
7039 return;
7040
7041 if (ha->tgt.node_name_set) {
7042 memcpy(icb->node_name, ha->tgt.tgt_node_name, WWN_SIZE);
7043 icb->firmware_options_1 |= cpu_to_le32(BIT_14);
7044 }
7045 }
7046
7047 void
7048 qlt_83xx_iospace_config(struct qla_hw_data *ha)
7049 {
7050 if (!QLA_TGT_MODE_ENABLED())
7051 return;
7052
7053 ha->msix_count += 1; /* For ATIO Q */
7054 }
7055
7056
7057 void
7058 qlt_modify_vp_config(struct scsi_qla_host *vha,
7059 struct vp_config_entry_24xx *vpmod)
7060 {
7061 /* enable target mode. Bit5 = 1 => disable */
7062 if (qla_tgt_mode_enabled(vha) || qla_dual_mode_enabled(vha))
7063 vpmod->options_idx1 &= ~BIT_5;
7064
7065 /* Disable ini mode, if requested. bit4 = 1 => disable */
7066 if (qla_tgt_mode_enabled(vha))
7067 vpmod->options_idx1 &= ~BIT_4;
7068 }
7069
7070 void
7071 qlt_probe_one_stage1(struct scsi_qla_host *base_vha, struct qla_hw_data *ha)
7072 {
7073 int rc;
7074
7075 if (!QLA_TGT_MODE_ENABLED())
7076 return;
7077
7078 if ((ql2xenablemsix == 0) || IS_QLA83XX(ha) || IS_QLA27XX(ha) ||
7079 IS_QLA28XX(ha)) {
7080 ISP_ATIO_Q_IN(base_vha) = &ha->mqiobase->isp25mq.atio_q_in;
7081 ISP_ATIO_Q_OUT(base_vha) = &ha->mqiobase->isp25mq.atio_q_out;
7082 } else {
7083 ISP_ATIO_Q_IN(base_vha) = &ha->iobase->isp24.atio_q_in;
7084 ISP_ATIO_Q_OUT(base_vha) = &ha->iobase->isp24.atio_q_out;
7085 }
7086
7087 mutex_init(&base_vha->vha_tgt.tgt_mutex);
7088 mutex_init(&base_vha->vha_tgt.tgt_host_action_mutex);
7089
7090 INIT_LIST_HEAD(&base_vha->unknown_atio_list);
7091 INIT_DELAYED_WORK(&base_vha->unknown_atio_work,
7092 qlt_unknown_atio_work_fn);
7093
7094 qlt_clear_mode(base_vha);
7095
7096 rc = btree_init32(&ha->tgt.host_map);
7097 if (rc)
7098 ql_log(ql_log_info, base_vha, 0xd03d,
7099 "Unable to initialize ha->host_map btree\n");
7100
7101 qlt_update_vp_map(base_vha, SET_VP_IDX);
7102 }
7103
7104 irqreturn_t
7105 qla83xx_msix_atio_q(int irq, void *dev_id)
7106 {
7107 struct rsp_que *rsp;
7108 scsi_qla_host_t *vha;
7109 struct qla_hw_data *ha;
7110 unsigned long flags;
7111
7112 rsp = (struct rsp_que *) dev_id;
7113 ha = rsp->hw;
7114 vha = pci_get_drvdata(ha->pdev);
7115
7116 spin_lock_irqsave(&ha->tgt.atio_lock, flags);
7117
7118 qlt_24xx_process_atio_queue(vha, 0);
7119
7120 spin_unlock_irqrestore(&ha->tgt.atio_lock, flags);
7121
7122 return IRQ_HANDLED;
7123 }
7124
7125 static void
7126 qlt_handle_abts_recv_work(struct work_struct *work)
7127 {
7128 struct qla_tgt_sess_op *op = container_of(work,
7129 struct qla_tgt_sess_op, work);
7130 scsi_qla_host_t *vha = op->vha;
7131 struct qla_hw_data *ha = vha->hw;
7132 unsigned long flags;
7133
7134 if (qla2x00_reset_active(vha) ||
7135 (op->chip_reset != ha->base_qpair->chip_reset))
7136 return;
7137
7138 spin_lock_irqsave(&ha->tgt.atio_lock, flags);
7139 qlt_24xx_process_atio_queue(vha, 0);
7140 spin_unlock_irqrestore(&ha->tgt.atio_lock, flags);
7141
7142 spin_lock_irqsave(&ha->hardware_lock, flags);
7143 qlt_response_pkt_all_vps(vha, op->rsp, (response_t *)&op->atio);
7144 spin_unlock_irqrestore(&ha->hardware_lock, flags);
7145
7146 kfree(op);
7147 }
7148
7149 void
7150 qlt_handle_abts_recv(struct scsi_qla_host *vha, struct rsp_que *rsp,
7151 response_t *pkt)
7152 {
7153 struct qla_tgt_sess_op *op;
7154
7155 op = kzalloc(sizeof(*op), GFP_ATOMIC);
7156
7157 if (!op) {
7158 /* Do not touch the ATIO queue here. This is best-effort error
7159 * recovery at this point.
7160 */
7161 qlt_response_pkt_all_vps(vha, rsp, pkt);
7162 return;
7163 }
7164
7165 memcpy(&op->atio, pkt, sizeof(*pkt));
7166 op->vha = vha;
7167 op->chip_reset = vha->hw->base_qpair->chip_reset;
7168 op->rsp = rsp;
7169 INIT_WORK(&op->work, qlt_handle_abts_recv_work);
7170 queue_work(qla_tgt_wq, &op->work);
7171 return;
7172 }
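/*
 * Design note: this path typically runs in interrupt context, hence the
 * GFP_ATOMIC allocation and the copy of the packet into the op before
 * deferring to process context via qla_tgt_wq. op->chip_reset snapshots
 * the current reset generation so the worker can drop the packet if a
 * chip reset occurred in between.
 */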
7173
7174 int
7175 qlt_mem_alloc(struct qla_hw_data *ha)
7176 {
7177 if (!QLA_TGT_MODE_ENABLED())
7178 return 0;
7179
7180 ha->tgt.tgt_vp_map = kcalloc(MAX_MULTI_ID_FABRIC,
7181 sizeof(struct qla_tgt_vp_map),
7182 GFP_KERNEL);
7183 if (!ha->tgt.tgt_vp_map)
7184 return -ENOMEM;
7185
7186 ha->tgt.atio_ring = dma_alloc_coherent(&ha->pdev->dev,
7187 (ha->tgt.atio_q_length + 1) * sizeof(struct atio_from_isp),
7188 &ha->tgt.atio_dma, GFP_KERNEL);
7189 if (!ha->tgt.atio_ring) {
7190 kfree(ha->tgt.tgt_vp_map);
7191 return -ENOMEM;
7192 }
7193 return 0;
7194 }
7195
7196 void
7197 qlt_mem_free(struct qla_hw_data *ha)
7198 {
7199 if (!QLA_TGT_MODE_ENABLED())
7200 return;
7201
7202 if (ha->tgt.atio_ring) {
7203 dma_free_coherent(&ha->pdev->dev, (ha->tgt.atio_q_length + 1) *
7204 sizeof(struct atio_from_isp), ha->tgt.atio_ring,
7205 ha->tgt.atio_dma);
7206 }
7207 ha->tgt.atio_ring = NULL;
7208 ha->tgt.atio_dma = 0;
7209 kfree(ha->tgt.tgt_vp_map);
7210 ha->tgt.tgt_vp_map = NULL;
7211 }
7212
7213 /* vport_slock to be held by the caller */
7214 void
7215 qlt_update_vp_map(struct scsi_qla_host *vha, int cmd)
7216 {
7217 void *slot;
7218 u32 key;
7219 int rc;
7220
7221 if (!QLA_TGT_MODE_ENABLED())
7222 return;
7223
7224 key = vha->d_id.b24;
7225
7226 switch (cmd) {
7227 case SET_VP_IDX:
7228 vha->hw->tgt.tgt_vp_map[vha->vp_idx].vha = vha;
7229 break;
7230 case SET_AL_PA:
7231 slot = btree_lookup32(&vha->hw->tgt.host_map, key);
7232 if (!slot) {
7233 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf018,
7234 "Save vha in host_map %p %06x\n", vha, key);
7235 rc = btree_insert32(&vha->hw->tgt.host_map,
7236 key, vha, GFP_ATOMIC);
7237 if (rc)
7238 ql_log(ql_log_info, vha, 0xd03e,
7239 "Unable to insert s_id into host_map: %06x\n",
7240 key);
7241 return;
7242 }
7243 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf019,
7244 "replace existing vha in host_map %p %06x\n", vha, key);
7245 btree_update32(&vha->hw->tgt.host_map, key, vha);
7246 break;
7247 case RESET_VP_IDX:
7248 vha->hw->tgt.tgt_vp_map[vha->vp_idx].vha = NULL;
7249 break;
7250 case RESET_AL_PA:
7251 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf01a,
7252 "clear vha in host_map %p %06x\n", vha, key);
7253 slot = btree_lookup32(&vha->hw->tgt.host_map, key);
7254 if (slot)
7255 btree_remove32(&vha->hw->tgt.host_map, key);
7256 vha->d_id.b24 = 0;
7257 break;
7258 }
7259 }
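/*
 * btree_insert32() uses GFP_ATOMIC here because the caller holds
 * vport_slock (see the note above this function); sleeping allocations
 * are not allowed under a spinlock.
 */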
7260
7261 void qlt_update_host_map(struct scsi_qla_host *vha, port_id_t id)
7262 {
7263
7264 if (!vha->d_id.b24) {
7265 vha->d_id = id;
7266 qlt_update_vp_map(vha, SET_AL_PA);
7267 } else if (vha->d_id.b24 != id.b24) {
7268 qlt_update_vp_map(vha, RESET_AL_PA);
7269 vha->d_id = id;
7270 qlt_update_vp_map(vha, SET_AL_PA);
7271 }
7272 }
7273
7274 static int __init qlt_parse_ini_mode(void)
7275 {
7276 if (strcasecmp(qlini_mode, QLA2XXX_INI_MODE_STR_EXCLUSIVE) == 0)
7277 ql2x_ini_mode = QLA2XXX_INI_MODE_EXCLUSIVE;
7278 else if (strcasecmp(qlini_mode, QLA2XXX_INI_MODE_STR_DISABLED) == 0)
7279 ql2x_ini_mode = QLA2XXX_INI_MODE_DISABLED;
7280 else if (strcasecmp(qlini_mode, QLA2XXX_INI_MODE_STR_ENABLED) == 0)
7281 ql2x_ini_mode = QLA2XXX_INI_MODE_ENABLED;
7282 else if (strcasecmp(qlini_mode, QLA2XXX_INI_MODE_STR_DUAL) == 0)
7283 ql2x_ini_mode = QLA2XXX_INI_MODE_DUAL;
7284 else
7285 return false;
7286
7287 return true;
7288 }
7289
7290 int __init qlt_init(void)
7291 {
7292 int ret;
7293
7294 BUILD_BUG_ON(sizeof(struct ctio7_to_24xx) != 64);
7295 BUILD_BUG_ON(sizeof(struct ctio_to_2xxx) != 64);
7296
7297 if (!qlt_parse_ini_mode()) {
7298 ql_log(ql_log_fatal, NULL, 0xe06b,
7299 "qlt_parse_ini_mode() failed\n");
7300 return -EINVAL;
7301 }
7302
7303 if (!QLA_TGT_MODE_ENABLED())
7304 return 0;
7305
7306 qla_tgt_mgmt_cmd_cachep = kmem_cache_create("qla_tgt_mgmt_cmd_cachep",
7307 sizeof(struct qla_tgt_mgmt_cmd), __alignof__(struct
7308 qla_tgt_mgmt_cmd), 0, NULL);
7309 if (!qla_tgt_mgmt_cmd_cachep) {
7310 ql_log(ql_log_fatal, NULL, 0xd04b,
7311 "kmem_cache_create for qla_tgt_mgmt_cmd_cachep failed\n");
7312 return -ENOMEM;
7313 }
7314
7315 qla_tgt_plogi_cachep = kmem_cache_create("qla_tgt_plogi_cachep",
7316 sizeof(struct qlt_plogi_ack_t), __alignof__(struct qlt_plogi_ack_t),
7317 0, NULL);
7318
7319 if (!qla_tgt_plogi_cachep) {
7320 ql_log(ql_log_fatal, NULL, 0xe06d,
7321 "kmem_cache_create for qla_tgt_plogi_cachep failed\n");
7322 ret = -ENOMEM;
7323 goto out_mgmt_cmd_cachep;
7324 }
7325
7326 qla_tgt_mgmt_cmd_mempool = mempool_create(25, mempool_alloc_slab,
7327 mempool_free_slab, qla_tgt_mgmt_cmd_cachep);
7328 if (!qla_tgt_mgmt_cmd_mempool) {
7329 ql_log(ql_log_fatal, NULL, 0xe06e,
7330 "mempool_create for qla_tgt_mgmt_cmd_mempool failed\n");
7331 ret = -ENOMEM;
7332 goto out_plogi_cachep;
7333 }
7334
7335 qla_tgt_wq = alloc_workqueue("qla_tgt_wq", 0, 0);
7336 if (!qla_tgt_wq) {
7337 ql_log(ql_log_fatal, NULL, 0xe06f,
7338 "alloc_workqueue for qla_tgt_wq failed\n");
7339 ret = -ENOMEM;
7340 goto out_cmd_mempool;
7341 }
7342 /*
7343 * Return 1 to signal that initiator-mode is being disabled
7344 */
7345 return (ql2x_ini_mode == QLA2XXX_INI_MODE_DISABLED) ? 1 : 0;
7346
7347 out_cmd_mempool:
7348 mempool_destroy(qla_tgt_mgmt_cmd_mempool);
7349 out_plogi_cachep:
7350 kmem_cache_destroy(qla_tgt_plogi_cachep);
7351 out_mgmt_cmd_cachep:
7352 kmem_cache_destroy(qla_tgt_mgmt_cmd_cachep);
7353 return ret;
7354 }
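/*
 * Note the unwind ladder above: each failure label releases only the
 * resources allocated before the failing step, in reverse order, so a
 * partial initialization never leaks the caches or the mempool.
 */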
7355
7356 void qlt_exit(void)
7357 {
7358 if (!QLA_TGT_MODE_ENABLED())
7359 return;
7360
7361 destroy_workqueue(qla_tgt_wq);
7362 mempool_destroy(qla_tgt_mgmt_cmd_mempool);
7363 kmem_cache_destroy(qla_tgt_plogi_cachep);
7364 kmem_cache_destroy(qla_tgt_mgmt_cmd_cachep);
7365 }