// SPDX-License-Identifier: GPL-2.0-only
/*
 *  qla_target.c SCSI LLD infrastructure for QLogic 22xx/23xx/24xx/25xx
 *
 *  based on qla2x00t.c code:
 *
 *  Copyright (C) 2004 - 2010 Vladislav Bolkhovitin <vst@vlnb.net>
 *  Copyright (C) 2004 - 2005 Leonid Stoljar
 *  Copyright (C) 2006 Nathaniel Clark <nate@misrule.us>
 *  Copyright (C) 2006 - 2010 ID7 Ltd.
 *
 *  Forward port and refactoring to modern qla2xxx and target/configfs
 *
 *  Copyright (C) 2010-2013 Nicholas A. Bellinger <nab@kernel.org>
 */

#include <linux/module.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/blkdev.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/delay.h>
#include <linux/list.h>
#include <linux/workqueue.h>
#include <asm/unaligned.h>
#include <scsi/scsi.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_tcq.h>
#include <target/target_core_base.h>
#include <target/target_core_fabric.h>

#include "qla_def.h"
#include "qla_target.h"

static int ql2xtgt_tape_enable;
module_param(ql2xtgt_tape_enable, int, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(ql2xtgt_tape_enable,
	"Enables Sequence level error recovery (aka FC Tape). Default is 0 - no SLER; 1 - enable SLER.");

static char *qlini_mode = QLA2XXX_INI_MODE_STR_ENABLED;
module_param(qlini_mode, charp, S_IRUGO);
MODULE_PARM_DESC(qlini_mode,
	"Determines when initiator mode will be enabled. Possible values: "
	"\"exclusive\" - initiator mode will be enabled on load, "
	"disabled on enabling target mode and then on disabling target mode "
	"enabled back; "
	"\"disabled\" - initiator mode will never be enabled; "
	"\"dual\" - initiator mode will be enabled, and target mode can be "
	"activated when ready; "
	"\"enabled\" (default) - initiator mode will always stay enabled.");

static int ql_dm_tgt_ex_pct = 0;
module_param(ql_dm_tgt_ex_pct, int, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(ql_dm_tgt_ex_pct,
	"For Dual Mode (qlini_mode=dual), this parameter determines "
	"the percentage of exchanges/cmds FW will allocate resources "
	"for Target mode.");

int ql2xuctrlirq = 1;
module_param(ql2xuctrlirq, int, 0644);
MODULE_PARM_DESC(ql2xuctrlirq,
	"Allows the user to control IRQ placement via smp_affinity. "
	"Valid with qlini_mode=disabled. "
	"1 (default): enable");

int ql2x_ini_mode = QLA2XXX_INI_MODE_EXCLUSIVE;

static int qla_sam_status = SAM_STAT_BUSY;
static int tc_sam_status = SAM_STAT_TASK_SET_FULL; /* target core */

/*
 * From scsi/fc/fc_fcp.h
 */
enum fcp_resp_rsp_codes {
	FCP_TMF_CMPL = 0,
	FCP_DATA_LEN_INVALID = 1,
	FCP_CMND_FIELDS_INVALID = 2,
	FCP_DATA_PARAM_MISMATCH = 3,
	FCP_TMF_REJECTED = 4,
	FCP_TMF_FAILED = 5,
	FCP_TMF_INVALID_LUN = 9,
};

/*
 * fc_pri_ta from scsi/fc/fc_fcp.h
 */
#define FCP_PTA_SIMPLE		0	/* simple task attribute */
#define FCP_PTA_HEADQ		1	/* head of queue task attribute */
#define FCP_PTA_ORDERED		2	/* ordered task attribute */
#define FCP_PTA_ACA		4	/* auto. contingent allegiance */
#define FCP_PTA_MASK		7	/* mask for task attribute field */
#define FCP_PRI_SHIFT		3	/* priority field starts in bit 3 */
#define FCP_PRI_RESVD_MASK	0x80	/* reserved bits in priority field */

/*
 * This driver calls qla2x00_alloc_iocbs() and qla2x00_issue_marker(), which
 * must be called under HW lock and could unlock/lock it inside.
 * It isn't an issue, since in the current implementation, at the time when
 * those functions are called:
 *
 * - Either the context is IRQ and only the IRQ handler can modify HW data,
 *   including rings related fields,
 *
 * - Or access to target mode variables from struct qla_tgt doesn't
 *   cross those functions' boundaries, except tgt_stop, which is
 *   additionally protected by irq_cmd_count.
 */
/* Predefs for callbacks handed to qla2xxx LLD */
static void qlt_24xx_atio_pkt(struct scsi_qla_host *ha,
	struct atio_from_isp *pkt, uint8_t);
static void qlt_response_pkt(struct scsi_qla_host *ha, struct rsp_que *rsp,
	response_t *pkt);
static int qlt_issue_task_mgmt(struct fc_port *sess, u64 lun,
	int fn, void *iocb, int flags);
static void qlt_send_term_exchange(struct qla_qpair *, struct qla_tgt_cmd
	*cmd, struct atio_from_isp *atio, int ha_locked, int ul_abort);
static void qlt_alloc_qfull_cmd(struct scsi_qla_host *vha,
	struct atio_from_isp *atio, uint16_t status, int qfull);
static void qlt_disable_vha(struct scsi_qla_host *vha);
static void qlt_clear_tgt_db(struct qla_tgt *tgt);
static void qlt_send_notify_ack(struct qla_qpair *qpair,
	struct imm_ntfy_from_isp *ntfy,
	uint32_t add_flags, uint16_t resp_code, int resp_code_valid,
	uint16_t srr_flags, uint16_t srr_reject_code, uint8_t srr_explan);
static void qlt_send_term_imm_notif(struct scsi_qla_host *vha,
	struct imm_ntfy_from_isp *imm, int ha_locked);
static struct fc_port *qlt_create_sess(struct scsi_qla_host *vha,
	fc_port_t *fcport, bool local);
void qlt_unreg_sess(struct fc_port *sess);
static void qlt_24xx_handle_abts(struct scsi_qla_host *,
	struct abts_recv_from_24xx *);
static void qlt_send_busy(struct qla_qpair *, struct atio_from_isp *,
	uint16_t);
static int qlt_check_reserve_free_req(struct qla_qpair *qpair, uint32_t);
static inline uint32_t qlt_make_handle(struct qla_qpair *);

/*
 * Global Variables
 */
static struct kmem_cache *qla_tgt_mgmt_cmd_cachep;
struct kmem_cache *qla_tgt_plogi_cachep;
static mempool_t *qla_tgt_mgmt_cmd_mempool;
static struct workqueue_struct *qla_tgt_wq;
static DEFINE_MUTEX(qla_tgt_mutex);
static LIST_HEAD(qla_tgt_glist);

static const char *prot_op_str(u32 prot_op)
{
	switch (prot_op) {
	case TARGET_PROT_NORMAL:	return "NORMAL";
	case TARGET_PROT_DIN_INSERT:	return "DIN_INSERT";
	case TARGET_PROT_DOUT_INSERT:	return "DOUT_INSERT";
	case TARGET_PROT_DIN_STRIP:	return "DIN_STRIP";
	case TARGET_PROT_DOUT_STRIP:	return "DOUT_STRIP";
	case TARGET_PROT_DIN_PASS:	return "DIN_PASS";
	case TARGET_PROT_DOUT_PASS:	return "DOUT_PASS";
	default:			return "UNKNOWN";
	}
}

/*
 * This API intentionally takes dest as a parameter, rather than returning
 * an int value, to avoid the caller forgetting to issue wmb() after the store.
 */
void qlt_do_generation_tick(struct scsi_qla_host *vha, int *dest)
{
	scsi_qla_host_t *base_vha = pci_get_drvdata(vha->hw->pdev);

	*dest = atomic_inc_return(&base_vha->generation_tick);
	/* memory barrier */
	wmb();
}

/* Might release hw lock, then reacquire!! */
static inline int qlt_issue_marker(struct scsi_qla_host *vha, int vha_locked)
{
	/* Send marker if required */
	if (unlikely(vha->marker_needed != 0)) {
		int rc = qla2x00_issue_marker(vha, vha_locked);

		if (rc != QLA_SUCCESS) {
			ql_dbg(ql_dbg_tgt, vha, 0xe03d,
			    "qla_target(%d): issue_marker() failed\n",
			    vha->vp_idx);
		}
		return rc;
	}
	return QLA_SUCCESS;
}

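/*
 * Route a frame to the scsi_qla_host that owns the 24-bit destination
 * port ID: the physical port's own d_id is matched directly, other
 * hosts (e.g. NPIV vports) are looked up in the per-HBA host_map btree
 * keyed by the packed port ID.
 */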
static inline
struct scsi_qla_host *qlt_find_host_by_d_id(struct scsi_qla_host *vha,
	be_id_t d_id)
{
	struct scsi_qla_host *host;
	uint32_t key;

	if (vha->d_id.b.area == d_id.area &&
	    vha->d_id.b.domain == d_id.domain &&
	    vha->d_id.b.al_pa == d_id.al_pa)
		return vha;

	key = be_to_port_id(d_id).b24;

	host = btree_lookup32(&vha->hw->tgt.host_map, key);
	if (!host)
		ql_dbg(ql_dbg_tgt_mgt + ql_dbg_verbose, vha, 0xf005,
		    "Unable to find host %06x\n", key);

	return host;
}

static inline
struct scsi_qla_host *qlt_find_host_by_vp_idx(struct scsi_qla_host *vha,
	uint16_t vp_idx)
{
	struct qla_hw_data *ha = vha->hw;

	if (vha->vp_idx == vp_idx)
		return vha;

	BUG_ON(ha->tgt.tgt_vp_map == NULL);
	if (likely(test_bit(vp_idx, ha->vp_idx_map)))
		return ha->tgt.tgt_vp_map[vp_idx].vha;

	return NULL;
}

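/*
 * Track how many commands the target is currently processing and
 * remember the high-water mark; both counters are serialized by
 * q_full_lock and feed the driver's queue-full accounting.
 */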
static inline void qlt_incr_num_pend_cmds(struct scsi_qla_host *vha)
{
	unsigned long flags;

	spin_lock_irqsave(&vha->hw->tgt.q_full_lock, flags);

	vha->hw->tgt.num_pend_cmds++;
	if (vha->hw->tgt.num_pend_cmds > vha->qla_stats.stat_max_pend_cmds)
		vha->qla_stats.stat_max_pend_cmds =
			vha->hw->tgt.num_pend_cmds;
	spin_unlock_irqrestore(&vha->hw->tgt.q_full_lock, flags);
}

static inline void qlt_decr_num_pend_cmds(struct scsi_qla_host *vha)
{
	unsigned long flags;

	spin_lock_irqsave(&vha->hw->tgt.q_full_lock, flags);
	vha->hw->tgt.num_pend_cmds--;
	spin_unlock_irqrestore(&vha->hw->tgt.q_full_lock, flags);
}

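/*
 * An ATIO whose d_id does not match any known host (for instance a
 * vport that has not finished initializing) is parked on
 * unknown_atio_list and retried from delayed work; if the target is
 * being stopped, the exchange is terminated instead.
 */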
static void qlt_queue_unknown_atio(scsi_qla_host_t *vha,
	struct atio_from_isp *atio, uint8_t ha_locked)
{
	struct qla_tgt_sess_op *u;
	struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
	unsigned long flags;

	if (tgt->tgt_stop) {
		ql_dbg(ql_dbg_async, vha, 0x502c,
		    "qla_target(%d): dropping unknown ATIO_TYPE7, because tgt is being stopped",
		    vha->vp_idx);
		goto out_term;
	}

	u = kzalloc(sizeof(*u), GFP_ATOMIC);
	if (u == NULL)
		goto out_term;

	u->vha = vha;
	memcpy(&u->atio, atio, sizeof(*atio));
	INIT_LIST_HEAD(&u->cmd_list);

	spin_lock_irqsave(&vha->cmd_list_lock, flags);
	list_add_tail(&u->cmd_list, &vha->unknown_atio_list);
	spin_unlock_irqrestore(&vha->cmd_list_lock, flags);

	schedule_delayed_work(&vha->unknown_atio_work, 1);

out:
	return;

out_term:
	qlt_send_term_exchange(vha->hw->base_qpair, NULL, atio, ha_locked, 0);
	goto out;
}

static void qlt_try_to_dequeue_unknown_atios(struct scsi_qla_host *vha,
	uint8_t ha_locked)
{
	struct qla_tgt_sess_op *u, *t;
	scsi_qla_host_t *host;
	struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
	unsigned long flags;
	uint8_t queued = 0;

	list_for_each_entry_safe(u, t, &vha->unknown_atio_list, cmd_list) {
		if (u->aborted) {
			ql_dbg(ql_dbg_async, vha, 0x502e,
			    "Freeing unknown %s %p, because of Abort\n",
			    "ATIO_TYPE7", u);
			qlt_send_term_exchange(vha->hw->base_qpair, NULL,
			    &u->atio, ha_locked, 0);
			goto abort;
		}

		host = qlt_find_host_by_d_id(vha, u->atio.u.isp24.fcp_hdr.d_id);
		if (host != NULL) {
			ql_dbg(ql_dbg_async + ql_dbg_verbose, vha, 0x502f,
			    "Requeuing unknown ATIO_TYPE7 %p\n", u);
			qlt_24xx_atio_pkt(host, &u->atio, ha_locked);
		} else if (tgt->tgt_stop) {
			ql_dbg(ql_dbg_async + ql_dbg_verbose, vha, 0x503a,
			    "Freeing unknown %s %p, because tgt is being stopped\n",
			    "ATIO_TYPE7", u);
			qlt_send_term_exchange(vha->hw->base_qpair, NULL,
			    &u->atio, ha_locked, 0);
		} else {
			ql_dbg(ql_dbg_async + ql_dbg_verbose, vha, 0x503d,
			    "Reschedule u %p, vha %p, host %p\n", u, vha, host);
			if (!queued) {
				queued = 1;
				schedule_delayed_work(&vha->unknown_atio_work,
				    1);
			}
			continue;
		}

abort:
		spin_lock_irqsave(&vha->cmd_list_lock, flags);
		list_del(&u->cmd_list);
		spin_unlock_irqrestore(&vha->cmd_list_lock, flags);
		kfree(u);
	}
}

void qlt_unknown_atio_work_fn(struct work_struct *work)
{
	struct scsi_qla_host *vha = container_of(to_delayed_work(work),
	    struct scsi_qla_host, unknown_atio_work);

	qlt_try_to_dequeue_unknown_atios(vha, 0);
}

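/*
 * Entry point for raw ATIO-ring entries: dispatch each entry type
 * (ATIO7 command, immediate notify, VP report-ID, ABTS) to the vha that
 * owns it, selected by d_id or vp_index as appropriate.
 */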
static bool qlt_24xx_atio_pkt_all_vps(struct scsi_qla_host *vha,
	struct atio_from_isp *atio, uint8_t ha_locked)
{
	ql_dbg(ql_dbg_tgt, vha, 0xe072,
	    "%s: qla_target(%d): type %x ox_id %04x\n",
	    __func__, vha->vp_idx, atio->u.raw.entry_type,
	    be16_to_cpu(atio->u.isp24.fcp_hdr.ox_id));

	switch (atio->u.raw.entry_type) {
	case ATIO_TYPE7:
	{
		struct scsi_qla_host *host = qlt_find_host_by_d_id(vha,
		    atio->u.isp24.fcp_hdr.d_id);

		if (unlikely(NULL == host)) {
			ql_dbg(ql_dbg_tgt, vha, 0xe03e,
			    "qla_target(%d): Received ATIO_TYPE7 "
			    "with unknown d_id %x:%x:%x\n", vha->vp_idx,
			    atio->u.isp24.fcp_hdr.d_id.domain,
			    atio->u.isp24.fcp_hdr.d_id.area,
			    atio->u.isp24.fcp_hdr.d_id.al_pa);

			qlt_queue_unknown_atio(vha, atio, ha_locked);
			break;
		}
		if (unlikely(!list_empty(&vha->unknown_atio_list)))
			qlt_try_to_dequeue_unknown_atios(vha, ha_locked);

		qlt_24xx_atio_pkt(host, atio, ha_locked);
		break;
	}

	case IMMED_NOTIFY_TYPE:
	{
		struct scsi_qla_host *host = vha;
		struct imm_ntfy_from_isp *entry =
		    (struct imm_ntfy_from_isp *)atio;

		qlt_issue_marker(vha, ha_locked);

		if ((entry->u.isp24.vp_index != 0xFF) &&
		    (entry->u.isp24.nport_handle != 0xFFFF)) {
			host = qlt_find_host_by_vp_idx(vha,
			    entry->u.isp24.vp_index);
			if (unlikely(!host)) {
				ql_dbg(ql_dbg_tgt, vha, 0xe03f,
				    "qla_target(%d): Received "
				    "ATIO (IMMED_NOTIFY_TYPE) "
				    "with unknown vp_index %d\n",
				    vha->vp_idx, entry->u.isp24.vp_index);
				break;
			}
		}
		qlt_24xx_atio_pkt(host, atio, ha_locked);
		break;
	}

	case VP_RPT_ID_IOCB_TYPE:
		qla24xx_report_id_acquisition(vha,
		    (struct vp_rpt_id_entry_24xx *)atio);
		break;

	case ABTS_RECV_24XX:
	{
		struct abts_recv_from_24xx *entry =
		    (struct abts_recv_from_24xx *)atio;
		struct scsi_qla_host *host = qlt_find_host_by_vp_idx(vha,
		    entry->vp_index);
		unsigned long flags;

		if (unlikely(!host)) {
			ql_dbg(ql_dbg_tgt, vha, 0xe00a,
			    "qla_target(%d): Response pkt (ABTS_RECV_24XX) "
			    "received, with unknown vp_index %d\n",
			    vha->vp_idx, entry->vp_index);
			break;
		}
		if (!ha_locked)
			spin_lock_irqsave(&host->hw->hardware_lock, flags);
		qlt_24xx_handle_abts(host, (struct abts_recv_from_24xx *)atio);
		if (!ha_locked)
			spin_unlock_irqrestore(&host->hw->hardware_lock, flags);
		break;
	}

	/* case PUREX_IOCB_TYPE: ql2xmvasynctoatio */

	default:
		ql_dbg(ql_dbg_tgt, vha, 0xe040,
		    "qla_target(%d): Received unknown ATIO atio "
		    "type %x\n", vha->vp_idx, atio->u.raw.entry_type);
		break;
	}

	return false;
}

void qlt_response_pkt_all_vps(struct scsi_qla_host *vha,
	struct rsp_que *rsp, response_t *pkt)
{
	switch (pkt->entry_type) {
	case CTIO_CRC2:
		ql_dbg(ql_dbg_tgt, vha, 0xe073,
		    "qla_target(%d):%s: CRC2 Response pkt\n",
		    vha->vp_idx, __func__);
		/* fall through */
	case CTIO_TYPE7:
	{
		struct ctio7_from_24xx *entry = (struct ctio7_from_24xx *)pkt;
		struct scsi_qla_host *host = qlt_find_host_by_vp_idx(vha,
		    entry->vp_index);

		if (unlikely(!host)) {
			ql_dbg(ql_dbg_tgt, vha, 0xe041,
			    "qla_target(%d): Response pkt (CTIO_TYPE7) "
			    "received, with unknown vp_index %d\n",
			    vha->vp_idx, entry->vp_index);
			break;
		}
		qlt_response_pkt(host, rsp, pkt);
		break;
	}

	case IMMED_NOTIFY_TYPE:
	{
		struct scsi_qla_host *host;
		struct imm_ntfy_from_isp *entry =
		    (struct imm_ntfy_from_isp *)pkt;

		host = qlt_find_host_by_vp_idx(vha, entry->u.isp24.vp_index);
		if (unlikely(!host)) {
			ql_dbg(ql_dbg_tgt, vha, 0xe042,
			    "qla_target(%d): Response pkt (IMMED_NOTIFY_TYPE) "
			    "received, with unknown vp_index %d\n",
			    vha->vp_idx, entry->u.isp24.vp_index);
			break;
		}
		qlt_response_pkt(host, rsp, pkt);
		break;
	}

	case NOTIFY_ACK_TYPE:
	{
		struct scsi_qla_host *host = vha;
		struct nack_to_isp *entry = (struct nack_to_isp *)pkt;

		if (0xFF != entry->u.isp24.vp_index) {
			host = qlt_find_host_by_vp_idx(vha,
			    entry->u.isp24.vp_index);
			if (unlikely(!host)) {
				ql_dbg(ql_dbg_tgt, vha, 0xe043,
				    "qla_target(%d): Response "
				    "pkt (NOTIFY_ACK_TYPE) "
				    "received, with unknown "
				    "vp_index %d\n", vha->vp_idx,
				    entry->u.isp24.vp_index);
				break;
			}
		}
		qlt_response_pkt(host, rsp, pkt);
		break;
	}

	case ABTS_RECV_24XX:
	{
		struct abts_recv_from_24xx *entry =
		    (struct abts_recv_from_24xx *)pkt;
		struct scsi_qla_host *host = qlt_find_host_by_vp_idx(vha,
		    entry->vp_index);

		if (unlikely(!host)) {
			ql_dbg(ql_dbg_tgt, vha, 0xe044,
			    "qla_target(%d): Response pkt "
			    "(ABTS_RECV_24XX) received, with unknown "
			    "vp_index %d\n", vha->vp_idx, entry->vp_index);
			break;
		}
		qlt_response_pkt(host, rsp, pkt);
		break;
	}

	case ABTS_RESP_24XX:
	{
		struct abts_resp_to_24xx *entry =
		    (struct abts_resp_to_24xx *)pkt;
		struct scsi_qla_host *host = qlt_find_host_by_vp_idx(vha,
		    entry->vp_index);

		if (unlikely(!host)) {
			ql_dbg(ql_dbg_tgt, vha, 0xe045,
			    "qla_target(%d): Response pkt "
			    "(ABTS_RESP_24XX) received, with unknown "
			    "vp_index %d\n", vha->vp_idx, entry->vp_index);
			break;
		}
		qlt_response_pkt(host, rsp, pkt);
		break;
	}

	default:
		qlt_response_pkt(vha, rsp, pkt);
		break;
	}
}

/*
 * All qlt_plogi_ack_t operations are protected by hardware_lock
 */
static int qla24xx_post_nack_work(struct scsi_qla_host *vha, fc_port_t *fcport,
	struct imm_ntfy_from_isp *ntfy, int type)
{
	struct qla_work_evt *e;

	e = qla2x00_alloc_work(vha, QLA_EVT_NACK);
	if (!e)
		return QLA_FUNCTION_FAILED;

	e->u.nack.fcport = fcport;
	e->u.nack.type = type;
	memcpy(e->u.nack.iocb, ntfy, sizeof(struct imm_ntfy_from_isp));
	return qla2x00_post_work(vha, e);
}

static void qla2x00_async_nack_sp_done(srb_t *sp, int res)
{
	struct scsi_qla_host *vha = sp->vha;
	unsigned long flags;

	ql_dbg(ql_dbg_disc, vha, 0x20f2,
	    "Async done-%s res %x %8phC type %d\n",
	    sp->name, res, sp->fcport->port_name, sp->type);

	spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags);
	sp->fcport->flags &= ~FCF_ASYNC_SENT;
	sp->fcport->chip_reset = vha->hw->base_qpair->chip_reset;

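	/*
	 * Update the login state according to which NACK just completed:
	 * PLOGI bumps login_gen and arms logout-on-delete, PRLI may finish
	 * the login and schedule rport registration, and LOGO marks the
	 * port unavailable.
	 */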
	switch (sp->type) {
	case SRB_NACK_PLOGI:
		sp->fcport->login_gen++;
		sp->fcport->fw_login_state = DSC_LS_PLOGI_COMP;
		sp->fcport->logout_on_delete = 1;
		sp->fcport->plogi_nack_done_deadline = jiffies + HZ;
		sp->fcport->send_els_logo = 0;
		break;

	case SRB_NACK_PRLI:
		sp->fcport->fw_login_state = DSC_LS_PRLI_COMP;
		sp->fcport->deleted = 0;
		sp->fcport->send_els_logo = 0;

		if (!sp->fcport->login_succ &&
		    !IS_SW_RESV_ADDR(sp->fcport->d_id)) {
			sp->fcport->login_succ = 1;

			vha->fcport_count++;
			spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
			qla24xx_sched_upd_fcport(sp->fcport);
			spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags);
		} else {
			sp->fcport->login_retry = 0;
			qla2x00_set_fcport_disc_state(sp->fcport,
			    DSC_LOGIN_COMPLETE);
			sp->fcport->deleted = 0;
			sp->fcport->logout_on_delete = 1;
		}
		break;

	case SRB_NACK_LOGO:
		sp->fcport->login_gen++;
		sp->fcport->fw_login_state = DSC_LS_PORT_UNAVAIL;
		qlt_logo_completion_handler(sp->fcport, MBS_COMMAND_COMPLETE);
		break;
	}
	spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);

	sp->free(sp);
}

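/*
 * Issue a NOTIFY ACK for a PLOGI/PRLI/LOGO immediate notify as an async
 * SRB; qla2x00_async_nack_sp_done() above finishes the fcport state
 * update when the firmware completes the IOCB.
 */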
int qla24xx_async_notify_ack(scsi_qla_host_t *vha, fc_port_t *fcport,
	struct imm_ntfy_from_isp *ntfy, int type)
{
	int rval = QLA_FUNCTION_FAILED;
	srb_t *sp;
	char *c = NULL;

	fcport->flags |= FCF_ASYNC_SENT;
	switch (type) {
	case SRB_NACK_PLOGI:
		fcport->fw_login_state = DSC_LS_PLOGI_PEND;
		c = "PLOGI";
		break;
	case SRB_NACK_PRLI:
		fcport->fw_login_state = DSC_LS_PRLI_PEND;
		fcport->deleted = 0;
		c = "PRLI";
		break;
	case SRB_NACK_LOGO:
		fcport->fw_login_state = DSC_LS_LOGO_PEND;
		c = "LOGO";
		break;
	}

	sp = qla2x00_get_sp(vha, fcport, GFP_ATOMIC);
	if (!sp)
		goto done;

	sp->type = type;
	sp->name = "nack";

	sp->u.iocb_cmd.timeout = qla2x00_async_iocb_timeout;
	qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha) + 2);

	sp->u.iocb_cmd.u.nack.ntfy = ntfy;
	sp->done = qla2x00_async_nack_sp_done;

	ql_dbg(ql_dbg_disc, vha, 0x20f4,
	    "Async-%s %8phC hndl %x %s\n",
	    sp->name, fcport->port_name, sp->handle, c);

	rval = qla2x00_start_sp(sp);
	if (rval != QLA_SUCCESS)
		goto done_free_sp;

	return rval;

done_free_sp:
	sp->free(sp);
done:
	fcport->flags &= ~FCF_ASYNC_SENT;
	return rval;
}

void qla24xx_do_nack_work(struct scsi_qla_host *vha, struct qla_work_evt *e)
{
	fc_port_t *t;

	switch (e->u.nack.type) {
	case SRB_NACK_PRLI:
		t = e->u.nack.fcport;
		flush_work(&t->del_work);
		flush_work(&t->free_work);
		mutex_lock(&vha->vha_tgt.tgt_mutex);
		t = qlt_create_sess(vha, e->u.nack.fcport, 0);
		mutex_unlock(&vha->vha_tgt.tgt_mutex);
		if (t) {
			ql_log(ql_log_info, vha, 0xd034,
			    "%s create sess success %p", __func__, t);
			/* create sess has an extra kref */
			vha->hw->tgt.tgt_ops->put_sess(e->u.nack.fcport);
		}
		break;
	}
	qla24xx_async_notify_ack(vha, e->u.nack.fcport,
	    (struct imm_ntfy_from_isp *)e->u.nack.iocb, e->u.nack.type);
}

void qla24xx_delete_sess_fn(struct work_struct *work)
{
	fc_port_t *fcport = container_of(work, struct fc_port, del_work);
	struct qla_hw_data *ha = fcport->vha->hw;

	if (fcport->se_sess) {
		ha->tgt.tgt_ops->shutdown_sess(fcport);
		ha->tgt.tgt_ops->put_sess(fcport);
	} else {
		qlt_unreg_sess(fcport);
	}
}

/*
 * Called from qla2x00_reg_remote_port()
 */
void qlt_fc_port_added(struct scsi_qla_host *vha, fc_port_t *fcport)
{
	struct qla_hw_data *ha = vha->hw;
	struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
	struct fc_port *sess = fcport;
	unsigned long flags;

	if (!vha->hw->tgt.tgt_ops)
		return;

	spin_lock_irqsave(&ha->tgt.sess_lock, flags);
	if (tgt->tgt_stop) {
		spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
		return;
	}

	if (fcport->disc_state == DSC_DELETE_PEND) {
		spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
		return;
	}

	if (!sess->se_sess) {
		spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);

		mutex_lock(&vha->vha_tgt.tgt_mutex);
		sess = qlt_create_sess(vha, fcport, false);
		mutex_unlock(&vha->vha_tgt.tgt_mutex);

		spin_lock_irqsave(&ha->tgt.sess_lock, flags);
	} else {
		if (fcport->fw_login_state == DSC_LS_PRLI_COMP) {
			spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
			return;
		}

		if (!kref_get_unless_zero(&sess->sess_kref)) {
			ql_dbg(ql_dbg_disc, vha, 0x2107,
			    "%s: kref_get fail sess %8phC\n",
			    __func__, sess->port_name);
			spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
			return;
		}

		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf04c,
		    "qla_target(%u): %ssession for port %8phC "
		    "(loop ID %d) reappeared\n", vha->vp_idx,
		    sess->local ? "local " : "", sess->port_name, sess->loop_id);

		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf007,
		    "Reappeared sess %p\n", sess);

		ha->tgt.tgt_ops->update_sess(sess, fcport->d_id,
		    fcport->loop_id,
		    (fcport->flags & FCF_CONF_COMP_SUPPORTED));
	}

	if (sess && sess->local) {
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf04d,
		    "qla_target(%u): local session for "
		    "port %8phC (loop ID %d) became global\n", vha->vp_idx,
		    fcport->port_name, sess->loop_id);
		sess->local = 0;
	}
	spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);

	ha->tgt.tgt_ops->put_sess(sess);
}

/*
 * This is a zero-based ref-counting solution, since hardware_lock
 * guarantees that ref_count is not modified concurrently.
 * Upon successful return the content of iocb is undefined.
 */
static struct qlt_plogi_ack_t *
qlt_plogi_ack_find_add(struct scsi_qla_host *vha, port_id_t *id,
	struct imm_ntfy_from_isp *iocb)
{
	struct qlt_plogi_ack_t *pla;

	lockdep_assert_held(&vha->hw->hardware_lock);

	list_for_each_entry(pla, &vha->plogi_ack_list, list) {
		if (pla->id.b24 == id->b24) {
			ql_dbg(ql_dbg_disc + ql_dbg_verbose, vha, 0x210d,
			    "%s %d %8phC Term INOT due to new INOT",
			    __func__, __LINE__,
			    pla->iocb.u.isp24.port_name);
			qlt_send_term_imm_notif(vha, &pla->iocb, 1);
			memcpy(&pla->iocb, iocb, sizeof(pla->iocb));
			return pla;
		}
	}

	pla = kmem_cache_zalloc(qla_tgt_plogi_cachep, GFP_ATOMIC);
	if (!pla) {
		ql_dbg(ql_dbg_async, vha, 0x5088,
		    "qla_target(%d): Allocation of plogi_ack failed\n",
		    vha->vp_idx);
		return NULL;
	}

	memcpy(&pla->iocb, iocb, sizeof(pla->iocb));
	pla->id = *id;
	list_add_tail(&pla->list, &vha->plogi_ack_list);

	return pla;
}

void qlt_plogi_ack_unref(struct scsi_qla_host *vha,
	struct qlt_plogi_ack_t *pla)
{
	struct imm_ntfy_from_isp *iocb = &pla->iocb;
	port_id_t port_id;
	uint16_t loop_id;
	fc_port_t *fcport = pla->fcport;

	BUG_ON(!pla->ref_count);
	pla->ref_count--;

	if (pla->ref_count)
		return;

	ql_dbg(ql_dbg_disc, vha, 0x5089,
	    "Sending PLOGI ACK to wwn %8phC s_id %02x:%02x:%02x loop_id %#04x"
	    " exch %#x ox_id %#x\n", iocb->u.isp24.port_name,
	    iocb->u.isp24.port_id[2], iocb->u.isp24.port_id[1],
	    iocb->u.isp24.port_id[0],
	    le16_to_cpu(iocb->u.isp24.nport_handle),
	    iocb->u.isp24.exchange_address, iocb->ox_id);

	port_id.b.domain = iocb->u.isp24.port_id[2];
	port_id.b.area = iocb->u.isp24.port_id[1];
	port_id.b.al_pa = iocb->u.isp24.port_id[0];
	port_id.b.rsvd_1 = 0;

	loop_id = le16_to_cpu(iocb->u.isp24.nport_handle);

	fcport->loop_id = loop_id;
	fcport->d_id = port_id;
	if (iocb->u.isp24.status_subcode == ELS_PLOGI)
		qla24xx_post_nack_work(vha, fcport, iocb, SRB_NACK_PLOGI);
	else
		qla24xx_post_nack_work(vha, fcport, iocb, SRB_NACK_PRLI);

	list_for_each_entry(fcport, &vha->vp_fcports, list) {
		if (fcport->plogi_link[QLT_PLOGI_LINK_SAME_WWN] == pla)
			fcport->plogi_link[QLT_PLOGI_LINK_SAME_WWN] = NULL;
		if (fcport->plogi_link[QLT_PLOGI_LINK_CONFLICT] == pla)
			fcport->plogi_link[QLT_PLOGI_LINK_CONFLICT] = NULL;
	}

	list_del(&pla->list);
	kmem_cache_free(qla_tgt_plogi_cachep, pla);
}

void
qlt_plogi_ack_link(struct scsi_qla_host *vha, struct qlt_plogi_ack_t *pla,
	struct fc_port *sess, enum qlt_plogi_link_t link)
{
	struct imm_ntfy_from_isp *iocb = &pla->iocb;

	/* Inc ref_count first because link might already be pointing at pla */
	pla->ref_count++;

	ql_dbg(ql_dbg_tgt_mgt, vha, 0xf097,
	    "Linking sess %p [%d] wwn %8phC with PLOGI ACK to wwn %8phC"
	    " s_id %02x:%02x:%02x, ref=%d pla %p link %d\n",
	    sess, link, sess->port_name,
	    iocb->u.isp24.port_name, iocb->u.isp24.port_id[2],
	    iocb->u.isp24.port_id[1], iocb->u.isp24.port_id[0],
	    pla->ref_count, pla, link);

	if (link == QLT_PLOGI_LINK_CONFLICT) {
		switch (sess->disc_state) {
		case DSC_DELETED:
		case DSC_DELETE_PEND:
			pla->ref_count--;
			return;
		default:
			break;
		}
	}

	if (sess->plogi_link[link])
		qlt_plogi_ack_unref(vha, sess->plogi_link[link]);

	if (link == QLT_PLOGI_LINK_SAME_WWN)
		pla->fcport = sess;

	sess->plogi_link[link] = pla;
}

typedef struct {
	/* These fields must be initialized by the caller */
	port_id_t id;
	/*
	 * Number of cmds dropped while we were waiting for the initiator
	 * to ack LOGO. Initialize to 1 if LOGO is triggered by a command,
	 * otherwise to 0.
	 */
	int cmd_count;

	/* These fields are used by callee */
	struct list_head list;
} qlt_port_logo_t;

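/*
 * Send an explicit ELS LOGO to a port, coalescing with any LOGO to the
 * same port ID that is already in flight: concurrent callers only add
 * their dropped-command count to the pending entry on logo_list and
 * return.
 */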
static void
qlt_send_first_logo(struct scsi_qla_host *vha, qlt_port_logo_t *logo)
{
	qlt_port_logo_t *tmp;
	int res;

	mutex_lock(&vha->vha_tgt.tgt_mutex);

	list_for_each_entry(tmp, &vha->logo_list, list) {
		if (tmp->id.b24 == logo->id.b24) {
			tmp->cmd_count += logo->cmd_count;
			mutex_unlock(&vha->vha_tgt.tgt_mutex);
			return;
		}
	}

	list_add_tail(&logo->list, &vha->logo_list);

	mutex_unlock(&vha->vha_tgt.tgt_mutex);

	res = qla24xx_els_dcmd_iocb(vha, ELS_DCMD_LOGO, logo->id);

	mutex_lock(&vha->vha_tgt.tgt_mutex);
	list_del(&logo->list);
	mutex_unlock(&vha->vha_tgt.tgt_mutex);

	ql_dbg(ql_dbg_tgt_mgt, vha, 0xf098,
	    "Finished LOGO to %02x:%02x:%02x, dropped %d cmds, res = %#x\n",
	    logo->id.b.domain, logo->id.b.area, logo->id.b.al_pa,
	    logo->cmd_count, res);
}

void qlt_free_session_done(struct work_struct *work)
{
	struct fc_port *sess = container_of(work, struct fc_port,
	    free_work);
	struct qla_tgt *tgt = sess->tgt;
	struct scsi_qla_host *vha = sess->vha;
	struct qla_hw_data *ha = vha->hw;
	unsigned long flags;
	bool logout_started = false;
	scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);
	struct qlt_plogi_ack_t *own =
	    sess->plogi_link[QLT_PLOGI_LINK_SAME_WWN];

	ql_dbg(ql_dbg_tgt_mgt, vha, 0xf084,
	    "%s: se_sess %p / sess %p from port %8phC loop_id %#04x"
	    " s_id %02x:%02x:%02x logout %d keep %d els_logo %d\n",
	    __func__, sess->se_sess, sess, sess->port_name, sess->loop_id,
	    sess->d_id.b.domain, sess->d_id.b.area, sess->d_id.b.al_pa,
	    sess->logout_on_delete, sess->keep_nport_handle,
	    sess->send_els_logo);

	if (!IS_SW_RESV_ADDR(sess->d_id)) {
		qla2x00_mark_device_lost(vha, sess, 0);

		if (sess->send_els_logo) {
			qlt_port_logo_t logo;

			logo.id = sess->d_id;
			logo.cmd_count = 0;
			if (!own)
				qlt_send_first_logo(vha, &logo);
			sess->send_els_logo = 0;
		}

		if (sess->logout_on_delete && sess->loop_id != FC_NO_LOOP_ID) {
			int rc;

			if (!own ||
			    (own &&
			     (own->iocb.u.isp24.status_subcode == ELS_PLOGI))) {
				rc = qla2x00_post_async_logout_work(vha, sess,
				    NULL);
				if (rc != QLA_SUCCESS)
					ql_log(ql_log_warn, vha, 0xf085,
					    "Schedule logo failed sess %p rc %d\n",
					    sess, rc);
				else
					logout_started = true;
			} else if (own && (own->iocb.u.isp24.status_subcode ==
			    ELS_PRLI) && ha->flags.rida_fmt2) {
				rc = qla2x00_post_async_prlo_work(vha, sess,
				    NULL);
				if (rc != QLA_SUCCESS)
					ql_log(ql_log_warn, vha, 0xf085,
					    "Schedule PRLO failed sess %p rc %d\n",
					    sess, rc);
				else
					logout_started = true;
			}
		} /* if sess->logout_on_delete */

		if (sess->nvme_flag & NVME_FLAG_REGISTERED &&
		    !(sess->nvme_flag & NVME_FLAG_DELETING)) {
			sess->nvme_flag |= NVME_FLAG_DELETING;
			qla_nvme_unregister_remote_port(sess);
		}
	}

	/*
	 * Release the target session for FC Nexus from fabric module code.
	 */
	if (sess->se_sess != NULL)
		ha->tgt.tgt_ops->free_session(sess);

	if (logout_started) {
		bool traced = false;
		u16 cnt = 0;

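		/*
		 * Poll for logout completion for up to ~20 seconds
		 * (200 iterations x 100 ms) before tearing the session down.
		 */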
		while (!READ_ONCE(sess->logout_completed)) {
			if (!traced) {
				ql_dbg(ql_dbg_tgt_mgt, vha, 0xf086,
				    "%s: waiting for sess %p logout\n",
				    __func__, sess);
				traced = true;
			}
			msleep(100);
			cnt++;
			if (cnt > 200)
				break;
		}

		ql_dbg(ql_dbg_disc, vha, 0xf087,
		    "%s: sess %p logout completed\n", __func__, sess);
	}

	if (sess->logo_ack_needed) {
		sess->logo_ack_needed = 0;
		qla24xx_async_notify_ack(vha, sess,
		    (struct imm_ntfy_from_isp *)sess->iocb, SRB_NACK_LOGO);
	}

	spin_lock_irqsave(&ha->tgt.sess_lock, flags);
	if (sess->se_sess) {
		sess->se_sess = NULL;
		if (tgt && !IS_SW_RESV_ADDR(sess->d_id))
			tgt->sess_count--;
	}

	qla2x00_set_fcport_disc_state(sess, DSC_DELETED);
	sess->fw_login_state = DSC_LS_PORT_UNAVAIL;
	sess->deleted = QLA_SESS_DELETED;

	if (sess->login_succ && !IS_SW_RESV_ADDR(sess->d_id)) {
		vha->fcport_count--;
		sess->login_succ = 0;
	}

	qla2x00_clear_loop_id(sess);

	if (sess->conflict) {
		sess->conflict->login_pause = 0;
		sess->conflict = NULL;
		if (!test_bit(UNLOADING, &vha->dpc_flags))
			set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
	}

	{
		struct qlt_plogi_ack_t *con =
		    sess->plogi_link[QLT_PLOGI_LINK_CONFLICT];
		struct imm_ntfy_from_isp *iocb;

		own = sess->plogi_link[QLT_PLOGI_LINK_SAME_WWN];

		if (con) {
			iocb = &con->iocb;
			ql_dbg(ql_dbg_tgt_mgt, vha, 0xf099,
			    "se_sess %p / sess %p port %8phC is gone,"
			    " %s (ref=%d), releasing PLOGI for %8phC (ref=%d)\n",
			    sess->se_sess, sess, sess->port_name,
			    own ? "releasing own PLOGI" : "no own PLOGI pending",
			    own ? own->ref_count : -1,
			    iocb->u.isp24.port_name, con->ref_count);
			qlt_plogi_ack_unref(vha, con);
			sess->plogi_link[QLT_PLOGI_LINK_CONFLICT] = NULL;
		} else {
			ql_dbg(ql_dbg_tgt_mgt, vha, 0xf09a,
			    "se_sess %p / sess %p port %8phC is gone, %s (ref=%d)\n",
			    sess->se_sess, sess, sess->port_name,
			    own ? "releasing own PLOGI" :
			    "no own PLOGI pending",
			    own ? own->ref_count : -1);
		}

		if (own) {
			sess->fw_login_state = DSC_LS_PLOGI_PEND;
			qlt_plogi_ack_unref(vha, own);
			sess->plogi_link[QLT_PLOGI_LINK_SAME_WWN] = NULL;
		}
	}

	sess->explicit_logout = 0;
	spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
	sess->free_pending = 0;

	ql_dbg(ql_dbg_tgt_mgt, vha, 0xf001,
	    "Unregistration of sess %p %8phC finished fcp_cnt %d\n",
	    sess, sess->port_name, vha->fcport_count);

	if (tgt && (tgt->sess_count == 0))
		wake_up_all(&tgt->waitQ);

	if (!test_bit(PFLG_DRIVER_REMOVING, &base_vha->pci_flags) &&
	    !(vha->vp_idx && test_bit(VPORT_DELETE, &vha->dpc_flags)) &&
	    (!tgt || !tgt->tgt_stop) && !LOOP_TRANSITION(vha)) {
		switch (vha->host->active_mode) {
		case MODE_INITIATOR:
		case MODE_DUAL:
			set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
			qla2xxx_wake_dpc(vha);
			break;
		case MODE_TARGET:
		default:
			/* no-op */
			break;
		}
	}

	if (vha->fcport_count == 0)
		wake_up_all(&vha->fcport_waitQ);
}

/* ha->tgt.sess_lock is supposed to be held on entry */
void qlt_unreg_sess(struct fc_port *sess)
{
	struct scsi_qla_host *vha = sess->vha;
	unsigned long flags;

	ql_dbg(ql_dbg_disc, sess->vha, 0x210a,
	    "%s sess %p for deletion %8phC\n",
	    __func__, sess, sess->port_name);

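	/*
	 * free_pending guards against double unregistration: only the
	 * first caller queues free_work, later callers return early.
	 */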
	spin_lock_irqsave(&sess->vha->work_lock, flags);
	if (sess->free_pending) {
		spin_unlock_irqrestore(&sess->vha->work_lock, flags);
		return;
	}
	sess->free_pending = 1;
	spin_unlock_irqrestore(&sess->vha->work_lock, flags);

	if (sess->se_sess)
		vha->hw->tgt.tgt_ops->clear_nacl_from_fcport_map(sess);

	sess->deleted = QLA_SESS_DELETION_IN_PROGRESS;
	qla2x00_set_fcport_disc_state(sess, DSC_DELETE_PEND);
	sess->last_rscn_gen = sess->rscn_gen;
	sess->last_login_gen = sess->login_gen;

	queue_work(sess->vha->hw->wq, &sess->free_work);
}
EXPORT_SYMBOL(qlt_unreg_sess);

static int qlt_reset(struct scsi_qla_host *vha, void *iocb, int mcmd)
{
	struct qla_hw_data *ha = vha->hw;
	struct fc_port *sess = NULL;
	uint16_t loop_id;
	int res = 0;
	struct imm_ntfy_from_isp *n = (struct imm_ntfy_from_isp *)iocb;
	unsigned long flags;

	loop_id = le16_to_cpu(n->u.isp24.nport_handle);
	if (loop_id == 0xFFFF) {
		/* Global event */
		atomic_inc(&vha->vha_tgt.qla_tgt->tgt_global_resets_count);
		spin_lock_irqsave(&ha->tgt.sess_lock, flags);
		qlt_clear_tgt_db(vha->vha_tgt.qla_tgt);
		spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
	} else {
		spin_lock_irqsave(&ha->tgt.sess_lock, flags);
		sess = ha->tgt.tgt_ops->find_sess_by_loop_id(vha, loop_id);
		spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
	}

	ql_dbg(ql_dbg_tgt, vha, 0xe000,
	    "Using sess for qla_tgt_reset: %p\n", sess);
	if (!sess) {
		res = -ESRCH;
		return res;
	}

	ql_dbg(ql_dbg_tgt, vha, 0xe047,
	    "scsi(%ld): resetting (session %p from port %8phC mcmd %x, "
	    "loop_id %d)\n", vha->host_no, sess, sess->port_name,
	    mcmd, loop_id);

	return qlt_issue_task_mgmt(sess, 0, mcmd, iocb, QLA24XX_MGMT_SEND_NACK);
}

static void qla24xx_chk_fcp_state(struct fc_port *sess)
{
	if (sess->chip_reset != sess->vha->hw->base_qpair->chip_reset) {
		sess->logout_on_delete = 0;
		sess->logo_ack_needed = 0;
		sess->fw_login_state = DSC_LS_PORT_UNAVAIL;
	}
}

void qlt_schedule_sess_for_deletion(struct fc_port *sess)
{
	struct qla_tgt *tgt = sess->tgt;
	unsigned long flags;
	u16 sec;

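	/*
	 * A session already pending deletion is left alone; a session that
	 * is mid rport registration (DSC_UPD_FCPORT) only records
	 * DSC_DELETE_PEND as its next state and is deleted once the
	 * registration finishes.
	 */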
	switch (sess->disc_state) {
	case DSC_DELETE_PEND:
		return;
	case DSC_DELETED:
		if (tgt && tgt->tgt_stop && (tgt->sess_count == 0))
			wake_up_all(&tgt->waitQ);
		if (sess->vha->fcport_count == 0)
			wake_up_all(&sess->vha->fcport_waitQ);

		if (!sess->plogi_link[QLT_PLOGI_LINK_SAME_WWN] &&
		    !sess->plogi_link[QLT_PLOGI_LINK_CONFLICT])
			return;
		break;
	case DSC_UPD_FCPORT:
		/*
		 * This port is not done reporting to the upper layer.
		 * Let it finish.
		 */
		sess->next_disc_state = DSC_DELETE_PEND;
		sec = jiffies_to_msecs(jiffies -
		    sess->jiffies_at_registration) / 1000;
		if (sess->sec_since_registration < sec && sec && !(sec % 5)) {
			sess->sec_since_registration = sec;
			ql_dbg(ql_dbg_disc, sess->vha, 0xffff,
			    "%s %8phC : Slow Rport registration (%d Sec)\n",
			    __func__, sess->port_name, sec);
		}
		return;
	default:
		break;
	}

	spin_lock_irqsave(&sess->vha->work_lock, flags);
	if (sess->deleted == QLA_SESS_DELETION_IN_PROGRESS) {
		spin_unlock_irqrestore(&sess->vha->work_lock, flags);
		return;
	}
	sess->deleted = QLA_SESS_DELETION_IN_PROGRESS;
	spin_unlock_irqrestore(&sess->vha->work_lock, flags);

	qla2x00_set_fcport_disc_state(sess, DSC_DELETE_PEND);

	qla24xx_chk_fcp_state(sess);

	ql_dbg(ql_dbg_tgt, sess->vha, 0xe001,
	    "Scheduling sess %p for deletion %8phC\n",
	    sess, sess->port_name);

	WARN_ON(!queue_work(sess->vha->hw->wq, &sess->del_work));
}

static void qlt_clear_tgt_db(struct qla_tgt *tgt)
{
	struct fc_port *sess;
	scsi_qla_host_t *vha = tgt->vha;

	list_for_each_entry(sess, &vha->vp_fcports, list) {
		if (sess->se_sess)
			qlt_schedule_sess_for_deletion(sess);
	}

	/* At this point tgt could already be dead */
}

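/*
 * Translate a 24-bit port ID (S_ID) into the firmware loop ID by
 * fetching the list of logged-in ports via the GID list mailbox command
 * and scanning it for a matching domain/area/al_pa.
 */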
static int qla24xx_get_loop_id(struct scsi_qla_host *vha, be_id_t s_id,
	uint16_t *loop_id)
{
	struct qla_hw_data *ha = vha->hw;
	dma_addr_t gid_list_dma;
	struct gid_list_info *gid_list, *gid;
	int res, rc, i;
	uint16_t entries;

	gid_list = dma_alloc_coherent(&ha->pdev->dev, qla2x00_gid_list_size(ha),
	    &gid_list_dma, GFP_KERNEL);
	if (!gid_list) {
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf044,
		    "qla_target(%d): DMA Alloc failed of %u\n",
		    vha->vp_idx, qla2x00_gid_list_size(ha));
		return -ENOMEM;
	}

	/* Get list of logged in devices */
	rc = qla24xx_gidlist_wait(vha, gid_list, gid_list_dma, &entries);
	if (rc != QLA_SUCCESS) {
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf045,
		    "qla_target(%d): get_id_list() failed: %x\n",
		    vha->vp_idx, rc);
		res = -EBUSY;
		goto out_free_id_list;
	}

	gid = gid_list;
	res = -ENOENT;
	for (i = 0; i < entries; i++) {
		if (gid->al_pa == s_id.al_pa &&
		    gid->area == s_id.area &&
		    gid->domain == s_id.domain) {
			*loop_id = le16_to_cpu(gid->loop_id);
			res = 0;
			break;
		}
		gid = (void *)gid + ha->gid_list_info_size;
	}

out_free_id_list:
	dma_free_coherent(&ha->pdev->dev, qla2x00_gid_list_size(ha),
	    gid_list, gid_list_dma);
	return res;
}

/*
 * Adds an extra ref to allow dropping the hw lock after adding sess to
 * the list. Caller must put it.
 */
static struct fc_port *qlt_create_sess(
	struct scsi_qla_host *vha,
	fc_port_t *fcport,
	bool local)
{
	struct qla_hw_data *ha = vha->hw;
	struct fc_port *sess = fcport;
	unsigned long flags;

	if (vha->vha_tgt.qla_tgt->tgt_stop)
		return NULL;

	if (fcport->se_sess) {
		if (!kref_get_unless_zero(&sess->sess_kref)) {
			ql_dbg(ql_dbg_disc, vha, 0x20f6,
			    "%s: kref_get_unless_zero failed for %8phC\n",
			    __func__, sess->port_name);
			return NULL;
		}
		return fcport;
	}
	sess->tgt = vha->vha_tgt.qla_tgt;
	sess->local = local;

	/*
	 * Under normal circumstances we want to log out from firmware when
	 * the session eventually ends and release the corresponding nport
	 * handle. In the exceptional cases (e.g. when a new PLOGI is
	 * waiting), the corresponding code will adjust these flags as
	 * necessary.
	 */
	sess->logout_on_delete = 1;
	sess->keep_nport_handle = 0;
	sess->logout_completed = 0;

	if (ha->tgt.tgt_ops->check_initiator_node_acl(vha,
	    &fcport->port_name[0], sess) < 0) {
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf015,
		    "(%d) %8phC check_initiator_node_acl failed\n",
		    vha->vp_idx, fcport->port_name);
		return NULL;
	} else {
		kref_init(&fcport->sess_kref);
		/*
		 * Take an extra reference to ->sess_kref here to handle
		 * fc_port access across ->tgt.sess_lock reacquire.
		 */
		if (!kref_get_unless_zero(&sess->sess_kref)) {
			ql_dbg(ql_dbg_disc, vha, 0x20f7,
			    "%s: kref_get_unless_zero failed for %8phC\n",
			    __func__, sess->port_name);
			return NULL;
		}

		spin_lock_irqsave(&ha->tgt.sess_lock, flags);
		if (!IS_SW_RESV_ADDR(sess->d_id))
			vha->vha_tgt.qla_tgt->sess_count++;

		qlt_do_generation_tick(vha, &sess->generation);
		spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
	}

	ql_dbg(ql_dbg_tgt_mgt, vha, 0xf006,
	    "Adding sess %p se_sess %p to tgt %p sess_count %d\n",
	    sess, sess->se_sess, vha->vha_tgt.qla_tgt,
	    vha->vha_tgt.qla_tgt->sess_count);

	ql_dbg(ql_dbg_tgt_mgt, vha, 0xf04b,
	    "qla_target(%d): %ssession for wwn %8phC (loop_id %d, "
	    "s_id %x:%x:%x, confirmed completion %ssupported) added\n",
	    vha->vp_idx, local ? "local " : "", fcport->port_name,
	    fcport->loop_id, sess->d_id.b.domain, sess->d_id.b.area,
	    sess->d_id.b.al_pa, sess->conf_compl_supported ? "" : "not ");

	return sess;
}

/*
 * max_gen - specifies the maximum session generation
 * at which this deletion request is still valid
 */
void
qlt_fc_port_deleted(struct scsi_qla_host *vha, fc_port_t *fcport, int max_gen)
{
	struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
	struct fc_port *sess = fcport;
	unsigned long flags;

	if (!vha->hw->tgt.tgt_ops)
		return;

	if (!tgt)
		return;

	spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags);
	if (tgt->tgt_stop) {
		spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
		return;
	}
	if (!sess->se_sess) {
		spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
		return;
	}

	if (max_gen - sess->generation < 0) {
		spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf092,
		    "Ignoring stale deletion request for se_sess %p / sess %p"
		    " for port %8phC, req_gen %d, sess_gen %d\n",
		    sess->se_sess, sess, sess->port_name, max_gen,
		    sess->generation);
		return;
	}

	ql_dbg(ql_dbg_tgt_mgt, vha, 0xf008, "qla_tgt_fc_port_deleted %p", sess);

	sess->local = 1;
	spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
	qlt_schedule_sess_for_deletion(sess);
}

static inline int test_tgt_sess_count(struct qla_tgt *tgt)
{
	struct qla_hw_data *ha = tgt->ha;
	unsigned long flags;
	int res;
	/*
	 * We need to protect against a race where tgt is freed before or
	 * inside wake_up()
	 */
	spin_lock_irqsave(&ha->tgt.sess_lock, flags);
	ql_dbg(ql_dbg_tgt, tgt->vha, 0xe002,
	    "tgt %p, sess_count=%d\n",
	    tgt, tgt->sess_count);
	res = (tgt->sess_count == 0);
	spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);

	return res;
}

/* Called by tcm_qla2xxx configfs code */
int qlt_stop_phase1(struct qla_tgt *tgt)
{
	struct scsi_qla_host *vha = tgt->vha;
	struct qla_hw_data *ha = tgt->ha;
	unsigned long flags;

	mutex_lock(&ha->optrom_mutex);
	mutex_lock(&qla_tgt_mutex);

	if (tgt->tgt_stop || tgt->tgt_stopped) {
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf04e,
		    "Already in tgt->tgt_stop or tgt_stopped state\n");
		mutex_unlock(&qla_tgt_mutex);
		mutex_unlock(&ha->optrom_mutex);
		return -EPERM;
	}

	ql_dbg(ql_dbg_tgt_mgt, vha, 0xe003, "Stopping target for host %ld(%p)\n",
	    vha->host_no, vha);
	/*
	 * Mutex needed to sync with qla_tgt_fc_port_[added,deleted].
	 * Lock is needed, because we can still get an incoming packet.
	 */
	mutex_lock(&vha->vha_tgt.tgt_mutex);
	tgt->tgt_stop = 1;
	qlt_clear_tgt_db(tgt);
	mutex_unlock(&vha->vha_tgt.tgt_mutex);
	mutex_unlock(&qla_tgt_mutex);

	ql_dbg(ql_dbg_tgt_mgt, vha, 0xf009,
	    "Waiting for sess works (tgt %p)", tgt);
	spin_lock_irqsave(&tgt->sess_work_lock, flags);
	while (!list_empty(&tgt->sess_works_list)) {
		spin_unlock_irqrestore(&tgt->sess_work_lock, flags);
		flush_scheduled_work();
		spin_lock_irqsave(&tgt->sess_work_lock, flags);
	}
	spin_unlock_irqrestore(&tgt->sess_work_lock, flags);

	ql_dbg(ql_dbg_tgt_mgt, vha, 0xf00a,
	    "Waiting for tgt %p: sess_count=%d\n", tgt, tgt->sess_count);

	wait_event_timeout(tgt->waitQ, test_tgt_sess_count(tgt), 10*HZ);

	/* Big hammer */
	if (!ha->flags.host_shutting_down &&
	    (qla_tgt_mode_enabled(vha) || qla_dual_mode_enabled(vha)))
		qlt_disable_vha(vha);

	/* Wait for sessions to clear out (just in case) */
	wait_event_timeout(tgt->waitQ, test_tgt_sess_count(tgt), 10*HZ);
	mutex_unlock(&ha->optrom_mutex);

	return 0;
}
EXPORT_SYMBOL(qlt_stop_phase1);

/* Called by tcm_qla2xxx configfs code */
void qlt_stop_phase2(struct qla_tgt *tgt)
{
	scsi_qla_host_t *vha = tgt->vha;

	if (tgt->tgt_stopped) {
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf04f,
		    "Already in tgt->tgt_stopped state\n");
		dump_stack();
		return;
	}
	if (!tgt->tgt_stop) {
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf00b,
		    "%s: phase1 stop is not completed\n", __func__);
		dump_stack();
		return;
	}

	mutex_lock(&vha->vha_tgt.tgt_mutex);
	tgt->tgt_stop = 0;
	tgt->tgt_stopped = 1;
	mutex_unlock(&vha->vha_tgt.tgt_mutex);

	ql_dbg(ql_dbg_tgt_mgt, vha, 0xf00c, "Stop of tgt %p finished\n",
	    tgt);

	switch (vha->qlini_mode) {
	case QLA2XXX_INI_MODE_EXCLUSIVE:
		vha->flags.online = 1;
		set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
		break;
	default:
		break;
	}
}
EXPORT_SYMBOL(qlt_stop_phase2);

/* Called from qlt_remove_target() -> qla2x00_remove_one() */
static void qlt_release(struct qla_tgt *tgt)
{
	scsi_qla_host_t *vha = tgt->vha;
	void *node;
	u64 key = 0;
	u16 i;
	struct qla_qpair_hint *h;
	struct qla_hw_data *ha = vha->hw;

	if (!tgt->tgt_stop && !tgt->tgt_stopped)
		qlt_stop_phase1(tgt);

	if (!tgt->tgt_stopped)
		qlt_stop_phase2(tgt);

	for (i = 0; i < vha->hw->max_qpairs + 1; i++) {
		unsigned long flags;

		h = &tgt->qphints[i];
		if (h->qpair) {
			spin_lock_irqsave(h->qpair->qp_lock_ptr, flags);
			list_del(&h->hint_elem);
			spin_unlock_irqrestore(h->qpair->qp_lock_ptr, flags);
			h->qpair = NULL;
		}
	}
	kfree(tgt->qphints);
	mutex_lock(&qla_tgt_mutex);
	list_del(&vha->vha_tgt.qla_tgt->tgt_list_entry);
	mutex_unlock(&qla_tgt_mutex);

	btree_for_each_safe64(&tgt->lun_qpair_map, key, node)
		btree_remove64(&tgt->lun_qpair_map, key);

	btree_destroy64(&tgt->lun_qpair_map);

	if (vha->vp_idx)
		if (ha->tgt.tgt_ops &&
		    ha->tgt.tgt_ops->remove_target &&
		    vha->vha_tgt.target_lport_ptr)
			ha->tgt.tgt_ops->remove_target(vha);

	vha->vha_tgt.qla_tgt = NULL;

	ql_dbg(ql_dbg_tgt_mgt, vha, 0xf00d,
	    "Release of tgt %p finished\n", tgt);

	kfree(tgt);
}

/* ha->hardware_lock is supposed to be held on entry */
static int qlt_sched_sess_work(struct qla_tgt *tgt, int type,
	const void *param, unsigned int param_size)
{
	struct qla_tgt_sess_work_param *prm;
	unsigned long flags;

	prm = kzalloc(sizeof(*prm), GFP_ATOMIC);
	if (!prm) {
		ql_dbg(ql_dbg_tgt_mgt, tgt->vha, 0xf050,
		    "qla_target(%d): Unable to create session "
		    "work, command will be refused", 0);
		return -ENOMEM;
	}

	ql_dbg(ql_dbg_tgt_mgt, tgt->vha, 0xf00e,
	    "Scheduling work (type %d, prm %p)"
	    " to find session for param %p (size %d, tgt %p)\n",
	    type, prm, param, param_size, tgt);

	prm->type = type;
	memcpy(&prm->tm_iocb, param, param_size);

	spin_lock_irqsave(&tgt->sess_work_lock, flags);
	list_add_tail(&prm->sess_works_list_entry, &tgt->sess_works_list);
	spin_unlock_irqrestore(&tgt->sess_work_lock, flags);

	schedule_work(&tgt->sess_work);

	return 0;
}

/*
 * ha->hardware_lock is supposed to be held on entry. Might drop it,
 * then reacquire.
 */
static void qlt_send_notify_ack(struct qla_qpair *qpair,
	struct imm_ntfy_from_isp *ntfy,
	uint32_t add_flags, uint16_t resp_code, int resp_code_valid,
	uint16_t srr_flags, uint16_t srr_reject_code, uint8_t srr_explan)
{
	struct scsi_qla_host *vha = qpair->vha;
	struct qla_hw_data *ha = vha->hw;
	request_t *pkt;
	struct nack_to_isp *nack;

	if (!ha->flags.fw_started)
		return;

	ql_dbg(ql_dbg_tgt, vha, 0xe004, "Sending NOTIFY_ACK (ha=%p)\n", ha);

	pkt = (request_t *)__qla2x00_alloc_iocbs(qpair, NULL);
	if (!pkt) {
		ql_dbg(ql_dbg_tgt, vha, 0xe049,
		    "qla_target(%d): %s failed: unable to allocate "
		    "request packet\n", vha->vp_idx, __func__);
		return;
	}

	if (vha->vha_tgt.qla_tgt != NULL)
		vha->vha_tgt.qla_tgt->notify_ack_expected++;

	pkt->entry_type = NOTIFY_ACK_TYPE;
	pkt->entry_count = 1;

	nack = (struct nack_to_isp *)pkt;
	nack->ox_id = ntfy->ox_id;

	nack->u.isp24.handle = QLA_TGT_SKIP_HANDLE;
	nack->u.isp24.nport_handle = ntfy->u.isp24.nport_handle;
	if (le16_to_cpu(ntfy->u.isp24.status) == IMM_NTFY_ELS) {
		nack->u.isp24.flags = ntfy->u.isp24.flags &
			cpu_to_le32(NOTIFY24XX_FLAGS_PUREX_IOCB);
	}
	nack->u.isp24.srr_rx_id = ntfy->u.isp24.srr_rx_id;
	nack->u.isp24.status = ntfy->u.isp24.status;
	nack->u.isp24.status_subcode = ntfy->u.isp24.status_subcode;
	nack->u.isp24.fw_handle = ntfy->u.isp24.fw_handle;
	nack->u.isp24.exchange_address = ntfy->u.isp24.exchange_address;
	nack->u.isp24.srr_rel_offs = ntfy->u.isp24.srr_rel_offs;
	nack->u.isp24.srr_ui = ntfy->u.isp24.srr_ui;
	nack->u.isp24.srr_flags = cpu_to_le16(srr_flags);
	nack->u.isp24.srr_reject_code = srr_reject_code;
	nack->u.isp24.srr_reject_code_expl = srr_explan;
	nack->u.isp24.vp_index = ntfy->u.isp24.vp_index;

	ql_dbg(ql_dbg_tgt, vha, 0xe005,
	    "qla_target(%d): Sending 24xx Notify Ack %d\n",
	    vha->vp_idx, nack->u.isp24.status);

	/* Memory Barrier */
	wmb();
	qla2x00_start_iocbs(vha, qpair->req);
}

static int qlt_build_abts_resp_iocb(struct qla_tgt_mgmt_cmd *mcmd)
{
	struct scsi_qla_host *vha = mcmd->vha;
	struct qla_hw_data *ha = vha->hw;
	struct abts_resp_to_24xx *resp;
	uint32_t f_ctl, h;
	uint8_t *p;
	int rc;
	struct abts_recv_from_24xx *abts = &mcmd->orig_iocb.abts;
	struct qla_qpair *qpair = mcmd->qpair;

	ql_dbg(ql_dbg_tgt, vha, 0xe006,
	    "Sending task mgmt ABTS response (ha=%p, status=%x)\n",
	    ha, mcmd->fc_tm_rsp);

	rc = qlt_check_reserve_free_req(qpair, 1);
	if (rc) {
		ql_dbg(ql_dbg_tgt, vha, 0xe04a,
		    "qla_target(%d): %s failed: unable to allocate request packet\n",
		    vha->vp_idx, __func__);
		return -EAGAIN;
	}

	resp = (struct abts_resp_to_24xx *)qpair->req->ring_ptr;
	memset(resp, 0, sizeof(*resp));

	h = qlt_make_handle(qpair);
	if (unlikely(h == QLA_TGT_NULL_HANDLE)) {
		/*
		 * CTIO type 7 from the firmware doesn't provide a way to
		 * know the initiator's LOOP ID, hence we can't find
		 * the session and, so, the command.
		 */
		return -EAGAIN;
	} else {
		qpair->req->outstanding_cmds[h] = (srb_t *)mcmd;
	}

	resp->handle = MAKE_HANDLE(qpair->req->id, h);
	resp->entry_type = ABTS_RESP_24XX;
	resp->entry_count = 1;
	resp->nport_handle = abts->nport_handle;
	resp->vp_index = vha->vp_idx;
	resp->sof_type = abts->sof_type;
	resp->exchange_address = abts->exchange_address;
	resp->fcp_hdr_le = abts->fcp_hdr_le;
	f_ctl = cpu_to_le32(F_CTL_EXCH_CONTEXT_RESP |
	    F_CTL_LAST_SEQ | F_CTL_END_SEQ |
	    F_CTL_SEQ_INITIATIVE);
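	/* The FC header f_ctl field is only 3 bytes wide; copy it byte by byte. */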
1764 p = (uint8_t *)&f_ctl;
1765 resp->fcp_hdr_le.f_ctl[0] = *p++;
1766 resp->fcp_hdr_le.f_ctl[1] = *p++;
1767 resp->fcp_hdr_le.f_ctl[2] = *p;
1768
1769 resp->fcp_hdr_le.d_id = abts->fcp_hdr_le.s_id;
1770 resp->fcp_hdr_le.s_id = abts->fcp_hdr_le.d_id;
1771
1772 resp->exchange_addr_to_abort = abts->exchange_addr_to_abort;
1773 if (mcmd->fc_tm_rsp == FCP_TMF_CMPL) {
1774 resp->fcp_hdr_le.r_ctl = R_CTL_BASIC_LINK_SERV | R_CTL_B_ACC;
1775 resp->payload.ba_acct.seq_id_valid = SEQ_ID_INVALID;
1776 resp->payload.ba_acct.low_seq_cnt = 0x0000;
1777 resp->payload.ba_acct.high_seq_cnt = 0xFFFF;
1778 resp->payload.ba_acct.ox_id = abts->fcp_hdr_le.ox_id;
1779 resp->payload.ba_acct.rx_id = abts->fcp_hdr_le.rx_id;
1780 } else {
1781 resp->fcp_hdr_le.r_ctl = R_CTL_BASIC_LINK_SERV | R_CTL_B_RJT;
1782 resp->payload.ba_rjt.reason_code =
1783 BA_RJT_REASON_CODE_UNABLE_TO_PERFORM;
1784 /* Other bytes are zero */
1785 }
1786
1787 vha->vha_tgt.qla_tgt->abts_resp_expected++;
1788
1789 /* Memory Barrier */
1790 wmb();
1791 if (qpair->reqq_start_iocbs)
1792 qpair->reqq_start_iocbs(qpair);
1793 else
1794 qla2x00_start_iocbs(vha, qpair->req);
1795
1796 return rc;
1797 }
1798
1799 /*
1800 * ha->hardware_lock supposed to be held on entry. Might drop it, then reaquire
1801 */
1802 static void qlt_24xx_send_abts_resp(struct qla_qpair *qpair,
1803 struct abts_recv_from_24xx *abts, uint32_t status,
1804 bool ids_reversed)
1805 {
1806 struct scsi_qla_host *vha = qpair->vha;
1807 struct qla_hw_data *ha = vha->hw;
1808 struct abts_resp_to_24xx *resp;
1809 uint32_t f_ctl;
1810 uint8_t *p;
1811
1812 ql_dbg(ql_dbg_tgt, vha, 0xe006,
1813 "Sending task mgmt ABTS response (ha=%p, atio=%p, status=%x\n",
1814 ha, abts, status);
1815
1816 resp = (struct abts_resp_to_24xx *)qla2x00_alloc_iocbs_ready(qpair,
1817 NULL);
1818 if (!resp) {
1819 ql_dbg(ql_dbg_tgt, vha, 0xe04a,
1820 "qla_target(%d): %s failed: unable to allocate "
1821 "request packet", vha->vp_idx, __func__);
1822 return;
1823 }
1824
1825 resp->entry_type = ABTS_RESP_24XX;
1826 resp->handle = QLA_TGT_SKIP_HANDLE;
1827 resp->entry_count = 1;
1828 resp->nport_handle = abts->nport_handle;
1829 resp->vp_index = vha->vp_idx;
1830 resp->sof_type = abts->sof_type;
1831 resp->exchange_address = abts->exchange_address;
1832 resp->fcp_hdr_le = abts->fcp_hdr_le;
1833 f_ctl = cpu_to_le32(F_CTL_EXCH_CONTEXT_RESP |
1834 F_CTL_LAST_SEQ | F_CTL_END_SEQ |
1835 F_CTL_SEQ_INITIATIVE);
1836 p = (uint8_t *)&f_ctl;
1837 resp->fcp_hdr_le.f_ctl[0] = *p++;
1838 resp->fcp_hdr_le.f_ctl[1] = *p++;
1839 resp->fcp_hdr_le.f_ctl[2] = *p;
1840 if (ids_reversed) {
1841 resp->fcp_hdr_le.d_id = abts->fcp_hdr_le.d_id;
1842 resp->fcp_hdr_le.s_id = abts->fcp_hdr_le.s_id;
1843 } else {
1844 resp->fcp_hdr_le.d_id = abts->fcp_hdr_le.s_id;
1845 resp->fcp_hdr_le.s_id = abts->fcp_hdr_le.d_id;
1846 }
1847 resp->exchange_addr_to_abort = abts->exchange_addr_to_abort;
1848 if (status == FCP_TMF_CMPL) {
1849 resp->fcp_hdr_le.r_ctl = R_CTL_BASIC_LINK_SERV | R_CTL_B_ACC;
1850 resp->payload.ba_acct.seq_id_valid = SEQ_ID_INVALID;
1851 resp->payload.ba_acct.low_seq_cnt = 0x0000;
1852 resp->payload.ba_acct.high_seq_cnt = 0xFFFF;
1853 resp->payload.ba_acct.ox_id = abts->fcp_hdr_le.ox_id;
1854 resp->payload.ba_acct.rx_id = abts->fcp_hdr_le.rx_id;
1855 } else {
1856 resp->fcp_hdr_le.r_ctl = R_CTL_BASIC_LINK_SERV | R_CTL_B_RJT;
1857 resp->payload.ba_rjt.reason_code =
1858 BA_RJT_REASON_CODE_UNABLE_TO_PERFORM;
1859 /* Other bytes are zero */
1860 }
1861
1862 vha->vha_tgt.qla_tgt->abts_resp_expected++;
1863
1864 /* Memory Barrier */
1865 wmb();
1866 if (qpair->reqq_start_iocbs)
1867 qpair->reqq_start_iocbs(qpair);
1868 else
1869 qla2x00_start_iocbs(vha, qpair->req);
1870 }
1871
1872 /*
1873 * ha->hardware_lock is supposed to be held on entry. Might drop it, then reacquire
1874 */
1875 static void qlt_24xx_retry_term_exchange(struct scsi_qla_host *vha,
1876 struct qla_qpair *qpair, response_t *pkt, struct qla_tgt_mgmt_cmd *mcmd)
1877 {
1878 struct ctio7_to_24xx *ctio;
1879 u16 tmp;
1880 struct abts_recv_from_24xx *entry;
1881
1882 ctio = (struct ctio7_to_24xx *)qla2x00_alloc_iocbs_ready(qpair, NULL);
1883 if (ctio == NULL) {
1884 ql_dbg(ql_dbg_tgt, vha, 0xe04b,
1885 "qla_target(%d): %s failed: unable to allocate "
1886 "request packet\n", vha->vp_idx, __func__);
1887 return;
1888 }
1889
1890 if (mcmd)
1891 /* abts from remote port */
1892 entry = &mcmd->orig_iocb.abts;
1893 else
1894 /* abts from this driver. */
1895 entry = (struct abts_recv_from_24xx *)pkt;
1896
1897 /*
1898 * On entry we have the firmware's response to the ABTS response that
1899 * we generated earlier, so its ID fields are reversed.
1900 */
1901
1902 ctio->entry_type = CTIO_TYPE7;
1903 ctio->entry_count = 1;
1904 ctio->nport_handle = entry->nport_handle;
1905 ctio->handle = QLA_TGT_SKIP_HANDLE | CTIO_COMPLETION_HANDLE_MARK;
1906 ctio->timeout = cpu_to_le16(QLA_TGT_TIMEOUT);
1907 ctio->vp_index = vha->vp_idx;
1908 ctio->exchange_addr = entry->exchange_addr_to_abort;
1909 tmp = (CTIO7_FLAGS_STATUS_MODE_1 | CTIO7_FLAGS_TERMINATE);
1910
1911 if (mcmd) {
1912 ctio->initiator_id = entry->fcp_hdr_le.s_id;
1913
1914 if (mcmd->flags & QLA24XX_MGMT_ABORT_IO_ATTR_VALID)
1915 tmp |= (mcmd->abort_io_attr << 9);
1916 else if (qpair->retry_term_cnt & 1)
1917 tmp |= (0x4 << 9);
1918 } else {
1919 ctio->initiator_id = entry->fcp_hdr_le.d_id;
1920
1921 if (qpair->retry_term_cnt & 1)
1922 tmp |= (0x4 << 9);
1923 }
1924 ctio->u.status1.flags = cpu_to_le16(tmp);
1925 ctio->u.status1.ox_id = entry->fcp_hdr_le.ox_id;
1926
1927 ql_dbg(ql_dbg_tgt, vha, 0xe007,
1928 "Sending retry TERM EXCH CTIO7 flags %04xh oxid %04xh attr valid %x\n",
1929 le16_to_cpu(ctio->u.status1.flags),
1930 le16_to_cpu(ctio->u.status1.ox_id),
1931 (mcmd && mcmd->flags & QLA24XX_MGMT_ABORT_IO_ATTR_VALID) ? 1 : 0);
1932
1933 /* Memory Barrier */
1934 wmb();
1935 if (qpair->reqq_start_iocbs)
1936 qpair->reqq_start_iocbs(qpair);
1937 else
1938 qla2x00_start_iocbs(vha, qpair->req);
1939
1940 if (mcmd)
1941 qlt_build_abts_resp_iocb(mcmd);
1942 else
1943 qlt_24xx_send_abts_resp(qpair,
1944 (struct abts_recv_from_24xx *)entry, FCP_TMF_CMPL, true);
1945
1946 }
1947
1948 /* drop cmds for the given lun
1949 * XXX only looks for cmds on the port through which the lun reset was received
1950 * XXX does not go through the lists of other ports (which may have cmds
1951 * for the same lun)
1952 */
1953 static void abort_cmds_for_lun(struct scsi_qla_host *vha, u64 lun, be_id_t s_id)
1954 {
1955 struct qla_tgt_sess_op *op;
1956 struct qla_tgt_cmd *cmd;
1957 uint32_t key;
1958 unsigned long flags;
1959
1960 key = sid_to_key(s_id);
1961 spin_lock_irqsave(&vha->cmd_list_lock, flags);
1962 list_for_each_entry(op, &vha->qla_sess_op_cmd_list, cmd_list) {
1963 uint32_t op_key;
1964 u64 op_lun;
1965
1966 op_key = sid_to_key(op->atio.u.isp24.fcp_hdr.s_id);
1967 op_lun = scsilun_to_int(
1968 (struct scsi_lun *)&op->atio.u.isp24.fcp_cmnd.lun);
1969 if (op_key == key && op_lun == lun)
1970 op->aborted = true;
1971 }
1972
1973 list_for_each_entry(op, &vha->unknown_atio_list, cmd_list) {
1974 uint32_t op_key;
1975 u64 op_lun;
1976
1977 op_key = sid_to_key(op->atio.u.isp24.fcp_hdr.s_id);
1978 op_lun = scsilun_to_int(
1979 (struct scsi_lun *)&op->atio.u.isp24.fcp_cmnd.lun);
1980 if (op_key == key && op_lun == lun)
1981 op->aborted = true;
1982 }
1983
1984 list_for_each_entry(cmd, &vha->qla_cmd_list, cmd_list) {
1985 uint32_t cmd_key;
1986 u64 cmd_lun;
1987
1988 cmd_key = sid_to_key(cmd->atio.u.isp24.fcp_hdr.s_id);
1989 cmd_lun = scsilun_to_int(
1990 (struct scsi_lun *)&cmd->atio.u.isp24.fcp_cmnd.lun);
1991 if (cmd_key == key && cmd_lun == lun)
1992 cmd->aborted = 1;
1993 }
1994 spin_unlock_irqrestore(&vha->cmd_list_lock, flags);
1995 }
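/*
 * A minimal sketch of the matching above, assuming sid_to_key() packs
 * the 24-bit FC port ID as (domain << 16) | (area << 8) | al_pa:
 *
 *	be_id_t s_id = { .domain = 0x01, .area = 0x02, .al_pa = 0x03 };
 *	uint32_t key = sid_to_key(s_id);	=> 0x010203 under that assumption
 *
 * Matching ops and cmds are only flagged as aborted here; the actual
 * teardown happens later on each command's normal processing path.
 */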
1996
1997 static struct qla_qpair_hint *qlt_find_qphint(struct scsi_qla_host *vha,
1998 uint64_t unpacked_lun)
1999 {
2000 struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
2001 struct qla_qpair_hint *h = NULL;
2002
2003 if (vha->flags.qpairs_available) {
2004 h = btree_lookup64(&tgt->lun_qpair_map, unpacked_lun);
2005 if (!h)
2006 h = &tgt->qphints[0];
2007 } else {
2008 h = &tgt->qphints[0];
2009 }
2010
2011 return h;
2012 }
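/*
 * Usage sketch (hypothetical caller, for illustration): task-management
 * setup can pick the qpair that normally services a LUN so the work and
 * its response stay on one queue:
 *
 *	struct qla_qpair_hint *h = qlt_find_qphint(vha, mcmd->unpacked_lun);
 *	mcmd->qpair = h->qpair;
 *	mcmd->se_cmd.cpuid = h->cpuid;
 *
 * If lun_qpair_map has no entry for the LUN, or qpairs are unavailable,
 * qphints[0] (the base qpair) is the fallback.
 */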
2013
2014 static void qlt_do_tmr_work(struct work_struct *work)
2015 {
2016 struct qla_tgt_mgmt_cmd *mcmd =
2017 container_of(work, struct qla_tgt_mgmt_cmd, work);
2018 struct qla_hw_data *ha = mcmd->vha->hw;
2019 int rc = EIO;
2020 uint32_t tag;
2021 unsigned long flags;
2022
2023 switch (mcmd->tmr_func) {
2024 case QLA_TGT_ABTS:
2025 tag = mcmd->orig_iocb.abts.exchange_addr_to_abort;
2026 break;
2027 default:
2028 tag = 0;
2029 break;
2030 }
2031
2032 rc = ha->tgt.tgt_ops->handle_tmr(mcmd, mcmd->unpacked_lun,
2033 mcmd->tmr_func, tag);
2034
2035 if (rc != 0) {
2036 spin_lock_irqsave(mcmd->qpair->qp_lock_ptr, flags);
2037 switch (mcmd->tmr_func) {
2038 case QLA_TGT_ABTS:
2039 mcmd->fc_tm_rsp = FCP_TMF_REJECTED;
2040 qlt_build_abts_resp_iocb(mcmd);
2041 break;
2042 case QLA_TGT_LUN_RESET:
2043 case QLA_TGT_CLEAR_TS:
2044 case QLA_TGT_ABORT_TS:
2045 case QLA_TGT_CLEAR_ACA:
2046 case QLA_TGT_TARGET_RESET:
2047 qlt_send_busy(mcmd->qpair, &mcmd->orig_iocb.atio,
2048 qla_sam_status);
2049 break;
2050
2051 case QLA_TGT_ABORT_ALL:
2052 case QLA_TGT_NEXUS_LOSS_SESS:
2053 case QLA_TGT_NEXUS_LOSS:
2054 qlt_send_notify_ack(mcmd->qpair,
2055 &mcmd->orig_iocb.imm_ntfy, 0, 0, 0, 0, 0, 0);
2056 break;
2057 }
2058 spin_unlock_irqrestore(mcmd->qpair->qp_lock_ptr, flags);
2059
2060 ql_dbg(ql_dbg_tgt_mgt, mcmd->vha, 0xf052,
2061 "qla_target(%d): tgt_ops->handle_tmr() failed: %d\n",
2062 mcmd->vha->vp_idx, rc);
2063 mempool_free(mcmd, qla_tgt_mgmt_cmd_mempool);
2064 }
2065 }
2066
2067 /* ha->hardware_lock is supposed to be held on entry */
2068 static int __qlt_24xx_handle_abts(struct scsi_qla_host *vha,
2069 struct abts_recv_from_24xx *abts, struct fc_port *sess)
2070 {
2071 struct qla_hw_data *ha = vha->hw;
2072 struct qla_tgt_mgmt_cmd *mcmd;
2073 struct qla_qpair_hint *h = &vha->vha_tgt.qla_tgt->qphints[0];
2074
2075 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf00f,
2076 "qla_target(%d): task abort (tag=%d)\n",
2077 vha->vp_idx, abts->exchange_addr_to_abort);
2078
2079 mcmd = mempool_alloc(qla_tgt_mgmt_cmd_mempool, GFP_ATOMIC);
2080 if (mcmd == NULL) {
2081 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf051,
2082 "qla_target(%d): %s: Allocation of ABORT cmd failed",
2083 vha->vp_idx, __func__);
2084 return -ENOMEM;
2085 }
2086 memset(mcmd, 0, sizeof(*mcmd));
2087 mcmd->cmd_type = TYPE_TGT_TMCMD;
2088 mcmd->sess = sess;
2089 memcpy(&mcmd->orig_iocb.abts, abts, sizeof(mcmd->orig_iocb.abts));
2090 mcmd->reset_count = ha->base_qpair->chip_reset;
2091 mcmd->tmr_func = QLA_TGT_ABTS;
2092 mcmd->qpair = h->qpair;
2093 mcmd->vha = vha;
2094
2095 /*
2096 * LUN is looked up by target-core internally based on the passed
2097 * abts->exchange_addr_to_abort tag.
2098 */
2099 mcmd->se_cmd.cpuid = h->cpuid;
2100
2101 if (ha->tgt.tgt_ops->find_cmd_by_tag) {
2102 struct qla_tgt_cmd *abort_cmd;
2103
2104 abort_cmd = ha->tgt.tgt_ops->find_cmd_by_tag(sess,
2105 abts->exchange_addr_to_abort);
2106 if (abort_cmd && abort_cmd->qpair) {
2107 mcmd->qpair = abort_cmd->qpair;
2108 mcmd->se_cmd.cpuid = abort_cmd->se_cmd.cpuid;
2109 mcmd->abort_io_attr = abort_cmd->atio.u.isp24.attr;
2110 mcmd->flags = QLA24XX_MGMT_ABORT_IO_ATTR_VALID;
2111 }
2112 }
2113
2114 INIT_WORK(&mcmd->work, qlt_do_tmr_work);
2115 queue_work_on(mcmd->se_cmd.cpuid, qla_tgt_wq, &mcmd->work);
2116
2117 return 0;
2118 }
2119
2120 /*
2121 * ha->hardware_lock is supposed to be held on entry. Might drop it, then reacquire
2122 */
2123 static void qlt_24xx_handle_abts(struct scsi_qla_host *vha,
2124 struct abts_recv_from_24xx *abts)
2125 {
2126 struct qla_hw_data *ha = vha->hw;
2127 struct fc_port *sess;
2128 uint32_t tag = abts->exchange_addr_to_abort;
2129 be_id_t s_id;
2130 int rc;
2131 unsigned long flags;
2132
2133 if (le32_to_cpu(abts->fcp_hdr_le.parameter) & ABTS_PARAM_ABORT_SEQ) {
2134 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf053,
2135 "qla_target(%d): ABTS: Abort Sequence not "
2136 "supported\n", vha->vp_idx);
2137 qlt_24xx_send_abts_resp(ha->base_qpair, abts, FCP_TMF_REJECTED,
2138 false);
2139 return;
2140 }
2141
2142 if (tag == ATIO_EXCHANGE_ADDRESS_UNKNOWN) {
2143 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf010,
2144 "qla_target(%d): ABTS: Unknown Exchange "
2145 "Address received\n", vha->vp_idx);
2146 qlt_24xx_send_abts_resp(ha->base_qpair, abts, FCP_TMF_REJECTED,
2147 false);
2148 return;
2149 }
2150
2151 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf011,
2152 "qla_target(%d): task abort (s_id=%x:%x:%x, "
2153 "tag=%d, param=%x)\n", vha->vp_idx, abts->fcp_hdr_le.s_id.domain,
2154 abts->fcp_hdr_le.s_id.area, abts->fcp_hdr_le.s_id.al_pa, tag,
2155 le32_to_cpu(abts->fcp_hdr_le.parameter));
2156
2157 s_id = le_id_to_be(abts->fcp_hdr_le.s_id);
2158
2159 spin_lock_irqsave(&ha->tgt.sess_lock, flags);
2160 sess = ha->tgt.tgt_ops->find_sess_by_s_id(vha, s_id);
2161 if (!sess) {
2162 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf012,
2163 "qla_target(%d): task abort for non-existent session\n",
2164 vha->vp_idx);
2165 spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
2166
2167 qlt_24xx_send_abts_resp(ha->base_qpair, abts, FCP_TMF_REJECTED,
2168 false);
2169 return;
2170 }
2171 spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
2172
2173
2174 if (sess->deleted) {
2175 qlt_24xx_send_abts_resp(ha->base_qpair, abts, FCP_TMF_REJECTED,
2176 false);
2177 return;
2178 }
2179
2180 rc = __qlt_24xx_handle_abts(vha, abts, sess);
2181 if (rc != 0) {
2182 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf054,
2183 "qla_target(%d): __qlt_24xx_handle_abts() failed: %d\n",
2184 vha->vp_idx, rc);
2185 qlt_24xx_send_abts_resp(ha->base_qpair, abts, FCP_TMF_REJECTED,
2186 false);
2187 return;
2188 }
2189 }
2190
2191 /*
2192 * ha->hardware_lock is supposed to be held on entry. Might drop it, then reacquire
2193 */
2194 static void qlt_24xx_send_task_mgmt_ctio(struct qla_qpair *qpair,
2195 struct qla_tgt_mgmt_cmd *mcmd, uint32_t resp_code)
2196 {
2197 struct scsi_qla_host *ha = mcmd->vha;
2198 struct atio_from_isp *atio = &mcmd->orig_iocb.atio;
2199 struct ctio7_to_24xx *ctio;
2200 uint16_t temp;
2201
2202 ql_dbg(ql_dbg_tgt, ha, 0xe008,
2203 "Sending task mgmt CTIO7 (ha=%p, atio=%p, resp_code=%x\n",
2204 ha, atio, resp_code);
2205
2206
2207 ctio = (struct ctio7_to_24xx *)__qla2x00_alloc_iocbs(qpair, NULL);
2208 if (ctio == NULL) {
2209 ql_dbg(ql_dbg_tgt, ha, 0xe04c,
2210 "qla_target(%d): %s failed: unable to allocate "
2211 "request packet\n", ha->vp_idx, __func__);
2212 return;
2213 }
2214
2215 ctio->entry_type = CTIO_TYPE7;
2216 ctio->entry_count = 1;
2217 ctio->handle = QLA_TGT_SKIP_HANDLE | CTIO_COMPLETION_HANDLE_MARK;
2218 ctio->nport_handle = mcmd->sess->loop_id;
2219 ctio->timeout = cpu_to_le16(QLA_TGT_TIMEOUT);
2220 ctio->vp_index = ha->vp_idx;
2221 ctio->initiator_id = be_id_to_le(atio->u.isp24.fcp_hdr.s_id);
2222 ctio->exchange_addr = atio->u.isp24.exchange_addr;
2223 temp = (atio->u.isp24.attr << 9)|
2224 CTIO7_FLAGS_STATUS_MODE_1 | CTIO7_FLAGS_SEND_STATUS;
2225 ctio->u.status1.flags = cpu_to_le16(temp);
2226 temp = be16_to_cpu(atio->u.isp24.fcp_hdr.ox_id);
2227 ctio->u.status1.ox_id = cpu_to_le16(temp);
2228 ctio->u.status1.scsi_status =
2229 cpu_to_le16(SS_RESPONSE_INFO_LEN_VALID);
2230 ctio->u.status1.response_len = cpu_to_le16(8);
2231 ctio->u.status1.sense_data[0] = resp_code;
2232
2233 /* Memory Barrier */
2234 wmb();
2235 if (qpair->reqq_start_iocbs)
2236 qpair->reqq_start_iocbs(qpair);
2237 else
2238 qla2x00_start_iocbs(ha, qpair->req);
2239 }
2240
2241 void qlt_free_mcmd(struct qla_tgt_mgmt_cmd *mcmd)
2242 {
2243 mempool_free(mcmd, qla_tgt_mgmt_cmd_mempool);
2244 }
2245 EXPORT_SYMBOL(qlt_free_mcmd);
2246
2247 /*
2248 * ha->hardware_lock is supposed to be held on entry. Might drop it, then
2249 * reacquire
2250 */
2251 void qlt_send_resp_ctio(struct qla_qpair *qpair, struct qla_tgt_cmd *cmd,
2252 uint8_t scsi_status, uint8_t sense_key, uint8_t asc, uint8_t ascq)
2253 {
2254 struct atio_from_isp *atio = &cmd->atio;
2255 struct ctio7_to_24xx *ctio;
2256 uint16_t temp;
2257 struct scsi_qla_host *vha = cmd->vha;
2258
2259 ql_dbg(ql_dbg_tgt_dif, vha, 0x3066,
2260 "Sending response CTIO7 (vha=%p, atio=%p, scsi_status=%02x, "
2261 "sense_key=%02x, asc=%02x, ascq=%02x",
2262 vha, atio, scsi_status, sense_key, asc, ascq);
2263
2264 ctio = (struct ctio7_to_24xx *)qla2x00_alloc_iocbs(vha, NULL);
2265 if (!ctio) {
2266 ql_dbg(ql_dbg_async, vha, 0x3067,
2267 "qla2x00t(%ld): %s failed: unable to allocate request packet",
2268 vha->host_no, __func__);
2269 goto out;
2270 }
2271
2272 ctio->entry_type = CTIO_TYPE7;
2273 ctio->entry_count = 1;
2274 ctio->handle = QLA_TGT_SKIP_HANDLE;
2275 ctio->nport_handle = cmd->sess->loop_id;
2276 ctio->timeout = cpu_to_le16(QLA_TGT_TIMEOUT);
2277 ctio->vp_index = vha->vp_idx;
2278 ctio->initiator_id = be_id_to_le(atio->u.isp24.fcp_hdr.s_id);
2279 ctio->exchange_addr = atio->u.isp24.exchange_addr;
2280 temp = (atio->u.isp24.attr << 9) |
2281 CTIO7_FLAGS_STATUS_MODE_1 | CTIO7_FLAGS_SEND_STATUS;
2282 ctio->u.status1.flags = cpu_to_le16(temp);
2283 temp = be16_to_cpu(atio->u.isp24.fcp_hdr.ox_id);
2284 ctio->u.status1.ox_id = cpu_to_le16(temp);
2285 ctio->u.status1.scsi_status =
2286 cpu_to_le16(SS_RESPONSE_INFO_LEN_VALID | scsi_status);
2287 ctio->u.status1.response_len = cpu_to_le16(18);
2288 ctio->u.status1.residual = cpu_to_le32(get_datalen_for_atio(atio));
2289
2290 if (ctio->u.status1.residual != 0)
2291 ctio->u.status1.scsi_status |=
2292 cpu_to_le16(SS_RESIDUAL_UNDER);
2293
2294 /* Fixed format sense data. */
2295 ctio->u.status1.sense_data[0] = 0x70;
2296 ctio->u.status1.sense_data[2] = sense_key;
2297 /* Additional sense length */
2298 ctio->u.status1.sense_data[7] = 0xa;
2299 /* ASC and ASCQ */
2300 ctio->u.status1.sense_data[12] = asc;
2301 ctio->u.status1.sense_data[13] = ascq;
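/*
 * The bytes above form standard fixed-format sense data: response
 * code 0x70 in byte 0, sense key in byte 2, additional sense length
 * 0x0a in byte 7 (10 bytes follow the 8-byte header, 18 bytes total),
 * and ASC/ASCQ in bytes 12 and 13.
 */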
2302
2303 /* Memory Barrier */
2304 wmb();
2305
2306 if (qpair->reqq_start_iocbs)
2307 qpair->reqq_start_iocbs(qpair);
2308 else
2309 qla2x00_start_iocbs(vha, qpair->req);
2310
2311 out:
2312 return;
2313 }
2314
2315 /* callback from target fabric module code */
2316 void qlt_xmit_tm_rsp(struct qla_tgt_mgmt_cmd *mcmd)
2317 {
2318 struct scsi_qla_host *vha = mcmd->sess->vha;
2319 struct qla_hw_data *ha = vha->hw;
2320 unsigned long flags;
2321 struct qla_qpair *qpair = mcmd->qpair;
2322 bool free_mcmd = true;
2323
2324 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf013,
2325 "TM response mcmd (%p) status %#x state %#x",
2326 mcmd, mcmd->fc_tm_rsp, mcmd->flags);
2327
2328 spin_lock_irqsave(qpair->qp_lock_ptr, flags);
2329
2330 if (!vha->flags.online || mcmd->reset_count != qpair->chip_reset) {
2331 /*
2332 * Either the port is not online or this request was from
2333 * a previous life; just abort the processing.
2334 */
2335 ql_dbg(ql_dbg_async, vha, 0xe100,
2336 "RESET-TMR online/active/old-count/new-count = %d/%d/%d/%d.\n",
2337 vha->flags.online, qla2x00_reset_active(vha),
2338 mcmd->reset_count, qpair->chip_reset);
2339 ha->tgt.tgt_ops->free_mcmd(mcmd);
2340 spin_unlock_irqrestore(qpair->qp_lock_ptr, flags);
2341 return;
2342 }
2343
2344 if (mcmd->flags == QLA24XX_MGMT_SEND_NACK) {
2345 switch (mcmd->orig_iocb.imm_ntfy.u.isp24.status_subcode) {
2346 case ELS_LOGO:
2347 case ELS_PRLO:
2348 case ELS_TPRLO:
2349 ql_dbg(ql_dbg_disc, vha, 0x2106,
2350 "TM response logo %8phC status %#x state %#x",
2351 mcmd->sess->port_name, mcmd->fc_tm_rsp,
2352 mcmd->flags);
2353 qlt_schedule_sess_for_deletion(mcmd->sess);
2354 break;
2355 default:
2356 qlt_send_notify_ack(vha->hw->base_qpair,
2357 &mcmd->orig_iocb.imm_ntfy, 0, 0, 0, 0, 0, 0);
2358 break;
2359 }
2360 } else {
2361 if (mcmd->orig_iocb.atio.u.raw.entry_type == ABTS_RECV_24XX) {
2362 qlt_build_abts_resp_iocb(mcmd);
2363 free_mcmd = false;
2364 } else
2365 qlt_24xx_send_task_mgmt_ctio(qpair, mcmd,
2366 mcmd->fc_tm_rsp);
2367 }
2368 /*
2369 * Make the callback for ->free_mcmd() to queue_work() and invoke
2370 * target_put_sess_cmd() to drop cmd_kref to 1. The final
2371 * target_put_sess_cmd() call will be made from TFO->check_stop_free()
2372 * -> tcm_qla2xxx_check_stop_free() to release the TMR associated se_cmd
2373 * descriptor after TFO->queue_tm_rsp() -> tcm_qla2xxx_queue_tm_rsp() ->
2374 * qlt_xmit_tm_rsp() returns here..
2375 */
2376 if (free_mcmd)
2377 ha->tgt.tgt_ops->free_mcmd(mcmd);
2378
2379 spin_unlock_irqrestore(qpair->qp_lock_ptr, flags);
2380 }
2381 EXPORT_SYMBOL(qlt_xmit_tm_rsp);
2382
2383 /* No locks */
2384 static int qlt_pci_map_calc_cnt(struct qla_tgt_prm *prm)
2385 {
2386 struct qla_tgt_cmd *cmd = prm->cmd;
2387
2388 BUG_ON(cmd->sg_cnt == 0);
2389
2390 prm->sg = (struct scatterlist *)cmd->sg;
2391 prm->seg_cnt = dma_map_sg(&cmd->qpair->pdev->dev, cmd->sg,
2392 cmd->sg_cnt, cmd->dma_data_direction);
2393 if (unlikely(prm->seg_cnt == 0))
2394 goto out_err;
2395
2396 prm->cmd->sg_mapped = 1;
2397
2398 if (cmd->se_cmd.prot_op == TARGET_PROT_NORMAL) {
2399 /*
2400 * If there are more sg entries than fit in the command IOCB, we
2401 * need to allocate continuation entries
2402 */
2403 if (prm->seg_cnt > QLA_TGT_DATASEGS_PER_CMD_24XX)
2404 prm->req_cnt += DIV_ROUND_UP(prm->seg_cnt -
2405 QLA_TGT_DATASEGS_PER_CMD_24XX,
2406 QLA_TGT_DATASEGS_PER_CONT_24XX);
2407 } else {
2408 /* DIF */
2409 if ((cmd->se_cmd.prot_op == TARGET_PROT_DIN_INSERT) ||
2410 (cmd->se_cmd.prot_op == TARGET_PROT_DOUT_STRIP)) {
2411 prm->seg_cnt = DIV_ROUND_UP(cmd->bufflen, cmd->blk_sz);
2412 prm->tot_dsds = prm->seg_cnt;
2413 } else
2414 prm->tot_dsds = prm->seg_cnt;
2415
2416 if (cmd->prot_sg_cnt) {
2417 prm->prot_sg = cmd->prot_sg;
2418 prm->prot_seg_cnt = dma_map_sg(&cmd->qpair->pdev->dev,
2419 cmd->prot_sg, cmd->prot_sg_cnt,
2420 cmd->dma_data_direction);
2421 if (unlikely(prm->prot_seg_cnt == 0))
2422 goto out_err;
2423
2424 if ((cmd->se_cmd.prot_op == TARGET_PROT_DIN_INSERT) ||
2425 (cmd->se_cmd.prot_op == TARGET_PROT_DOUT_STRIP)) {
2426 /* DIF bundling is not supported here */
2427 prm->prot_seg_cnt = DIV_ROUND_UP(cmd->bufflen,
2428 cmd->blk_sz);
2429 prm->tot_dsds += prm->prot_seg_cnt;
2430 } else
2431 prm->tot_dsds += prm->prot_seg_cnt;
2432 }
2433 }
2434
2435 return 0;
2436
2437 out_err:
2438 ql_dbg_qp(ql_dbg_tgt, prm->cmd->qpair, 0xe04d,
2439 "qla_target(%d): PCI mapping failed: sg_cnt=%d",
2440 0, prm->cmd->sg_cnt);
2441 return -1;
2442 }
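/*
 * Worked example for the continuation math above, with hypothetical
 * values QLA_TGT_DATASEGS_PER_CMD_24XX = 1 and
 * QLA_TGT_DATASEGS_PER_CONT_24XX = 5 (see qla_target.h for the real
 * definitions): seg_cnt = 11 leaves 10 segments after the command
 * IOCB, so req_cnt grows by DIV_ROUND_UP(10, 5) = 2 continuation
 * entries.
 */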
2443
2444 static void qlt_unmap_sg(struct scsi_qla_host *vha, struct qla_tgt_cmd *cmd)
2445 {
2446 struct qla_hw_data *ha;
2447 struct qla_qpair *qpair;
2448
2449 if (!cmd->sg_mapped)
2450 return;
2451
2452 qpair = cmd->qpair;
2453
2454 dma_unmap_sg(&qpair->pdev->dev, cmd->sg, cmd->sg_cnt,
2455 cmd->dma_data_direction);
2456 cmd->sg_mapped = 0;
2457
2458 if (cmd->prot_sg_cnt)
2459 dma_unmap_sg(&qpair->pdev->dev, cmd->prot_sg, cmd->prot_sg_cnt,
2460 cmd->dma_data_direction);
2461
2462 if (!cmd->ctx)
2463 return;
2464 ha = vha->hw;
2465 if (cmd->ctx_dsd_alloced)
2466 qla2x00_clean_dsd_pool(ha, cmd->ctx);
2467
2468 dma_pool_free(ha->dl_dma_pool, cmd->ctx, cmd->ctx->crc_ctx_dma);
2469 }
2470
2471 static int qlt_check_reserve_free_req(struct qla_qpair *qpair,
2472 uint32_t req_cnt)
2473 {
2474 uint32_t cnt;
2475 struct req_que *req = qpair->req;
2476
2477 if (req->cnt < (req_cnt + 2)) {
2478 cnt = (uint16_t)(qpair->use_shadow_reg ? *req->out_ptr :
2479 RD_REG_DWORD_RELAXED(req->req_q_out));
2480
2481 if (req->ring_index < cnt)
2482 req->cnt = cnt - req->ring_index;
2483 else
2484 req->cnt = req->length - (req->ring_index - cnt);
2485
2486 if (unlikely(req->cnt < (req_cnt + 2)))
2487 return -EAGAIN;
2488 }
2489
2490 req->cnt -= req_cnt;
2491
2492 return 0;
2493 }
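/*
 * The computation above treats the request ring as circular. A short
 * worked example: with req->length = 2048, ring_index = 100 and a
 * consumer index cnt = 90 read back from the out pointer, the producer
 * leads by 10 entries, so req->cnt = 2048 - (100 - 90) = 2038 free
 * slots. The "+ 2" headroom keeps the ring from filling completely.
 */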
2494
2495 /*
2496 * ha->hardware_lock is supposed to be held on entry. Might drop it, then reacquire
2497 */
2498 static inline void *qlt_get_req_pkt(struct req_que *req)
2499 {
2500 /* Adjust ring index. */
2501 req->ring_index++;
2502 if (req->ring_index == req->length) {
2503 req->ring_index = 0;
2504 req->ring_ptr = req->ring;
2505 } else {
2506 req->ring_ptr++;
2507 }
2508 return (cont_entry_t *)req->ring_ptr;
2509 }
2510
2511 /* ha->hardware_lock is supposed to be held on entry */
2512 static inline uint32_t qlt_make_handle(struct qla_qpair *qpair)
2513 {
2514 uint32_t h;
2515 int index;
2516 uint8_t found = 0;
2517 struct req_que *req = qpair->req;
2518
2519 h = req->current_outstanding_cmd;
2520
2521 for (index = 1; index < req->num_outstanding_cmds; index++) {
2522 h++;
2523 if (h == req->num_outstanding_cmds)
2524 h = 1;
2525
2526 if (h == QLA_TGT_SKIP_HANDLE)
2527 continue;
2528
2529 if (!req->outstanding_cmds[h]) {
2530 found = 1;
2531 break;
2532 }
2533 }
2534
2535 if (found) {
2536 req->current_outstanding_cmd = h;
2537 } else {
2538 ql_dbg(ql_dbg_io, qpair->vha, 0x305b,
2539 "qla_target(%d): Ran out of empty cmd slots\n",
2540 qpair->vha->vp_idx);
2541 h = QLA_TGT_NULL_HANDLE;
2542 }
2543
2544 return h;
2545 }
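/*
 * Note on the scan above: a handle is an index into
 * req->outstanding_cmds[]. The search starts just past the last handle
 * handed out, wraps back to 1 (0 is QLA_TGT_NULL_HANDLE, the error
 * return) and skips QLA_TGT_SKIP_HANDLE, which marks IOCBs that never
 * expect a completion.
 */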
2546
2547 /* ha->hardware_lock is supposed to be held on entry */
2548 static int qlt_24xx_build_ctio_pkt(struct qla_qpair *qpair,
2549 struct qla_tgt_prm *prm)
2550 {
2551 uint32_t h;
2552 struct ctio7_to_24xx *pkt;
2553 struct atio_from_isp *atio = &prm->cmd->atio;
2554 uint16_t temp;
2555
2556 pkt = (struct ctio7_to_24xx *)qpair->req->ring_ptr;
2557 prm->pkt = pkt;
2558 memset(pkt, 0, sizeof(*pkt));
2559
2560 pkt->entry_type = CTIO_TYPE7;
2561 pkt->entry_count = (uint8_t)prm->req_cnt;
2562 pkt->vp_index = prm->cmd->vp_idx;
2563
2564 h = qlt_make_handle(qpair);
2565 if (unlikely(h == QLA_TGT_NULL_HANDLE)) {
2566 /*
2567 * CTIO type 7 from the firmware doesn't provide a way to
2568 * know the initiator's LOOP ID, hence we can't find
2569 * the session and, therefore, the command.
2570 */
2571 return -EAGAIN;
2572 } else
2573 qpair->req->outstanding_cmds[h] = (srb_t *)prm->cmd;
2574
2575 pkt->handle = MAKE_HANDLE(qpair->req->id, h);
2576 pkt->handle |= CTIO_COMPLETION_HANDLE_MARK;
2577 pkt->nport_handle = cpu_to_le16(prm->cmd->loop_id);
2578 pkt->timeout = cpu_to_le16(QLA_TGT_TIMEOUT);
2579 pkt->initiator_id = be_id_to_le(atio->u.isp24.fcp_hdr.s_id);
2580 pkt->exchange_addr = atio->u.isp24.exchange_addr;
2581 temp = atio->u.isp24.attr << 9;
2582 pkt->u.status0.flags |= cpu_to_le16(temp);
2583 temp = be16_to_cpu(atio->u.isp24.fcp_hdr.ox_id);
2584 pkt->u.status0.ox_id = cpu_to_le16(temp);
2585 pkt->u.status0.relative_offset = cpu_to_le32(prm->cmd->offset);
2586
2587 return 0;
2588 }
2589
2590 /*
2591 * ha->hardware_lock is supposed to be held on entry. We have already made sure
2592 * that there is a sufficient number of request entries so we will not drop it.
2593 */
2594 static void qlt_load_cont_data_segments(struct qla_tgt_prm *prm)
2595 {
2596 int cnt;
2597 struct dsd64 *cur_dsd;
2598
2599 /* Build continuation packets */
2600 while (prm->seg_cnt > 0) {
2601 cont_a64_entry_t *cont_pkt64 =
2602 (cont_a64_entry_t *)qlt_get_req_pkt(
2603 prm->cmd->qpair->req);
2604
2605 /*
2606 * Make sure that none of cont_pkt64's
2607 * 64-bit specific fields are used for
2608 * 32-bit addressing; cast to
2609 * (cont_entry_t *) for that.
2610 */
2611
2612 memset(cont_pkt64, 0, sizeof(*cont_pkt64));
2613
2614 cont_pkt64->entry_count = 1;
2615 cont_pkt64->sys_define = 0;
2616
2617 cont_pkt64->entry_type = CONTINUE_A64_TYPE;
2618 cur_dsd = cont_pkt64->dsd;
2619
2620 /* Load continuation entry data segments */
2621 for (cnt = 0;
2622 cnt < QLA_TGT_DATASEGS_PER_CONT_24XX && prm->seg_cnt;
2623 cnt++, prm->seg_cnt--) {
2624 append_dsd64(&cur_dsd, prm->sg);
2625 prm->sg = sg_next(prm->sg);
2626 }
2627 }
2628 }
2629
2630 /*
2631 * ha->hardware_lock is supposed to be held on entry. We have already made sure
2632 * that there is a sufficient number of request entries so we will not drop it.
2633 */
2634 static void qlt_load_data_segments(struct qla_tgt_prm *prm)
2635 {
2636 int cnt;
2637 struct dsd64 *cur_dsd;
2638 struct ctio7_to_24xx *pkt24 = (struct ctio7_to_24xx *)prm->pkt;
2639
2640 pkt24->u.status0.transfer_length = cpu_to_le32(prm->cmd->bufflen);
2641
2642 /* Setup packet address segment pointer */
2643 cur_dsd = &pkt24->u.status0.dsd;
2644
2645 /* Set total data segment count */
2646 if (prm->seg_cnt)
2647 pkt24->dseg_count = cpu_to_le16(prm->seg_cnt);
2648
2649 if (prm->seg_cnt == 0) {
2650 /* No data transfer */
2651 cur_dsd->address = 0;
2652 cur_dsd->length = 0;
2653 return;
2654 }
2655
2656 /* If scatter gather */
2657
2658 /* Load command entry data segments */
2659 for (cnt = 0;
2660 (cnt < QLA_TGT_DATASEGS_PER_CMD_24XX) && prm->seg_cnt;
2661 cnt++, prm->seg_cnt--) {
2662 append_dsd64(&cur_dsd, prm->sg);
2663 prm->sg = sg_next(prm->sg);
2664 }
2665
2666 qlt_load_cont_data_segments(prm);
2667 }
2668
2669 static inline int qlt_has_data(struct qla_tgt_cmd *cmd)
2670 {
2671 return cmd->bufflen > 0;
2672 }
2673
2674 static void qlt_print_dif_err(struct qla_tgt_prm *prm)
2675 {
2676 struct qla_tgt_cmd *cmd;
2677 struct scsi_qla_host *vha;
2678
2679 /* asc 0x10=dif error */
2680 if (prm->sense_buffer && (prm->sense_buffer[12] == 0x10)) {
2681 cmd = prm->cmd;
2682 vha = cmd->vha;
2683 /* ASCQ */
2684 switch (prm->sense_buffer[13]) {
2685 case 1:
2686 ql_dbg(ql_dbg_tgt_dif, vha, 0xe00b,
2687 "BE detected Guard TAG ERR: lba[0x%llx|%lld] len[0x%x] "
2688 "se_cmd=%p tag[%x]",
2689 cmd->lba, cmd->lba, cmd->num_blks, &cmd->se_cmd,
2690 cmd->atio.u.isp24.exchange_addr);
2691 break;
2692 case 2:
2693 ql_dbg(ql_dbg_tgt_dif, vha, 0xe00c,
2694 "BE detected APP TAG ERR: lba[0x%llx|%lld] len[0x%x] "
2695 "se_cmd=%p tag[%x]",
2696 cmd->lba, cmd->lba, cmd->num_blks, &cmd->se_cmd,
2697 cmd->atio.u.isp24.exchange_addr);
2698 break;
2699 case 3:
2700 ql_dbg(ql_dbg_tgt_dif, vha, 0xe00f,
2701 "BE detected REF TAG ERR: lba[0x%llx|%lld] len[0x%x] "
2702 "se_cmd=%p tag[%x]",
2703 cmd->lba, cmd->lba, cmd->num_blks, &cmd->se_cmd,
2704 cmd->atio.u.isp24.exchange_addr);
2705 break;
2706 default:
2707 ql_dbg(ql_dbg_tgt_dif, vha, 0xe010,
2708 "BE detected Dif ERR: lba[%llx|%lld] len[%x] "
2709 "se_cmd=%p tag[%x]",
2710 cmd->lba, cmd->lba, cmd->num_blks, &cmd->se_cmd,
2711 cmd->atio.u.isp24.exchange_addr);
2712 break;
2713 }
2714 ql_dump_buffer(ql_dbg_tgt_dif, vha, 0xe011, cmd->cdb, 16);
2715 }
2716 }
2717
2718 /*
2719 * Called without ha->hardware_lock held
2720 */
2721 static int qlt_pre_xmit_response(struct qla_tgt_cmd *cmd,
2722 struct qla_tgt_prm *prm, int xmit_type, uint8_t scsi_status,
2723 uint32_t *full_req_cnt)
2724 {
2725 struct se_cmd *se_cmd = &cmd->se_cmd;
2726 struct qla_qpair *qpair = cmd->qpair;
2727
2728 prm->cmd = cmd;
2729 prm->tgt = cmd->tgt;
2730 prm->pkt = NULL;
2731 prm->rq_result = scsi_status;
2732 prm->sense_buffer = &cmd->sense_buffer[0];
2733 prm->sense_buffer_len = TRANSPORT_SENSE_BUFFER;
2734 prm->sg = NULL;
2735 prm->seg_cnt = -1;
2736 prm->req_cnt = 1;
2737 prm->residual = 0;
2738 prm->add_status_pkt = 0;
2739 prm->prot_sg = NULL;
2740 prm->prot_seg_cnt = 0;
2741 prm->tot_dsds = 0;
2742
2743 if ((xmit_type & QLA_TGT_XMIT_DATA) && qlt_has_data(cmd)) {
2744 if (qlt_pci_map_calc_cnt(prm) != 0)
2745 return -EAGAIN;
2746 }
2747
2748 *full_req_cnt = prm->req_cnt;
2749
2750 if (se_cmd->se_cmd_flags & SCF_UNDERFLOW_BIT) {
2751 prm->residual = se_cmd->residual_count;
2752 ql_dbg_qp(ql_dbg_io + ql_dbg_verbose, qpair, 0x305c,
2753 "Residual underflow: %d (tag %lld, op %x, bufflen %d, rq_result %x)\n",
2754 prm->residual, se_cmd->tag,
2755 se_cmd->t_task_cdb ? se_cmd->t_task_cdb[0] : 0,
2756 cmd->bufflen, prm->rq_result);
2757 prm->rq_result |= SS_RESIDUAL_UNDER;
2758 } else if (se_cmd->se_cmd_flags & SCF_OVERFLOW_BIT) {
2759 prm->residual = se_cmd->residual_count;
2760 ql_dbg_qp(ql_dbg_io, qpair, 0x305d,
2761 "Residual overflow: %d (tag %lld, op %x, bufflen %d, rq_result %x)\n",
2762 prm->residual, se_cmd->tag, se_cmd->t_task_cdb ?
2763 se_cmd->t_task_cdb[0] : 0, cmd->bufflen, prm->rq_result);
2764 prm->rq_result |= SS_RESIDUAL_OVER;
2765 }
2766
2767 if (xmit_type & QLA_TGT_XMIT_STATUS) {
2768 /*
2769 * If QLA_TGT_XMIT_DATA is not set, add_status_pkt will be
2770 * ignored in *xmit_response() below
2771 */
2772 if (qlt_has_data(cmd)) {
2773 if (QLA_TGT_SENSE_VALID(prm->sense_buffer) ||
2774 (IS_FWI2_CAPABLE(cmd->vha->hw) &&
2775 (prm->rq_result != 0))) {
2776 prm->add_status_pkt = 1;
2777 (*full_req_cnt)++;
2778 }
2779 }
2780 }
2781
2782 return 0;
2783 }
2784
2785 static inline int qlt_need_explicit_conf(struct qla_tgt_cmd *cmd,
2786 int sending_sense)
2787 {
2788 if (cmd->qpair->enable_class_2)
2789 return 0;
2790
2791 if (sending_sense)
2792 return cmd->conf_compl_supported;
2793 else
2794 return cmd->qpair->enable_explicit_conf &&
2795 cmd->conf_compl_supported;
2796 }
2797
2798 static void qlt_24xx_init_ctio_to_isp(struct ctio7_to_24xx *ctio,
2799 struct qla_tgt_prm *prm)
2800 {
2801 prm->sense_buffer_len = min_t(uint32_t, prm->sense_buffer_len,
2802 (uint32_t)sizeof(ctio->u.status1.sense_data));
2803 ctio->u.status0.flags |= cpu_to_le16(CTIO7_FLAGS_SEND_STATUS);
2804 if (qlt_need_explicit_conf(prm->cmd, 0)) {
2805 ctio->u.status0.flags |= cpu_to_le16(
2806 CTIO7_FLAGS_EXPLICIT_CONFORM |
2807 CTIO7_FLAGS_CONFORM_REQ);
2808 }
2809 ctio->u.status0.residual = cpu_to_le32(prm->residual);
2810 ctio->u.status0.scsi_status = cpu_to_le16(prm->rq_result);
2811 if (QLA_TGT_SENSE_VALID(prm->sense_buffer)) {
2812 int i;
2813
2814 if (qlt_need_explicit_conf(prm->cmd, 1)) {
2815 if ((prm->rq_result & SS_SCSI_STATUS_BYTE) != 0) {
2816 ql_dbg_qp(ql_dbg_tgt, prm->cmd->qpair, 0xe017,
2817 "Skipping EXPLICIT_CONFORM and "
2818 "CTIO7_FLAGS_CONFORM_REQ for FCP READ w/ "
2819 "non GOOD status\n");
2820 goto skip_explict_conf;
2821 }
2822 ctio->u.status1.flags |= cpu_to_le16(
2823 CTIO7_FLAGS_EXPLICIT_CONFORM |
2824 CTIO7_FLAGS_CONFORM_REQ);
2825 }
2826 skip_explict_conf:
2827 ctio->u.status1.flags &=
2828 ~cpu_to_le16(CTIO7_FLAGS_STATUS_MODE_0);
2829 ctio->u.status1.flags |=
2830 cpu_to_le16(CTIO7_FLAGS_STATUS_MODE_1);
2831 ctio->u.status1.scsi_status |=
2832 cpu_to_le16(SS_SENSE_LEN_VALID);
2833 ctio->u.status1.sense_length =
2834 cpu_to_le16(prm->sense_buffer_len);
2835 for (i = 0; i < prm->sense_buffer_len/4; i++)
2836 ((uint32_t *)ctio->u.status1.sense_data)[i] =
2837 cpu_to_be32(((uint32_t *)prm->sense_buffer)[i]);
2838
2839 qlt_print_dif_err(prm);
2840
2841 } else {
2842 ctio->u.status1.flags &=
2843 ~cpu_to_le16(CTIO7_FLAGS_STATUS_MODE_0);
2844 ctio->u.status1.flags |=
2845 cpu_to_le16(CTIO7_FLAGS_STATUS_MODE_1);
2846 ctio->u.status1.sense_length = 0;
2847 memset(ctio->u.status1.sense_data, 0,
2848 sizeof(ctio->u.status1.sense_data));
2849 }
2850
2851 /* Sense data longer than 24 bytes: is that possible here? */
2852 }
2853
2854 static inline int
2855 qlt_hba_err_chk_enabled(struct se_cmd *se_cmd)
2856 {
2857 switch (se_cmd->prot_op) {
2858 case TARGET_PROT_DOUT_INSERT:
2859 case TARGET_PROT_DIN_STRIP:
2860 if (ql2xenablehba_err_chk >= 1)
2861 return 1;
2862 break;
2863 case TARGET_PROT_DOUT_PASS:
2864 case TARGET_PROT_DIN_PASS:
2865 if (ql2xenablehba_err_chk >= 2)
2866 return 1;
2867 break;
2868 case TARGET_PROT_DIN_INSERT:
2869 case TARGET_PROT_DOUT_STRIP:
2870 return 1;
2871 default:
2872 break;
2873 }
2874 return 0;
2875 }
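/*
 * Summarizing the switch above: ql2xenablehba_err_chk >= 1 enables HBA
 * error checking for the DOUT_INSERT/DIN_STRIP ops (the HBA generates
 * or consumes the protection data), >= 2 extends it to the PASS ops,
 * and DIN_INSERT/DOUT_STRIP always check regardless of the module
 * parameter.
 */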
2876
2877 static inline int
2878 qla_tgt_ref_mask_check(struct se_cmd *se_cmd)
2879 {
2880 switch (se_cmd->prot_op) {
2881 case TARGET_PROT_DIN_INSERT:
2882 case TARGET_PROT_DOUT_INSERT:
2883 case TARGET_PROT_DIN_STRIP:
2884 case TARGET_PROT_DOUT_STRIP:
2885 case TARGET_PROT_DIN_PASS:
2886 case TARGET_PROT_DOUT_PASS:
2887 return 1;
2888 default:
2889 return 0;
2890 }
2891 return 0;
2892 }
2893
2894 /*
2895 * qla_tgt_set_dif_tags - Extract Ref and App tags from SCSI command
2896 */
2897 static void
2898 qla_tgt_set_dif_tags(struct qla_tgt_cmd *cmd, struct crc_context *ctx,
2899 uint16_t *pfw_prot_opts)
2900 {
2901 struct se_cmd *se_cmd = &cmd->se_cmd;
2902 uint32_t lba = 0xffffffff & se_cmd->t_task_lba;
2903 scsi_qla_host_t *vha = cmd->tgt->vha;
2904 struct qla_hw_data *ha = vha->hw;
2905 uint32_t t32 = 0;
2906
2907 /*
2908 * Wait until the Mode Sense/Select commands (mode page 0Ah, subpage 2)
2909 * have been implemented by TCM before the AppTag is available.
2910 * See modesense_handlers[].
2911 */
2912 ctx->app_tag = 0;
2913 ctx->app_tag_mask[0] = 0x0;
2914 ctx->app_tag_mask[1] = 0x0;
2915
2916 if (IS_PI_UNINIT_CAPABLE(ha)) {
2917 if ((se_cmd->prot_type == TARGET_DIF_TYPE1_PROT) ||
2918 (se_cmd->prot_type == TARGET_DIF_TYPE2_PROT))
2919 *pfw_prot_opts |= PO_DIS_VALD_APP_ESC;
2920 else if (se_cmd->prot_type == TARGET_DIF_TYPE3_PROT)
2921 *pfw_prot_opts |= PO_DIS_VALD_APP_REF_ESC;
2922 }
2923
2924 t32 = ha->tgt.tgt_ops->get_dif_tags(cmd, pfw_prot_opts);
2925
2926 switch (se_cmd->prot_type) {
2927 case TARGET_DIF_TYPE0_PROT:
2928 /*
2929 * No check for ql2xenablehba_err_chk, as it
2930 * would be an I/O error if hba tag generation
2931 * is not done.
2932 */
2933 ctx->ref_tag = cpu_to_le32(lba);
2934 /* enable ALL bytes of the ref tag */
2935 ctx->ref_tag_mask[0] = 0xff;
2936 ctx->ref_tag_mask[1] = 0xff;
2937 ctx->ref_tag_mask[2] = 0xff;
2938 ctx->ref_tag_mask[3] = 0xff;
2939 break;
2940 case TARGET_DIF_TYPE1_PROT:
2941 /*
2942 * For TYPE 1 protection: 16 bit GUARD tag, 32 bit
2943 * REF tag, and 16 bit app tag.
2944 */
2945 ctx->ref_tag = cpu_to_le32(lba);
2946 if (!qla_tgt_ref_mask_check(se_cmd) ||
2947 !(ha->tgt.tgt_ops->chk_dif_tags(t32))) {
2948 *pfw_prot_opts |= PO_DIS_REF_TAG_VALD;
2949 break;
2950 }
2951 /* enable ALL bytes of the ref tag */
2952 ctx->ref_tag_mask[0] = 0xff;
2953 ctx->ref_tag_mask[1] = 0xff;
2954 ctx->ref_tag_mask[2] = 0xff;
2955 ctx->ref_tag_mask[3] = 0xff;
2956 break;
2957 case TARGET_DIF_TYPE2_PROT:
2958 /*
2959 * For TYPE 2 protection: 16 bit GUARD + 32 bit REF
2960 * tag has to match LBA in CDB + N
2961 */
2962 ctx->ref_tag = cpu_to_le32(lba);
2963 if (!qla_tgt_ref_mask_check(se_cmd) ||
2964 !(ha->tgt.tgt_ops->chk_dif_tags(t32))) {
2965 *pfw_prot_opts |= PO_DIS_REF_TAG_VALD;
2966 break;
2967 }
2968 /* enable ALL bytes of the ref tag */
2969 ctx->ref_tag_mask[0] = 0xff;
2970 ctx->ref_tag_mask[1] = 0xff;
2971 ctx->ref_tag_mask[2] = 0xff;
2972 ctx->ref_tag_mask[3] = 0xff;
2973 break;
2974 case TARGET_DIF_TYPE3_PROT:
2975 /* For TYPE 3 protection: 16 bit GUARD only */
2976 *pfw_prot_opts |= PO_DIS_REF_TAG_VALD;
2977 ctx->ref_tag_mask[0] = ctx->ref_tag_mask[1] =
2978 ctx->ref_tag_mask[2] = ctx->ref_tag_mask[3] = 0x00;
2979 break;
2980 }
2981 }
2982
2983 static inline int
2984 qlt_build_ctio_crc2_pkt(struct qla_qpair *qpair, struct qla_tgt_prm *prm)
2985 {
2986 struct dsd64 *cur_dsd;
2987 uint32_t transfer_length = 0;
2988 uint32_t data_bytes;
2989 uint32_t dif_bytes;
2990 uint8_t bundling = 1;
2991 struct crc_context *crc_ctx_pkt = NULL;
2992 struct qla_hw_data *ha;
2993 struct ctio_crc2_to_fw *pkt;
2994 dma_addr_t crc_ctx_dma;
2995 uint16_t fw_prot_opts = 0;
2996 struct qla_tgt_cmd *cmd = prm->cmd;
2997 struct se_cmd *se_cmd = &cmd->se_cmd;
2998 uint32_t h;
2999 struct atio_from_isp *atio = &prm->cmd->atio;
3000 struct qla_tc_param tc;
3001 uint16_t t16;
3002 scsi_qla_host_t *vha = cmd->vha;
3003
3004 ha = vha->hw;
3005
3006 pkt = (struct ctio_crc2_to_fw *)qpair->req->ring_ptr;
3007 prm->pkt = pkt;
3008 memset(pkt, 0, sizeof(*pkt));
3009
3010 ql_dbg_qp(ql_dbg_tgt, cmd->qpair, 0xe071,
3011 "qla_target(%d):%s: se_cmd[%p] CRC2 prot_op[0x%x] cmd prot sg:cnt[%p:%x] lba[%llu]\n",
3012 cmd->vp_idx, __func__, se_cmd, se_cmd->prot_op,
3013 prm->prot_sg, prm->prot_seg_cnt, se_cmd->t_task_lba);
3014
3015 if ((se_cmd->prot_op == TARGET_PROT_DIN_INSERT) ||
3016 (se_cmd->prot_op == TARGET_PROT_DOUT_STRIP))
3017 bundling = 0;
3018
3019 /* Compute the DIF length and adjust the data length to include protection */
3020 data_bytes = cmd->bufflen;
3021 dif_bytes = (data_bytes / cmd->blk_sz) * 8;
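/*
 * Eight bytes of T10 PI per logical block; e.g. a 32 KiB buffer
 * with blk_sz = 512 spans 64 blocks and carries 512 DIF bytes.
 */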
3022
3023 switch (se_cmd->prot_op) {
3024 case TARGET_PROT_DIN_INSERT:
3025 case TARGET_PROT_DOUT_STRIP:
3026 transfer_length = data_bytes;
3027 if (cmd->prot_sg_cnt)
3028 data_bytes += dif_bytes;
3029 break;
3030 case TARGET_PROT_DIN_STRIP:
3031 case TARGET_PROT_DOUT_INSERT:
3032 case TARGET_PROT_DIN_PASS:
3033 case TARGET_PROT_DOUT_PASS:
3034 transfer_length = data_bytes + dif_bytes;
3035 break;
3036 default:
3037 BUG();
3038 break;
3039 }
3040
3041 if (!qlt_hba_err_chk_enabled(se_cmd))
3042 fw_prot_opts |= 0x10; /* Disable Guard tag checking */
3043 /* HBA error checking enabled */
3044 else if (IS_PI_UNINIT_CAPABLE(ha)) {
3045 if ((se_cmd->prot_type == TARGET_DIF_TYPE1_PROT) ||
3046 (se_cmd->prot_type == TARGET_DIF_TYPE2_PROT))
3047 fw_prot_opts |= PO_DIS_VALD_APP_ESC;
3048 else if (se_cmd->prot_type == TARGET_DIF_TYPE3_PROT)
3049 fw_prot_opts |= PO_DIS_VALD_APP_REF_ESC;
3050 }
3051
3052 switch (se_cmd->prot_op) {
3053 case TARGET_PROT_DIN_INSERT:
3054 case TARGET_PROT_DOUT_INSERT:
3055 fw_prot_opts |= PO_MODE_DIF_INSERT;
3056 break;
3057 case TARGET_PROT_DIN_STRIP:
3058 case TARGET_PROT_DOUT_STRIP:
3059 fw_prot_opts |= PO_MODE_DIF_REMOVE;
3060 break;
3061 case TARGET_PROT_DIN_PASS:
3062 case TARGET_PROT_DOUT_PASS:
3063 fw_prot_opts |= PO_MODE_DIF_PASS;
3064 /* FUTURE: does tcm require T10CRC<->IPCKSUM conversion? */
3065 break;
3066 default:/* Normal Request */
3067 fw_prot_opts |= PO_MODE_DIF_PASS;
3068 break;
3069 }
3070
3071 /* ---- PKT ---- */
3072 /* Update entry type to indicate Command Type CRC_2 IOCB */
3073 pkt->entry_type = CTIO_CRC2;
3074 pkt->entry_count = 1;
3075 pkt->vp_index = cmd->vp_idx;
3076
3077 h = qlt_make_handle(qpair);
3078 if (unlikely(h == QLA_TGT_NULL_HANDLE)) {
3079 /*
3080 * CTIO type 7 from the firmware doesn't provide a way to
3081 * know the initiator's LOOP ID, hence we can't find
3082 * the session and, therefore, the command.
3083 */
3084 return -EAGAIN;
3085 } else
3086 qpair->req->outstanding_cmds[h] = (srb_t *)prm->cmd;
3087
3088 pkt->handle = MAKE_HANDLE(qpair->req->id, h);
3089 pkt->handle |= CTIO_COMPLETION_HANDLE_MARK;
3090 pkt->nport_handle = cpu_to_le16(prm->cmd->loop_id);
3091 pkt->timeout = cpu_to_le16(QLA_TGT_TIMEOUT);
3092 pkt->initiator_id = be_id_to_le(atio->u.isp24.fcp_hdr.s_id);
3093 pkt->exchange_addr = atio->u.isp24.exchange_addr;
3094
3095 /* silence a compiler warning */
3096 t16 = be16_to_cpu(atio->u.isp24.fcp_hdr.ox_id);
3097 pkt->ox_id = cpu_to_le16(t16);
3098
3099 t16 = (atio->u.isp24.attr << 9);
3100 pkt->flags |= cpu_to_le16(t16);
3101 pkt->relative_offset = cpu_to_le32(prm->cmd->offset);
3102
3103 /* Set transfer direction */
3104 if (cmd->dma_data_direction == DMA_TO_DEVICE)
3105 pkt->flags = cpu_to_le16(CTIO7_FLAGS_DATA_IN);
3106 else if (cmd->dma_data_direction == DMA_FROM_DEVICE)
3107 pkt->flags = cpu_to_le16(CTIO7_FLAGS_DATA_OUT);
3108
3109 pkt->dseg_count = prm->tot_dsds;
3110 /* Fibre channel byte count */
3111 pkt->transfer_length = cpu_to_le32(transfer_length);
3112
3113 /* ----- CRC context -------- */
3114
3115 /* Allocate CRC context from global pool */
3116 crc_ctx_pkt = cmd->ctx =
3117 dma_pool_zalloc(ha->dl_dma_pool, GFP_ATOMIC, &crc_ctx_dma);
3118
3119 if (!crc_ctx_pkt)
3120 goto crc_queuing_error;
3121
3122 crc_ctx_pkt->crc_ctx_dma = crc_ctx_dma;
3123 INIT_LIST_HEAD(&crc_ctx_pkt->dsd_list);
3124
3125 /* Set handle */
3126 crc_ctx_pkt->handle = pkt->handle;
3127
3128 qla_tgt_set_dif_tags(cmd, crc_ctx_pkt, &fw_prot_opts);
3129
3130 put_unaligned_le64(crc_ctx_dma, &pkt->crc_context_address);
3131 pkt->crc_context_len = CRC_CONTEXT_LEN_FW;
3132
3133 if (!bundling) {
3134 cur_dsd = &crc_ctx_pkt->u.nobundling.data_dsd[0];
3135 } else {
3136 /*
3137 * Configure bundling if we need to fetch interleaving
3138 * protection PCI accesses
3139 */
3140 fw_prot_opts |= PO_ENABLE_DIF_BUNDLING;
3141 crc_ctx_pkt->u.bundling.dif_byte_count = cpu_to_le32(dif_bytes);
3142 crc_ctx_pkt->u.bundling.dseg_count =
3143 cpu_to_le16(prm->tot_dsds - prm->prot_seg_cnt);
3144 cur_dsd = &crc_ctx_pkt->u.bundling.data_dsd[0];
3145 }
3146
3147 /* Finish the common fields of CRC pkt */
3148 crc_ctx_pkt->blk_size = cpu_to_le16(cmd->blk_sz);
3149 crc_ctx_pkt->prot_opts = cpu_to_le16(fw_prot_opts);
3150 crc_ctx_pkt->byte_count = cpu_to_le32(data_bytes);
3151 crc_ctx_pkt->guard_seed = cpu_to_le16(0);
3152
3153 memset((uint8_t *)&tc, 0 , sizeof(tc));
3154 tc.vha = vha;
3155 tc.blk_sz = cmd->blk_sz;
3156 tc.bufflen = cmd->bufflen;
3157 tc.sg = cmd->sg;
3158 tc.prot_sg = cmd->prot_sg;
3159 tc.ctx = crc_ctx_pkt;
3160 tc.ctx_dsd_alloced = &cmd->ctx_dsd_alloced;
3161
3162 /* Walks data segments */
3163 pkt->flags |= cpu_to_le16(CTIO7_FLAGS_DSD_PTR);
3164
3165 if (!bundling && prm->prot_seg_cnt) {
3166 if (qla24xx_walk_and_build_sglist_no_difb(ha, NULL, cur_dsd,
3167 prm->tot_dsds, &tc))
3168 goto crc_queuing_error;
3169 } else if (qla24xx_walk_and_build_sglist(ha, NULL, cur_dsd,
3170 (prm->tot_dsds - prm->prot_seg_cnt), &tc))
3171 goto crc_queuing_error;
3172
3173 if (bundling && prm->prot_seg_cnt) {
3174 /* Walks dif segments */
3175 pkt->add_flags |= CTIO_CRC2_AF_DIF_DSD_ENA;
3176
3177 cur_dsd = &crc_ctx_pkt->u.bundling.dif_dsd;
3178 if (qla24xx_walk_and_build_prot_sglist(ha, NULL, cur_dsd,
3179 prm->prot_seg_cnt, cmd))
3180 goto crc_queuing_error;
3181 }
3182 return QLA_SUCCESS;
3183
3184 crc_queuing_error:
3185 /* Cleanup will be performed by the caller */
3186 qpair->req->outstanding_cmds[h] = NULL;
3187
3188 return QLA_FUNCTION_FAILED;
3189 }
3190
3191 /*
3192 * Callback to set up a response for xmit_type of QLA_TGT_XMIT_DATA and
3193 * QLA_TGT_XMIT_STATUS for >= 24xx silicon
3194 */
3195 int qlt_xmit_response(struct qla_tgt_cmd *cmd, int xmit_type,
3196 uint8_t scsi_status)
3197 {
3198 struct scsi_qla_host *vha = cmd->vha;
3199 struct qla_qpair *qpair = cmd->qpair;
3200 struct ctio7_to_24xx *pkt;
3201 struct qla_tgt_prm prm;
3202 uint32_t full_req_cnt = 0;
3203 unsigned long flags = 0;
3204 int res;
3205
3206 if (!qpair->fw_started || (cmd->reset_count != qpair->chip_reset) ||
3207 (cmd->sess && cmd->sess->deleted)) {
3208 cmd->state = QLA_TGT_STATE_PROCESSED;
3209 res = 0;
3210 goto free;
3211 }
3212
3213 ql_dbg_qp(ql_dbg_tgt, qpair, 0xe018,
3214 "is_send_status=%d, cmd->bufflen=%d, cmd->sg_cnt=%d, cmd->dma_data_direction=%d se_cmd[%p] qp %d\n",
3215 (xmit_type & QLA_TGT_XMIT_STATUS) ?
3216 1 : 0, cmd->bufflen, cmd->sg_cnt, cmd->dma_data_direction,
3217 &cmd->se_cmd, qpair->id);
3218
3219 res = qlt_pre_xmit_response(cmd, &prm, xmit_type, scsi_status,
3220 &full_req_cnt);
3221 if (unlikely(res != 0))
3222 goto free;
3223
3224 spin_lock_irqsave(qpair->qp_lock_ptr, flags);
3225
3226 if (xmit_type == QLA_TGT_XMIT_STATUS)
3227 qpair->tgt_counters.core_qla_snd_status++;
3228 else
3229 qpair->tgt_counters.core_qla_que_buf++;
3230
3231 if (!qpair->fw_started || cmd->reset_count != qpair->chip_reset) {
3232 /*
3233 * Either the port is not online or this request was from
3234 * a previous life; just abort the processing.
3235 */
3236 cmd->state = QLA_TGT_STATE_PROCESSED;
3237 ql_dbg_qp(ql_dbg_async, qpair, 0xe101,
3238 "RESET-RSP online/active/old-count/new-count = %d/%d/%d/%d.\n",
3239 vha->flags.online, qla2x00_reset_active(vha),
3240 cmd->reset_count, qpair->chip_reset);
3241 spin_unlock_irqrestore(qpair->qp_lock_ptr, flags);
3242 res = 0;
3243 goto free;
3244 }
3245
3246 /* Does the F/W have IOCBs for this request? */
3247 res = qlt_check_reserve_free_req(qpair, full_req_cnt);
3248 if (unlikely(res))
3249 goto out_unmap_unlock;
3250
3251 if (cmd->se_cmd.prot_op && (xmit_type & QLA_TGT_XMIT_DATA))
3252 res = qlt_build_ctio_crc2_pkt(qpair, &prm);
3253 else
3254 res = qlt_24xx_build_ctio_pkt(qpair, &prm);
3255 if (unlikely(res != 0)) {
3256 qpair->req->cnt += full_req_cnt;
3257 goto out_unmap_unlock;
3258 }
3259
3260 pkt = (struct ctio7_to_24xx *)prm.pkt;
3261
3262 if (qlt_has_data(cmd) && (xmit_type & QLA_TGT_XMIT_DATA)) {
3263 pkt->u.status0.flags |=
3264 cpu_to_le16(CTIO7_FLAGS_DATA_IN |
3265 CTIO7_FLAGS_STATUS_MODE_0);
3266
3267 if (cmd->se_cmd.prot_op == TARGET_PROT_NORMAL)
3268 qlt_load_data_segments(&prm);
3269
3270 if (prm.add_status_pkt == 0) {
3271 if (xmit_type & QLA_TGT_XMIT_STATUS) {
3272 pkt->u.status0.scsi_status =
3273 cpu_to_le16(prm.rq_result);
3274 pkt->u.status0.residual =
3275 cpu_to_le32(prm.residual);
3276 pkt->u.status0.flags |= cpu_to_le16(
3277 CTIO7_FLAGS_SEND_STATUS);
3278 if (qlt_need_explicit_conf(cmd, 0)) {
3279 pkt->u.status0.flags |=
3280 cpu_to_le16(
3281 CTIO7_FLAGS_EXPLICIT_CONFORM |
3282 CTIO7_FLAGS_CONFORM_REQ);
3283 }
3284 }
3285
3286 } else {
3287 /*
3288 * We have already made sure that there is a sufficient
3289 * number of request entries so we do not drop the HW lock
3290 * in req_pkt().
3291 */
3292 struct ctio7_to_24xx *ctio =
3293 (struct ctio7_to_24xx *)qlt_get_req_pkt(
3294 qpair->req);
3295
3296 ql_dbg_qp(ql_dbg_tgt, qpair, 0x305e,
3297 "Building additional status packet 0x%p.\n",
3298 ctio);
3299
3300 /*
3301 * T10 DIF: ctio_crc2_to_fw overlays on top of
3302 * ctio7_to_24xx
3303 */
3304 memcpy(ctio, pkt, sizeof(*ctio));
3305 /* reset back to CTIO7 */
3306 ctio->entry_count = 1;
3307 ctio->entry_type = CTIO_TYPE7;
3308 ctio->dseg_count = 0;
3309 ctio->u.status1.flags &= ~cpu_to_le16(
3310 CTIO7_FLAGS_DATA_IN);
3311
3312 /* Real finish is ctio_m1's finish */
3313 pkt->handle |= CTIO_INTERMEDIATE_HANDLE_MARK;
3314 pkt->u.status0.flags |= cpu_to_le16(
3315 CTIO7_FLAGS_DONT_RET_CTIO);
3316
3317 /* qlt_24xx_init_ctio_to_isp will correct
3318 * all necessary fields that are part of CTIO7.
3319 * There should be no residual of CTIO-CRC2 data.
3320 */
3321 qlt_24xx_init_ctio_to_isp((struct ctio7_to_24xx *)ctio,
3322 &prm);
3323 }
3324 } else
3325 qlt_24xx_init_ctio_to_isp(pkt, &prm);
3326
3327
3328 cmd->state = QLA_TGT_STATE_PROCESSED; /* Mid-level is done processing */
3329 cmd->cmd_sent_to_fw = 1;
3330 cmd->ctio_flags = le16_to_cpu(pkt->u.status0.flags);
3331
3332 /* Memory Barrier */
3333 wmb();
3334 if (qpair->reqq_start_iocbs)
3335 qpair->reqq_start_iocbs(qpair);
3336 else
3337 qla2x00_start_iocbs(vha, qpair->req);
3338 spin_unlock_irqrestore(qpair->qp_lock_ptr, flags);
3339
3340 return 0;
3341
3342 out_unmap_unlock:
3343 qlt_unmap_sg(vha, cmd);
3344 spin_unlock_irqrestore(qpair->qp_lock_ptr, flags);
3345
3346 free:
3347 vha->hw->tgt.tgt_ops->free_cmd(cmd);
3348 return res;
3349 }
3350 EXPORT_SYMBOL(qlt_xmit_response);
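/*
 * Typical fabric-side usage, as a sketch only (tcm_qla2xxx is the
 * in-tree consumer; its exact call sites may differ). For a completed
 * READ, data and status go out together:
 *
 *	qlt_xmit_response(cmd, QLA_TGT_XMIT_DATA | QLA_TGT_XMIT_STATUS,
 *			  se_cmd->scsi_status);
 *
 * For status (plus sense, when valid) alone:
 *
 *	qlt_xmit_response(cmd, QLA_TGT_XMIT_STATUS, se_cmd->scsi_status);
 */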
3351
3352 int qlt_rdy_to_xfer(struct qla_tgt_cmd *cmd)
3353 {
3354 struct ctio7_to_24xx *pkt;
3355 struct scsi_qla_host *vha = cmd->vha;
3356 struct qla_tgt *tgt = cmd->tgt;
3357 struct qla_tgt_prm prm;
3358 unsigned long flags = 0;
3359 int res = 0;
3360 struct qla_qpair *qpair = cmd->qpair;
3361
3362 memset(&prm, 0, sizeof(prm));
3363 prm.cmd = cmd;
3364 prm.tgt = tgt;
3365 prm.sg = NULL;
3366 prm.req_cnt = 1;
3367
3368 /* Calculate number of entries and segments required */
3369 if (qlt_pci_map_calc_cnt(&prm) != 0)
3370 return -EAGAIN;
3371
3372 if (!qpair->fw_started || (cmd->reset_count != qpair->chip_reset) ||
3373 (cmd->sess && cmd->sess->deleted)) {
3374 /*
3375 * Either the port is not online or this request was from
3376 * a previous life; just abort the processing.
3377 */
3378 cmd->aborted = 1;
3379 cmd->write_data_transferred = 0;
3380 cmd->state = QLA_TGT_STATE_DATA_IN;
3381 vha->hw->tgt.tgt_ops->handle_data(cmd);
3382 ql_dbg_qp(ql_dbg_async, qpair, 0xe102,
3383 "RESET-XFR online/active/old-count/new-count = %d/%d/%d/%d.\n",
3384 vha->flags.online, qla2x00_reset_active(vha),
3385 cmd->reset_count, qpair->chip_reset);
3386 return 0;
3387 }
3388
3389 spin_lock_irqsave(qpair->qp_lock_ptr, flags);
3390 /* Does the F/W have IOCBs for this request? */
3391 res = qlt_check_reserve_free_req(qpair, prm.req_cnt);
3392 if (res != 0)
3393 goto out_unlock_free_unmap;
3394 if (cmd->se_cmd.prot_op)
3395 res = qlt_build_ctio_crc2_pkt(qpair, &prm);
3396 else
3397 res = qlt_24xx_build_ctio_pkt(qpair, &prm);
3398
3399 if (unlikely(res != 0)) {
3400 qpair->req->cnt += prm.req_cnt;
3401 goto out_unlock_free_unmap;
3402 }
3403
3404 pkt = (struct ctio7_to_24xx *)prm.pkt;
3405 pkt->u.status0.flags |= cpu_to_le16(CTIO7_FLAGS_DATA_OUT |
3406 CTIO7_FLAGS_STATUS_MODE_0);
3407
3408 if (cmd->se_cmd.prot_op == TARGET_PROT_NORMAL)
3409 qlt_load_data_segments(&prm);
3410
3411 cmd->state = QLA_TGT_STATE_NEED_DATA;
3412 cmd->cmd_sent_to_fw = 1;
3413 cmd->ctio_flags = le16_to_cpu(pkt->u.status0.flags);
3414
3415 /* Memory Barrier */
3416 wmb();
3417 if (qpair->reqq_start_iocbs)
3418 qpair->reqq_start_iocbs(qpair);
3419 else
3420 qla2x00_start_iocbs(vha, qpair->req);
3421 spin_unlock_irqrestore(qpair->qp_lock_ptr, flags);
3422
3423 return res;
3424
3425 out_unlock_free_unmap:
3426 qlt_unmap_sg(vha, cmd);
3427 spin_unlock_irqrestore(qpair->qp_lock_ptr, flags);
3428
3429 return res;
3430 }
3431 EXPORT_SYMBOL(qlt_rdy_to_xfer);
3432
3433
3434 /*
3435 * It is assumed that either the hardware_lock or the qpair lock is held.
3436 */
3437 static void
3438 qlt_handle_dif_error(struct qla_qpair *qpair, struct qla_tgt_cmd *cmd,
3439 struct ctio_crc_from_fw *sts)
3440 {
3441 uint8_t *ap = &sts->actual_dif[0];
3442 uint8_t *ep = &sts->expected_dif[0];
3443 uint64_t lba = cmd->se_cmd.t_task_lba;
3444 uint8_t scsi_status, sense_key, asc, ascq;
3445 unsigned long flags;
3446 struct scsi_qla_host *vha = cmd->vha;
3447
3448 cmd->trc_flags |= TRC_DIF_ERR;
3449
3450 cmd->a_guard = be16_to_cpu(*(uint16_t *)(ap + 0));
3451 cmd->a_app_tag = be16_to_cpu(*(uint16_t *)(ap + 2));
3452 cmd->a_ref_tag = be32_to_cpu(*(uint32_t *)(ap + 4));
3453
3454 cmd->e_guard = be16_to_cpu(*(uint16_t *)(ep + 0));
3455 cmd->e_app_tag = be16_to_cpu(*(uint16_t *)(ep + 2));
3456 cmd->e_ref_tag = be32_to_cpu(*(uint32_t *)(ep + 4));
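/*
 * Both tuples follow the 8-byte T10 PI layout: bytes 0-1 guard tag
 * (CRC), bytes 2-3 application tag, bytes 4-7 reference tag, stored
 * big-endian on the wire, hence the be16/be32 conversions above.
 */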
3457
3458 ql_dbg(ql_dbg_tgt_dif, vha, 0xf075,
3459 "%s: aborted %d state %d\n", __func__, cmd->aborted, cmd->state);
3460
3461 scsi_status = sense_key = asc = ascq = 0;
3462
3463 /* check appl tag */
3464 if (cmd->e_app_tag != cmd->a_app_tag) {
3465 ql_dbg(ql_dbg_tgt_dif, vha, 0xe00d,
3466 "App Tag ERR: cdb[%x] lba[%llx %llx] blks[%x] [Actual|Expected] Ref[%x|%x], App[%x|%x], Guard [%x|%x] cmd=%p ox_id[%04x]",
3467 cmd->cdb[0], lba, (lba+cmd->num_blks), cmd->num_blks,
3468 cmd->a_ref_tag, cmd->e_ref_tag, cmd->a_app_tag,
3469 cmd->e_app_tag, cmd->a_guard, cmd->e_guard, cmd,
3470 cmd->atio.u.isp24.fcp_hdr.ox_id);
3471
3472 cmd->dif_err_code = DIF_ERR_APP;
3473 scsi_status = SAM_STAT_CHECK_CONDITION;
3474 sense_key = ABORTED_COMMAND;
3475 asc = 0x10;
3476 ascq = 0x2;
3477 }
3478
3479 /* check ref tag */
3480 if (cmd->e_ref_tag != cmd->a_ref_tag) {
3481 ql_dbg(ql_dbg_tgt_dif, vha, 0xe00e,
3482 "Ref Tag ERR: cdb[%x] lba[%llx %llx] blks[%x] [Actual|Expected] Ref[%x|%x], App[%x|%x], Guard[%x|%x] cmd=%p ox_id[%04x] ",
3483 cmd->cdb[0], lba, (lba+cmd->num_blks), cmd->num_blks,
3484 cmd->a_ref_tag, cmd->e_ref_tag, cmd->a_app_tag,
3485 cmd->e_app_tag, cmd->a_guard, cmd->e_guard, cmd,
3486 cmd->atio.u.isp24.fcp_hdr.ox_id);
3487
3488 cmd->dif_err_code = DIF_ERR_REF;
3489 scsi_status = SAM_STAT_CHECK_CONDITION;
3490 sense_key = ABORTED_COMMAND;
3491 asc = 0x10;
3492 ascq = 0x3;
3493 goto out;
3494 }
3495
3496 /* check guard */
3497 if (cmd->e_guard != cmd->a_guard) {
3498 ql_dbg(ql_dbg_tgt_dif, vha, 0xe012,
3499 "Guard ERR: cdb[%x] lba[%llx %llx] blks[%x] [Actual|Expected] Ref[%x|%x], App[%x|%x], Guard [%x|%x] cmd=%p ox_id[%04x]",
3500 cmd->cdb[0], lba, (lba+cmd->num_blks), cmd->num_blks,
3501 cmd->a_ref_tag, cmd->e_ref_tag, cmd->a_app_tag,
3502 cmd->e_app_tag, cmd->a_guard, cmd->e_guard, cmd,
3503 cmd->atio.u.isp24.fcp_hdr.ox_id);
3504
3505 cmd->dif_err_code = DIF_ERR_GRD;
3506 scsi_status = SAM_STAT_CHECK_CONDITION;
3507 sense_key = ABORTED_COMMAND;
3508 asc = 0x10;
3509 ascq = 0x1;
3510 }
3511 out:
3512 switch (cmd->state) {
3513 case QLA_TGT_STATE_NEED_DATA:
3514 /* handle_data will load DIF error code */
3515 cmd->state = QLA_TGT_STATE_DATA_IN;
3516 vha->hw->tgt.tgt_ops->handle_data(cmd);
3517 break;
3518 default:
3519 spin_lock_irqsave(&cmd->cmd_lock, flags);
3520 if (cmd->aborted) {
3521 spin_unlock_irqrestore(&cmd->cmd_lock, flags);
3522 vha->hw->tgt.tgt_ops->free_cmd(cmd);
3523 break;
3524 }
3525 spin_unlock_irqrestore(&cmd->cmd_lock, flags);
3526
3527 qlt_send_resp_ctio(qpair, cmd, scsi_status, sense_key, asc,
3528 ascq);
3529 /* Assume the SCSI status gets out on the wire;
3530 * do not wait for completion.
3531 */
3532 vha->hw->tgt.tgt_ops->free_cmd(cmd);
3533 break;
3534 }
3535 }
3536
3537 /* If hardware_lock held on entry, might drop it, then reacquire */
3538 /* This function sends the appropriate CTIO to ISP 2xxx or 24xx */
3539 static int __qlt_send_term_imm_notif(struct scsi_qla_host *vha,
3540 struct imm_ntfy_from_isp *ntfy)
3541 {
3542 struct nack_to_isp *nack;
3543 struct qla_hw_data *ha = vha->hw;
3544 request_t *pkt;
3545 int ret = 0;
3546
3547 ql_dbg(ql_dbg_tgt_tmr, vha, 0xe01c,
3548 "Sending TERM ELS CTIO (ha=%p)\n", ha);
3549
3550 pkt = (request_t *)qla2x00_alloc_iocbs(vha, NULL);
3551 if (pkt == NULL) {
3552 ql_dbg(ql_dbg_tgt, vha, 0xe080,
3553 "qla_target(%d): %s failed: unable to allocate "
3554 "request packet\n", vha->vp_idx, __func__);
3555 return -ENOMEM;
3556 }
3557
3558 pkt->entry_type = NOTIFY_ACK_TYPE;
3559 pkt->entry_count = 1;
3560 pkt->handle = QLA_TGT_SKIP_HANDLE;
3561
3562 nack = (struct nack_to_isp *)pkt;
3563 nack->ox_id = ntfy->ox_id;
3564
3565 nack->u.isp24.nport_handle = ntfy->u.isp24.nport_handle;
3566 if (le16_to_cpu(ntfy->u.isp24.status) == IMM_NTFY_ELS) {
3567 nack->u.isp24.flags = ntfy->u.isp24.flags &
3568 __constant_cpu_to_le32(NOTIFY24XX_FLAGS_PUREX_IOCB);
3569 }
3570
3571 /* terminate */
3572 nack->u.isp24.flags |=
3573 __constant_cpu_to_le16(NOTIFY_ACK_FLAGS_TERMINATE);
3574
3575 nack->u.isp24.srr_rx_id = ntfy->u.isp24.srr_rx_id;
3576 nack->u.isp24.status = ntfy->u.isp24.status;
3577 nack->u.isp24.status_subcode = ntfy->u.isp24.status_subcode;
3578 nack->u.isp24.fw_handle = ntfy->u.isp24.fw_handle;
3579 nack->u.isp24.exchange_address = ntfy->u.isp24.exchange_address;
3580 nack->u.isp24.srr_rel_offs = ntfy->u.isp24.srr_rel_offs;
3581 nack->u.isp24.srr_ui = ntfy->u.isp24.srr_ui;
3582 nack->u.isp24.vp_index = ntfy->u.isp24.vp_index;
3583
3584 qla2x00_start_iocbs(vha, vha->req);
3585 return ret;
3586 }
3587
3588 static void qlt_send_term_imm_notif(struct scsi_qla_host *vha,
3589 struct imm_ntfy_from_isp *imm, int ha_locked)
3590 {
3591 int rc;
3592
3593 WARN_ON_ONCE(!ha_locked);
3594 rc = __qlt_send_term_imm_notif(vha, imm);
3595 pr_debug("rc = %d\n", rc);
3596 }
3597
3598 /*
3599 * If hardware_lock held on entry, might drop it, then reacquire
3600 * This function sends the appropriate CTIO to ISP 2xxx or 24xx
3601 */
3602 static int __qlt_send_term_exchange(struct qla_qpair *qpair,
3603 struct qla_tgt_cmd *cmd,
3604 struct atio_from_isp *atio)
3605 {
3606 struct scsi_qla_host *vha = qpair->vha;
3607 struct ctio7_to_24xx *ctio24;
3608 struct qla_hw_data *ha = vha->hw;
3609 request_t *pkt;
3610 int ret = 0;
3611 uint16_t temp;
3612
3613 ql_dbg(ql_dbg_tgt, vha, 0xe009, "Sending TERM EXCH CTIO (ha=%p)\n", ha);
3614
3615 if (cmd)
3616 vha = cmd->vha;
3617
3618 pkt = (request_t *)qla2x00_alloc_iocbs_ready(qpair, NULL);
3619 if (pkt == NULL) {
3620 ql_dbg(ql_dbg_tgt, vha, 0xe050,
3621 "qla_target(%d): %s failed: unable to allocate "
3622 "request packet\n", vha->vp_idx, __func__);
3623 return -ENOMEM;
3624 }
3625
3626 if (cmd != NULL) {
3627 if (cmd->state < QLA_TGT_STATE_PROCESSED) {
3628 ql_dbg(ql_dbg_tgt, vha, 0xe051,
3629 "qla_target(%d): Terminating cmd %p with "
3630 "incorrect state %d\n", vha->vp_idx, cmd,
3631 cmd->state);
3632 } else
3633 ret = 1;
3634 }
3635
3636 qpair->tgt_counters.num_term_xchg_sent++;
3637 pkt->entry_count = 1;
3638 pkt->handle = QLA_TGT_SKIP_HANDLE | CTIO_COMPLETION_HANDLE_MARK;
3639
3640 ctio24 = (struct ctio7_to_24xx *)pkt;
3641 ctio24->entry_type = CTIO_TYPE7;
3642 ctio24->nport_handle = CTIO7_NHANDLE_UNRECOGNIZED;
3643 ctio24->timeout = cpu_to_le16(QLA_TGT_TIMEOUT);
3644 ctio24->vp_index = vha->vp_idx;
3645 ctio24->initiator_id = be_id_to_le(atio->u.isp24.fcp_hdr.s_id);
3646 ctio24->exchange_addr = atio->u.isp24.exchange_addr;
3647 temp = (atio->u.isp24.attr << 9) | CTIO7_FLAGS_STATUS_MODE_1 |
3648 CTIO7_FLAGS_TERMINATE;
3649 ctio24->u.status1.flags = cpu_to_le16(temp);
3650 temp = be16_to_cpu(atio->u.isp24.fcp_hdr.ox_id);
3651 ctio24->u.status1.ox_id = cpu_to_le16(temp);
3652
3653 /* Memory Barrier */
3654 wmb();
3655 if (qpair->reqq_start_iocbs)
3656 qpair->reqq_start_iocbs(qpair);
3657 else
3658 qla2x00_start_iocbs(vha, qpair->req);
3659 return ret;
3660 }
3661
3662 static void qlt_send_term_exchange(struct qla_qpair *qpair,
3663 struct qla_tgt_cmd *cmd, struct atio_from_isp *atio, int ha_locked,
3664 int ul_abort)
3665 {
3666 struct scsi_qla_host *vha;
3667 unsigned long flags = 0;
3668 int rc;
3669
3670 /* With NPIV, the cmd may belong to a different (vport) vha */
3671 if (cmd)
3672 vha = cmd->vha;
3673 else
3674 vha = qpair->vha;
3675
3676 if (ha_locked) {
3677 rc = __qlt_send_term_exchange(qpair, cmd, atio);
3678 if (rc == -ENOMEM)
3679 qlt_alloc_qfull_cmd(vha, atio, 0, 0);
3680 goto done;
3681 }
3682 spin_lock_irqsave(qpair->qp_lock_ptr, flags);
3683 rc = __qlt_send_term_exchange(qpair, cmd, atio);
3684 if (rc == -ENOMEM)
3685 qlt_alloc_qfull_cmd(vha, atio, 0, 0);
3686
3687 done:
3688 if (cmd && !ul_abort && !cmd->aborted) {
3689 if (cmd->sg_mapped)
3690 qlt_unmap_sg(vha, cmd);
3691 vha->hw->tgt.tgt_ops->free_cmd(cmd);
3692 }
3693
3694 if (!ha_locked)
3695 spin_unlock_irqrestore(qpair->qp_lock_ptr, flags);
3696
3697 return;
3698 }
3699
3700 static void qlt_init_term_exchange(struct scsi_qla_host *vha)
3701 {
3702 struct list_head free_list;
3703 struct qla_tgt_cmd *cmd, *tcmd;
3704
3705 vha->hw->tgt.leak_exchg_thresh_hold =
3706 (vha->hw->cur_fw_xcb_count/100) * LEAK_EXCHG_THRESH_HOLD_PERCENT;
3707
3708 cmd = tcmd = NULL;
3709 if (!list_empty(&vha->hw->tgt.q_full_list)) {
3710 INIT_LIST_HEAD(&free_list);
3711 list_splice_init(&vha->hw->tgt.q_full_list, &free_list);
3712
3713 list_for_each_entry_safe(cmd, tcmd, &free_list, cmd_list) {
3714 list_del(&cmd->cmd_list);
3715 /* This cmd was never sent to TCM. There is no need
3716 * to schedule free or call free_cmd
3717 */
3718 qlt_free_cmd(cmd);
3719 vha->hw->tgt.num_qfull_cmds_alloc--;
3720 }
3721 }
3722 vha->hw->tgt.num_qfull_cmds_dropped = 0;
3723 }
3724
3725 static void qlt_chk_exch_leak_thresh_hold(struct scsi_qla_host *vha)
3726 {
3727 uint32_t total_leaked;
3728
3729 total_leaked = vha->hw->tgt.num_qfull_cmds_dropped;
3730
3731 if (vha->hw->tgt.leak_exchg_thresh_hold &&
3732 (total_leaked > vha->hw->tgt.leak_exchg_thresh_hold)) {
3733
3734 ql_dbg(ql_dbg_tgt, vha, 0xe079,
3735 "Chip reset due to exchange starvation: %d/%d.\n",
3736 total_leaked, vha->hw->cur_fw_xcb_count);
3737
3738 if (IS_P3P_TYPE(vha->hw))
3739 set_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags);
3740 else
3741 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
3742 qla2xxx_wake_dpc(vha);
3743 }
3744
3745 }
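/*
 * Worked example of the leak threshold set up in
 * qlt_init_term_exchange(), assuming LEAK_EXCHG_THRESH_HOLD_PERCENT
 * is 75 (per qla_target.h): with cur_fw_xcb_count = 4096 firmware
 * exchanges, integer arithmetic gives
 *
 *	(4096 / 100) * 75 = 40 * 75 = 3000
 *
 * so a chip reset is requested once more than 3000 exchanges have
 * been dropped (leaked) on the q_full path.
 */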
3746
3747 int qlt_abort_cmd(struct qla_tgt_cmd *cmd)
3748 {
3749 struct qla_tgt *tgt = cmd->tgt;
3750 struct scsi_qla_host *vha = tgt->vha;
3751 struct se_cmd *se_cmd = &cmd->se_cmd;
3752 unsigned long flags;
3753
3754 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf014,
3755 "qla_target(%d): terminating exchange for aborted cmd=%p "
3756 "(se_cmd=%p, tag=%llu)", vha->vp_idx, cmd, &cmd->se_cmd,
3757 se_cmd->tag);
3758
3759 spin_lock_irqsave(&cmd->cmd_lock, flags);
3760 if (cmd->aborted) {
3761 spin_unlock_irqrestore(&cmd->cmd_lock, flags);
3762 /*
3763 * It's normal to see 2 calls in this path:
3764 * 1) XFER Rdy completion + CMD_T_ABORT
3765 * 2) TCM TMR - drain_state_list
3766 */
3767 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf016,
3768 "multiple abort. %p transport_state %x, t_state %x, "
3769 "se_cmd_flags %x\n", cmd, cmd->se_cmd.transport_state,
3770 cmd->se_cmd.t_state, cmd->se_cmd.se_cmd_flags);
3771 return EIO;
3772 }
3773 cmd->aborted = 1;
3774 cmd->trc_flags |= TRC_ABORT;
3775 spin_unlock_irqrestore(&cmd->cmd_lock, flags);
3776
3777 qlt_send_term_exchange(cmd->qpair, cmd, &cmd->atio, 0, 1);
3778 return 0;
3779 }
3780 EXPORT_SYMBOL(qlt_abort_cmd);
3781
3782 void qlt_free_cmd(struct qla_tgt_cmd *cmd)
3783 {
3784 struct fc_port *sess = cmd->sess;
3785
3786 ql_dbg(ql_dbg_tgt, cmd->vha, 0xe074,
3787 "%s: se_cmd[%p] ox_id %04x\n",
3788 __func__, &cmd->se_cmd,
3789 be16_to_cpu(cmd->atio.u.isp24.fcp_hdr.ox_id));
3790
3791 BUG_ON(cmd->cmd_in_wq);
3792
3793 if (cmd->sg_mapped)
3794 qlt_unmap_sg(cmd->vha, cmd);
3795
3796 if (!cmd->q_full)
3797 qlt_decr_num_pend_cmds(cmd->vha);
3798
3799 BUG_ON(cmd->sg_mapped);
3800 cmd->jiffies_at_free = get_jiffies_64();
3801 if (unlikely(cmd->free_sg))
3802 kfree(cmd->sg);
3803
3804 if (!sess || !sess->se_sess) {
3805 WARN_ON(1);
3806 return;
3807 }
3809 target_free_tag(sess->se_sess, &cmd->se_cmd);
3810 }
3811 EXPORT_SYMBOL(qlt_free_cmd);
3812
3813 /*
3814 * ha->hardware_lock supposed to be held on entry. Might drop it, then reacquire
3815 */
3816 static int qlt_term_ctio_exchange(struct qla_qpair *qpair, void *ctio,
3817 struct qla_tgt_cmd *cmd, uint32_t status)
3818 {
3819 int term = 0;
3820 struct scsi_qla_host *vha = qpair->vha;
3821
3822 if (cmd->se_cmd.prot_op)
3823 ql_dbg(ql_dbg_tgt_dif, vha, 0xe013,
3824 "Term DIF cmd: lba[0x%llx|%lld] len[0x%x] "
3825 "se_cmd=%p tag[%x] op %#x/%s",
3826 cmd->lba, cmd->lba,
3827 cmd->num_blks, &cmd->se_cmd,
3828 cmd->atio.u.isp24.exchange_addr,
3829 cmd->se_cmd.prot_op,
3830 prot_op_str(cmd->se_cmd.prot_op));
3831
3832 if (ctio != NULL) {
3833 struct ctio7_from_24xx *c = (struct ctio7_from_24xx *)ctio;
3834
3835 term = !(c->flags &
3836 cpu_to_le16(OF_TERM_EXCH));
3837 } else
3838 term = 1;
3839
3840 if (term)
3841 qlt_send_term_exchange(qpair, cmd, &cmd->atio, 1, 0);
3842
3843 return term;
3844 }
3845
3846
3847 /* ha->hardware_lock supposed to be held on entry */
3848 static void *qlt_ctio_to_cmd(struct scsi_qla_host *vha,
3849 struct rsp_que *rsp, uint32_t handle, void *ctio)
3850 {
3851 void *cmd = NULL;
3852 struct req_que *req;
3853 int qid = GET_QID(handle);
3854 uint32_t h = handle & ~QLA_TGT_HANDLE_MASK;
3855
3856 if (unlikely(h == QLA_TGT_SKIP_HANDLE))
3857 return NULL;
3858
3859 if (qid == rsp->req->id) {
3860 req = rsp->req;
3861 } else if (vha->hw->req_q_map[qid]) {
3862 ql_dbg(ql_dbg_tgt_mgt, vha, 0x1000a,
3863 "qla_target(%d): CTIO completion with different QID %d handle %x\n",
3864 vha->vp_idx, rsp->id, handle);
3865 req = vha->hw->req_q_map[qid];
3866 } else {
3867 return NULL;
3868 }
3869
3870 h &= QLA_CMD_HANDLE_MASK;
3871
3872 if (h != QLA_TGT_NULL_HANDLE) {
3873 if (unlikely(h >= req->num_outstanding_cmds)) {
3874 ql_dbg(ql_dbg_tgt, vha, 0xe052,
3875 "qla_target(%d): Wrong handle %x received\n",
3876 vha->vp_idx, handle);
3877 return NULL;
3878 }
3879
3880 cmd = (void *) req->outstanding_cmds[h];
3881 if (unlikely(cmd == NULL)) {
3882 ql_dbg(ql_dbg_async, vha, 0xe053,
3883 "qla_target(%d): Suspicious: unable to find the command with handle %x req->id %d rsp->id %d\n",
3884 vha->vp_idx, handle, req->id, rsp->id);
3885 return NULL;
3886 }
3887 req->outstanding_cmds[h] = NULL;
3888 } else if (ctio != NULL) {
3889 /* We can't get loop ID from CTIO7 */
3890 ql_dbg(ql_dbg_tgt, vha, 0xe054,
3891 "qla_target(%d): Wrong CTIO received: QLA24xx doesn't "
3892 "support NULL handles\n", vha->vp_idx);
3893 return NULL;
3894 }
3895
3896 return cmd;
3897 }
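/*
 * The completion handle decoded above is a packed value: GET_QID()
 * extracts the request-queue id (from the upper bits, assuming the
 * usual mask layout in qla_def.h), while the low bits left after
 * masking with QLA_CMD_HANDLE_MASK index into
 * req->outstanding_cmds[].  QLA_TGT_SKIP_HANDLE and
 * QLA_TGT_NULL_HANDLE are sentinels for IOCBs that never had a
 * driver command attached, e.g. terminate exchanges sent with
 * QLA_TGT_SKIP_HANDLE | CTIO_COMPLETION_HANDLE_MARK.
 */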
3898
3899 /*
3900 * ha->hardware_lock supposed to be held on entry. Might drop it, then reacquire
3901 */
3902 static void qlt_do_ctio_completion(struct scsi_qla_host *vha,
3903 struct rsp_que *rsp, uint32_t handle, uint32_t status, void *ctio)
3904 {
3905 struct qla_hw_data *ha = vha->hw;
3906 struct se_cmd *se_cmd;
3907 struct qla_tgt_cmd *cmd;
3908 struct qla_qpair *qpair = rsp->qpair;
3909
3910 if (handle & CTIO_INTERMEDIATE_HANDLE_MARK) {
3911 /* This can happen only in case of an error/reset/abort */
3912 if (status != CTIO_SUCCESS) {
3913 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf01d,
3914 "Intermediate CTIO received"
3915 " (status %x)\n", status);
3916 }
3917 return;
3918 }
3919
3920 cmd = qlt_ctio_to_cmd(vha, rsp, handle, ctio);
3921 if (cmd == NULL)
3922 return;
3923
3924 se_cmd = &cmd->se_cmd;
3925 cmd->cmd_sent_to_fw = 0;
3926
3927 qlt_unmap_sg(vha, cmd);
3928
3929 if (unlikely(status != CTIO_SUCCESS)) {
3930 switch (status & 0xFFFF) {
3931 case CTIO_INVALID_RX_ID:
3932 if (printk_ratelimit())
3933 dev_info(&vha->hw->pdev->dev,
3934 "qla_target(%d): CTIO with INVALID_RX_ID ATIO attr %x CTIO Flags %x|%x\n",
3935 vha->vp_idx, cmd->atio.u.isp24.attr,
3936 ((cmd->ctio_flags >> 9) & 0xf),
3937 cmd->ctio_flags);
3938
3939 break;
3940 case CTIO_LIP_RESET:
3941 case CTIO_TARGET_RESET:
3942 case CTIO_ABORTED:
3943 /* driver requested abort via Terminate exchange */
3944 case CTIO_TIMEOUT:
3945 /* They are OK */
3946 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf058,
3947 "qla_target(%d): CTIO with "
3948 "status %#x received, state %x, se_cmd %p, "
3949 "(LIP_RESET=e, ABORTED=2, TARGET_RESET=17, "
3950 "TIMEOUT=b, INVALID_RX_ID=8)\n", vha->vp_idx,
3951 status, cmd->state, se_cmd);
3952 break;
3953
3954 case CTIO_PORT_LOGGED_OUT:
3955 case CTIO_PORT_UNAVAILABLE:
3956 {
3957 int logged_out =
3958 (status & 0xFFFF) == CTIO_PORT_LOGGED_OUT;
3959
3960 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf059,
3961 "qla_target(%d): CTIO with %s status %x "
3962 "received (state %x, se_cmd %p)\n", vha->vp_idx,
3963 logged_out ? "PORT LOGGED OUT" : "PORT UNAVAILABLE",
3964 status, cmd->state, se_cmd);
3965
3966 if (logged_out && cmd->sess) {
3967 /*
3968 * Session is already logged out, but we need
3969 * to notify initiator, who's not aware of this
3970 */
3971 cmd->sess->send_els_logo = 1;
3972 ql_dbg(ql_dbg_disc, vha, 0x20f8,
3973 "%s %d %8phC post del sess\n",
3974 __func__, __LINE__, cmd->sess->port_name);
3975
3976 qlt_schedule_sess_for_deletion(cmd->sess);
3977 }
3978 break;
3979 }
3980 case CTIO_DIF_ERROR: {
3981 struct ctio_crc_from_fw *crc =
3982 (struct ctio_crc_from_fw *)ctio;
3983 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf073,
3984 "qla_target(%d): CTIO with DIF_ERROR status %x "
3985 "received (state %x, ulp_cmd %p) actual_dif[0x%llx] "
3986 "expect_dif[0x%llx]\n",
3987 vha->vp_idx, status, cmd->state, se_cmd,
3988 *((u64 *)&crc->actual_dif[0]),
3989 *((u64 *)&crc->expected_dif[0]));
3990
3991 qlt_handle_dif_error(qpair, cmd, ctio);
3992 return;
3993 }
3994 default:
3995 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf05b,
3996 "qla_target(%d): CTIO with error status 0x%x received (state %x, se_cmd %p\n",
3997 vha->vp_idx, status, cmd->state, se_cmd);
3998 break;
3999 }
4000
4001
4002 /* "cmd->aborted" means
4003 * cmd is already aborted/terminated, we don't
4004 * need to terminate again. The exchange is already
4005 * cleaned up/freed at FW level. Just cleanup at driver
4006 * level.
4007 */
4008 if ((cmd->state != QLA_TGT_STATE_NEED_DATA) &&
4009 (!cmd->aborted)) {
4010 cmd->trc_flags |= TRC_CTIO_ERR;
4011 if (qlt_term_ctio_exchange(qpair, ctio, cmd, status))
4012 return;
4013 }
4014 }
4015
4016 if (cmd->state == QLA_TGT_STATE_PROCESSED) {
4017 cmd->trc_flags |= TRC_CTIO_DONE;
4018 } else if (cmd->state == QLA_TGT_STATE_NEED_DATA) {
4019 cmd->state = QLA_TGT_STATE_DATA_IN;
4020
4021 if (status == CTIO_SUCCESS)
4022 cmd->write_data_transferred = 1;
4023
4024 ha->tgt.tgt_ops->handle_data(cmd);
4025 return;
4026 } else if (cmd->aborted) {
4027 cmd->trc_flags |= TRC_CTIO_ABORTED;
4028 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf01e,
4029 "Aborted command %p (tag %lld) finished\n", cmd, se_cmd->tag);
4030 } else {
4031 cmd->trc_flags |= TRC_CTIO_STRANGE;
4032 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf05c,
4033 "qla_target(%d): A command in state (%d) should "
4034 "not return a CTIO complete\n", vha->vp_idx, cmd->state);
4035 }
4036
4037 if (unlikely(status != CTIO_SUCCESS) &&
4038 !cmd->aborted) {
4039 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf01f, "Finishing failed CTIO\n");
4040 dump_stack();
4041 }
4042
4043 ha->tgt.tgt_ops->free_cmd(cmd);
4044 }
4045
4046 static inline int qlt_get_fcp_task_attr(struct scsi_qla_host *vha,
4047 uint8_t task_codes)
4048 {
4049 int fcp_task_attr;
4050
4051 switch (task_codes) {
4052 case ATIO_SIMPLE_QUEUE:
4053 fcp_task_attr = TCM_SIMPLE_TAG;
4054 break;
4055 case ATIO_HEAD_OF_QUEUE:
4056 fcp_task_attr = TCM_HEAD_TAG;
4057 break;
4058 case ATIO_ORDERED_QUEUE:
4059 fcp_task_attr = TCM_ORDERED_TAG;
4060 break;
4061 case ATIO_ACA_QUEUE:
4062 fcp_task_attr = TCM_ACA_TAG;
4063 break;
4064 case ATIO_UNTAGGED:
4065 fcp_task_attr = TCM_SIMPLE_TAG;
4066 break;
4067 default:
4068 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf05d,
4069 "qla_target: unknown task code %x, use ORDERED instead\n",
4070 task_codes);
4071 fcp_task_attr = TCM_ORDERED_TAG;
4072 break;
4073 }
4074
4075 return fcp_task_attr;
4076 }
4077
4078 /*
4079 * Process context for I/O path into tcm_qla2xxx code
4080 */
4081 static void __qlt_do_work(struct qla_tgt_cmd *cmd)
4082 {
4083 scsi_qla_host_t *vha = cmd->vha;
4084 struct qla_hw_data *ha = vha->hw;
4085 struct fc_port *sess = cmd->sess;
4086 struct atio_from_isp *atio = &cmd->atio;
4087 unsigned char *cdb;
4088 unsigned long flags;
4089 uint32_t data_length;
4090 int ret, fcp_task_attr, data_dir, bidi = 0;
4091 struct qla_qpair *qpair = cmd->qpair;
4092
4093 cmd->cmd_in_wq = 0;
4094 cmd->trc_flags |= TRC_DO_WORK;
4095
4096 if (cmd->aborted) {
4097 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf082,
4098 "cmd with tag %u is aborted\n",
4099 cmd->atio.u.isp24.exchange_addr);
4100 goto out_term;
4101 }
4102
4103 spin_lock_init(&cmd->cmd_lock);
4104 cdb = &atio->u.isp24.fcp_cmnd.cdb[0];
4105 cmd->se_cmd.tag = atio->u.isp24.exchange_addr;
4106
4107 if (atio->u.isp24.fcp_cmnd.rddata &&
4108 atio->u.isp24.fcp_cmnd.wrdata) {
4109 bidi = 1;
4110 data_dir = DMA_TO_DEVICE;
4111 } else if (atio->u.isp24.fcp_cmnd.rddata)
4112 data_dir = DMA_FROM_DEVICE;
4113 else if (atio->u.isp24.fcp_cmnd.wrdata)
4114 data_dir = DMA_TO_DEVICE;
4115 else
4116 data_dir = DMA_NONE;
4117
4118 fcp_task_attr = qlt_get_fcp_task_attr(vha,
4119 atio->u.isp24.fcp_cmnd.task_attr);
4120 data_length = get_datalen_for_atio(atio);
4121
4122 ret = ha->tgt.tgt_ops->handle_cmd(vha, cmd, cdb, data_length,
4123 fcp_task_attr, data_dir, bidi);
4124 if (ret != 0)
4125 goto out_term;
4126 /*
4127 * Drop extra session reference from qlt_handle_cmd_for_atio().
4128 */
4129 ha->tgt.tgt_ops->put_sess(sess);
4130 return;
4131
4132 out_term:
4133 ql_dbg(ql_dbg_io, vha, 0x3060, "Terminating work cmd %p\n", cmd);
4134 /*
4135 * cmd has not been sent to the target core yet, so pass NULL as the second
4136 * argument to qlt_send_term_exchange() and free the memory here.
4137 */
4138 cmd->trc_flags |= TRC_DO_WORK_ERR;
4139 spin_lock_irqsave(qpair->qp_lock_ptr, flags);
4140 qlt_send_term_exchange(qpair, NULL, &cmd->atio, 1, 0);
4141
4142 qlt_decr_num_pend_cmds(vha);
4143 target_free_tag(sess->se_sess, &cmd->se_cmd);
4144 spin_unlock_irqrestore(qpair->qp_lock_ptr, flags);
4145
4146 ha->tgt.tgt_ops->put_sess(sess);
4147 }
4148
4149 static void qlt_do_work(struct work_struct *work)
4150 {
4151 struct qla_tgt_cmd *cmd = container_of(work, struct qla_tgt_cmd, work);
4152 scsi_qla_host_t *vha = cmd->vha;
4153 unsigned long flags;
4154
4155 spin_lock_irqsave(&vha->cmd_list_lock, flags);
4156 list_del(&cmd->cmd_list);
4157 spin_unlock_irqrestore(&vha->cmd_list_lock, flags);
4158
4159 __qlt_do_work(cmd);
4160 }
4161
4162 void qlt_clr_qp_table(struct scsi_qla_host *vha)
4163 {
4164 unsigned long flags;
4165 struct qla_hw_data *ha = vha->hw;
4166 struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
4167 void *node;
4168 u64 key = 0;
4169
4170 ql_log(ql_log_info, vha, 0x706c,
4171 "User update Number of Active Qpairs %d\n",
4172 ha->tgt.num_act_qpairs);
4173
4174 spin_lock_irqsave(&ha->tgt.atio_lock, flags);
4175
4176 btree_for_each_safe64(&tgt->lun_qpair_map, key, node)
4177 btree_remove64(&tgt->lun_qpair_map, key);
4178
4179 ha->base_qpair->lun_cnt = 0;
4180 for (key = 0; key < ha->max_qpairs; key++)
4181 if (ha->queue_pair_map[key])
4182 ha->queue_pair_map[key]->lun_cnt = 0;
4183
4184 spin_unlock_irqrestore(&ha->tgt.atio_lock, flags);
4185 }
4186
4187 static void qlt_assign_qpair(struct scsi_qla_host *vha,
4188 struct qla_tgt_cmd *cmd)
4189 {
4190 struct qla_qpair *qpair, *qp;
4191 struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
4192 struct qla_qpair_hint *h;
4193
4194 if (vha->flags.qpairs_available) {
4195 h = btree_lookup64(&tgt->lun_qpair_map, cmd->unpacked_lun);
4196 if (unlikely(!h)) {
4197 /* spread the lun-to-qpair mapping evenly */
4198 int lcnt = 0, rc;
4199 struct scsi_qla_host *base_vha =
4200 pci_get_drvdata(vha->hw->pdev);
4201
4202 qpair = vha->hw->base_qpair;
4203 if (qpair->lun_cnt == 0) {
4204 qpair->lun_cnt++;
4205 h = qla_qpair_to_hint(tgt, qpair);
4206 BUG_ON(!h);
4207 rc = btree_insert64(&tgt->lun_qpair_map,
4208 cmd->unpacked_lun, h, GFP_ATOMIC);
4209 if (rc) {
4210 qpair->lun_cnt--;
4211 ql_log(ql_log_info, vha, 0xd037,
4212 "Unable to insert lun %llx into lun_qpair_map\n",
4213 cmd->unpacked_lun);
4214 }
4215 goto out;
4216 } else {
4217 lcnt = qpair->lun_cnt;
4218 }
4219
4220 h = NULL;
4221 list_for_each_entry(qp, &base_vha->qp_list,
4222 qp_list_elem) {
4223 if (qp->lun_cnt == 0) {
4224 qp->lun_cnt++;
4225 h = qla_qpair_to_hint(tgt, qp);
4226 BUG_ON(!h);
4227 rc = btree_insert64(&tgt->lun_qpair_map,
4228 cmd->unpacked_lun, h, GFP_ATOMIC);
4229 if (rc) {
4230 qp->lun_cnt--;
4231 ql_log(ql_log_info, vha, 0xd038,
4232 "Unable to insert lun %llx into lun_qpair_map\n",
4233 cmd->unpacked_lun);
4234 }
4235 qpair = qp;
4236 goto out;
4237 } else {
4238 if (qp->lun_cnt < lcnt) {
4239 lcnt = qp->lun_cnt;
4240 qpair = qp;
4241 continue;
4242 }
4243 }
4244 }
4245 BUG_ON(!qpair);
4246 qpair->lun_cnt++;
4247 h = qla_qpair_to_hint(tgt, qpair);
4248 BUG_ON(!h);
4249 rc = btree_insert64(&tgt->lun_qpair_map,
4250 cmd->unpacked_lun, h, GFP_ATOMIC);
4251 if (rc) {
4252 qpair->lun_cnt--;
4253 ql_log(ql_log_info, vha, 0xd039,
4254 "Unable to insert lun %llx into lun_qpair_map\n",
4255 cmd->unpacked_lun);
4256 }
4257 }
4258 } else {
4259 h = &tgt->qphints[0];
4260 }
4261 out:
4262 cmd->qpair = h->qpair;
4263 cmd->se_cmd.cpuid = h->cpuid;
4264 }
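/*
 * LUN-to-qpair placement sketch, matching the logic above: the first
 * lookup of a LUN walks the qpairs and picks the one with the
 * smallest lun_cnt, preferring any qpair that is still empty; the
 * winner is cached in tgt->lun_qpair_map (a btree keyed by
 * unpacked_lun), so subsequent commands for the same LUN reuse the
 * same qpair and its bound CPU.  A failed btree_insert64() only
 * costs the caching, not correctness: the cmd still gets a qpair.
 */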
4265
4266 static struct qla_tgt_cmd *qlt_get_tag(scsi_qla_host_t *vha,
4267 struct fc_port *sess,
4268 struct atio_from_isp *atio)
4269 {
4270 struct se_session *se_sess = sess->se_sess;
4271 struct qla_tgt_cmd *cmd;
4272 int tag, cpu;
4273
4274 tag = sbitmap_queue_get(&se_sess->sess_tag_pool, &cpu);
4275 if (tag < 0)
4276 return NULL;
4277
4278 cmd = &((struct qla_tgt_cmd *)se_sess->sess_cmd_map)[tag];
4279 memset(cmd, 0, sizeof(struct qla_tgt_cmd));
4280 cmd->cmd_type = TYPE_TGT_CMD;
4281 memcpy(&cmd->atio, atio, sizeof(*atio));
4282 cmd->state = QLA_TGT_STATE_NEW;
4283 cmd->tgt = vha->vha_tgt.qla_tgt;
4284 qlt_incr_num_pend_cmds(vha);
4285 cmd->vha = vha;
4286 cmd->se_cmd.map_tag = tag;
4287 cmd->se_cmd.map_cpu = cpu;
4288 cmd->sess = sess;
4289 cmd->loop_id = sess->loop_id;
4290 cmd->conf_compl_supported = sess->conf_compl_supported;
4291
4292 cmd->trc_flags = 0;
4293 cmd->jiffies_at_alloc = get_jiffies_64();
4294
4295 cmd->unpacked_lun = scsilun_to_int(
4296 (struct scsi_lun *)&atio->u.isp24.fcp_cmnd.lun);
4297 qlt_assign_qpair(vha, cmd);
4298 cmd->reset_count = vha->hw->base_qpair->chip_reset;
4299 cmd->vp_idx = vha->vp_idx;
4300
4301 return cmd;
4302 }
4303
4304 /* ha->hardware_lock supposed to be held on entry */
4305 static int qlt_handle_cmd_for_atio(struct scsi_qla_host *vha,
4306 struct atio_from_isp *atio)
4307 {
4308 struct qla_hw_data *ha = vha->hw;
4309 struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
4310 struct fc_port *sess;
4311 struct qla_tgt_cmd *cmd;
4312 unsigned long flags;
4313 port_id_t id;
4314
4315 if (unlikely(tgt->tgt_stop)) {
4316 ql_dbg(ql_dbg_io, vha, 0x3061,
4317 "New command while device %p is shutting down\n", tgt);
4318 return -ENODEV;
4319 }
4320
4321 id = be_to_port_id(atio->u.isp24.fcp_hdr.s_id);
4322 if (IS_SW_RESV_ADDR(id))
4323 return -EBUSY;
4324
4325 sess = ha->tgt.tgt_ops->find_sess_by_s_id(vha, atio->u.isp24.fcp_hdr.s_id);
4326 if (unlikely(!sess))
4327 return -EFAULT;
4328
4329 /* Another WWN used to have our s_id. Our PLOGI scheduled its
4330 * session deletion, but it's still in sess_del_work wq */
4331 if (sess->deleted) {
4332 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf002,
4333 "New command while old session %p is being deleted\n",
4334 sess);
4335 return -EFAULT;
4336 }
4337
4338 /*
4339 * Do kref_get() before returning + dropping qla_hw_data->hardware_lock.
4340 */
4341 if (!kref_get_unless_zero(&sess->sess_kref)) {
4342 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf004,
4343 "%s: kref_get fail, %8phC oxid %x \n",
4344 __func__, sess->port_name,
4345 be16_to_cpu(atio->u.isp24.fcp_hdr.ox_id));
4346 return -EFAULT;
4347 }
4348
4349 cmd = qlt_get_tag(vha, sess, atio);
4350 if (!cmd) {
4351 ql_dbg(ql_dbg_io, vha, 0x3062,
4352 "qla_target(%d): Allocation of cmd failed\n", vha->vp_idx);
4353 ha->tgt.tgt_ops->put_sess(sess);
4354 return -EBUSY;
4355 }
4356
4357 cmd->cmd_in_wq = 1;
4358 cmd->trc_flags |= TRC_NEW_CMD;
4359
4360 spin_lock_irqsave(&vha->cmd_list_lock, flags);
4361 list_add_tail(&cmd->cmd_list, &vha->qla_cmd_list);
4362 spin_unlock_irqrestore(&vha->cmd_list_lock, flags);
4363
4364 INIT_WORK(&cmd->work, qlt_do_work);
4365 if (vha->flags.qpairs_available) {
4366 queue_work_on(cmd->se_cmd.cpuid, qla_tgt_wq, &cmd->work);
4367 } else if (ha->msix_count) {
4368 if (cmd->atio.u.isp24.fcp_cmnd.rddata)
4369 queue_work_on(smp_processor_id(), qla_tgt_wq,
4370 &cmd->work);
4371 else
4372 queue_work_on(cmd->se_cmd.cpuid, qla_tgt_wq,
4373 &cmd->work);
4374 } else {
4375 queue_work(qla_tgt_wq, &cmd->work);
4376 }
4377
4378 return 0;
4379 }
4380
4381 /* ha->hardware_lock supposed to be held on entry */
4382 static int qlt_issue_task_mgmt(struct fc_port *sess, u64 lun,
4383 int fn, void *iocb, int flags)
4384 {
4385 struct scsi_qla_host *vha = sess->vha;
4386 struct qla_hw_data *ha = vha->hw;
4387 struct qla_tgt_mgmt_cmd *mcmd;
4388 struct atio_from_isp *a = (struct atio_from_isp *)iocb;
4389 struct qla_qpair_hint *h = &vha->vha_tgt.qla_tgt->qphints[0];
4390
4391 mcmd = mempool_alloc(qla_tgt_mgmt_cmd_mempool, GFP_ATOMIC);
4392 if (!mcmd) {
4393 ql_dbg(ql_dbg_tgt_tmr, vha, 0x10009,
4394 "qla_target(%d): Allocation of management "
4395 "command failed, some commands and their data could "
4396 "leak\n", vha->vp_idx);
4397 return -ENOMEM;
4398 }
4399 memset(mcmd, 0, sizeof(*mcmd));
4400 mcmd->sess = sess;
4401
4402 if (iocb) {
4403 memcpy(&mcmd->orig_iocb.imm_ntfy, iocb,
4404 sizeof(mcmd->orig_iocb.imm_ntfy));
4405 }
4406 mcmd->tmr_func = fn;
4407 mcmd->flags = flags;
4408 mcmd->reset_count = ha->base_qpair->chip_reset;
4409 mcmd->qpair = h->qpair;
4410 mcmd->vha = vha;
4411 mcmd->se_cmd.cpuid = h->cpuid;
4412 mcmd->unpacked_lun = lun;
4413
4414 switch (fn) {
4415 case QLA_TGT_LUN_RESET:
4416 case QLA_TGT_CLEAR_TS:
4417 case QLA_TGT_ABORT_TS:
4418 abort_cmds_for_lun(vha, lun, a->u.isp24.fcp_hdr.s_id);
4419 /* fall through */
4420 case QLA_TGT_CLEAR_ACA:
4421 h = qlt_find_qphint(vha, mcmd->unpacked_lun);
4422 mcmd->qpair = h->qpair;
4423 mcmd->se_cmd.cpuid = h->cpuid;
4424 break;
4425
4426 case QLA_TGT_TARGET_RESET:
4427 case QLA_TGT_NEXUS_LOSS_SESS:
4428 case QLA_TGT_NEXUS_LOSS:
4429 case QLA_TGT_ABORT_ALL:
4430 default:
4431 /* no-op */
4432 break;
4433 }
4434
4435 INIT_WORK(&mcmd->work, qlt_do_tmr_work);
4436 queue_work_on(mcmd->se_cmd.cpuid, qla_tgt_wq,
4437 &mcmd->work);
4438
4439 return 0;
4440 }
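/*
 * TMF routing note for the switch above: LUN-scoped functions (LUN
 * reset, clear/abort task set, clear ACA) are re-homed via
 * qlt_find_qphint() onto the qpair that owns the LUN, so the TMR
 * runs where that LUN's I/O is queued; target- and nexus-scoped
 * functions stay on qphints[0], the default hint chosen at the top
 * of the function.
 */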
4441
4442 /* ha->hardware_lock supposed to be held on entry */
4443 static int qlt_handle_task_mgmt(struct scsi_qla_host *vha, void *iocb)
4444 {
4445 struct atio_from_isp *a = (struct atio_from_isp *)iocb;
4446 struct qla_hw_data *ha = vha->hw;
4447 struct fc_port *sess;
4448 u64 unpacked_lun;
4449 int fn;
4450 unsigned long flags;
4451
4452 fn = a->u.isp24.fcp_cmnd.task_mgmt_flags;
4453
4454 spin_lock_irqsave(&ha->tgt.sess_lock, flags);
4455 sess = ha->tgt.tgt_ops->find_sess_by_s_id(vha,
4456 a->u.isp24.fcp_hdr.s_id);
4457 spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
4458
4459 unpacked_lun =
4460 scsilun_to_int((struct scsi_lun *)&a->u.isp24.fcp_cmnd.lun);
4461
4462 if (sess == NULL || sess->deleted)
4463 return -EFAULT;
4464
4465 return qlt_issue_task_mgmt(sess, unpacked_lun, fn, iocb, 0);
4466 }
4467
4468 /* ha->hardware_lock supposed to be held on entry */
4469 static int __qlt_abort_task(struct scsi_qla_host *vha,
4470 struct imm_ntfy_from_isp *iocb, struct fc_port *sess)
4471 {
4472 struct atio_from_isp *a = (struct atio_from_isp *)iocb;
4473 struct qla_hw_data *ha = vha->hw;
4474 struct qla_tgt_mgmt_cmd *mcmd;
4475 u64 unpacked_lun;
4476 int rc;
4477
4478 mcmd = mempool_alloc(qla_tgt_mgmt_cmd_mempool, GFP_ATOMIC);
4479 if (mcmd == NULL) {
4480 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf05f,
4481 "qla_target(%d): %s: Allocation of ABORT cmd failed\n",
4482 vha->vp_idx, __func__);
4483 return -ENOMEM;
4484 }
4485 memset(mcmd, 0, sizeof(*mcmd));
4486
4487 mcmd->sess = sess;
4488 memcpy(&mcmd->orig_iocb.imm_ntfy, iocb,
4489 sizeof(mcmd->orig_iocb.imm_ntfy));
4490
4491 unpacked_lun =
4492 scsilun_to_int((struct scsi_lun *)&a->u.isp24.fcp_cmnd.lun);
4493 mcmd->reset_count = ha->base_qpair->chip_reset;
4494 mcmd->tmr_func = QLA_TGT_2G_ABORT_TASK;
4495 mcmd->qpair = ha->base_qpair;
4496
4497 rc = ha->tgt.tgt_ops->handle_tmr(mcmd, unpacked_lun, mcmd->tmr_func,
4498 le16_to_cpu(iocb->u.isp2x.seq_id));
4499 if (rc != 0) {
4500 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf060,
4501 "qla_target(%d): tgt_ops->handle_tmr() failed: %d\n",
4502 vha->vp_idx, rc);
4503 mempool_free(mcmd, qla_tgt_mgmt_cmd_mempool);
4504 return -EFAULT;
4505 }
4506
4507 return 0;
4508 }
4509
4510 /* ha->hardware_lock supposed to be held on entry */
4511 static int qlt_abort_task(struct scsi_qla_host *vha,
4512 struct imm_ntfy_from_isp *iocb)
4513 {
4514 struct qla_hw_data *ha = vha->hw;
4515 struct fc_port *sess;
4516 int loop_id;
4517 unsigned long flags;
4518
4519 loop_id = GET_TARGET_ID(ha, (struct atio_from_isp *)iocb);
4520
4521 spin_lock_irqsave(&ha->tgt.sess_lock, flags);
4522 sess = ha->tgt.tgt_ops->find_sess_by_loop_id(vha, loop_id);
4523 spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
4524
4525 if (sess == NULL) {
4526 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf025,
4527 "qla_target(%d): task abort for unexisting "
4528 "session\n", vha->vp_idx);
4529 return qlt_sched_sess_work(vha->vha_tgt.qla_tgt,
4530 QLA_TGT_SESS_WORK_ABORT, iocb, sizeof(*iocb));
4531 }
4532
4533 return __qlt_abort_task(vha, iocb, sess);
4534 }
4535
4536 void qlt_logo_completion_handler(fc_port_t *fcport, int rc)
4537 {
4538 if (rc != MBS_COMMAND_COMPLETE) {
4539 ql_dbg(ql_dbg_tgt_mgt, fcport->vha, 0xf093,
4540 "%s: se_sess %p / sess %p from"
4541 " port %8phC loop_id %#04x s_id %02x:%02x:%02x"
4542 " LOGO failed: %#x\n",
4543 __func__,
4544 fcport->se_sess,
4545 fcport,
4546 fcport->port_name, fcport->loop_id,
4547 fcport->d_id.b.domain, fcport->d_id.b.area,
4548 fcport->d_id.b.al_pa, rc);
4549 }
4550
4551 fcport->logout_completed = 1;
4552 }
4553
4554 /*
4555 * ha->hardware_lock supposed to be held on entry (to protect tgt->sess_list)
4556 *
4557 * Schedules sessions with matching port_id/loop_id but different wwn for
4558 * deletion. Returns existing session with matching wwn if present.
4559 * Null otherwise.
4560 */
4561 struct fc_port *
4562 qlt_find_sess_invalidate_other(scsi_qla_host_t *vha, uint64_t wwn,
4563 port_id_t port_id, uint16_t loop_id, struct fc_port **conflict_sess)
4564 {
4565 struct fc_port *sess = NULL, *other_sess;
4566 uint64_t other_wwn;
4567
4568 *conflict_sess = NULL;
4569
4570 list_for_each_entry(other_sess, &vha->vp_fcports, list) {
4571
4572 other_wwn = wwn_to_u64(other_sess->port_name);
4573
4574 if (wwn == other_wwn) {
4575 WARN_ON(sess);
4576 sess = other_sess;
4577 continue;
4578 }
4579
4580 /* find other sess with nport_id collision */
4581 if (port_id.b24 == other_sess->d_id.b24) {
4582 if (loop_id != other_sess->loop_id) {
4583 ql_dbg(ql_dbg_tgt_tmr, vha, 0x1000c,
4584 "Invalidating sess %p loop_id %d wwn %llx.\n",
4585 other_sess, other_sess->loop_id, other_wwn);
4586
4587 /*
4588 * logout_on_delete is set by default, but another
4589 * session that has the same s_id/loop_id combo
4590 * might have cleared it when it requested this session's
4591 * deletion, so don't touch it
4592 */
4593 qlt_schedule_sess_for_deletion(other_sess);
4594 } else {
4595 /*
4596 * Another wwn used to have our s_id/loop_id;
4597 * kill the session, but don't free the loop_id
4598 */
4599 ql_dbg(ql_dbg_tgt_tmr, vha, 0xf01b,
4600 "Invalidating sess %p loop_id %d wwn %llx.\n",
4601 other_sess, other_sess->loop_id, other_wwn);
4602
4603 other_sess->keep_nport_handle = 1;
4604 if (other_sess->disc_state != DSC_DELETED)
4605 *conflict_sess = other_sess;
4606 qlt_schedule_sess_for_deletion(other_sess);
4607 }
4608 continue;
4609 }
4610
4611 /* find other sess with nport handle collision */
4612 if ((loop_id == other_sess->loop_id) &&
4613 (loop_id != FC_NO_LOOP_ID)) {
4614 ql_dbg(ql_dbg_tgt_tmr, vha, 0x1000d,
4615 "Invalidating sess %p loop_id %d wwn %llx.\n",
4616 other_sess, other_sess->loop_id, other_wwn);
4617
4618 /* Same loop_id but different s_id:
4619 * OK to kill and log out */
4620 qlt_schedule_sess_for_deletion(other_sess);
4621 }
4622 }
4623
4624 return sess;
4625 }
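/*
 * Collision summary for the loop above:
 * - same WWPN: returned as the pre-existing session (sess);
 * - same port_id, different loop_id: old session scheduled for deletion;
 * - same port_id and loop_id: old session keeps its nport handle, is
 *   reported via *conflict_sess (unless already deleted) and is deleted;
 * - same loop_id, different port_id: old session deleted, handle freed.
 */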
4626
4627 /* Abort any commands for this s_id waiting on qla_tgt_wq workqueue */
4628 static int abort_cmds_for_s_id(struct scsi_qla_host *vha, port_id_t *s_id)
4629 {
4630 struct qla_tgt_sess_op *op;
4631 struct qla_tgt_cmd *cmd;
4632 uint32_t key;
4633 int count = 0;
4634 unsigned long flags;
4635
4636 key = (((u32)s_id->b.domain << 16) |
4637 ((u32)s_id->b.area << 8) |
4638 ((u32)s_id->b.al_pa));
4639
4640 spin_lock_irqsave(&vha->cmd_list_lock, flags);
4641 list_for_each_entry(op, &vha->qla_sess_op_cmd_list, cmd_list) {
4642 uint32_t op_key = sid_to_key(op->atio.u.isp24.fcp_hdr.s_id);
4643
4644 if (op_key == key) {
4645 op->aborted = true;
4646 count++;
4647 }
4648 }
4649
4650 list_for_each_entry(op, &vha->unknown_atio_list, cmd_list) {
4651 uint32_t op_key = sid_to_key(op->atio.u.isp24.fcp_hdr.s_id);
4652
4653 if (op_key == key) {
4654 op->aborted = true;
4655 count++;
4656 }
4657 }
4658
4659 list_for_each_entry(cmd, &vha->qla_cmd_list, cmd_list) {
4660 uint32_t cmd_key = sid_to_key(cmd->atio.u.isp24.fcp_hdr.s_id);
4661
4662 if (cmd_key == key) {
4663 cmd->aborted = 1;
4664 count++;
4665 }
4666 }
4667 spin_unlock_irqrestore(&vha->cmd_list_lock, flags);
4668
4669 return count;
4670 }
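/*
 * The 24-bit key above mirrors sid_to_key(): for example, an s_id of
 * domain=0x01, area=0x02, al_pa=0x03 packs to
 *
 *	(0x01 << 16) | (0x02 << 8) | 0x03 = 0x010203
 *
 * which is why every pending command for an initiator port can be
 * matched with a single integer compare.
 */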
4671
4672 static int qlt_handle_login(struct scsi_qla_host *vha,
4673 struct imm_ntfy_from_isp *iocb)
4674 {
4675 struct fc_port *sess = NULL, *conflict_sess = NULL;
4676 uint64_t wwn;
4677 port_id_t port_id;
4678 uint16_t loop_id, wd3_lo;
4679 int res = 0;
4680 struct qlt_plogi_ack_t *pla;
4681 unsigned long flags;
4682
4683 lockdep_assert_held(&vha->hw->hardware_lock);
4684
4685 wwn = wwn_to_u64(iocb->u.isp24.port_name);
4686
4687 port_id.b.domain = iocb->u.isp24.port_id[2];
4688 port_id.b.area = iocb->u.isp24.port_id[1];
4689 port_id.b.al_pa = iocb->u.isp24.port_id[0];
4690 port_id.b.rsvd_1 = 0;
4691
4692 loop_id = le16_to_cpu(iocb->u.isp24.nport_handle);
4693
4694 /* Mark all stale commands sitting in qla_tgt_wq for deletion */
4695 abort_cmds_for_s_id(vha, &port_id);
4696
4697 if (wwn) {
4698 spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags);
4699 sess = qlt_find_sess_invalidate_other(vha, wwn,
4700 port_id, loop_id, &conflict_sess);
4701 spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
4702 } else {
4703 ql_dbg(ql_dbg_disc, vha, 0xffff,
4704 "%s %d Term INOT due to WWN=0 lid=%d, NportID %06X ",
4705 __func__, __LINE__, loop_id, port_id.b24);
4706 qlt_send_term_imm_notif(vha, iocb, 1);
4707 goto out;
4708 }
4709
4710 if (IS_SW_RESV_ADDR(port_id)) {
4711 res = 1;
4712 goto out;
4713 }
4714
4715 pla = qlt_plogi_ack_find_add(vha, &port_id, iocb);
4716 if (!pla) {
4717 ql_dbg(ql_dbg_disc + ql_dbg_verbose, vha, 0xffff,
4718 "%s %d %8phC Term INOT due to mem alloc fail",
4719 __func__, __LINE__,
4720 iocb->u.isp24.port_name);
4721 qlt_send_term_imm_notif(vha, iocb, 1);
4722 goto out;
4723 }
4724
4725 if (conflict_sess) {
4726 conflict_sess->login_gen++;
4727 qlt_plogi_ack_link(vha, pla, conflict_sess,
4728 QLT_PLOGI_LINK_CONFLICT);
4729 }
4730
4731 if (!sess) {
4732 pla->ref_count++;
4733 ql_dbg(ql_dbg_disc, vha, 0xffff,
4734 "%s %d %8phC post new sess\n",
4735 __func__, __LINE__, iocb->u.isp24.port_name);
4736 if (iocb->u.isp24.status_subcode == ELS_PLOGI)
4737 qla24xx_post_newsess_work(vha, &port_id,
4738 iocb->u.isp24.port_name,
4739 iocb->u.isp24.u.plogi.node_name,
4740 pla, FC4_TYPE_UNKNOWN);
4741 else
4742 qla24xx_post_newsess_work(vha, &port_id,
4743 iocb->u.isp24.port_name, NULL,
4744 pla, FC4_TYPE_UNKNOWN);
4745
4746 goto out;
4747 }
4748
4749 if (sess->disc_state == DSC_UPD_FCPORT) {
4750 u16 sec;
4751
4752 /*
4753 * Remote port registration is still going on from
4754 * previous login. Allow it to finish before we
4755 * accept the new login.
4756 */
4757 sess->next_disc_state = DSC_DELETE_PEND;
4758 sec = jiffies_to_msecs(jiffies -
4759 sess->jiffies_at_registration) / 1000;
4760 if (sess->sec_since_registration < sec && sec &&
4761 !(sec % 5)) {
4762 sess->sec_since_registration = sec;
4763 ql_dbg(ql_dbg_disc, vha, 0xffff,
4764 "%s %8phC - Slow Rport registration (%d Sec)\n",
4765 __func__, sess->port_name, sec);
4766 }
4767
4768 if (!conflict_sess) {
4769 list_del(&pla->list);
4770 kmem_cache_free(qla_tgt_plogi_cachep, pla);
4771 }
4772
4773 qlt_send_term_imm_notif(vha, iocb, 1);
4774 goto out;
4775 }
4776
4777 qlt_plogi_ack_link(vha, pla, sess, QLT_PLOGI_LINK_SAME_WWN);
4778 sess->d_id = port_id;
4779 sess->login_gen++;
4780
4781 if (iocb->u.isp24.status_subcode == ELS_PRLI) {
4782 sess->fw_login_state = DSC_LS_PRLI_PEND;
4783 sess->local = 0;
4784 sess->loop_id = loop_id;
4787 wd3_lo = le16_to_cpu(iocb->u.isp24.u.prli.wd3_lo);
4788
4789 if (wd3_lo & BIT_7)
4790 sess->conf_compl_supported = 1;
4791
4792 if ((wd3_lo & BIT_4) == 0)
4793 sess->port_type = FCT_INITIATOR;
4794 else
4795 sess->port_type = FCT_TARGET;
4796
4797 } else
4798 sess->fw_login_state = DSC_LS_PLOGI_PEND;
4799
4800
4801 ql_dbg(ql_dbg_disc, vha, 0x20f9,
4802 "%s %d %8phC DS %d\n",
4803 __func__, __LINE__, sess->port_name, sess->disc_state);
4804
4805 switch (sess->disc_state) {
4806 case DSC_DELETED:
4807 case DSC_LOGIN_PEND:
4808 qlt_plogi_ack_unref(vha, pla);
4809 break;
4810
4811 default:
4812 /*
4813 * Under normal circumstances we want to release nport handle
4814 * during LOGO process to avoid nport handle leaks inside FW.
4815 * The exception is when LOGO is done while another PLOGI with
4816 * the same nport handle is waiting as might be the case here.
4817 * Note: there is always a possibility of a race where session
4818 * deletion has already started for other reasons (e.g. ACL
4819 * removal) and now PLOGI arrives:
4820 * 1. if PLOGI arrived in FW after nport handle has been freed,
4821 * FW must have assigned this PLOGI a new/same handle and we
4822 * can proceed ACK'ing it as usual when session deletion
4823 * completes.
4824 * 2. if PLOGI arrived in FW before LOGO with LCF_FREE_NPORT
4825 * bit reached it, the handle has now been released. We'll
4826 * get an error when we ACK this PLOGI. Nothing will be sent
4827 * back to initiator. Initiator should eventually retry
4828 * PLOGI and situation will correct itself.
4829 */
4830 sess->keep_nport_handle = ((sess->loop_id == loop_id) &&
4831 (sess->d_id.b24 == port_id.b24));
4832
4833 ql_dbg(ql_dbg_disc, vha, 0x20f9,
4834 "%s %d %8phC post del sess\n",
4835 __func__, __LINE__, sess->port_name);
4836
4837
4838 qlt_schedule_sess_for_deletion(sess);
4839 break;
4840 }
4841 out:
4842 return res;
4843 }
4844
4845 /*
4846 * ha->hardware_lock supposed to be held on entry. Might drop it, then reacquire
4847 */
4848 static int qlt_24xx_handle_els(struct scsi_qla_host *vha,
4849 struct imm_ntfy_from_isp *iocb)
4850 {
4851 struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
4852 struct qla_hw_data *ha = vha->hw;
4853 struct fc_port *sess = NULL, *conflict_sess = NULL;
4854 uint64_t wwn;
4855 port_id_t port_id;
4856 uint16_t loop_id;
4857 uint16_t wd3_lo;
4858 int res = 0;
4859 unsigned long flags;
4860
4861 lockdep_assert_held(&ha->hardware_lock);
4862
4863 wwn = wwn_to_u64(iocb->u.isp24.port_name);
4864
4865 port_id.b.domain = iocb->u.isp24.port_id[2];
4866 port_id.b.area = iocb->u.isp24.port_id[1];
4867 port_id.b.al_pa = iocb->u.isp24.port_id[0];
4868 port_id.b.rsvd_1 = 0;
4869
4870 loop_id = le16_to_cpu(iocb->u.isp24.nport_handle);
4871
4872 ql_dbg(ql_dbg_disc, vha, 0xf026,
4873 "qla_target(%d): Port ID: %02x:%02x:%02x ELS opcode: 0x%02x lid %d %8phC\n",
4874 vha->vp_idx, iocb->u.isp24.port_id[2],
4875 iocb->u.isp24.port_id[1], iocb->u.isp24.port_id[0],
4876 iocb->u.isp24.status_subcode, loop_id,
4877 iocb->u.isp24.port_name);
4878
4879 /* res = 1 means ack at the end of this function.
4880 * res = 0 means ack async/later.
4881 */
4882 switch (iocb->u.isp24.status_subcode) {
4883 case ELS_PLOGI:
4884 res = qlt_handle_login(vha, iocb);
4885 break;
4886
4887 case ELS_PRLI:
4888 if (N2N_TOPO(ha)) {
4889 sess = qla2x00_find_fcport_by_wwpn(vha,
4890 iocb->u.isp24.port_name, 1);
4891
4892 if (sess && sess->plogi_link[QLT_PLOGI_LINK_SAME_WWN]) {
4893 ql_dbg(ql_dbg_disc, vha, 0xffff,
4894 "%s %d %8phC Term PRLI due to PLOGI ACK not completed\n",
4895 __func__, __LINE__,
4896 iocb->u.isp24.port_name);
4897 qlt_send_term_imm_notif(vha, iocb, 1);
4898 break;
4899 }
4900
4901 res = qlt_handle_login(vha, iocb);
4902 break;
4903 }
4904
4905 if (IS_SW_RESV_ADDR(port_id)) {
4906 res = 1;
4907 break;
4908 }
4909
4910 wd3_lo = le16_to_cpu(iocb->u.isp24.u.prli.wd3_lo);
4911
4912 if (wwn) {
4913 spin_lock_irqsave(&tgt->ha->tgt.sess_lock, flags);
4914 sess = qlt_find_sess_invalidate_other(vha, wwn, port_id,
4915 loop_id, &conflict_sess);
4916 spin_unlock_irqrestore(&tgt->ha->tgt.sess_lock, flags);
4917 }
4918
4919 if (conflict_sess) {
4920 switch (conflict_sess->disc_state) {
4921 case DSC_DELETED:
4922 case DSC_DELETE_PEND:
4923 break;
4924 default:
4925 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf09b,
4926 "PRLI with conflicting sess %p port %8phC\n",
4927 conflict_sess, conflict_sess->port_name);
4928 conflict_sess->fw_login_state =
4929 DSC_LS_PORT_UNAVAIL;
4930 qlt_send_term_imm_notif(vha, iocb, 1);
4931 res = 0;
4932 break;
4933 }
4934 }
4935
4936 if (sess != NULL) {
4937 bool delete = false;
4938 int sec;
4939
4940 spin_lock_irqsave(&tgt->ha->tgt.sess_lock, flags);
4941 switch (sess->fw_login_state) {
4942 case DSC_LS_PLOGI_PEND:
4943 case DSC_LS_PLOGI_COMP:
4944 case DSC_LS_PRLI_COMP:
4945 break;
4946 default:
4947 delete = true;
4948 break;
4949 }
4950
4951 switch (sess->disc_state) {
4952 case DSC_UPD_FCPORT:
4953 spin_unlock_irqrestore(&tgt->ha->tgt.sess_lock,
4954 flags);
4955
4956 sec = jiffies_to_msecs(jiffies -
4957 sess->jiffies_at_registration)/1000;
4958 if (sess->sec_since_registration < sec && sec &&
4959 !(sec % 5)) {
4960 sess->sec_since_registration = sec;
4961 ql_dbg(ql_dbg_disc, sess->vha, 0xffff,
4962 "%s %8phC : Slow Rport registration(%d Sec)\n",
4963 __func__, sess->port_name, sec);
4964 }
4965 qlt_send_term_imm_notif(vha, iocb, 1);
4966 return 0;
4967
4968 case DSC_LOGIN_PEND:
4969 case DSC_GPDB:
4970 case DSC_LOGIN_COMPLETE:
4971 case DSC_ADISC:
4972 delete = false;
4973 break;
4974 default:
4975 break;
4976 }
4977
4978 if (delete) {
4979 spin_unlock_irqrestore(&tgt->ha->tgt.sess_lock,
4980 flags);
4981 /*
4982 * An impatient initiator sent PRLI before the last
4983 * PLOGI could finish. Force it to retry while the
4984 * previous one finishes.
4985 */
4986 ql_log(ql_log_warn, sess->vha, 0xf095,
4987 "sess %p PRLI received, before plogi ack.\n",
4988 sess);
4989 qlt_send_term_imm_notif(vha, iocb, 1);
4990 res = 0;
4991 break;
4992 }
4993
4994 /*
4995 * This shouldn't happen under normal circumstances,
4996 * since we have deleted the old session during PLOGI
4997 */
4998 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf096,
4999 "PRLI (loop_id %#04x) for existing sess %p (loop_id %#04x)\n",
5000 sess->loop_id, sess, iocb->u.isp24.nport_handle);
5001
5002 sess->local = 0;
5003 sess->loop_id = loop_id;
5004 sess->d_id = port_id;
5005 sess->fw_login_state = DSC_LS_PRLI_PEND;
5006
5007 if (wd3_lo & BIT_7)
5008 sess->conf_compl_supported = 1;
5009
5010 if ((wd3_lo & BIT_4) == 0)
5011 sess->port_type = FCT_INITIATOR;
5012 else
5013 sess->port_type = FCT_TARGET;
5014
5015 spin_unlock_irqrestore(&tgt->ha->tgt.sess_lock, flags);
5016 }
5017 res = 1; /* send notify ack */
5018
5019 /* Make session global (not used in fabric mode) */
5020 if (ha->current_topology != ISP_CFG_F) {
5021 if (sess) {
5022 ql_dbg(ql_dbg_disc, vha, 0x20fa,
5023 "%s %d %8phC post nack\n",
5024 __func__, __LINE__, sess->port_name);
5025 qla24xx_post_nack_work(vha, sess, iocb,
5026 SRB_NACK_PRLI);
5027 res = 0;
5028 } else {
5029 set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
5030 set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
5031 qla2xxx_wake_dpc(vha);
5032 }
5033 } else {
5034 if (sess) {
5035 ql_dbg(ql_dbg_disc, vha, 0x20fb,
5036 "%s %d %8phC post nack\n",
5037 __func__, __LINE__, sess->port_name);
5038 qla24xx_post_nack_work(vha, sess, iocb,
5039 SRB_NACK_PRLI);
5040 res = 0;
5041 }
5042 }
5043 break;
5044
5045 case ELS_TPRLO:
5046 if (le16_to_cpu(iocb->u.isp24.flags) &
5047 NOTIFY24XX_FLAGS_GLOBAL_TPRLO) {
5048 loop_id = 0xFFFF;
5049 qlt_reset(vha, iocb, QLA_TGT_NEXUS_LOSS);
5050 res = 1;
5051 break;
5052 }
5053 /* fall through */
5054 case ELS_LOGO:
5055 case ELS_PRLO:
5056 spin_lock_irqsave(&ha->tgt.sess_lock, flags);
5057 sess = qla2x00_find_fcport_by_loopid(vha, loop_id);
5058 spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
5059
5060 if (sess) {
5061 sess->login_gen++;
5062 sess->fw_login_state = DSC_LS_LOGO_PEND;
5063 sess->logo_ack_needed = 1;
5064 memcpy(sess->iocb, iocb, IOCB_SIZE);
5065 }
5066
5067 res = qlt_reset(vha, iocb, QLA_TGT_NEXUS_LOSS_SESS);
5068
5069 ql_dbg(ql_dbg_disc, vha, 0x20fc,
5070 "%s: logo %llx res %d sess %p ",
5071 __func__, wwn, res, sess);
5072 if (res == 0) {
5073 /*
5074 * cmd went to the upper layer; see qlt_xmit_tm_rsp()
5075 * for the LOGO ACK and session deletion.
5076 */
5077 BUG_ON(!sess);
5078 res = 0;
5079 } else {
5080 /* cmd did not go to upper layer. */
5081 if (sess) {
5082 qlt_schedule_sess_for_deletion(sess);
5083 res = 0;
5084 }
5085 /* else the LOGO will be acked */
5086 }
5087 break;
5088 case ELS_PDISC:
5089 case ELS_ADISC:
5090 {
5091 struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
5092
5093 if (tgt->link_reinit_iocb_pending) {
5094 qlt_send_notify_ack(ha->base_qpair,
5095 &tgt->link_reinit_iocb, 0, 0, 0, 0, 0, 0);
5096 tgt->link_reinit_iocb_pending = 0;
5097 }
5098
5099 sess = qla2x00_find_fcport_by_wwpn(vha,
5100 iocb->u.isp24.port_name, 1);
5101 if (sess) {
5102 ql_dbg(ql_dbg_disc, vha, 0x20fd,
5103 "sess %p lid %d|%d DS %d LS %d\n",
5104 sess, sess->loop_id, loop_id,
5105 sess->disc_state, sess->fw_login_state);
5106 }
5107
5108 res = 1; /* send notify ack */
5109 break;
5110 }
5111
5112 case ELS_FLOGI: /* should never happen */
5113 default:
5114 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf061,
5115 "qla_target(%d): Unsupported ELS command %x "
5116 "received\n", vha->vp_idx, iocb->u.isp24.status_subcode);
5117 res = qlt_reset(vha, iocb, QLA_TGT_NEXUS_LOSS_SESS);
5118 break;
5119 }
5120
5121 ql_dbg(ql_dbg_disc, vha, 0xf026,
5122 "qla_target(%d): Exit ELS opcode: 0x%02x res %d\n",
5123 vha->vp_idx, iocb->u.isp24.status_subcode, res);
5124
5125 return res;
5126 }
5127
5128 /*
5129 * ha->hardware_lock supposed to be held on entry. Might drop it, then reacquire
5130 */
5131 static void qlt_handle_imm_notify(struct scsi_qla_host *vha,
5132 struct imm_ntfy_from_isp *iocb)
5133 {
5134 struct qla_hw_data *ha = vha->hw;
5135 uint32_t add_flags = 0;
5136 int send_notify_ack = 1;
5137 uint16_t status;
5138
5139 lockdep_assert_held(&ha->hardware_lock);
5140
5141 status = le16_to_cpu(iocb->u.isp2x.status);
5142 switch (status) {
5143 case IMM_NTFY_LIP_RESET:
5144 {
5145 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf032,
5146 "qla_target(%d): LIP reset (loop %#x), subcode %x\n",
5147 vha->vp_idx, le16_to_cpu(iocb->u.isp24.nport_handle),
5148 iocb->u.isp24.status_subcode);
5149
5150 if (qlt_reset(vha, iocb, QLA_TGT_ABORT_ALL) == 0)
5151 send_notify_ack = 0;
5152 break;
5153 }
5154
5155 case IMM_NTFY_LIP_LINK_REINIT:
5156 {
5157 struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
5158
5159 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf033,
5160 "qla_target(%d): LINK REINIT (loop %#x, "
5161 "subcode %x)\n", vha->vp_idx,
5162 le16_to_cpu(iocb->u.isp24.nport_handle),
5163 iocb->u.isp24.status_subcode);
5164 if (tgt->link_reinit_iocb_pending) {
5165 qlt_send_notify_ack(ha->base_qpair,
5166 &tgt->link_reinit_iocb, 0, 0, 0, 0, 0, 0);
5167 }
5168 memcpy(&tgt->link_reinit_iocb, iocb, sizeof(*iocb));
5169 tgt->link_reinit_iocb_pending = 1;
5170 /*
5171 * QLogic requires waiting after LINK REINIT for possible
5172 * PDISC or ADISC ELS commands
5173 */
5174 send_notify_ack = 0;
5175 break;
5176 }
5177
5178 case IMM_NTFY_PORT_LOGOUT:
5179 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf034,
5180 "qla_target(%d): Port logout (loop "
5181 "%#x, subcode %x)\n", vha->vp_idx,
5182 le16_to_cpu(iocb->u.isp24.nport_handle),
5183 iocb->u.isp24.status_subcode);
5184
5185 if (qlt_reset(vha, iocb, QLA_TGT_NEXUS_LOSS_SESS) == 0)
5186 send_notify_ack = 0;
5187 /* The sessions will be cleared in the callback, if needed */
5188 break;
5189
5190 case IMM_NTFY_GLBL_TPRLO:
5191 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf035,
5192 "qla_target(%d): Global TPRLO (%x)\n", vha->vp_idx, status);
5193 if (qlt_reset(vha, iocb, QLA_TGT_NEXUS_LOSS) == 0)
5194 send_notify_ack = 0;
5195 /* The sessions will be cleared in the callback, if needed */
5196 break;
5197
5198 case IMM_NTFY_PORT_CONFIG:
5199 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf036,
5200 "qla_target(%d): Port config changed (%x)\n", vha->vp_idx,
5201 status);
5202 if (qlt_reset(vha, iocb, QLA_TGT_ABORT_ALL) == 0)
5203 send_notify_ack = 0;
5204 /* The sessions will be cleared in the callback, if needed */
5205 break;
5206
5207 case IMM_NTFY_GLBL_LOGO:
5208 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf06a,
5209 "qla_target(%d): Link failure detected\n",
5210 vha->vp_idx);
5211 /* I_T nexus loss */
5212 if (qlt_reset(vha, iocb, QLA_TGT_NEXUS_LOSS) == 0)
5213 send_notify_ack = 0;
5214 break;
5215
5216 case IMM_NTFY_IOCB_OVERFLOW:
5217 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf06b,
5218 "qla_target(%d): Cannot provide requested "
5219 "capability (IOCB overflowed the immediate notify "
5220 "resource count)\n", vha->vp_idx);
5221 break;
5222
5223 case IMM_NTFY_ABORT_TASK:
5224 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf037,
5225 "qla_target(%d): Abort Task (S %08x I %#x -> "
5226 "L %#x)\n", vha->vp_idx,
5227 le16_to_cpu(iocb->u.isp2x.seq_id),
5228 GET_TARGET_ID(ha, (struct atio_from_isp *)iocb),
5229 le16_to_cpu(iocb->u.isp2x.lun));
5230 if (qlt_abort_task(vha, iocb) == 0)
5231 send_notify_ack = 0;
5232 break;
5233
5234 case IMM_NTFY_RESOURCE:
5235 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf06c,
5236 "qla_target(%d): Out of resources, host %ld\n",
5237 vha->vp_idx, vha->host_no);
5238 break;
5239
5240 case IMM_NTFY_MSG_RX:
5241 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf038,
5242 "qla_target(%d): Immediate notify task %x\n",
5243 vha->vp_idx, iocb->u.isp2x.task_flags);
5244 break;
5245
5246 case IMM_NTFY_ELS:
5247 if (qlt_24xx_handle_els(vha, iocb) == 0)
5248 send_notify_ack = 0;
5249 break;
5250 default:
5251 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf06d,
5252 "qla_target(%d): Received unknown immediate "
5253 "notify status %x\n", vha->vp_idx, status);
5254 break;
5255 }
5256
5257 if (send_notify_ack)
5258 qlt_send_notify_ack(ha->base_qpair, iocb, add_flags, 0, 0, 0,
5259 0, 0);
5260 }
5261
5262 /*
5263 * ha->hardware_lock supposed to be held on entry. Might drop it, then reacquire
5264 * This function sends busy to ISP 2xxx or 24xx.
5265 */
5266 static int __qlt_send_busy(struct qla_qpair *qpair,
5267 struct atio_from_isp *atio, uint16_t status)
5268 {
5269 struct scsi_qla_host *vha = qpair->vha;
5270 struct ctio7_to_24xx *ctio24;
5271 struct qla_hw_data *ha = vha->hw;
5272 request_t *pkt;
5273 struct fc_port *sess = NULL;
5274 unsigned long flags;
5275 u16 temp;
5276 port_id_t id;
5277
5278 id = be_to_port_id(atio->u.isp24.fcp_hdr.s_id);
5279
5280 spin_lock_irqsave(&ha->tgt.sess_lock, flags);
5281 sess = qla2x00_find_fcport_by_nportid(vha, &id, 1);
5282 spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
5283 if (!sess) {
5284 qlt_send_term_exchange(qpair, NULL, atio, 1, 0);
5285 return 0;
5286 }
5287 /* Sending a marker isn't necessary, since we are called from the ISR */
5288
5289 pkt = (request_t *)__qla2x00_alloc_iocbs(qpair, NULL);
5290 if (!pkt) {
5291 ql_dbg(ql_dbg_io, vha, 0x3063,
5292 "qla_target(%d): %s failed: unable to allocate "
5293 "request packet", vha->vp_idx, __func__);
5294 return -ENOMEM;
5295 }
5296
5297 qpair->tgt_counters.num_q_full_sent++;
5298 pkt->entry_count = 1;
5299 pkt->handle = QLA_TGT_SKIP_HANDLE | CTIO_COMPLETION_HANDLE_MARK;
5300
5301 ctio24 = (struct ctio7_to_24xx *)pkt;
5302 ctio24->entry_type = CTIO_TYPE7;
5303 ctio24->nport_handle = sess->loop_id;
5304 ctio24->timeout = cpu_to_le16(QLA_TGT_TIMEOUT);
5305 ctio24->vp_index = vha->vp_idx;
5306 ctio24->initiator_id = be_id_to_le(atio->u.isp24.fcp_hdr.s_id);
5307 ctio24->exchange_addr = atio->u.isp24.exchange_addr;
5308 temp = (atio->u.isp24.attr << 9) |
5309 CTIO7_FLAGS_STATUS_MODE_1 | CTIO7_FLAGS_SEND_STATUS |
5310 CTIO7_FLAGS_DONT_RET_CTIO;
5311 ctio24->u.status1.flags = cpu_to_le16(temp);
5312 /*
5313 * CTIO from fw w/o se_cmd doesn't provide enough info to retry it,
5314 * if explicit confirmation is used.
5315 */
5316 ctio24->u.status1.ox_id = swab16(atio->u.isp24.fcp_hdr.ox_id);
5317 ctio24->u.status1.scsi_status = cpu_to_le16(status);
5318
5319 ctio24->u.status1.residual = get_datalen_for_atio(atio);
5320
5321 if (ctio24->u.status1.residual != 0)
5322 ctio24->u.status1.scsi_status |= SS_RESIDUAL_UNDER;
5323
5324 /* Memory Barrier */
5325 wmb();
5326 if (qpair->reqq_start_iocbs)
5327 qpair->reqq_start_iocbs(qpair);
5328 else
5329 qla2x00_start_iocbs(vha, qpair->req);
5330 return 0;
5331 }
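/*
 * Since a BUSY/TASK SET FULL response moves no data, the full
 * expected transfer length is reported back as an underrun: the
 * residual is set to the ATIO data length and SS_RESIDUAL_UNDER is
 * OR'ed into the SCSI status, so the initiator can account for the
 * untransferred bytes.
 */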
5332
5333 /*
5334 * This routine is used to allocate a command for either a QFull condition
5335 * (i.e. reply SAM_STAT_BUSY) or to terminate an exchange that did not go
5336 * out previously.
5337 */
5338 static void
5339 qlt_alloc_qfull_cmd(struct scsi_qla_host *vha,
5340 struct atio_from_isp *atio, uint16_t status, int qfull)
5341 {
5342 struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
5343 struct qla_hw_data *ha = vha->hw;
5344 struct fc_port *sess;
5345 struct se_session *se_sess;
5346 struct qla_tgt_cmd *cmd;
5347 int tag, cpu;
5348 unsigned long flags;
5349
5350 if (unlikely(tgt->tgt_stop)) {
5351 ql_dbg(ql_dbg_io, vha, 0x300a,
5352 "New command while device %p is shutting down\n", tgt);
5353 return;
5354 }
5355
5356 if ((vha->hw->tgt.num_qfull_cmds_alloc + 1) > MAX_QFULL_CMDS_ALLOC) {
5357 vha->hw->tgt.num_qfull_cmds_dropped++;
5358 if (vha->hw->tgt.num_qfull_cmds_dropped >
5359 vha->qla_stats.stat_max_qfull_cmds_dropped)
5360 vha->qla_stats.stat_max_qfull_cmds_dropped =
5361 vha->hw->tgt.num_qfull_cmds_dropped;
5362
5363 ql_dbg(ql_dbg_io, vha, 0x3068,
5364 "qla_target(%d): %s: QFull CMD dropped[%d]\n",
5365 vha->vp_idx, __func__,
5366 vha->hw->tgt.num_qfull_cmds_dropped);
5367
5368 qlt_chk_exch_leak_thresh_hold(vha);
5369 return;
5370 }
5371
5372 sess = ha->tgt.tgt_ops->find_sess_by_s_id
5373 (vha, atio->u.isp24.fcp_hdr.s_id);
5374 if (!sess)
5375 return;
5376
5377 se_sess = sess->se_sess;
5378
5379 tag = sbitmap_queue_get(&se_sess->sess_tag_pool, &cpu);
5380 if (tag < 0) {
5381 ql_dbg(ql_dbg_io, vha, 0x3009,
5382 "qla_target(%d): %s: Allocation of cmd failed\n",
5383 vha->vp_idx, __func__);
5384
5385 vha->hw->tgt.num_qfull_cmds_dropped++;
5386 if (vha->hw->tgt.num_qfull_cmds_dropped >
5387 vha->qla_stats.stat_max_qfull_cmds_dropped)
5388 vha->qla_stats.stat_max_qfull_cmds_dropped =
5389 vha->hw->tgt.num_qfull_cmds_dropped;
5390
5391 qlt_chk_exch_leak_thresh_hold(vha);
5392 return;
5393 }
5394
5395 cmd = &((struct qla_tgt_cmd *)se_sess->sess_cmd_map)[tag];
5396 memset(cmd, 0, sizeof(struct qla_tgt_cmd));
5397
5398 qlt_incr_num_pend_cmds(vha);
5399 INIT_LIST_HEAD(&cmd->cmd_list);
5400 memcpy(&cmd->atio, atio, sizeof(*atio));
5401
5402 cmd->tgt = vha->vha_tgt.qla_tgt;
5403 cmd->vha = vha;
5404 cmd->reset_count = ha->base_qpair->chip_reset;
5405 cmd->q_full = 1;
5406 cmd->qpair = ha->base_qpair;
5407 cmd->se_cmd.map_cpu = cpu;
5408
5409 if (qfull) {
5411 /* NOTE: borrowing the state field to carry the status */
5412 cmd->state = status;
5413 } else
5414 cmd->term_exchg = 1;
5415
5416 spin_lock_irqsave(&vha->hw->tgt.q_full_lock, flags);
5417 list_add_tail(&cmd->cmd_list, &vha->hw->tgt.q_full_list);
5418
5419 vha->hw->tgt.num_qfull_cmds_alloc++;
5420 if (vha->hw->tgt.num_qfull_cmds_alloc >
5421 vha->qla_stats.stat_max_qfull_cmds_alloc)
5422 vha->qla_stats.stat_max_qfull_cmds_alloc =
5423 vha->hw->tgt.num_qfull_cmds_alloc;
5424 spin_unlock_irqrestore(&vha->hw->tgt.q_full_lock, flags);
5425 }
5426
5427 int
5428 qlt_free_qfull_cmds(struct qla_qpair *qpair)
5429 {
5430 struct scsi_qla_host *vha = qpair->vha;
5431 struct qla_hw_data *ha = vha->hw;
5432 unsigned long flags;
5433 struct qla_tgt_cmd *cmd, *tcmd;
5434 struct list_head free_list, q_full_list;
5435 int rc = 0;
5436
5437 if (list_empty(&ha->tgt.q_full_list))
5438 return 0;
5439
5440 INIT_LIST_HEAD(&free_list);
5441 INIT_LIST_HEAD(&q_full_list);
5442
5443 spin_lock_irqsave(&vha->hw->tgt.q_full_lock, flags);
5444 if (list_empty(&ha->tgt.q_full_list)) {
5445 spin_unlock_irqrestore(&vha->hw->tgt.q_full_lock, flags);
5446 return 0;
5447 }
5448
5449 list_splice_init(&vha->hw->tgt.q_full_list, &q_full_list);
5450 spin_unlock_irqrestore(&vha->hw->tgt.q_full_lock, flags);
5451
5452 spin_lock_irqsave(qpair->qp_lock_ptr, flags);
5453 list_for_each_entry_safe(cmd, tcmd, &q_full_list, cmd_list) {
5454 if (cmd->q_full)
5455 /* cmd->state is a borrowed field to hold status */
5456 rc = __qlt_send_busy(qpair, &cmd->atio, cmd->state);
5457 else if (cmd->term_exchg)
5458 rc = __qlt_send_term_exchange(qpair, NULL, &cmd->atio);
5459
5460 if (rc == -ENOMEM)
5461 break;
5462
5463 if (cmd->q_full)
5464 ql_dbg(ql_dbg_io, vha, 0x3006,
5465 "%s: busy sent for ox_id[%04x]\n", __func__,
5466 be16_to_cpu(cmd->atio.u.isp24.fcp_hdr.ox_id));
5467 else if (cmd->term_exchg)
5468 ql_dbg(ql_dbg_io, vha, 0x3007,
5469 "%s: Term exchg sent for ox_id[%04x]\n", __func__,
5470 be16_to_cpu(cmd->atio.u.isp24.fcp_hdr.ox_id));
5471 else
5472 ql_dbg(ql_dbg_io, vha, 0x3008,
5473 "%s: Unexpected cmd in QFull list %p\n", __func__,
5474 cmd);
5475
5476 list_del(&cmd->cmd_list);
5477 list_add_tail(&cmd->cmd_list, &free_list);
5478
5479 /* piggyback on the qpair lock for protection */
5480 vha->hw->tgt.num_qfull_cmds_alloc--;
5481 }
5482 spin_unlock_irqrestore(qpair->qp_lock_ptr, flags);
5483
5484 cmd = NULL;
5485
5486 list_for_each_entry_safe(cmd, tcmd, &free_list, cmd_list) {
5487 list_del(&cmd->cmd_list);
5488 /* This cmd was never sent to TCM. There is no need
5489 * to schedule free or call free_cmd
5490 */
5491 qlt_free_cmd(cmd);
5492 }
5493
5494 if (!list_empty(&q_full_list)) {
5495 spin_lock_irqsave(&vha->hw->tgt.q_full_lock, flags);
5496 list_splice(&q_full_list, &vha->hw->tgt.q_full_list);
5497 spin_unlock_irqrestore(&vha->hw->tgt.q_full_lock, flags);
5498 }
5499
5500 return rc;
5501 }
5502
5503 static void
5504 qlt_send_busy(struct qla_qpair *qpair, struct atio_from_isp *atio,
5505 uint16_t status)
5506 {
5507 int rc = 0;
5508 struct scsi_qla_host *vha = qpair->vha;
5509
5510 rc = __qlt_send_busy(qpair, atio, status);
5511 if (rc == -ENOMEM)
5512 qlt_alloc_qfull_cmd(vha, atio, status, 1);
5513 }
5514
5515 static int
5516 qlt_chk_qfull_thresh_hold(struct scsi_qla_host *vha, struct qla_qpair *qpair,
5517 struct atio_from_isp *atio, uint8_t ha_locked)
5518 {
5519 struct qla_hw_data *ha = vha->hw;
5520 unsigned long flags;
5521
5522 if (ha->tgt.num_pend_cmds < Q_FULL_THRESH_HOLD(ha))
5523 return 0;
5524
5525 if (!ha_locked)
5526 spin_lock_irqsave(&ha->hardware_lock, flags);
5527 qlt_send_busy(qpair, atio, qla_sam_status);
5528 if (!ha_locked)
5529 spin_unlock_irqrestore(&ha->hardware_lock, flags);
5530
5531 return 1;
5532 }
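/*
 * Sketch of the threshold check above, assuming Q_FULL_THRESH_HOLD(ha)
 * expands in qla_target.h to a percentage of ha->cur_fw_xcb_count,
 * analogous to the leak threshold, with a 90 percent default: for
 * 4096 firmware exchanges, (4096 / 100) * 90 = 40 * 90 = 3600, so
 * SAM BUSY is returned once more than 3600 commands are pending.
 */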
5533
5534 /* ha->hardware_lock supposed to be held on entry */
5535 /* called via callback from qla2xxx */
5536 static void qlt_24xx_atio_pkt(struct scsi_qla_host *vha,
5537 struct atio_from_isp *atio, uint8_t ha_locked)
5538 {
5539 struct qla_hw_data *ha = vha->hw;
5540 struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
5541 int rc;
5542 unsigned long flags = 0;
5543
5544 if (unlikely(tgt == NULL)) {
5545 ql_dbg(ql_dbg_tgt, vha, 0x3064,
5546 "ATIO pkt, but no tgt (ha %p)", ha);
5547 return;
5548 }
5549 /*
5550 * In tgt_stop mode we should also allow all requests to pass.
5551 * Otherwise, some commands can get stuck.
5552 */
5553
5554 tgt->atio_irq_cmd_count++;
5555
5556 switch (atio->u.raw.entry_type) {
5557 case ATIO_TYPE7:
5558 if (unlikely(atio->u.isp24.exchange_addr ==
5559 ATIO_EXCHANGE_ADDRESS_UNKNOWN)) {
5560 ql_dbg(ql_dbg_io, vha, 0x3065,
5561 "qla_target(%d): ATIO_TYPE7 "
5562 "received with UNKNOWN exchange address, "
5563 "sending QUEUE_FULL\n", vha->vp_idx);
5564 if (!ha_locked)
5565 spin_lock_irqsave(&ha->hardware_lock, flags);
5566 qlt_send_busy(ha->base_qpair, atio, qla_sam_status);
5567 if (!ha_locked)
5568 spin_unlock_irqrestore(&ha->hardware_lock,
5569 flags);
5570 break;
5571 }
5572
5573 if (likely(atio->u.isp24.fcp_cmnd.task_mgmt_flags == 0)) {
5574 rc = qlt_chk_qfull_thresh_hold(vha, ha->base_qpair,
5575 atio, ha_locked);
5576 if (rc != 0) {
5577 tgt->atio_irq_cmd_count--;
5578 return;
5579 }
5580 rc = qlt_handle_cmd_for_atio(vha, atio);
5581 } else {
5582 rc = qlt_handle_task_mgmt(vha, atio);
5583 }
5584 if (unlikely(rc != 0)) {
5585 if (!ha_locked)
5586 spin_lock_irqsave(&ha->hardware_lock, flags);
5587 switch (rc) {
5588 case -ENODEV:
5589 ql_dbg(ql_dbg_tgt, vha, 0xe05f,
5590 "qla_target: Unable to send command to target\n");
5591 break;
5592 case -EBADF:
5593 ql_dbg(ql_dbg_tgt, vha, 0xe05f,
5594 "qla_target: Unable to send command to target, sending TERM EXCHANGE for rsp\n");
5595 qlt_send_term_exchange(ha->base_qpair, NULL,
5596 atio, 1, 0);
5597 break;
5598 case -EBUSY:
5599 ql_dbg(ql_dbg_tgt, vha, 0xe060,
5600 "qla_target(%d): Unable to send command to target, sending BUSY status\n",
5601 vha->vp_idx);
5602 qlt_send_busy(ha->base_qpair, atio,
5603 tc_sam_status);
5604 break;
5605 default:
5606 ql_dbg(ql_dbg_tgt, vha, 0xe060,
5607 "qla_target(%d): Unable to send command to target, sending BUSY status\n",
5608 vha->vp_idx);
5609 qlt_send_busy(ha->base_qpair, atio,
5610 qla_sam_status);
5611 break;
5612 }
5613 if (!ha_locked)
5614 spin_unlock_irqrestore(&ha->hardware_lock,
5615 flags);
5616 }
5617 break;
5618
5619 case IMMED_NOTIFY_TYPE:
5620 {
5621 if (unlikely(atio->u.isp2x.entry_status != 0)) {
5622 ql_dbg(ql_dbg_tgt, vha, 0xe05b,
5623 "qla_target(%d): Received ATIO packet %x "
5624 "with error status %x\n", vha->vp_idx,
5625 atio->u.raw.entry_type,
5626 atio->u.isp2x.entry_status);
5627 break;
5628 }
5629 ql_dbg(ql_dbg_tgt, vha, 0xe02e, "%s", "IMMED_NOTIFY ATIO");
5630
5631 if (!ha_locked)
5632 spin_lock_irqsave(&ha->hardware_lock, flags);
5633 qlt_handle_imm_notify(vha, (struct imm_ntfy_from_isp *)atio);
5634 if (!ha_locked)
5635 spin_unlock_irqrestore(&ha->hardware_lock, flags);
5636 break;
5637 }
5638
5639 default:
5640 ql_dbg(ql_dbg_tgt, vha, 0xe05c,
5641 "qla_target(%d): Received unknown ATIO atio "
5642 "type %x\n", vha->vp_idx, atio->u.raw.entry_type);
5643 break;
5644 }
5645
5646 tgt->atio_irq_cmd_count--;
5647 }
5648
5649 /*
5650 * qpair lock is assumed to be held
5651 * rc = 0 : send terminate & ABTS response
5652 * rc != 0: do not send term & ABTS response
5653 */
5654 static int qlt_chk_unresolv_exchg(struct scsi_qla_host *vha,
5655 struct qla_qpair *qpair, struct abts_resp_from_24xx_fw *entry)
5656 {
5657 struct qla_hw_data *ha = vha->hw;
5658 int rc = 0;
5659
5660 /*
5661 * Detect an unresolved exchange: if the same ABTS cannot
5662 * terminate an existing command and keeps looping between
5663 * FW & Driver, force a FW dump. Within a single jiffy
5664 * we should see multiple iterations of the loop.
5665 */
5666 if (qpair->retry_term_exchg_addr == entry->exchange_addr_to_abort &&
5667 qpair->retry_term_jiff == jiffies) {
5668 /* found existing exchange */
5669 qpair->retry_term_cnt++;
5670 if (qpair->retry_term_cnt >= 5) {
5671 rc = EIO;
5672 qpair->retry_term_cnt = 0;
5673 ql_log(ql_log_warn, vha, 0xffff,
5674 "Unable to send ABTS Respond. Dumping firmware.\n");
5675 ql_dump_buffer(ql_dbg_tgt_mgt + ql_dbg_buffer,
5676 vha, 0xffff, (uint8_t *)entry, sizeof(*entry));
5677
5678 if (qpair == ha->base_qpair)
5679 ha->isp_ops->fw_dump(vha, 1);
5680 else
5681 ha->isp_ops->fw_dump(vha, 0);
5682
5683 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
5684 qla2xxx_wake_dpc(vha);
5685 }
5686 } else if (qpair->retry_term_jiff != jiffies) {
5687 qpair->retry_term_exchg_addr = entry->exchange_addr_to_abort;
5688 qpair->retry_term_cnt = 0;
5689 qpair->retry_term_jiff = jiffies;
5690 }
5691
5692 return rc;
5693 }
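
/*
 * Illustrative sketch (not part of the driver): the loop-detection
 * idiom used above, reduced to its essentials. The retry counter is
 * only meaningful while jiffies has not advanced; once the tick moves
 * on, the state is re-armed for the observed exchange address. The
 * struct and function names are hypothetical.
 */
struct retry_state {
	u32 addr;		/* exchange address being retried */
	unsigned long stamp;	/* jiffies value of the first retry */
	int count;		/* retries seen within this jiffy */
};

static bool retry_storm_detected(struct retry_state *st, u32 addr)
{
	if (st->addr == addr && st->stamp == jiffies) {
		/* Same exchange within the same tick: a tight loop. */
		return ++st->count >= 5;
	}

	/* New exchange or a later tick: start counting afresh. */
	st->addr = addr;
	st->stamp = jiffies;
	st->count = 0;
	return false;
}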
5694
5695
5696 static void qlt_handle_abts_completion(struct scsi_qla_host *vha,
5697 struct rsp_que *rsp, response_t *pkt)
5698 {
5699 struct abts_resp_from_24xx_fw *entry =
5700 (struct abts_resp_from_24xx_fw *)pkt;
5701 u32 h = pkt->handle & ~QLA_TGT_HANDLE_MASK;
5702 struct qla_tgt_mgmt_cmd *mcmd;
5703 struct qla_hw_data *ha = vha->hw;
5704
5705 mcmd = qlt_ctio_to_cmd(vha, rsp, pkt->handle, pkt);
5706 if (mcmd == NULL && h != QLA_TGT_SKIP_HANDLE) {
5707 ql_dbg(ql_dbg_async, vha, 0xe064,
5708 "qla_target(%d): ABTS Comp without mcmd\n",
5709 vha->vp_idx);
5710 return;
5711 }
5712
5713 if (mcmd)
5714 vha = mcmd->vha;
5715 vha->vha_tgt.qla_tgt->abts_resp_expected--;
5716
5717 ql_dbg(ql_dbg_tgt, vha, 0xe038,
5718 "ABTS_RESP_24XX: compl_status %x\n",
5719 entry->compl_status);
5720
5721 if (le16_to_cpu(entry->compl_status) != ABTS_RESP_COMPL_SUCCESS) {
5722 if ((entry->error_subcode1 == 0x1E) &&
5723 (entry->error_subcode2 == 0)) {
5724 if (qlt_chk_unresolv_exchg(vha, rsp->qpair, entry)) {
5725 ha->tgt.tgt_ops->free_mcmd(mcmd);
5726 return;
5727 }
5728 qlt_24xx_retry_term_exchange(vha, rsp->qpair,
5729 pkt, mcmd);
5730 } else {
5731 ql_dbg(ql_dbg_tgt, vha, 0xe063,
5732 "qla_target(%d): ABTS_RESP_24XX failed %x (subcode %x:%x)",
5733 vha->vp_idx, entry->compl_status,
5734 entry->error_subcode1,
5735 entry->error_subcode2);
5736 ha->tgt.tgt_ops->free_mcmd(mcmd);
5737 }
5738 } else if (mcmd) {
5739 ha->tgt.tgt_ops->free_mcmd(mcmd);
5740 }
5741 }
5742
5743 /* ha->hardware_lock supposed to be held on entry */
5744 /* called via callback from qla2xxx */
5745 static void qlt_response_pkt(struct scsi_qla_host *vha,
5746 struct rsp_que *rsp, response_t *pkt)
5747 {
5748 struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
5749
5750 if (unlikely(tgt == NULL)) {
5751 ql_dbg(ql_dbg_tgt, vha, 0xe05d,
5752 "qla_target(%d): Response pkt %x received, but no tgt (ha %p)\n",
5753 vha->vp_idx, pkt->entry_type, vha->hw);
5754 return;
5755 }
5756
5757 /*
5758 * In tgt_stop mode we should also allow all requests to pass.
5759 * Otherwise, some commands can get stuck.
5760 */
5761
5762 switch (pkt->entry_type) {
5763 case CTIO_CRC2:
5764 case CTIO_TYPE7:
5765 {
5766 struct ctio7_from_24xx *entry = (struct ctio7_from_24xx *)pkt;
5767
5768 qlt_do_ctio_completion(vha, rsp, entry->handle,
5769 le16_to_cpu(entry->status)|(pkt->entry_status << 16),
5770 entry);
5771 break;
5772 }
5773
5774 case ACCEPT_TGT_IO_TYPE:
5775 {
5776 struct atio_from_isp *atio = (struct atio_from_isp *)pkt;
5777 int rc;
5778
5779 if (atio->u.isp2x.status !=
5780 cpu_to_le16(ATIO_CDB_VALID)) {
5781 ql_dbg(ql_dbg_tgt, vha, 0xe05e,
5782 "qla_target(%d): ATIO with error "
5783 "status %x received\n", vha->vp_idx,
5784 le16_to_cpu(atio->u.isp2x.status));
5785 break;
5786 }
5787
5788 rc = qlt_chk_qfull_thresh_hold(vha, rsp->qpair, atio, 1);
5789 if (rc != 0)
5790 return;
5791
5792 rc = qlt_handle_cmd_for_atio(vha, atio);
5793 if (unlikely(rc != 0)) {
5794 switch (rc) {
5795 case -ENODEV:
5796 ql_dbg(ql_dbg_tgt, vha, 0xe05f,
5797 "qla_target: Unable to send command to target\n");
5798 break;
5799 case -EBADF:
5800 ql_dbg(ql_dbg_tgt, vha, 0xe05f,
5801 "qla_target: Unable to send command to target, sending TERM EXCHANGE for rsp\n");
5802 qlt_send_term_exchange(rsp->qpair, NULL,
5803 atio, 1, 0);
5804 break;
5805 case -EBUSY:
5806 ql_dbg(ql_dbg_tgt, vha, 0xe060,
5807 "qla_target(%d): Unable to send command to target, sending BUSY status\n",
5808 vha->vp_idx);
5809 qlt_send_busy(rsp->qpair, atio,
5810 tc_sam_status);
5811 break;
5812 default:
5813 ql_dbg(ql_dbg_tgt, vha, 0xe060,
5814 "qla_target(%d): Unable to send command to target, sending BUSY status\n",
5815 vha->vp_idx);
5816 qlt_send_busy(rsp->qpair, atio,
5817 qla_sam_status);
5818 break;
5819 }
5820 }
5821 }
5822 break;
5823
5824 case CONTINUE_TGT_IO_TYPE:
5825 {
5826 struct ctio_to_2xxx *entry = (struct ctio_to_2xxx *)pkt;
5827
5828 qlt_do_ctio_completion(vha, rsp, entry->handle,
5829 le16_to_cpu(entry->status)|(pkt->entry_status << 16),
5830 entry);
5831 break;
5832 }
5833
5834 case CTIO_A64_TYPE:
5835 {
5836 struct ctio_to_2xxx *entry = (struct ctio_to_2xxx *)pkt;
5837
5838 qlt_do_ctio_completion(vha, rsp, entry->handle,
5839 le16_to_cpu(entry->status)|(pkt->entry_status << 16),
5840 entry);
5841 break;
5842 }
5843
5844 case IMMED_NOTIFY_TYPE:
5845 ql_dbg(ql_dbg_tgt, vha, 0xe035, "%s", "IMMED_NOTIFY\n");
5846 qlt_handle_imm_notify(vha, (struct imm_ntfy_from_isp *)pkt);
5847 break;
5848
5849 case NOTIFY_ACK_TYPE:
5850 if (tgt->notify_ack_expected > 0) {
5851 struct nack_to_isp *entry = (struct nack_to_isp *)pkt;
5852
5853 ql_dbg(ql_dbg_tgt, vha, 0xe036,
5854 "NOTIFY_ACK seq %08x status %x\n",
5855 le16_to_cpu(entry->u.isp2x.seq_id),
5856 le16_to_cpu(entry->u.isp2x.status));
5857 tgt->notify_ack_expected--;
5858 if (entry->u.isp2x.status !=
5859 cpu_to_le16(NOTIFY_ACK_SUCCESS)) {
5860 ql_dbg(ql_dbg_tgt, vha, 0xe061,
5861 "qla_target(%d): NOTIFY_ACK "
5862 "failed %x\n", vha->vp_idx,
5863 le16_to_cpu(entry->u.isp2x.status));
5864 }
5865 } else {
5866 ql_dbg(ql_dbg_tgt, vha, 0xe062,
5867 "qla_target(%d): Unexpected NOTIFY_ACK received\n",
5868 vha->vp_idx);
5869 }
5870 break;
5871
5872 case ABTS_RECV_24XX:
5873 ql_dbg(ql_dbg_tgt, vha, 0xe037,
5874 "ABTS_RECV_24XX: instance %d\n", vha->vp_idx);
5875 qlt_24xx_handle_abts(vha, (struct abts_recv_from_24xx *)pkt);
5876 break;
5877
5878 case ABTS_RESP_24XX:
5879 if (tgt->abts_resp_expected > 0) {
5880 qlt_handle_abts_completion(vha, rsp, pkt);
5881 } else {
5882 ql_dbg(ql_dbg_tgt, vha, 0xe064,
5883 "qla_target(%d): Unexpected ABTS_RESP_24XX "
5884 "received\n", vha->vp_idx);
5885 }
5886 break;
5887
5888 default:
5889 ql_dbg(ql_dbg_tgt, vha, 0xe065,
5890 "qla_target(%d): Received unknown response pkt "
5891 "type %x\n", vha->vp_idx, pkt->entry_type);
5892 break;
5893 }
5894
5895 }
5896
5897 /*
5898 * ha->hardware_lock is supposed to be held on entry. Might drop it, then reacquire.
5899 */
5900 void qlt_async_event(uint16_t code, struct scsi_qla_host *vha,
5901 uint16_t *mailbox)
5902 {
5903 struct qla_hw_data *ha = vha->hw;
5904 struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
5905 int login_code;
5906
5907 if (!tgt || tgt->tgt_stop || tgt->tgt_stopped)
5908 return;
5909
5910 if (((code == MBA_POINT_TO_POINT) || (code == MBA_CHG_IN_CONNECTION)) &&
5911 IS_QLA2100(ha))
5912 return;
5913 /*
5914 * In tgt_stop mode we should also allow all requests to pass.
5915 * Otherwise, some commands can get stuck.
5916 */
5917
5918
5919 switch (code) {
5920 case MBA_RESET: /* Reset */
5921 case MBA_SYSTEM_ERR: /* System Error */
5922 case MBA_REQ_TRANSFER_ERR: /* Request Transfer Error */
5923 case MBA_RSP_TRANSFER_ERR: /* Response Transfer Error */
5924 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf03a,
5925 "qla_target(%d): System error async event %#x "
5926 "occurred", vha->vp_idx, code);
5927 break;
5928 case MBA_WAKEUP_THRES: /* Request Queue Wake-up. */
5929 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
5930 break;
5931
5932 case MBA_LOOP_UP:
5933 {
5934 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf03b,
5935 "qla_target(%d): Async LOOP_UP occurred "
5936 "(m[0]=%x, m[1]=%x, m[2]=%x, m[3]=%x)", vha->vp_idx,
5937 le16_to_cpu(mailbox[0]), le16_to_cpu(mailbox[1]),
5938 le16_to_cpu(mailbox[2]), le16_to_cpu(mailbox[3]));
5939 if (tgt->link_reinit_iocb_pending) {
5940 qlt_send_notify_ack(ha->base_qpair,
5941 (void *)&tgt->link_reinit_iocb,
5942 0, 0, 0, 0, 0, 0);
5943 tgt->link_reinit_iocb_pending = 0;
5944 }
5945 break;
5946 }
5947
5948 case MBA_LIP_OCCURRED:
5949 case MBA_LOOP_DOWN:
5950 case MBA_LIP_RESET:
5951 case MBA_RSCN_UPDATE:
5952 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf03c,
5953 "qla_target(%d): Async event %#x occurred "
5954 "(m[0]=%x, m[1]=%x, m[2]=%x, m[3]=%x)", vha->vp_idx, code,
5955 le16_to_cpu(mailbox[0]), le16_to_cpu(mailbox[1]),
5956 le16_to_cpu(mailbox[2]), le16_to_cpu(mailbox[3]));
5957 break;
5958
5959 case MBA_REJECTED_FCP_CMD:
5960 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf017,
5961 "qla_target(%d): Async event LS_REJECT occurred (m[0]=%x, m[1]=%x, m[2]=%x, m[3]=%x)",
5962 vha->vp_idx,
5963 le16_to_cpu(mailbox[0]), le16_to_cpu(mailbox[1]),
5964 le16_to_cpu(mailbox[2]), le16_to_cpu(mailbox[3]));
5965
5966 if (le16_to_cpu(mailbox[3]) == 1) {
5967 /* exchange starvation. */
5968 vha->hw->exch_starvation++;
5969 if (vha->hw->exch_starvation > 5) {
5970 ql_log(ql_log_warn, vha, 0xd03a,
5971 "Exchange starvation-. Resetting RISC\n");
5972
5973 vha->hw->exch_starvation = 0;
5974 if (IS_P3P_TYPE(vha->hw))
5975 set_bit(FCOE_CTX_RESET_NEEDED,
5976 &vha->dpc_flags);
5977 else
5978 set_bit(ISP_ABORT_NEEDED,
5979 &vha->dpc_flags);
5980 qla2xxx_wake_dpc(vha);
5981 }
5982 }
5983 break;
5984
5985 case MBA_PORT_UPDATE:
5986 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf03d,
5987 "qla_target(%d): Port update async event %#x "
5988 "occurred: updating the ports database (m[0]=%x, m[1]=%x, "
5989 "m[2]=%x, m[3]=%x)", vha->vp_idx, code,
5990 le16_to_cpu(mailbox[0]), le16_to_cpu(mailbox[1]),
5991 le16_to_cpu(mailbox[2]), le16_to_cpu(mailbox[3]));
5992
5993 login_code = le16_to_cpu(mailbox[2]);
5994 if (login_code == 0x4) {
5995 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf03e,
5996 "Async MB 2: Got PLOGI Complete\n");
5997 vha->hw->exch_starvation = 0;
5998 } else if (login_code == 0x7)
5999 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf03f,
6000 "Async MB 2: Port Logged Out\n");
6001 break;
6002 default:
6003 break;
6004 }
6005
6006 }
6007
6008 static fc_port_t *qlt_get_port_database(struct scsi_qla_host *vha,
6009 uint16_t loop_id)
6010 {
6011 fc_port_t *fcport, *tfcp, *del;
6012 int rc;
6013 unsigned long flags;
6014 u8 newfcport = 0;
6015
6016 fcport = qla2x00_alloc_fcport(vha, GFP_KERNEL);
6017 if (!fcport) {
6018 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf06f,
6019 "qla_target(%d): Allocation of tmp FC port failed",
6020 vha->vp_idx);
6021 return NULL;
6022 }
6023
6024 fcport->loop_id = loop_id;
6025
6026 rc = qla24xx_gpdb_wait(vha, fcport, 0);
6027 if (rc != QLA_SUCCESS) {
6028 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf070,
6029 "qla_target(%d): Failed to retrieve fcport "
6030 "information -- get_port_database() returned %x "
6031 "(loop_id=0x%04x)", vha->vp_idx, rc, loop_id);
6032 kfree(fcport);
6033 return NULL;
6034 }
6035
6036 del = NULL;
6037 spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags);
6038 tfcp = qla2x00_find_fcport_by_wwpn(vha, fcport->port_name, 1);
6039
6040 if (tfcp) {
6041 tfcp->d_id = fcport->d_id;
6042 tfcp->port_type = fcport->port_type;
6043 tfcp->supported_classes = fcport->supported_classes;
6044 tfcp->flags |= fcport->flags;
6045 tfcp->scan_state = QLA_FCPORT_FOUND;
6046
6047 del = fcport;
6048 fcport = tfcp;
6049 } else {
6050 if (vha->hw->current_topology == ISP_CFG_F)
6051 fcport->flags |= FCF_FABRIC_DEVICE;
6052
6053 list_add_tail(&fcport->list, &vha->vp_fcports);
6054 if (!IS_SW_RESV_ADDR(fcport->d_id))
6055 vha->fcport_count++;
6056 fcport->login_gen++;
6057 qla2x00_set_fcport_disc_state(fcport, DSC_LOGIN_COMPLETE);
6058 fcport->login_succ = 1;
6059 newfcport = 1;
6060 }
6061
6062 fcport->deleted = 0;
6063 spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
6064
6065 switch (vha->host->active_mode) {
6066 case MODE_INITIATOR:
6067 case MODE_DUAL:
6068 if (newfcport) {
6069 if (!IS_IIDMA_CAPABLE(vha->hw) || !vha->hw->flags.gpsc_supported) {
6070 qla24xx_sched_upd_fcport(fcport);
6071 } else {
6072 ql_dbg(ql_dbg_disc, vha, 0x20ff,
6073 "%s %d %8phC post gpsc fcp_cnt %d\n",
6074 __func__, __LINE__, fcport->port_name, vha->fcport_count);
6075 qla24xx_post_gpsc_work(vha, fcport);
6076 }
6077 }
6078 break;
6079
6080 case MODE_TARGET:
6081 default:
6082 break;
6083 }
6084 if (del)
6085 qla2x00_free_fcport(del);
6086
6087 return fcport;
6088 }
6089
6090 /* Must be called under tgt_mutex */
6091 static struct fc_port *qlt_make_local_sess(struct scsi_qla_host *vha,
6092 be_id_t s_id)
6093 {
6094 struct fc_port *sess = NULL;
6095 fc_port_t *fcport = NULL;
6096 int rc, global_resets;
6097 uint16_t loop_id = 0;
6098
6099 if (s_id.domain == 0xFF && s_id.area == 0xFC) {
6100 /*
6101 * This is the Domain Controller address, so it should be
6102 * safe to drop SCSI commands from it.
6103 */
6104 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf042,
6105 "Unable to find initiator with S_ID %x:%x:%x",
6106 s_id.domain, s_id.area, s_id.al_pa);
6107 return NULL;
6108 }
6109
6110 mutex_lock(&vha->vha_tgt.tgt_mutex);
6111
6112 retry:
6113 global_resets =
6114 atomic_read(&vha->vha_tgt.qla_tgt->tgt_global_resets_count);
6115
6116 rc = qla24xx_get_loop_id(vha, s_id, &loop_id);
6117 if (rc != 0) {
6118 mutex_unlock(&vha->vha_tgt.tgt_mutex);
6119
6120 ql_log(ql_log_info, vha, 0xf071,
6121 "qla_target(%d): Unable to find "
6122 "initiator with S_ID %x:%x:%x",
6123 vha->vp_idx, s_id.domain, s_id.area, s_id.al_pa);
6124
6125 if (rc == -ENOENT) {
6126 qlt_port_logo_t logo;
6127
6128 logo.id = be_to_port_id(s_id);
6129 logo.cmd_count = 1;
6130 qlt_send_first_logo(vha, &logo);
6131 }
6132
6133 return NULL;
6134 }
6135
6136 fcport = qlt_get_port_database(vha, loop_id);
6137 if (!fcport) {
6138 mutex_unlock(&vha->vha_tgt.tgt_mutex);
6139 return NULL;
6140 }
6141
6142 if (global_resets !=
6143 atomic_read(&vha->vha_tgt.qla_tgt->tgt_global_resets_count)) {
6144 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf043,
6145 "qla_target(%d): global reset during session discovery "
6146 "(counter was %d, new %d), retrying", vha->vp_idx,
6147 global_resets,
6148 atomic_read(&vha->vha_tgt.
6149 qla_tgt->tgt_global_resets_count));
6150 goto retry;
6151 }
6152
6153 sess = qlt_create_sess(vha, fcport, true);
6154
6155 mutex_unlock(&vha->vha_tgt.tgt_mutex);
6156
6157 return sess;
6158 }
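
/*
 * Illustrative sketch (not part of the driver): the generation-counter
 * retry idiom used above. Sample the counter before a slow lookup; if
 * it changed while the lookup ran, the result may be stale, so redo
 * the lookup. lookup_with_retry() and its arguments are hypothetical.
 */
static void *lookup_with_retry(atomic_t *generation,
			       void *(*slow_lookup)(void *), void *arg)
{
	void *result;
	int gen;

	do {
		gen = atomic_read(generation);
		result = slow_lookup(arg);	/* may sleep */
	} while (gen != atomic_read(generation));	/* a reset raced us */

	return result;
}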
6159
6160 static void qlt_abort_work(struct qla_tgt *tgt,
6161 struct qla_tgt_sess_work_param *prm)
6162 {
6163 struct scsi_qla_host *vha = tgt->vha;
6164 struct qla_hw_data *ha = vha->hw;
6165 struct fc_port *sess = NULL;
6166 unsigned long flags = 0, flags2 = 0;
6167 be_id_t s_id;
6168 int rc;
6169
6170 spin_lock_irqsave(&ha->tgt.sess_lock, flags2);
6171
6172 if (tgt->tgt_stop)
6173 goto out_term2;
6174
6175 s_id = le_id_to_be(prm->abts.fcp_hdr_le.s_id);
6176
6177 sess = ha->tgt.tgt_ops->find_sess_by_s_id(vha, s_id);
6178 if (!sess) {
6179 spin_unlock_irqrestore(&ha->tgt.sess_lock, flags2);
6180
6181 sess = qlt_make_local_sess(vha, s_id);
6182 /* sess holds an extra creation reference */
6183
6184 spin_lock_irqsave(&ha->tgt.sess_lock, flags2);
6185 if (!sess)
6186 goto out_term2;
6187 } else {
6188 if (sess->deleted) {
6189 sess = NULL;
6190 goto out_term2;
6191 }
6192
6193 if (!kref_get_unless_zero(&sess->sess_kref)) {
6194 ql_dbg(ql_dbg_tgt_tmr, vha, 0xf01c,
6195 "%s: kref_get fail %8phC \n",
6196 __func__, sess->port_name);
6197 sess = NULL;
6198 goto out_term2;
6199 }
6200 }
6201
6202 rc = __qlt_24xx_handle_abts(vha, &prm->abts, sess);
6203 spin_unlock_irqrestore(&ha->tgt.sess_lock, flags2);
6204
6205 ha->tgt.tgt_ops->put_sess(sess);
6206
6207 if (rc != 0)
6208 goto out_term;
6209 return;
6210
6211 out_term2:
6212 spin_unlock_irqrestore(&ha->tgt.sess_lock, flags2);
6213
6214 out_term:
6215 spin_lock_irqsave(&ha->hardware_lock, flags);
6216 qlt_24xx_send_abts_resp(ha->base_qpair, &prm->abts,
6217 FCP_TMF_REJECTED, false);
6218 spin_unlock_irqrestore(&ha->hardware_lock, flags);
6219 }
6220
6221 static void qlt_tmr_work(struct qla_tgt *tgt,
6222 struct qla_tgt_sess_work_param *prm)
6223 {
6224 struct atio_from_isp *a = &prm->tm_iocb2;
6225 struct scsi_qla_host *vha = tgt->vha;
6226 struct qla_hw_data *ha = vha->hw;
6227 struct fc_port *sess;
6228 unsigned long flags;
6229 be_id_t s_id;
6230 int rc;
6231 u64 unpacked_lun;
6232 int fn;
6233 void *iocb;
6234
6235 spin_lock_irqsave(&ha->tgt.sess_lock, flags);
6236
6237 if (tgt->tgt_stop)
6238 goto out_term2;
6239
6240 s_id = prm->tm_iocb2.u.isp24.fcp_hdr.s_id;
6241 sess = ha->tgt.tgt_ops->find_sess_by_s_id(vha, s_id);
6242 if (!sess) {
6243 spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
6244
6245 sess = qlt_make_local_sess(vha, s_id);
6246 /* sess holds an extra creation reference */
6247
6248 spin_lock_irqsave(&ha->tgt.sess_lock, flags);
6249 if (!sess)
6250 goto out_term2;
6251 } else {
6252 if (sess->deleted) {
6253 goto out_term2;
6254 }
6255
6256 if (!kref_get_unless_zero(&sess->sess_kref)) {
6257 ql_dbg(ql_dbg_tgt_tmr, vha, 0xf020,
6258 "%s: kref_get fail %8phC\n",
6259 __func__, sess->port_name);
6260 goto out_term2;
6261 }
6262 }
6263
6264 iocb = a;
6265 fn = a->u.isp24.fcp_cmnd.task_mgmt_flags;
6266 unpacked_lun =
6267 scsilun_to_int((struct scsi_lun *)&a->u.isp24.fcp_cmnd.lun);
6268
6269 rc = qlt_issue_task_mgmt(sess, unpacked_lun, fn, iocb, 0);
6270 spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
6271
6272 ha->tgt.tgt_ops->put_sess(sess);
6273
6274 if (rc != 0)
6275 goto out_term;
6276 return;
6277
6278 out_term2:
6279 spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
6280 out_term:
6281 qlt_send_term_exchange(ha->base_qpair, NULL, &prm->tm_iocb2, 1, 0);
6282 }
6283
6284 static void qlt_sess_work_fn(struct work_struct *work)
6285 {
6286 struct qla_tgt *tgt = container_of(work, struct qla_tgt, sess_work);
6287 struct scsi_qla_host *vha = tgt->vha;
6288 unsigned long flags;
6289
6290 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf000, "Sess work (tgt %p)", tgt);
6291
6292 spin_lock_irqsave(&tgt->sess_work_lock, flags);
6293 while (!list_empty(&tgt->sess_works_list)) {
6294 struct qla_tgt_sess_work_param *prm = list_entry(
6295 tgt->sess_works_list.next, typeof(*prm),
6296 sess_works_list_entry);
6297
6298 /*
6299 * This work can be scheduled on several CPUs at a time, so
6300 * unlink the entry first to prevent double processing.
6301 */
6302 list_del(&prm->sess_works_list_entry);
6303
6304 spin_unlock_irqrestore(&tgt->sess_work_lock, flags);
6305
6306 switch (prm->type) {
6307 case QLA_TGT_SESS_WORK_ABORT:
6308 qlt_abort_work(tgt, prm);
6309 break;
6310 case QLA_TGT_SESS_WORK_TM:
6311 qlt_tmr_work(tgt, prm);
6312 break;
6313 default:
6314 BUG_ON(1);
6315 break;
6316 }
6317
6318 spin_lock_irqsave(&tgt->sess_work_lock, flags);
6319
6320 kfree(prm);
6321 }
6322 spin_unlock_irqrestore(&tgt->sess_work_lock, flags);
6323 }
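
/*
 * Illustrative sketch (not part of the driver): the drain idiom used
 * above. Each entry is unlinked while the lock is held, so concurrent
 * workers cannot process it twice, and the possibly-sleeping handler
 * runs with the lock dropped. Names are hypothetical.
 */
static void drain_work_list(spinlock_t *lock, struct list_head *head,
			    void (*handle)(struct list_head *item))
{
	unsigned long flags;

	spin_lock_irqsave(lock, flags);
	while (!list_empty(head)) {
		struct list_head *item = head->next;

		list_del(item);		/* claim the entry under the lock */
		spin_unlock_irqrestore(lock, flags);

		handle(item);		/* may sleep or take other locks */

		spin_lock_irqsave(lock, flags);
	}
	spin_unlock_irqrestore(lock, flags);
}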
6324
6325 /* Must be called under tgt_host_action_mutex */
6326 int qlt_add_target(struct qla_hw_data *ha, struct scsi_qla_host *base_vha)
6327 {
6328 struct qla_tgt *tgt;
6329 int rc, i;
6330 struct qla_qpair_hint *h;
6331
6332 if (!QLA_TGT_MODE_ENABLED())
6333 return 0;
6334
6335 if (!IS_TGT_MODE_CAPABLE(ha)) {
6336 ql_log(ql_log_warn, base_vha, 0xe070,
6337 "This adapter does not support target mode.\n");
6338 return 0;
6339 }
6340
6341 ql_dbg(ql_dbg_tgt, base_vha, 0xe03b,
6342 "Registering target for host %ld(%p).\n", base_vha->host_no, ha);
6343
6344 BUG_ON(base_vha->vha_tgt.qla_tgt != NULL);
6345
6346 tgt = kzalloc(sizeof(struct qla_tgt), GFP_KERNEL);
6347 if (!tgt) {
6348 ql_dbg(ql_dbg_tgt, base_vha, 0xe066,
6349 "Unable to allocate struct qla_tgt\n");
6350 return -ENOMEM;
6351 }
6352
6353 tgt->qphints = kcalloc(ha->max_qpairs + 1,
6354 sizeof(struct qla_qpair_hint),
6355 GFP_KERNEL);
6356 if (!tgt->qphints) {
6357 kfree(tgt);
6358 ql_log(ql_log_warn, base_vha, 0x0197,
6359 "Unable to allocate qpair hints.\n");
6360 return -ENOMEM;
6361 }
6362
6363 if (!(base_vha->host->hostt->supported_mode & MODE_TARGET))
6364 base_vha->host->hostt->supported_mode |= MODE_TARGET;
6365
6366 rc = btree_init64(&tgt->lun_qpair_map);
6367 if (rc) {
6368 kfree(tgt->qphints);
6369 kfree(tgt);
6370 ql_log(ql_log_info, base_vha, 0x0198,
6371 "Unable to initialize lun_qpair_map btree\n");
6372 return -EIO;
6373 }
6374 h = &tgt->qphints[0];
6375 h->qpair = ha->base_qpair;
6376 INIT_LIST_HEAD(&h->hint_elem);
6377 h->cpuid = ha->base_qpair->cpuid;
6378 list_add_tail(&h->hint_elem, &ha->base_qpair->hints_list);
6379
6380 for (i = 0; i < ha->max_qpairs; i++) {
6381 unsigned long flags;
6382
6383 struct qla_qpair *qpair = ha->queue_pair_map[i];
6384
6385 h = &tgt->qphints[i + 1];
6386 INIT_LIST_HEAD(&h->hint_elem);
6387 if (qpair) {
6388 h->qpair = qpair;
6389 spin_lock_irqsave(qpair->qp_lock_ptr, flags);
6390 list_add_tail(&h->hint_elem, &qpair->hints_list);
6391 spin_unlock_irqrestore(qpair->qp_lock_ptr, flags);
6392 h->cpuid = qpair->cpuid;
6393 }
6394 }
6395
6396 tgt->ha = ha;
6397 tgt->vha = base_vha;
6398 init_waitqueue_head(&tgt->waitQ);
6399 INIT_LIST_HEAD(&tgt->del_sess_list);
6400 spin_lock_init(&tgt->sess_work_lock);
6401 INIT_WORK(&tgt->sess_work, qlt_sess_work_fn);
6402 INIT_LIST_HEAD(&tgt->sess_works_list);
6403 atomic_set(&tgt->tgt_global_resets_count, 0);
6404
6405 base_vha->vha_tgt.qla_tgt = tgt;
6406
6407 ql_dbg(ql_dbg_tgt, base_vha, 0xe067,
6408 "qla_target(%d): using 64 Bit PCI addressing",
6409 base_vha->vp_idx);
6410 /* 3 is reserved */
6411 tgt->sg_tablesize = QLA_TGT_MAX_SG_24XX(base_vha->req->length - 3);
6412
6413 mutex_lock(&qla_tgt_mutex);
6414 list_add_tail(&tgt->tgt_list_entry, &qla_tgt_glist);
6415 mutex_unlock(&qla_tgt_mutex);
6416
6417 if (ha->tgt.tgt_ops && ha->tgt.tgt_ops->add_target)
6418 ha->tgt.tgt_ops->add_target(base_vha);
6419
6420 return 0;
6421 }
6422
6423 /* Must be called under tgt_host_action_mutex */
6424 int qlt_remove_target(struct qla_hw_data *ha, struct scsi_qla_host *vha)
6425 {
6426 if (!vha->vha_tgt.qla_tgt)
6427 return 0;
6428
6429 if (vha->fc_vport) {
6430 qlt_release(vha->vha_tgt.qla_tgt);
6431 return 0;
6432 }
6433
6434 /* free left over qfull cmds */
6435 qlt_init_term_exchange(vha);
6436
6437 ql_dbg(ql_dbg_tgt, vha, 0xe03c, "Unregistering target for host %ld(%p)",
6438 vha->host_no, ha);
6439 qlt_release(vha->vha_tgt.qla_tgt);
6440
6441 return 0;
6442 }
6443
6444 void qlt_remove_target_resources(struct qla_hw_data *ha)
6445 {
6446 struct scsi_qla_host *node;
6447 u32 key = 0;
6448
6449 btree_for_each_safe32(&ha->tgt.host_map, key, node)
6450 btree_remove32(&ha->tgt.host_map, key);
6451
6452 btree_destroy32(&ha->tgt.host_map);
6453 }
6454
6455 static void qlt_lport_dump(struct scsi_qla_host *vha, u64 wwpn,
6456 unsigned char *b)
6457 {
6458 pr_debug("qla2xxx HW vha->node_name: %8phC\n", vha->node_name);
6459 pr_debug("qla2xxx HW vha->port_name: %8phC\n", vha->port_name);
6460 put_unaligned_be64(wwpn, b);
6461 pr_debug("qla2xxx passed configfs WWPN: %8phC\n", b);
6462 }
6463
6464 /**
6465 * qlt_lport_register - register lport with external module
6466 *
6467 * @target_lport_ptr: pointer for tcm_qla2xxx specific lport data
6468 * @phys_wwpn: physical port WWPN
6469 * @npiv_wwpn: NPIV WWPN
6470 * @npiv_wwnn: NPIV WWNN
6471 * @callback: lport initialization callback for tcm_qla2xxx code
6472 */
6473 int qlt_lport_register(void *target_lport_ptr, u64 phys_wwpn,
6474 u64 npiv_wwpn, u64 npiv_wwnn,
6475 int (*callback)(struct scsi_qla_host *, void *, u64, u64))
6476 {
6477 struct qla_tgt *tgt;
6478 struct scsi_qla_host *vha;
6479 struct qla_hw_data *ha;
6480 struct Scsi_Host *host;
6481 unsigned long flags;
6482 int rc;
6483 u8 b[WWN_SIZE];
6484
6485 mutex_lock(&qla_tgt_mutex);
6486 list_for_each_entry(tgt, &qla_tgt_glist, tgt_list_entry) {
6487 vha = tgt->vha;
6488 ha = vha->hw;
6489
6490 host = vha->host;
6491 if (!host)
6492 continue;
6493
6494 if (!(host->hostt->supported_mode & MODE_TARGET))
6495 continue;
6496
6497 if (vha->qlini_mode == QLA2XXX_INI_MODE_ENABLED)
6498 continue;
6499
6500 spin_lock_irqsave(&ha->hardware_lock, flags);
6501 if ((!npiv_wwpn || !npiv_wwnn) && host->active_mode & MODE_TARGET) {
6502 pr_debug("MODE_TARGET already active on qla2xxx(%d)\n",
6503 host->host_no);
6504 spin_unlock_irqrestore(&ha->hardware_lock, flags);
6505 continue;
6506 }
6507 if (tgt->tgt_stop) {
6508 pr_debug("MODE_TARGET in shutdown on qla2xxx(%d)\n",
6509 host->host_no);
6510 spin_unlock_irqrestore(&ha->hardware_lock, flags);
6511 continue;
6512 }
6513 spin_unlock_irqrestore(&ha->hardware_lock, flags);
6514
6515 if (!scsi_host_get(host)) {
6516 ql_dbg(ql_dbg_tgt, vha, 0xe068,
6517 "Unable to scsi_host_get() for"
6518 " qla2xxx scsi_host\n");
6519 continue;
6520 }
6521 qlt_lport_dump(vha, phys_wwpn, b);
6522
6523 if (memcmp(vha->port_name, b, WWN_SIZE)) {
6524 scsi_host_put(host);
6525 continue;
6526 }
6527 rc = (*callback)(vha, target_lport_ptr, npiv_wwpn, npiv_wwnn);
6528 if (rc != 0)
6529 scsi_host_put(host);
6530
6531 mutex_unlock(&qla_tgt_mutex);
6532 return rc;
6533 }
6534 mutex_unlock(&qla_tgt_mutex);
6535
6536 return -ENODEV;
6537 }
6538 EXPORT_SYMBOL(qlt_lport_register);
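
/*
 * Illustrative sketch (not part of the driver): how a fabric module in
 * the tcm_qla2xxx mold might bind its lport state via
 * qlt_lport_register(). struct my_lport, my_lport_callback() and
 * my_lport_bind() are hypothetical; the callback is invoked for the
 * HBA whose physical WWPN matches phys_wwpn.
 */
struct my_lport {
	struct scsi_qla_host *vha;	/* hypothetical fabric state */
};

static int my_lport_callback(struct scsi_qla_host *vha, void *ptr,
			     u64 npiv_wwpn, u64 npiv_wwnn)
{
	struct my_lport *lport = ptr;

	lport->vha = vha;
	vha->vha_tgt.target_lport_ptr = lport;
	return 0;
}

static int my_lport_bind(struct my_lport *lport, u64 phys_wwpn)
{
	/* No NPIV WWNs in this example, so pass 0 for both. */
	return qlt_lport_register(lport, phys_wwpn, 0, 0,
				  my_lport_callback);
}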
6539
6540 /**
6541 * qlt_lport_deregister - Deregister lport
6542 *
6543 * @vha: Registered scsi_qla_host pointer
6544 */
6545 void qlt_lport_deregister(struct scsi_qla_host *vha)
6546 {
6547 struct qla_hw_data *ha = vha->hw;
6548 struct Scsi_Host *sh = vha->host;
6549 /*
6550 * Clear target_lport_ptr and the qla_target_template pointer in qla_hw_data
6551 */
6552 vha->vha_tgt.target_lport_ptr = NULL;
6553 ha->tgt.tgt_ops = NULL;
6554 /*
6555 * Release the Scsi_Host reference for the underlying qla2xxx host
6556 */
6557 scsi_host_put(sh);
6558 }
6559 EXPORT_SYMBOL(qlt_lport_deregister);
6560
6561 /* Must be called under HW lock */
6562 void qlt_set_mode(struct scsi_qla_host *vha)
6563 {
6564 switch (vha->qlini_mode) {
6565 case QLA2XXX_INI_MODE_DISABLED:
6566 case QLA2XXX_INI_MODE_EXCLUSIVE:
6567 vha->host->active_mode = MODE_TARGET;
6568 break;
6569 case QLA2XXX_INI_MODE_ENABLED:
6570 vha->host->active_mode = MODE_INITIATOR;
6571 break;
6572 case QLA2XXX_INI_MODE_DUAL:
6573 vha->host->active_mode = MODE_DUAL;
6574 break;
6575 default:
6576 break;
6577 }
6578 }
6579
6580 /* Must be called under HW lock */
6581 static void qlt_clear_mode(struct scsi_qla_host *vha)
6582 {
6583 switch (vha->qlini_mode) {
6584 case QLA2XXX_INI_MODE_DISABLED:
6585 vha->host->active_mode = MODE_UNKNOWN;
6586 break;
6587 case QLA2XXX_INI_MODE_EXCLUSIVE:
6588 vha->host->active_mode = MODE_INITIATOR;
6589 break;
6590 case QLA2XXX_INI_MODE_ENABLED:
6591 case QLA2XXX_INI_MODE_DUAL:
6592 vha->host->active_mode = MODE_INITIATOR;
6593 break;
6594 default:
6595 break;
6596 }
6597 }
6598
6599 /*
6600 * qlt_enable_vha - NO LOCK HELD
6601 *
6602 * host reset, bring up with Target Mode enabled
6603 */
6604 void
6605 qlt_enable_vha(struct scsi_qla_host *vha)
6606 {
6607 struct qla_hw_data *ha = vha->hw;
6608 struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
6609 unsigned long flags;
6610 scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);
6611
6612 if (!tgt) {
6613 ql_dbg(ql_dbg_tgt, vha, 0xe069,
6614 "Unable to locate qla_tgt pointer from"
6615 " struct qla_hw_data\n");
6616 dump_stack();
6617 return;
6618 }
6619 if (vha->qlini_mode == QLA2XXX_INI_MODE_ENABLED)
6620 return;
6621
6622 if (ha->tgt.num_act_qpairs > ha->max_qpairs)
6623 ha->tgt.num_act_qpairs = ha->max_qpairs;
6624 spin_lock_irqsave(&ha->hardware_lock, flags);
6625 tgt->tgt_stopped = 0;
6626 qlt_set_mode(vha);
6627 spin_unlock_irqrestore(&ha->hardware_lock, flags);
6628
6629 mutex_lock(&ha->optrom_mutex);
6630 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf021,
6631 "%s.\n", __func__);
6632 if (vha->vp_idx) {
6633 qla24xx_disable_vp(vha);
6634 qla24xx_enable_vp(vha);
6635 } else {
6636 set_bit(ISP_ABORT_NEEDED, &base_vha->dpc_flags);
6637 qla2xxx_wake_dpc(base_vha);
6638 WARN_ON_ONCE(qla2x00_wait_for_hba_online(base_vha) !=
6639 QLA_SUCCESS);
6640 }
6641 mutex_unlock(&ha->optrom_mutex);
6642 }
6643 EXPORT_SYMBOL(qlt_enable_vha);
6644
6645 /*
6646 * qlt_disable_vha - NO LOCK HELD
6647 *
6648 * Disable Target Mode and reset the adapter
6649 */
6650 static void qlt_disable_vha(struct scsi_qla_host *vha)
6651 {
6652 struct qla_hw_data *ha = vha->hw;
6653 struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
6654 unsigned long flags;
6655
6656 if (!tgt) {
6657 ql_dbg(ql_dbg_tgt, vha, 0xe06a,
6658 "Unable to locate qla_tgt pointer from"
6659 " struct qla_hw_data\n");
6660 dump_stack();
6661 return;
6662 }
6663
6664 spin_lock_irqsave(&ha->hardware_lock, flags);
6665 qlt_clear_mode(vha);
6666 spin_unlock_irqrestore(&ha->hardware_lock, flags);
6667
6668 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
6669 qla2xxx_wake_dpc(vha);
6670 if (qla2x00_wait_for_hba_online(vha) != QLA_SUCCESS)
6671 ql_dbg(ql_dbg_tgt, vha, 0xe081,
6672 "qla2x00_wait_for_hba_online() failed\n");
6673 }
6674
6675 /*
6676 * Called from qla_init.c:qla24xx_vport_create() context to set up
6677 * the target-mode-specific struct scsi_qla_host and struct qla_hw_data
6678 * members.
6679 */
6680 void
6681 qlt_vport_create(struct scsi_qla_host *vha, struct qla_hw_data *ha)
6682 {
6683 vha->vha_tgt.qla_tgt = NULL;
6684
6685 mutex_init(&vha->vha_tgt.tgt_mutex);
6686 mutex_init(&vha->vha_tgt.tgt_host_action_mutex);
6687
6688 qlt_clear_mode(vha);
6689
6690 /*
6691 * NOTE: Currently the value is kept the same for <24xx and
6692 * >=24xx ISPs. If it becomes necessary to change it, add a
6693 * check for the specific ISPs and assign the value
6694 * appropriately.
6695 */
6696 ha->tgt.atio_q_length = ATIO_ENTRY_CNT_24XX;
6697
6698 qlt_add_target(ha, vha);
6699 }
6700
6701 u8
6702 qlt_rff_id(struct scsi_qla_host *vha)
6703 {
6704 u8 fc4_feature = 0;
6705 /*
6706 * FC-4 Feature bit 0 indicates target functionality to the name server.
6707 */
6708 if (qla_tgt_mode_enabled(vha)) {
6709 fc4_feature = BIT_0;
6710 } else if (qla_ini_mode_enabled(vha)) {
6711 fc4_feature = BIT_1;
6712 } else if (qla_dual_mode_enabled(vha))
6713 fc4_feature = BIT_0 | BIT_1;
6714
6715 return fc4_feature;
6716 }
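
/*
 * Illustrative sketch (not part of the driver): decoding the FC-4
 * feature bits reported to the name server via RFF_ID. Bit 0
 * advertises target functionality and bit 1 initiator functionality,
 * so dual mode sets both. The helper name is hypothetical.
 */
static const char *fc4_feature_str(u8 fc4_feature)
{
	switch (fc4_feature & (BIT_0 | BIT_1)) {
	case BIT_0:
		return "target";
	case BIT_1:
		return "initiator";
	case BIT_0 | BIT_1:
		return "dual (initiator + target)";
	default:
		return "none";
	}
}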
6717
6718 /*
6719 * qlt_init_atio_q_entries() - Initializes ATIO queue entries.
6720 * @vha: VHA context
6721 *
6722 * The beginning of the ATIO ring has an initialization control block
6723 * already built by the NVRAM config routine.
6724 *
6725 * No return value.
6726 */
6727 void
6728 qlt_init_atio_q_entries(struct scsi_qla_host *vha)
6729 {
6730 struct qla_hw_data *ha = vha->hw;
6731 uint16_t cnt;
6732 struct atio_from_isp *pkt = (struct atio_from_isp *)ha->tgt.atio_ring;
6733
6734 if (qla_ini_mode_enabled(vha))
6735 return;
6736
6737 for (cnt = 0; cnt < ha->tgt.atio_q_length; cnt++) {
6738 pkt->u.raw.signature = ATIO_PROCESSED;
6739 pkt++;
6740 }
6741
6742 }
6743
6744 /*
6745 * qlt_24xx_process_atio_queue() - Process ATIO queue entries.
6746 * @vha: SCSI driver VHA context
6747 */
6748 void
6749 qlt_24xx_process_atio_queue(struct scsi_qla_host *vha, uint8_t ha_locked)
6750 {
6751 struct qla_hw_data *ha = vha->hw;
6752 struct atio_from_isp *pkt;
6753 int cnt, i;
6754
6755 if (!ha->flags.fw_started)
6756 return;
6757
6758 while ((ha->tgt.atio_ring_ptr->signature != ATIO_PROCESSED) ||
6759 fcpcmd_is_corrupted(ha->tgt.atio_ring_ptr)) {
6760 pkt = (struct atio_from_isp *)ha->tgt.atio_ring_ptr;
6761 cnt = pkt->u.raw.entry_count;
6762
6763 if (unlikely(fcpcmd_is_corrupted(ha->tgt.atio_ring_ptr))) {
6764 /*
6765 * This packet is corrupted. The header + payload
6766 * cannot be trusted. There is no point in passing
6767 * it further up.
6768 */
6769 ql_log(ql_log_warn, vha, 0xd03c,
6770 "corrupted fcp frame SID[%3phN] OXID[%04x] EXCG[%x] %64phN\n",
6771 &pkt->u.isp24.fcp_hdr.s_id,
6772 be16_to_cpu(pkt->u.isp24.fcp_hdr.ox_id),
6773 le32_to_cpu(pkt->u.isp24.exchange_addr), pkt);
6774
6775 adjust_corrupted_atio(pkt);
6776 qlt_send_term_exchange(ha->base_qpair, NULL, pkt,
6777 ha_locked, 0);
6778 } else {
6779 qlt_24xx_atio_pkt_all_vps(vha,
6780 (struct atio_from_isp *)pkt, ha_locked);
6781 }
6782
6783 for (i = 0; i < cnt; i++) {
6784 ha->tgt.atio_ring_index++;
6785 if (ha->tgt.atio_ring_index == ha->tgt.atio_q_length) {
6786 ha->tgt.atio_ring_index = 0;
6787 ha->tgt.atio_ring_ptr = ha->tgt.atio_ring;
6788 } else
6789 ha->tgt.atio_ring_ptr++;
6790
6791 pkt->u.raw.signature = ATIO_PROCESSED;
6792 pkt = (struct atio_from_isp *)ha->tgt.atio_ring_ptr;
6793 }
6794 wmb();
6795 }
6796
6797 /* Adjust ring index */
6798 WRT_REG_DWORD(ISP_ATIO_Q_OUT(vha), ha->tgt.atio_ring_index);
6799 }
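
/*
 * Illustrative sketch (not part of the driver): the ring-consumer
 * advance used above, isolated. A packet may span entry_count slots;
 * the index consumes that many and wraps to slot 0 at the end of the
 * ring before the out-pointer register is written back. The helper
 * name is hypothetical.
 */
static u16 atio_ring_advance(u16 index, u16 ring_length, int entries)
{
	while (entries--) {
		index++;
		if (index == ring_length)
			index = 0;	/* wrap to the ring start */
	}
	return index;
}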
6800
6801 void
6802 qlt_24xx_config_rings(struct scsi_qla_host *vha)
6803 {
6804 struct qla_hw_data *ha = vha->hw;
6805 struct qla_msix_entry *msix = &ha->msix_entries[2];
6806 struct init_cb_24xx *icb = (struct init_cb_24xx *)ha->init_cb;
6807
6808 if (!QLA_TGT_MODE_ENABLED())
6809 return;
6810
6811 WRT_REG_DWORD(ISP_ATIO_Q_IN(vha), 0);
6812 WRT_REG_DWORD(ISP_ATIO_Q_OUT(vha), 0);
6813 RD_REG_DWORD(ISP_ATIO_Q_OUT(vha));
6814
6815 if (ha->flags.msix_enabled) {
6816 if (IS_QLA83XX(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha)) {
6817 if (IS_QLA2071(ha)) {
6818 /* 4 ports Baker: Enable Interrupt Handshake */
6819 icb->msix_atio = 0;
6820 icb->firmware_options_2 |= BIT_26;
6821 } else {
6822 icb->msix_atio = cpu_to_le16(msix->entry);
6823 icb->firmware_options_2 &= ~BIT_26;
6824 }
6825 ql_dbg(ql_dbg_init, vha, 0xf072,
6826 "Registering ICB vector 0x%x for atio que.\n",
6827 msix->entry);
6828 }
6829 } else {
6830 /* INTx|MSI */
6831 if (IS_QLA83XX(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha)) {
6832 icb->msix_atio = 0;
6833 icb->firmware_options_2 |= BIT_26;
6834 ql_dbg(ql_dbg_init, vha, 0xf072,
6835 "%s: Use INTx for ATIOQ.\n", __func__);
6836 }
6837 }
6838 }
6839
6840 void
6841 qlt_24xx_config_nvram_stage1(struct scsi_qla_host *vha, struct nvram_24xx *nv)
6842 {
6843 struct qla_hw_data *ha = vha->hw;
6844 u32 tmp;
6845
6846 if (!QLA_TGT_MODE_ENABLED())
6847 return;
6848
6849 if (qla_tgt_mode_enabled(vha) || qla_dual_mode_enabled(vha)) {
6850 if (!ha->tgt.saved_set) {
6851 /* We save only once */
6852 ha->tgt.saved_exchange_count = nv->exchange_count;
6853 ha->tgt.saved_firmware_options_1 =
6854 nv->firmware_options_1;
6855 ha->tgt.saved_firmware_options_2 =
6856 nv->firmware_options_2;
6857 ha->tgt.saved_firmware_options_3 =
6858 nv->firmware_options_3;
6859 ha->tgt.saved_set = 1;
6860 }
6861
6862 if (qla_tgt_mode_enabled(vha))
6863 nv->exchange_count = cpu_to_le16(0xFFFF);
6864 else /* dual */
6865 nv->exchange_count = cpu_to_le16(vha->ql2xexchoffld);
6866
6867 /* Enable target mode */
6868 nv->firmware_options_1 |= cpu_to_le32(BIT_4);
6869
6870 /* Disable ini mode, if requested */
6871 if (qla_tgt_mode_enabled(vha))
6872 nv->firmware_options_1 |= cpu_to_le32(BIT_5);
6873
6874 /* Disable Full Login after LIP */
6875 nv->firmware_options_1 &= cpu_to_le32(~BIT_13);
6876 /* Enable initial LIP */
6877 nv->firmware_options_1 &= cpu_to_le32(~BIT_9);
6878 if (ql2xtgt_tape_enable)
6879 /* Enable FC Tape support */
6880 nv->firmware_options_2 |= cpu_to_le32(BIT_12);
6881 else
6882 /* Disable FC Tape support */
6883 nv->firmware_options_2 &= cpu_to_le32(~BIT_12);
6884
6885 /* Disable Full Login after LIP */
6886 nv->host_p &= cpu_to_le32(~BIT_10);
6887
6888 /*
6889 * Clear bit 15 explicitly: we have seen at least
6890 * a couple of instances where it was set and this
6891 * prevented the firmware from initializing.
6892 */
6893 nv->firmware_options_1 &= cpu_to_le32(~BIT_15);
6894 /* Enable target PRLI control */
6895 nv->firmware_options_2 |= cpu_to_le32(BIT_14);
6896
6897 if (IS_QLA25XX(ha)) {
6898 /* Change Loop-prefer to Pt-Pt */
6899 tmp = ~(BIT_4|BIT_5|BIT_6);
6900 nv->firmware_options_2 &= cpu_to_le32(tmp);
6901 tmp = P2P << 4;
6902 nv->firmware_options_2 |= cpu_to_le32(tmp);
6903 }
6904 } else {
6905 if (ha->tgt.saved_set) {
6906 nv->exchange_count = ha->tgt.saved_exchange_count;
6907 nv->firmware_options_1 =
6908 ha->tgt.saved_firmware_options_1;
6909 nv->firmware_options_2 =
6910 ha->tgt.saved_firmware_options_2;
6911 nv->firmware_options_3 =
6912 ha->tgt.saved_firmware_options_3;
6913 }
6914 return;
6915 }
6916
6917 if (ha->base_qpair->enable_class_2) {
6918 if (vha->flags.init_done)
6919 fc_host_supported_classes(vha->host) =
6920 FC_COS_CLASS2 | FC_COS_CLASS3;
6921
6922 nv->firmware_options_2 |= cpu_to_le32(BIT_8);
6923 } else {
6924 if (vha->flags.init_done)
6925 fc_host_supported_classes(vha->host) = FC_COS_CLASS3;
6926
6927 nv->firmware_options_2 &= ~cpu_to_le32(BIT_8);
6928 }
6929 }
6930
6931 void
6932 qlt_24xx_config_nvram_stage2(struct scsi_qla_host *vha,
6933 struct init_cb_24xx *icb)
6934 {
6935 struct qla_hw_data *ha = vha->hw;
6936
6937 if (!QLA_TGT_MODE_ENABLED())
6938 return;
6939
6940 if (ha->tgt.node_name_set) {
6941 memcpy(icb->node_name, ha->tgt.tgt_node_name, WWN_SIZE);
6942 icb->firmware_options_1 |= cpu_to_le32(BIT_14);
6943 }
6944 }
6945
6946 void
6947 qlt_81xx_config_nvram_stage1(struct scsi_qla_host *vha, struct nvram_81xx *nv)
6948 {
6949 struct qla_hw_data *ha = vha->hw;
6950 u32 tmp;
6951
6952 if (!QLA_TGT_MODE_ENABLED())
6953 return;
6954
6955 if (qla_tgt_mode_enabled(vha) || qla_dual_mode_enabled(vha)) {
6956 if (!ha->tgt.saved_set) {
6957 /* We save only once */
6958 ha->tgt.saved_exchange_count = nv->exchange_count;
6959 ha->tgt.saved_firmware_options_1 =
6960 nv->firmware_options_1;
6961 ha->tgt.saved_firmware_options_2 =
6962 nv->firmware_options_2;
6963 ha->tgt.saved_firmware_options_3 =
6964 nv->firmware_options_3;
6965 ha->tgt.saved_set = 1;
6966 }
6967
6968 if (qla_tgt_mode_enabled(vha))
6969 nv->exchange_count = cpu_to_le16(0xFFFF);
6970 else /* dual */
6971 nv->exchange_count = cpu_to_le16(vha->ql2xexchoffld);
6972
6973 /* Enable target mode */
6974 nv->firmware_options_1 |= cpu_to_le32(BIT_4);
6975
6976 /* Disable ini mode, if requested */
6977 if (qla_tgt_mode_enabled(vha))
6978 nv->firmware_options_1 |= cpu_to_le32(BIT_5);
6979 /* Disable Full Login after LIP */
6980 nv->firmware_options_1 &= cpu_to_le32(~BIT_13);
6981 /* Enable initial LIP */
6982 nv->firmware_options_1 &= cpu_to_le32(~BIT_9);
6983 /*
6984 * Clear bit 15 explicitly: we have seen at
6985 * least a couple of instances where it was set
6986 * and this prevented the firmware from
6987 * initializing.
6988 */
6989 nv->firmware_options_1 &= cpu_to_le32(~BIT_15);
6990 if (ql2xtgt_tape_enable)
6991 /* Enable FC tape support */
6992 nv->firmware_options_2 |= cpu_to_le32(BIT_12);
6993 else
6994 /* Disable FC tape support */
6995 nv->firmware_options_2 &= cpu_to_le32(~BIT_12);
6996
6997 /* Disable Full Login after LIP */
6998 nv->host_p &= cpu_to_le32(~BIT_10);
6999 /* Enable target PRLI control */
7000 nv->firmware_options_2 |= cpu_to_le32(BIT_14);
7001
7002 /* Change Loop-prefer to Pt-Pt */
7003 tmp = ~(BIT_4|BIT_5|BIT_6);
7004 nv->firmware_options_2 &= cpu_to_le32(tmp);
7005 tmp = P2P << 4;
7006 nv->firmware_options_2 |= cpu_to_le32(tmp);
7007 } else {
7008 if (ha->tgt.saved_set) {
7009 nv->exchange_count = ha->tgt.saved_exchange_count;
7010 nv->firmware_options_1 =
7011 ha->tgt.saved_firmware_options_1;
7012 nv->firmware_options_2 =
7013 ha->tgt.saved_firmware_options_2;
7014 nv->firmware_options_3 =
7015 ha->tgt.saved_firmware_options_3;
7016 }
7017 return;
7018 }
7019
7020 if (ha->base_qpair->enable_class_2) {
7021 if (vha->flags.init_done)
7022 fc_host_supported_classes(vha->host) =
7023 FC_COS_CLASS2 | FC_COS_CLASS3;
7024
7025 nv->firmware_options_2 |= cpu_to_le32(BIT_8);
7026 } else {
7027 if (vha->flags.init_done)
7028 fc_host_supported_classes(vha->host) = FC_COS_CLASS3;
7029
7030 nv->firmware_options_2 &= ~cpu_to_le32(BIT_8);
7031 }
7032 }
7033
7034 void
7035 qlt_81xx_config_nvram_stage2(struct scsi_qla_host *vha,
7036 struct init_cb_81xx *icb)
7037 {
7038 struct qla_hw_data *ha = vha->hw;
7039
7040 if (!QLA_TGT_MODE_ENABLED())
7041 return;
7042
7043 if (ha->tgt.node_name_set) {
7044 memcpy(icb->node_name, ha->tgt.tgt_node_name, WWN_SIZE);
7045 icb->firmware_options_1 |= cpu_to_le32(BIT_14);
7046 }
7047 }
7048
7049 void
7050 qlt_83xx_iospace_config(struct qla_hw_data *ha)
7051 {
7052 if (!QLA_TGT_MODE_ENABLED())
7053 return;
7054
7055 ha->msix_count += 1; /* For ATIO Q */
7056 }
7057
7058
7059 void
7060 qlt_modify_vp_config(struct scsi_qla_host *vha,
7061 struct vp_config_entry_24xx *vpmod)
7062 {
7063 /* enable target mode. Bit5 = 1 => disable */
7064 if (qla_tgt_mode_enabled(vha) || qla_dual_mode_enabled(vha))
7065 vpmod->options_idx1 &= ~BIT_5;
7066
7067 /* Disable ini mode, if requested. bit4 = 1 => disable */
7068 if (qla_tgt_mode_enabled(vha))
7069 vpmod->options_idx1 &= ~BIT_4;
7070 }
7071
7072 void
7073 qlt_probe_one_stage1(struct scsi_qla_host *base_vha, struct qla_hw_data *ha)
7074 {
7075 int rc;
7076
7077 if (!QLA_TGT_MODE_ENABLED())
7078 return;
7079
7080 if ((ql2xenablemsix == 0) || IS_QLA83XX(ha) || IS_QLA27XX(ha) ||
7081 IS_QLA28XX(ha)) {
7082 ISP_ATIO_Q_IN(base_vha) = &ha->mqiobase->isp25mq.atio_q_in;
7083 ISP_ATIO_Q_OUT(base_vha) = &ha->mqiobase->isp25mq.atio_q_out;
7084 } else {
7085 ISP_ATIO_Q_IN(base_vha) = &ha->iobase->isp24.atio_q_in;
7086 ISP_ATIO_Q_OUT(base_vha) = &ha->iobase->isp24.atio_q_out;
7087 }
7088
7089 mutex_init(&base_vha->vha_tgt.tgt_mutex);
7090 mutex_init(&base_vha->vha_tgt.tgt_host_action_mutex);
7091
7092 INIT_LIST_HEAD(&base_vha->unknown_atio_list);
7093 INIT_DELAYED_WORK(&base_vha->unknown_atio_work,
7094 qlt_unknown_atio_work_fn);
7095
7096 qlt_clear_mode(base_vha);
7097
7098 rc = btree_init32(&ha->tgt.host_map);
7099 if (rc)
7100 ql_log(ql_log_info, base_vha, 0xd03d,
7101 "Unable to initialize ha->host_map btree\n");
7102
7103 qlt_update_vp_map(base_vha, SET_VP_IDX);
7104 }
7105
7106 irqreturn_t
7107 qla83xx_msix_atio_q(int irq, void *dev_id)
7108 {
7109 struct rsp_que *rsp;
7110 scsi_qla_host_t *vha;
7111 struct qla_hw_data *ha;
7112 unsigned long flags;
7113
7114 rsp = (struct rsp_que *) dev_id;
7115 ha = rsp->hw;
7116 vha = pci_get_drvdata(ha->pdev);
7117
7118 spin_lock_irqsave(&ha->tgt.atio_lock, flags);
7119
7120 qlt_24xx_process_atio_queue(vha, 0);
7121
7122 spin_unlock_irqrestore(&ha->tgt.atio_lock, flags);
7123
7124 return IRQ_HANDLED;
7125 }
7126
7127 static void
7128 qlt_handle_abts_recv_work(struct work_struct *work)
7129 {
7130 struct qla_tgt_sess_op *op = container_of(work,
7131 struct qla_tgt_sess_op, work);
7132 scsi_qla_host_t *vha = op->vha;
7133 struct qla_hw_data *ha = vha->hw;
7134 unsigned long flags;
7135
7136 if (qla2x00_reset_active(vha) ||
7137 (op->chip_reset != ha->base_qpair->chip_reset))
7138 return;
7139
7140 spin_lock_irqsave(&ha->tgt.atio_lock, flags);
7141 qlt_24xx_process_atio_queue(vha, 0);
7142 spin_unlock_irqrestore(&ha->tgt.atio_lock, flags);
7143
7144 spin_lock_irqsave(&ha->hardware_lock, flags);
7145 qlt_response_pkt_all_vps(vha, op->rsp, (response_t *)&op->atio);
7146 spin_unlock_irqrestore(&ha->hardware_lock, flags);
7147
7148 kfree(op);
7149 }
7150
7151 void
7152 qlt_handle_abts_recv(struct scsi_qla_host *vha, struct rsp_que *rsp,
7153 response_t *pkt)
7154 {
7155 struct qla_tgt_sess_op *op;
7156
7157 op = kzalloc(sizeof(*op), GFP_ATOMIC);
7158
7159 if (!op) {
7160 /* Do not touch the ATIO queue here; this is best-effort
7161 * error recovery at this point.
7162 */
7163 qlt_response_pkt_all_vps(vha, rsp, pkt);
7164 return;
7165 }
7166
7167 memcpy(&op->atio, pkt, sizeof(*pkt));
7168 op->vha = vha;
7169 op->chip_reset = vha->hw->base_qpair->chip_reset;
7170 op->rsp = rsp;
7171 INIT_WORK(&op->work, qlt_handle_abts_recv_work);
7172 queue_work(qla_tgt_wq, &op->work);
7173 return;
7174 }
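
/*
 * Illustrative sketch (not part of the driver): the defer-with-
 * generation-stamp idiom used by the two functions above. The enqueue
 * side snapshots the reset generation; the worker compares it against
 * the live value and drops stale requests. Names are hypothetical.
 */
struct deferred_pkt {
	struct work_struct work;
	u32 generation;		/* snapshot taken at enqueue time */
	u32 *live_generation;	/* where the current value lives */
};

static void deferred_pkt_fn(struct work_struct *work)
{
	struct deferred_pkt *pkt =
		container_of(work, struct deferred_pkt, work);

	if (pkt->generation != READ_ONCE(*pkt->live_generation)) {
		kfree(pkt);	/* a chip reset invalidated the request */
		return;
	}

	/* ... process against current hardware state, then free ... */
	kfree(pkt);
}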
7175
7176 int
7177 qlt_mem_alloc(struct qla_hw_data *ha)
7178 {
7179 if (!QLA_TGT_MODE_ENABLED())
7180 return 0;
7181
7182 ha->tgt.tgt_vp_map = kcalloc(MAX_MULTI_ID_FABRIC,
7183 sizeof(struct qla_tgt_vp_map),
7184 GFP_KERNEL);
7185 if (!ha->tgt.tgt_vp_map)
7186 return -ENOMEM;
7187
7188 ha->tgt.atio_ring = dma_alloc_coherent(&ha->pdev->dev,
7189 (ha->tgt.atio_q_length + 1) * sizeof(struct atio_from_isp),
7190 &ha->tgt.atio_dma, GFP_KERNEL);
7191 if (!ha->tgt.atio_ring) {
7192 kfree(ha->tgt.tgt_vp_map);
7193 return -ENOMEM;
7194 }
7195 return 0;
7196 }
7197
7198 void
7199 qlt_mem_free(struct qla_hw_data *ha)
7200 {
7201 if (!QLA_TGT_MODE_ENABLED())
7202 return;
7203
7204 if (ha->tgt.atio_ring) {
7205 dma_free_coherent(&ha->pdev->dev, (ha->tgt.atio_q_length + 1) *
7206 sizeof(struct atio_from_isp), ha->tgt.atio_ring,
7207 ha->tgt.atio_dma);
7208 }
7209 ha->tgt.atio_ring = NULL;
7210 ha->tgt.atio_dma = 0;
7211 kfree(ha->tgt.tgt_vp_map);
7212 ha->tgt.tgt_vp_map = NULL;
7213 }
7214
7215 /* vport_slock to be held by the caller */
7216 void
7217 qlt_update_vp_map(struct scsi_qla_host *vha, int cmd)
7218 {
7219 void *slot;
7220 u32 key;
7221 int rc;
7222
7223 if (!QLA_TGT_MODE_ENABLED())
7224 return;
7225
7226 key = vha->d_id.b24;
7227
7228 switch (cmd) {
7229 case SET_VP_IDX:
7230 vha->hw->tgt.tgt_vp_map[vha->vp_idx].vha = vha;
7231 break;
7232 case SET_AL_PA:
7233 slot = btree_lookup32(&vha->hw->tgt.host_map, key);
7234 if (!slot) {
7235 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf018,
7236 "Save vha in host_map %p %06x\n", vha, key);
7237 rc = btree_insert32(&vha->hw->tgt.host_map,
7238 key, vha, GFP_ATOMIC);
7239 if (rc)
7240 ql_log(ql_log_info, vha, 0xd03e,
7241 "Unable to insert s_id into host_map: %06x\n",
7242 key);
7243 return;
7244 }
7245 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf019,
7246 "replace existing vha in host_map %p %06x\n", vha, key);
7247 btree_update32(&vha->hw->tgt.host_map, key, vha);
7248 break;
7249 case RESET_VP_IDX:
7250 vha->hw->tgt.tgt_vp_map[vha->vp_idx].vha = NULL;
7251 break;
7252 case RESET_AL_PA:
7253 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf01a,
7254 "clear vha in host_map %p %06x\n", vha, key);
7255 slot = btree_lookup32(&vha->hw->tgt.host_map, key);
7256 if (slot)
7257 btree_remove32(&vha->hw->tgt.host_map, key);
7258 vha->d_id.b24 = 0;
7259 break;
7260 }
7261 }
7262
7263 void qlt_update_host_map(struct scsi_qla_host *vha, port_id_t id)
7264 {
7265
7266 if (!vha->d_id.b24) {
7267 vha->d_id = id;
7268 qlt_update_vp_map(vha, SET_AL_PA);
7269 } else if (vha->d_id.b24 != id.b24) {
7270 qlt_update_vp_map(vha, RESET_AL_PA);
7271 vha->d_id = id;
7272 qlt_update_vp_map(vha, SET_AL_PA);
7273 }
7274 }
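
/*
 * Illustrative sketch (not part of the driver): consuming the host_map
 * maintained above. On the receive path the 24-bit destination ID from
 * a frame header is used directly as the btree key to recover the
 * owning vha. The helper name is hypothetical.
 */
static struct scsi_qla_host *find_host_by_d_id(struct qla_hw_data *ha,
					       port_id_t id)
{
	/* Keys match qlt_update_vp_map(): the packed 24-bit port ID. */
	return btree_lookup32(&ha->tgt.host_map, id.b24);
}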
7275
7276 static int __init qlt_parse_ini_mode(void)
7277 {
7278 if (strcasecmp(qlini_mode, QLA2XXX_INI_MODE_STR_EXCLUSIVE) == 0)
7279 ql2x_ini_mode = QLA2XXX_INI_MODE_EXCLUSIVE;
7280 else if (strcasecmp(qlini_mode, QLA2XXX_INI_MODE_STR_DISABLED) == 0)
7281 ql2x_ini_mode = QLA2XXX_INI_MODE_DISABLED;
7282 else if (strcasecmp(qlini_mode, QLA2XXX_INI_MODE_STR_ENABLED) == 0)
7283 ql2x_ini_mode = QLA2XXX_INI_MODE_ENABLED;
7284 else if (strcasecmp(qlini_mode, QLA2XXX_INI_MODE_STR_DUAL) == 0)
7285 ql2x_ini_mode = QLA2XXX_INI_MODE_DUAL;
7286 else
7287 return false;
7288
7289 return true;
7290 }
7291
7292 int __init qlt_init(void)
7293 {
7294 int ret;
7295
7296 BUILD_BUG_ON(sizeof(struct ctio7_to_24xx) != 64);
7297 BUILD_BUG_ON(sizeof(struct ctio_to_2xxx) != 64);
7298
7299 if (!qlt_parse_ini_mode()) {
7300 ql_log(ql_log_fatal, NULL, 0xe06b,
7301 "qlt_parse_ini_mode() failed\n");
7302 return -EINVAL;
7303 }
7304
7305 if (!QLA_TGT_MODE_ENABLED())
7306 return 0;
7307
7308 qla_tgt_mgmt_cmd_cachep = kmem_cache_create("qla_tgt_mgmt_cmd_cachep",
7309 sizeof(struct qla_tgt_mgmt_cmd), __alignof__(struct
7310 qla_tgt_mgmt_cmd), 0, NULL);
7311 if (!qla_tgt_mgmt_cmd_cachep) {
7312 ql_log(ql_log_fatal, NULL, 0xd04b,
7313 "kmem_cache_create for qla_tgt_mgmt_cmd_cachep failed\n");
7314 return -ENOMEM;
7315 }
7316
7317 qla_tgt_plogi_cachep = kmem_cache_create("qla_tgt_plogi_cachep",
7318 sizeof(struct qlt_plogi_ack_t), __alignof__(struct qlt_plogi_ack_t),
7319 0, NULL);
7320
7321 if (!qla_tgt_plogi_cachep) {
7322 ql_log(ql_log_fatal, NULL, 0xe06d,
7323 "kmem_cache_create for qla_tgt_plogi_cachep failed\n");
7324 ret = -ENOMEM;
7325 goto out_mgmt_cmd_cachep;
7326 }
7327
7328 qla_tgt_mgmt_cmd_mempool = mempool_create(25, mempool_alloc_slab,
7329 mempool_free_slab, qla_tgt_mgmt_cmd_cachep);
7330 if (!qla_tgt_mgmt_cmd_mempool) {
7331 ql_log(ql_log_fatal, NULL, 0xe06e,
7332 "mempool_create for qla_tgt_mgmt_cmd_mempool failed\n");
7333 ret = -ENOMEM;
7334 goto out_plogi_cachep;
7335 }
7336
7337 qla_tgt_wq = alloc_workqueue("qla_tgt_wq", 0, 0);
7338 if (!qla_tgt_wq) {
7339 ql_log(ql_log_fatal, NULL, 0xe06f,
7340 "alloc_workqueue for qla_tgt_wq failed\n");
7341 ret = -ENOMEM;
7342 goto out_cmd_mempool;
7343 }
7344 /*
7345 * Return 1 to signal that initiator-mode is being disabled
7346 */
7347 return (ql2x_ini_mode == QLA2XXX_INI_MODE_DISABLED) ? 1 : 0;
7348
7349 out_cmd_mempool:
7350 mempool_destroy(qla_tgt_mgmt_cmd_mempool);
7351 out_plogi_cachep:
7352 kmem_cache_destroy(qla_tgt_plogi_cachep);
7353 out_mgmt_cmd_cachep:
7354 kmem_cache_destroy(qla_tgt_mgmt_cmd_cachep);
7355 return ret;
7356 }
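
/*
 * Illustrative sketch (not part of the driver): the goto-based unwind
 * ladder qlt_init() uses above, reduced to its shape with two
 * hypothetical resources. Each failure jumps to the label that tears
 * down everything acquired so far, in reverse order of setup.
 */
static void *res_a, *res_b;	/* hypothetical module resources */

static int setup_pair(void)
{
	res_a = kzalloc(64, GFP_KERNEL);
	if (!res_a)
		return -ENOMEM;

	res_b = kzalloc(64, GFP_KERNEL);
	if (!res_b)
		goto out_a;

	return 0;

out_a:
	kfree(res_a);
	res_a = NULL;
	return -ENOMEM;
}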
7357
7358 void qlt_exit(void)
7359 {
7360 if (!QLA_TGT_MODE_ENABLED())
7361 return;
7362
7363 destroy_workqueue(qla_tgt_wq);
7364 mempool_destroy(qla_tgt_mgmt_cmd_mempool);
7365 kmem_cache_destroy(qla_tgt_plogi_cachep);
7366 kmem_cache_destroy(qla_tgt_mgmt_cmd_cachep);
7367 }