1/*******************************************************************************
2 * This file contains tcm implementation using v4 configfs fabric infrastructure
3 * for QLogic target mode HBAs
4 *
 5 * (c) Copyright 2010-2011 RisingTide Systems LLC.
6 *
7 * Licensed to the Linux Foundation under the General Public License (GPL)
8 * version 2.
9 *
10 * Author: Nicholas A. Bellinger <nab@risingtidesystems.com>
11 *
12 * tcm_qla2xxx_parse_wwn() and tcm_qla2xxx_format_wwn() contains code from
13 * the TCM_FC / Open-FCoE.org fabric module.
14 *
15 * Copyright (c) 2010 Cisco Systems, Inc
16 *
17 * This program is free software; you can redistribute it and/or modify
18 * it under the terms of the GNU General Public License as published by
19 * the Free Software Foundation; either version 2 of the License, or
20 * (at your option) any later version.
21 *
22 * This program is distributed in the hope that it will be useful,
23 * but WITHOUT ANY WARRANTY; without even the implied warranty of
24 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
25 * GNU General Public License for more details.
26 ****************************************************************************/
27
28
29#include <linux/module.h>
30#include <linux/moduleparam.h>
31#include <generated/utsrelease.h>
32#include <linux/utsname.h>
33#include <linux/init.h>
34#include <linux/list.h>
35#include <linux/slab.h>
36#include <linux/kthread.h>
37#include <linux/types.h>
38#include <linux/string.h>
39#include <linux/configfs.h>
40#include <linux/ctype.h>
43#include <asm/unaligned.h>
44#include <scsi/scsi.h>
45#include <scsi/scsi_host.h>
46#include <scsi/scsi_device.h>
47#include <scsi/scsi_cmnd.h>
48#include <target/target_core_base.h>
49#include <target/target_core_fabric.h>
50#include <target/target_core_fabric_configfs.h>
51#include <target/target_core_configfs.h>
52#include <target/configfs_macros.h>
53
54#include "qla_def.h"
55#include "qla_target.h"
56#include "tcm_qla2xxx.h"
57
58struct workqueue_struct *tcm_qla2xxx_free_wq;
59struct workqueue_struct *tcm_qla2xxx_cmd_wq;
60
61static int tcm_qla2xxx_check_true(struct se_portal_group *se_tpg)
62{
63 return 1;
64}
65
66static int tcm_qla2xxx_check_false(struct se_portal_group *se_tpg)
67{
68 return 0;
69}
70
71/*
72 * Parse WWN.
73 * If strict, we require lower-case hex and colon separators to be sure
74 * the name is the same as what would be generated by ft_format_wwn()
75 * so the name and wwn are mapped one-to-one.
76 */
77static ssize_t tcm_qla2xxx_parse_wwn(const char *name, u64 *wwn, int strict)
78{
79 const char *cp;
80 char c;
81 u32 nibble;
82 u32 byte = 0;
83 u32 pos = 0;
84 u32 err;
85
86 *wwn = 0;
87 for (cp = name; cp < &name[TCM_QLA2XXX_NAMELEN - 1]; cp++) {
88 c = *cp;
89 if (c == '\n' && cp[1] == '\0')
90 continue;
91 if (strict && pos++ == 2 && byte++ < 7) {
92 pos = 0;
93 if (c == ':')
94 continue;
95 err = 1;
96 goto fail;
97 }
98 if (c == '\0') {
99 err = 2;
100 if (strict && byte != 8)
101 goto fail;
102 return cp - name;
103 }
104 err = 3;
105 if (isdigit(c))
106 nibble = c - '0';
107 else if (isxdigit(c) && (islower(c) || !strict))
108 nibble = tolower(c) - 'a' + 10;
109 else
110 goto fail;
111 *wwn = (*wwn << 4) | nibble;
112 }
113 err = 4;
114fail:
115 pr_debug("err %u len %zu pos %u byte %u\n",
116 err, cp - name, pos, byte);
117 return -1;
118}
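/*
 * Illustrative note (not from the original source): in strict mode the
 * parser accepts only the lower-case, colon-separated form produced by
 * tcm_qla2xxx_format_wwn() below, e.g. a hypothetical
 * "21:00:00:24:ff:31:4c:48", so a configfs directory name maps one-to-one
 * onto the resulting u64 WWN.
 */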
119
120static ssize_t tcm_qla2xxx_format_wwn(char *buf, size_t len, u64 wwn)
121{
122 u8 b[8];
123
124 put_unaligned_be64(wwn, b);
125 return snprintf(buf, len,
126 "%2.2x:%2.2x:%2.2x:%2.2x:%2.2x:%2.2x:%2.2x:%2.2x",
127 b[0], b[1], b[2], b[3], b[4], b[5], b[6], b[7]);
128}
129
130static char *tcm_qla2xxx_get_fabric_name(void)
131{
132 return "qla2xxx";
133}
134
135/*
136 * From drivers/scsi/scsi_transport_fc.c:fc_parse_wwn
137 */
138static int tcm_qla2xxx_npiv_extract_wwn(const char *ns, u64 *nm)
139{
 140 unsigned int i, j;
 int value; /* signed: hex_to_bin() returns -1 on error */
141 u8 wwn[8];
142
143 memset(wwn, 0, sizeof(wwn));
144
145 /* Validate and store the new name */
146 for (i = 0, j = 0; i < 16; i++) {
147 value = hex_to_bin(*ns++);
148 if (value >= 0)
149 j = (j << 4) | value;
150 else
151 return -EINVAL;
152
153 if (i % 2) {
154 wwn[i/2] = j & 0xff;
155 j = 0;
156 }
157 }
158
159 *nm = wwn_to_u64(wwn);
160 return 0;
161}
162
163/*
164 * This parsing logic follows drivers/scsi/scsi_transport_fc.c:
165 * store_fc_host_vport_create()
166 */
167static int tcm_qla2xxx_npiv_parse_wwn(
168 const char *name,
169 size_t count,
170 u64 *wwpn,
171 u64 *wwnn)
172{
173 unsigned int cnt = count;
174 int rc;
175
176 *wwpn = 0;
177 *wwnn = 0;
178
179 /* count may include a LF at end of string */
180 if (name[cnt-1] == '\n')
181 cnt--;
182
183 /* validate we have enough characters for WWPN */
184 if ((cnt != (16+1+16)) || (name[16] != ':'))
185 return -EINVAL;
186
187 rc = tcm_qla2xxx_npiv_extract_wwn(&name[0], wwpn);
188 if (rc != 0)
189 return rc;
190
191 rc = tcm_qla2xxx_npiv_extract_wwn(&name[17], wwnn);
192 if (rc != 0)
193 return rc;
194
195 return 0;
196}
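/*
 * Illustrative note: the length/separator checks above expect exactly
 * 16 hex digits of WWPN, a ':' at offset 16, then 16 hex digits of WWNN,
 * plus an optional trailing newline, e.g. a hypothetical
 * "2100001b32a90001:2000001b32a90001".
 */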
197
198static ssize_t tcm_qla2xxx_npiv_format_wwn(char *buf, size_t len,
199 u64 wwpn, u64 wwnn)
200{
201 u8 b[8], b2[8];
202
203 put_unaligned_be64(wwpn, b);
204 put_unaligned_be64(wwnn, b2);
205 return snprintf(buf, len,
206 "%2.2x:%2.2x:%2.2x:%2.2x:%2.2x:%2.2x:%2.2x:%2.2x,"
207 "%2.2x:%2.2x:%2.2x:%2.2x:%2.2x:%2.2x:%2.2x:%2.2x",
208 b[0], b[1], b[2], b[3], b[4], b[5], b[6], b[7],
209 b2[0], b2[1], b2[2], b2[3], b2[4], b2[5], b2[6], b2[7]);
210}
211
212static char *tcm_qla2xxx_npiv_get_fabric_name(void)
213{
214 return "qla2xxx_npiv";
215}
216
217static u8 tcm_qla2xxx_get_fabric_proto_ident(struct se_portal_group *se_tpg)
218{
219 struct tcm_qla2xxx_tpg *tpg = container_of(se_tpg,
220 struct tcm_qla2xxx_tpg, se_tpg);
221 struct tcm_qla2xxx_lport *lport = tpg->lport;
222 u8 proto_id;
223
224 switch (lport->lport_proto_id) {
225 case SCSI_PROTOCOL_FCP:
226 default:
227 proto_id = fc_get_fabric_proto_ident(se_tpg);
228 break;
229 }
230
231 return proto_id;
232}
233
234static char *tcm_qla2xxx_get_fabric_wwn(struct se_portal_group *se_tpg)
235{
236 struct tcm_qla2xxx_tpg *tpg = container_of(se_tpg,
237 struct tcm_qla2xxx_tpg, se_tpg);
238 struct tcm_qla2xxx_lport *lport = tpg->lport;
239
240 return &lport->lport_name[0];
241}
242
243static char *tcm_qla2xxx_npiv_get_fabric_wwn(struct se_portal_group *se_tpg)
244{
245 struct tcm_qla2xxx_tpg *tpg = container_of(se_tpg,
246 struct tcm_qla2xxx_tpg, se_tpg);
247 struct tcm_qla2xxx_lport *lport = tpg->lport;
248
249 return &lport->lport_npiv_name[0];
250}
251
252static u16 tcm_qla2xxx_get_tag(struct se_portal_group *se_tpg)
253{
254 struct tcm_qla2xxx_tpg *tpg = container_of(se_tpg,
255 struct tcm_qla2xxx_tpg, se_tpg);
256 return tpg->lport_tpgt;
257}
258
259static u32 tcm_qla2xxx_get_default_depth(struct se_portal_group *se_tpg)
260{
261 return 1;
262}
263
264static u32 tcm_qla2xxx_get_pr_transport_id(
265 struct se_portal_group *se_tpg,
266 struct se_node_acl *se_nacl,
267 struct t10_pr_registration *pr_reg,
268 int *format_code,
269 unsigned char *buf)
270{
271 struct tcm_qla2xxx_tpg *tpg = container_of(se_tpg,
272 struct tcm_qla2xxx_tpg, se_tpg);
273 struct tcm_qla2xxx_lport *lport = tpg->lport;
274 int ret = 0;
275
276 switch (lport->lport_proto_id) {
277 case SCSI_PROTOCOL_FCP:
278 default:
279 ret = fc_get_pr_transport_id(se_tpg, se_nacl, pr_reg,
280 format_code, buf);
281 break;
282 }
283
284 return ret;
285}
286
287static u32 tcm_qla2xxx_get_pr_transport_id_len(
288 struct se_portal_group *se_tpg,
289 struct se_node_acl *se_nacl,
290 struct t10_pr_registration *pr_reg,
291 int *format_code)
292{
293 struct tcm_qla2xxx_tpg *tpg = container_of(se_tpg,
294 struct tcm_qla2xxx_tpg, se_tpg);
295 struct tcm_qla2xxx_lport *lport = tpg->lport;
296 int ret = 0;
297
298 switch (lport->lport_proto_id) {
299 case SCSI_PROTOCOL_FCP:
300 default:
301 ret = fc_get_pr_transport_id_len(se_tpg, se_nacl, pr_reg,
302 format_code);
303 break;
304 }
305
306 return ret;
307}
308
309static char *tcm_qla2xxx_parse_pr_out_transport_id(
310 struct se_portal_group *se_tpg,
311 const char *buf,
312 u32 *out_tid_len,
313 char **port_nexus_ptr)
314{
315 struct tcm_qla2xxx_tpg *tpg = container_of(se_tpg,
316 struct tcm_qla2xxx_tpg, se_tpg);
317 struct tcm_qla2xxx_lport *lport = tpg->lport;
318 char *tid = NULL;
319
320 switch (lport->lport_proto_id) {
321 case SCSI_PROTOCOL_FCP:
322 default:
323 tid = fc_parse_pr_out_transport_id(se_tpg, buf, out_tid_len,
324 port_nexus_ptr);
325 break;
326 }
327
328 return tid;
329}
330
331static int tcm_qla2xxx_check_demo_mode(struct se_portal_group *se_tpg)
332{
333 struct tcm_qla2xxx_tpg *tpg = container_of(se_tpg,
334 struct tcm_qla2xxx_tpg, se_tpg);
335
336 return QLA_TPG_ATTRIB(tpg)->generate_node_acls;
337}
338
339static int tcm_qla2xxx_check_demo_mode_cache(struct se_portal_group *se_tpg)
340{
341 struct tcm_qla2xxx_tpg *tpg = container_of(se_tpg,
342 struct tcm_qla2xxx_tpg, se_tpg);
343
344 return QLA_TPG_ATTRIB(tpg)->cache_dynamic_acls;
345}
346
347static int tcm_qla2xxx_check_demo_write_protect(struct se_portal_group *se_tpg)
348{
349 struct tcm_qla2xxx_tpg *tpg = container_of(se_tpg,
350 struct tcm_qla2xxx_tpg, se_tpg);
351
352 return QLA_TPG_ATTRIB(tpg)->demo_mode_write_protect;
353}
354
355static int tcm_qla2xxx_check_prod_write_protect(struct se_portal_group *se_tpg)
356{
357 struct tcm_qla2xxx_tpg *tpg = container_of(se_tpg,
358 struct tcm_qla2xxx_tpg, se_tpg);
359
360 return QLA_TPG_ATTRIB(tpg)->prod_mode_write_protect;
361}
362
363static struct se_node_acl *tcm_qla2xxx_alloc_fabric_acl(
364 struct se_portal_group *se_tpg)
365{
366 struct tcm_qla2xxx_nacl *nacl;
367
368 nacl = kzalloc(sizeof(struct tcm_qla2xxx_nacl), GFP_KERNEL);
369 if (!nacl) {
 370 pr_err("Unable to allocate struct tcm_qla2xxx_nacl\n");
371 return NULL;
372 }
373
374 return &nacl->se_node_acl;
375}
376
377static void tcm_qla2xxx_release_fabric_acl(
378 struct se_portal_group *se_tpg,
379 struct se_node_acl *se_nacl)
380{
381 struct tcm_qla2xxx_nacl *nacl = container_of(se_nacl,
382 struct tcm_qla2xxx_nacl, se_node_acl);
383 kfree(nacl);
384}
385
386static u32 tcm_qla2xxx_tpg_get_inst_index(struct se_portal_group *se_tpg)
387{
388 struct tcm_qla2xxx_tpg *tpg = container_of(se_tpg,
389 struct tcm_qla2xxx_tpg, se_tpg);
390
391 return tpg->lport_tpgt;
392}
393
394static void tcm_qla2xxx_complete_mcmd(struct work_struct *work)
395{
396 struct qla_tgt_mgmt_cmd *mcmd = container_of(work,
397 struct qla_tgt_mgmt_cmd, free_work);
398
399 transport_generic_free_cmd(&mcmd->se_cmd, 0);
400}
401
402/*
403 * Called from qla_target_template->free_mcmd(), and will call
404 * tcm_qla2xxx_release_cmd() via normal struct target_core_fabric_ops
405 * release callback. qla_hw_data->hardware_lock is expected to be held
406 */
407static void tcm_qla2xxx_free_mcmd(struct qla_tgt_mgmt_cmd *mcmd)
408{
409 INIT_WORK(&mcmd->free_work, tcm_qla2xxx_complete_mcmd);
410 queue_work(tcm_qla2xxx_free_wq, &mcmd->free_work);
411}
412
413static void tcm_qla2xxx_complete_free(struct work_struct *work)
414{
415 struct qla_tgt_cmd *cmd = container_of(work, struct qla_tgt_cmd, work);
416
417 transport_generic_free_cmd(&cmd->se_cmd, 0);
418}
419
420/*
421 * Called from qla_target_template->free_cmd(), and will call
422 * tcm_qla2xxx_release_cmd via normal struct target_core_fabric_ops
423 * release callback. qla_hw_data->hardware_lock is expected to be held
424 */
425static void tcm_qla2xxx_free_cmd(struct qla_tgt_cmd *cmd)
426{
427 INIT_WORK(&cmd->work, tcm_qla2xxx_complete_free);
428 queue_work(tcm_qla2xxx_free_wq, &cmd->work);
429}
430
431/*
432 * Called from struct target_core_fabric_ops->check_stop_free() context
433 */
434static int tcm_qla2xxx_check_stop_free(struct se_cmd *se_cmd)
435{
436 return target_put_sess_cmd(se_cmd->se_sess, se_cmd);
437}
438
439/* tcm_qla2xxx_release_cmd - Callback from TCM Core to release underlying
440 * fabric descriptor @se_cmd command to release
441 */
442static void tcm_qla2xxx_release_cmd(struct se_cmd *se_cmd)
443{
444 struct qla_tgt_cmd *cmd;
445
446 if (se_cmd->se_cmd_flags & SCF_SCSI_TMR_CDB) {
447 struct qla_tgt_mgmt_cmd *mcmd = container_of(se_cmd,
448 struct qla_tgt_mgmt_cmd, se_cmd);
449 qlt_free_mcmd(mcmd);
450 return;
451 }
452
453 cmd = container_of(se_cmd, struct qla_tgt_cmd, se_cmd);
454 qlt_free_cmd(cmd);
455}
456
457static int tcm_qla2xxx_shutdown_session(struct se_session *se_sess)
458{
459 struct qla_tgt_sess *sess = se_sess->fabric_sess_ptr;
460 struct scsi_qla_host *vha;
461 unsigned long flags;
462
463 BUG_ON(!sess);
464 vha = sess->vha;
465
466 spin_lock_irqsave(&vha->hw->hardware_lock, flags);
467 sess->tearing_down = 1;
468 target_splice_sess_cmd_list(se_sess);
469 spin_unlock_irqrestore(&vha->hw->hardware_lock, flags);
470
471 return 1;
472}
473
474static void tcm_qla2xxx_close_session(struct se_session *se_sess)
475{
476 struct qla_tgt_sess *sess = se_sess->fabric_sess_ptr;
477 struct scsi_qla_host *vha;
478 unsigned long flags;
479
480 BUG_ON(!sess);
481 vha = sess->vha;
482
483 spin_lock_irqsave(&vha->hw->hardware_lock, flags);
484 qlt_unreg_sess(sess);
485 spin_unlock_irqrestore(&vha->hw->hardware_lock, flags);
486}
487
488static u32 tcm_qla2xxx_sess_get_index(struct se_session *se_sess)
489{
490 return 0;
491}
492
493/*
494 * The LIO target core uses DMA_TO_DEVICE to mean that data is going
495 * to the target (eg handling a WRITE) and DMA_FROM_DEVICE to mean
496 * that data is coming from the target (eg handling a READ). However,
497 * this is just the opposite of what we have to tell the DMA mapping
498 * layer -- eg when handling a READ, the HBA will have to DMA the data
499 * out of memory so it can send it to the initiator, which means we
500 * need to use DMA_TO_DEVICE when we map the data.
501 */
502static enum dma_data_direction tcm_qla2xxx_mapping_dir(struct se_cmd *se_cmd)
503{
504 if (se_cmd->se_cmd_flags & SCF_BIDI)
505 return DMA_BIDIRECTIONAL;
506
507 switch (se_cmd->data_direction) {
508 case DMA_TO_DEVICE:
509 return DMA_FROM_DEVICE;
510 case DMA_FROM_DEVICE:
511 return DMA_TO_DEVICE;
512 case DMA_NONE:
513 default:
514 return DMA_NONE;
515 }
516}
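/*
 * Worked example (illustrative): for a SCSI WRITE the initiator sends data
 * to the target, so TCM reports DMA_TO_DEVICE, but the HBA has to DMA the
 * received payload from the wire into host memory, so pci_map_sg() must be
 * given DMA_FROM_DEVICE. A READ is the mirror image of this mapping.
 */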
517
518static int tcm_qla2xxx_write_pending(struct se_cmd *se_cmd)
519{
520 struct qla_tgt_cmd *cmd = container_of(se_cmd,
521 struct qla_tgt_cmd, se_cmd);
522
523 cmd->bufflen = se_cmd->data_length;
524 cmd->dma_data_direction = tcm_qla2xxx_mapping_dir(se_cmd);
525
526 cmd->sg_cnt = se_cmd->t_data_nents;
527 cmd->sg = se_cmd->t_data_sg;
528
529 /*
530 * qla_target.c:qlt_rdy_to_xfer() will call pci_map_sg() to setup
531 * the SGL mappings into PCIe memory for incoming FCP WRITE data.
532 */
533 return qlt_rdy_to_xfer(cmd);
534}
535
536static int tcm_qla2xxx_write_pending_status(struct se_cmd *se_cmd)
537{
538 unsigned long flags;
539 /*
540 * Check for WRITE_PENDING status to determine if we need to wait for
541 * CTIO aborts to be posted via hardware in tcm_qla2xxx_handle_data().
542 */
543 spin_lock_irqsave(&se_cmd->t_state_lock, flags);
544 if (se_cmd->t_state == TRANSPORT_WRITE_PENDING ||
545 se_cmd->t_state == TRANSPORT_COMPLETE_QF_WP) {
546 spin_unlock_irqrestore(&se_cmd->t_state_lock, flags);
547 wait_for_completion_timeout(&se_cmd->t_transport_stop_comp,
548 3000);
549 return 0;
550 }
551 spin_unlock_irqrestore(&se_cmd->t_state_lock, flags);
552
553 return 0;
554}
555
556static void tcm_qla2xxx_set_default_node_attrs(struct se_node_acl *nacl)
557{
558 return;
559}
560
561static u32 tcm_qla2xxx_get_task_tag(struct se_cmd *se_cmd)
562{
563 struct qla_tgt_cmd *cmd = container_of(se_cmd,
564 struct qla_tgt_cmd, se_cmd);
565
566 return cmd->tag;
567}
568
569static int tcm_qla2xxx_get_cmd_state(struct se_cmd *se_cmd)
570{
571 return 0;
572}
573
574/*
575 * Called from process context in qla_target.c:qlt_do_work() code
576 */
577static int tcm_qla2xxx_handle_cmd(scsi_qla_host_t *vha, struct qla_tgt_cmd *cmd,
578 unsigned char *cdb, uint32_t data_length, int fcp_task_attr,
579 int data_dir, int bidi)
580{
581 struct se_cmd *se_cmd = &cmd->se_cmd;
582 struct se_session *se_sess;
583 struct qla_tgt_sess *sess;
584 int flags = TARGET_SCF_ACK_KREF;
585
586 if (bidi)
587 flags |= TARGET_SCF_BIDI_OP;
588
589 sess = cmd->sess;
590 if (!sess) {
591 pr_err("Unable to locate struct qla_tgt_sess from qla_tgt_cmd\n");
592 return -EINVAL;
593 }
594
595 se_sess = sess->se_sess;
596 if (!se_sess) {
597 pr_err("Unable to locate active struct se_session\n");
598 return -EINVAL;
599 }
600
601 target_submit_cmd(se_cmd, se_sess, cdb, &cmd->sense_buffer[0],
602 cmd->unpacked_lun, data_length, fcp_task_attr,
603 data_dir, flags);
604 return 0;
605}
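/*
 * Illustrative note: target_submit_cmd() hands the descriptor to TCM core
 * for CDB parsing and backend execution; because TARGET_SCF_ACK_KREF is set
 * above, the per-command session reference is expected to be dropped later
 * via tcm_qla2xxx_check_stop_free() -> target_put_sess_cmd().
 */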
606
607static void tcm_qla2xxx_do_rsp(struct work_struct *work)
608{
609 struct qla_tgt_cmd *cmd = container_of(work, struct qla_tgt_cmd, work);
610 /*
611 * Dispatch ->queue_status from workqueue process context
612 */
613 transport_generic_request_failure(&cmd->se_cmd);
614}
615
616/*
617 * Called from qla_target.c:qlt_do_ctio_completion()
618 */
619static int tcm_qla2xxx_handle_data(struct qla_tgt_cmd *cmd)
620{
621 struct se_cmd *se_cmd = &cmd->se_cmd;
622 unsigned long flags;
623 /*
624 * Ensure that the complete FCP WRITE payload has been received.
625 * Otherwise return an exception via CHECK_CONDITION status.
626 */
627 if (!cmd->write_data_transferred) {
628 /*
629 * Check if se_cmd has already been aborted via LUN_RESET, and
630 * waiting upon completion in tcm_qla2xxx_write_pending_status()
631 */
632 spin_lock_irqsave(&se_cmd->t_state_lock, flags);
633 if (se_cmd->transport_state & CMD_T_ABORTED) {
634 spin_unlock_irqrestore(&se_cmd->t_state_lock, flags);
635 complete(&se_cmd->t_transport_stop_comp);
636 return 0;
637 }
638 spin_unlock_irqrestore(&se_cmd->t_state_lock, flags);
639
640 se_cmd->scsi_sense_reason = TCM_CHECK_CONDITION_ABORT_CMD;
641 INIT_WORK(&cmd->work, tcm_qla2xxx_do_rsp);
642 queue_work(tcm_qla2xxx_free_wq, &cmd->work);
643 return 0;
644 }
645 /*
646 * We now tell TCM to queue this WRITE CDB with TRANSPORT_PROCESS_WRITE
647 * status to the backstore processing thread.
648 */
649 return transport_generic_handle_data(&cmd->se_cmd);
650}
651
652/*
653 * Called from qla_target.c:qlt_issue_task_mgmt()
654 */
655int tcm_qla2xxx_handle_tmr(struct qla_tgt_mgmt_cmd *mcmd, uint32_t lun,
656 uint8_t tmr_func, uint32_t tag)
657{
658 struct qla_tgt_sess *sess = mcmd->sess;
659 struct se_cmd *se_cmd = &mcmd->se_cmd;
660
661 return target_submit_tmr(se_cmd, sess->se_sess, NULL, lun, mcmd,
662 tmr_func, GFP_ATOMIC, tag, TARGET_SCF_ACK_KREF);
663}
664
665static int tcm_qla2xxx_queue_data_in(struct se_cmd *se_cmd)
666{
667 struct qla_tgt_cmd *cmd = container_of(se_cmd,
668 struct qla_tgt_cmd, se_cmd);
669
670 cmd->bufflen = se_cmd->data_length;
671 cmd->dma_data_direction = tcm_qla2xxx_mapping_dir(se_cmd);
672 cmd->aborted = (se_cmd->transport_state & CMD_T_ABORTED);
673
674 cmd->sg_cnt = se_cmd->t_data_nents;
675 cmd->sg = se_cmd->t_data_sg;
676 cmd->offset = 0;
677
678 /*
 679 * Now queue completed DATA_IN to the qla2xxx LLD and response ring
680 */
681 return qlt_xmit_response(cmd, QLA_TGT_XMIT_DATA|QLA_TGT_XMIT_STATUS,
682 se_cmd->scsi_status);
683}
684
685static int tcm_qla2xxx_queue_status(struct se_cmd *se_cmd)
686{
687 struct qla_tgt_cmd *cmd = container_of(se_cmd,
688 struct qla_tgt_cmd, se_cmd);
689 int xmit_type = QLA_TGT_XMIT_STATUS;
690
691 cmd->bufflen = se_cmd->data_length;
692 cmd->sg = NULL;
693 cmd->sg_cnt = 0;
694 cmd->offset = 0;
695 cmd->dma_data_direction = tcm_qla2xxx_mapping_dir(se_cmd);
696 cmd->aborted = (se_cmd->transport_state & CMD_T_ABORTED);
697
698 if (se_cmd->data_direction == DMA_FROM_DEVICE) {
699 /*
700 * For FCP_READ with CHECK_CONDITION status, clear cmd->bufflen
701 * for qla_tgt_xmit_response LLD code
702 */
703 se_cmd->se_cmd_flags |= SCF_UNDERFLOW_BIT;
704 se_cmd->residual_count = se_cmd->data_length;
705
706 cmd->bufflen = 0;
707 }
708 /*
709 * Now queue status response to qla2xxx LLD code and response ring
710 */
711 return qlt_xmit_response(cmd, xmit_type, se_cmd->scsi_status);
712}
713
714static int tcm_qla2xxx_queue_tm_rsp(struct se_cmd *se_cmd)
715{
716 struct se_tmr_req *se_tmr = se_cmd->se_tmr_req;
717 struct qla_tgt_mgmt_cmd *mcmd = container_of(se_cmd,
718 struct qla_tgt_mgmt_cmd, se_cmd);
719
720 pr_debug("queue_tm_rsp: mcmd: %p func: 0x%02x response: 0x%02x\n",
721 mcmd, se_tmr->function, se_tmr->response);
722 /*
723 * Do translation between TCM TM response codes and
724 * QLA2xxx FC TM response codes.
725 */
726 switch (se_tmr->response) {
727 case TMR_FUNCTION_COMPLETE:
728 mcmd->fc_tm_rsp = FC_TM_SUCCESS;
729 break;
730 case TMR_TASK_DOES_NOT_EXIST:
731 mcmd->fc_tm_rsp = FC_TM_BAD_CMD;
732 break;
733 case TMR_FUNCTION_REJECTED:
734 mcmd->fc_tm_rsp = FC_TM_REJECT;
735 break;
736 case TMR_LUN_DOES_NOT_EXIST:
737 default:
738 mcmd->fc_tm_rsp = FC_TM_FAILED;
739 break;
740 }
741 /*
742 * Queue the TM response to QLA2xxx LLD to build a
743 * CTIO response packet.
744 */
745 qlt_xmit_tm_rsp(mcmd);
746
747 return 0;
748}
749
750static u16 tcm_qla2xxx_get_fabric_sense_len(void)
751{
752 return 0;
753}
754
755static u16 tcm_qla2xxx_set_fabric_sense_len(struct se_cmd *se_cmd,
756 u32 sense_length)
757{
758 return 0;
759}
760
761/* Local pointer to allocated TCM configfs fabric module */
762struct target_fabric_configfs *tcm_qla2xxx_fabric_configfs;
763struct target_fabric_configfs *tcm_qla2xxx_npiv_fabric_configfs;
764
765static int tcm_qla2xxx_setup_nacl_from_rport(
766 struct se_portal_group *se_tpg,
767 struct se_node_acl *se_nacl,
768 struct tcm_qla2xxx_lport *lport,
769 struct tcm_qla2xxx_nacl *nacl,
770 u64 rport_wwnn)
771{
772 struct scsi_qla_host *vha = lport->qla_vha;
773 struct Scsi_Host *sh = vha->host;
774 struct fc_host_attrs *fc_host = shost_to_fc_host(sh);
775 struct fc_rport *rport;
776 unsigned long flags;
777 void *node;
778 int rc;
779
780 /*
781 * Scan the existing rports, and create a session for the
 782 * explicit NodeACL if a matching rport->node_name already
783 * exists.
784 */
785 spin_lock_irqsave(sh->host_lock, flags);
786 list_for_each_entry(rport, &fc_host->rports, peers) {
787 if (rport_wwnn != rport->node_name)
788 continue;
789
790 pr_debug("Located existing rport_wwpn and rport->node_name: 0x%016LX, port_id: 0x%04x\n",
791 rport->node_name, rport->port_id);
792 nacl->nport_id = rport->port_id;
793
794 spin_unlock_irqrestore(sh->host_lock, flags);
795
796 spin_lock_irqsave(&vha->hw->hardware_lock, flags);
797 node = btree_lookup32(&lport->lport_fcport_map, rport->port_id);
798 if (node) {
799 rc = btree_update32(&lport->lport_fcport_map,
800 rport->port_id, se_nacl);
801 } else {
802 rc = btree_insert32(&lport->lport_fcport_map,
803 rport->port_id, se_nacl,
804 GFP_ATOMIC);
805 }
806 spin_unlock_irqrestore(&vha->hw->hardware_lock, flags);
807
808 if (rc) {
 809 pr_err("Unable to insert se_nacl into fcport_map\n");
810 WARN_ON(rc > 0);
811 return rc;
812 }
813
814 pr_debug("Inserted into fcport_map: %p for WWNN: 0x%016LX, port_id: 0x%08x\n",
815 se_nacl, rport_wwnn, nacl->nport_id);
816
817 return 1;
818 }
819 spin_unlock_irqrestore(sh->host_lock, flags);
820
821 return 0;
822}
823
824/*
825 * Expected to be called with struct qla_hw_data->hardware_lock held
826 */
827static void tcm_qla2xxx_clear_nacl_from_fcport_map(struct qla_tgt_sess *sess)
828{
829 struct se_node_acl *se_nacl = sess->se_sess->se_node_acl;
830 struct se_portal_group *se_tpg = se_nacl->se_tpg;
831 struct se_wwn *se_wwn = se_tpg->se_tpg_wwn;
832 struct tcm_qla2xxx_lport *lport = container_of(se_wwn,
833 struct tcm_qla2xxx_lport, lport_wwn);
834 struct tcm_qla2xxx_nacl *nacl = container_of(se_nacl,
835 struct tcm_qla2xxx_nacl, se_node_acl);
836 void *node;
837
838 pr_debug("fc_rport domain: port_id 0x%06x\n", nacl->nport_id);
839
840 node = btree_remove32(&lport->lport_fcport_map, nacl->nport_id);
841 WARN_ON(node && (node != se_nacl));
842
843 pr_debug("Removed from fcport_map: %p for WWNN: 0x%016LX, port_id: 0x%06x\n",
844 se_nacl, nacl->nport_wwnn, nacl->nport_id);
845}
846
847static void tcm_qla2xxx_release_session(struct kref *kref)
848{
849 struct se_session *se_sess = container_of(kref,
850 struct se_session, sess_kref);
851
852 qlt_unreg_sess(se_sess->fabric_sess_ptr);
853}
854
855static void tcm_qla2xxx_put_session(struct se_session *se_sess)
856{
857 struct qla_tgt_sess *sess = se_sess->fabric_sess_ptr;
858 struct qla_hw_data *ha = sess->vha->hw;
859 unsigned long flags;
860
861 spin_lock_irqsave(&ha->hardware_lock, flags);
862 kref_put(&se_sess->sess_kref, tcm_qla2xxx_release_session);
863 spin_unlock_irqrestore(&ha->hardware_lock, flags);
864}
865
 866static void tcm_qla2xxx_put_sess(struct qla_tgt_sess *sess)
 867{
 868 tcm_qla2xxx_put_session(sess->se_sess);
869}
870
871static void tcm_qla2xxx_shutdown_sess(struct qla_tgt_sess *sess)
872{
873 tcm_qla2xxx_shutdown_session(sess->se_sess);
874}
875
876static struct se_node_acl *tcm_qla2xxx_make_nodeacl(
877 struct se_portal_group *se_tpg,
878 struct config_group *group,
879 const char *name)
880{
881 struct se_wwn *se_wwn = se_tpg->se_tpg_wwn;
882 struct tcm_qla2xxx_lport *lport = container_of(se_wwn,
883 struct tcm_qla2xxx_lport, lport_wwn);
884 struct se_node_acl *se_nacl, *se_nacl_new;
885 struct tcm_qla2xxx_nacl *nacl;
886 u64 wwnn;
887 u32 qla2xxx_nexus_depth;
888 int rc;
889
890 if (tcm_qla2xxx_parse_wwn(name, &wwnn, 1) < 0)
891 return ERR_PTR(-EINVAL);
892
893 se_nacl_new = tcm_qla2xxx_alloc_fabric_acl(se_tpg);
894 if (!se_nacl_new)
895 return ERR_PTR(-ENOMEM);
896/* #warning FIXME: Hardcoded qla2xxx_nexus depth in tcm_qla2xxx_make_nodeacl */
897 qla2xxx_nexus_depth = 1;
898
899 /*
900 * se_nacl_new may be released by core_tpg_add_initiator_node_acl()
 901 * when converting a NodeACL from demo mode -> explicit
902 */
903 se_nacl = core_tpg_add_initiator_node_acl(se_tpg, se_nacl_new,
904 name, qla2xxx_nexus_depth);
905 if (IS_ERR(se_nacl)) {
906 tcm_qla2xxx_release_fabric_acl(se_tpg, se_nacl_new);
907 return se_nacl;
908 }
909 /*
910 * Locate our struct tcm_qla2xxx_nacl and set the FC Nport WWPN
911 */
912 nacl = container_of(se_nacl, struct tcm_qla2xxx_nacl, se_node_acl);
913 nacl->nport_wwnn = wwnn;
914 tcm_qla2xxx_format_wwn(&nacl->nport_name[0], TCM_QLA2XXX_NAMELEN, wwnn);
915 /*
 916 * Setup a se_nacl handle based on a matching struct fc_rport setup
917 * via drivers/scsi/qla2xxx/qla_init.c:qla2x00_reg_remote_port()
918 */
919 rc = tcm_qla2xxx_setup_nacl_from_rport(se_tpg, se_nacl, lport,
920 nacl, wwnn);
921 if (rc < 0) {
922 tcm_qla2xxx_release_fabric_acl(se_tpg, se_nacl_new);
923 return ERR_PTR(rc);
924 }
925
926 return se_nacl;
927}
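/*
 * Usage sketch (assumption: the standard LIO configfs layout mounted at
 * /sys/kernel/config): an explicit NodeACL is normally created with e.g.
 *
 *   mkdir /sys/kernel/config/target/qla2xxx/<lport_wwpn>/tpgt_1/acls/<wwpn>
 *
 * where <wwpn> must pass tcm_qla2xxx_parse_wwn() in strict mode.
 */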
928
929static void tcm_qla2xxx_drop_nodeacl(struct se_node_acl *se_acl)
930{
931 struct se_portal_group *se_tpg = se_acl->se_tpg;
932 struct tcm_qla2xxx_nacl *nacl = container_of(se_acl,
933 struct tcm_qla2xxx_nacl, se_node_acl);
934
935 core_tpg_del_initiator_node_acl(se_tpg, se_acl, 1);
936 kfree(nacl);
937}
938
939/* Start items for tcm_qla2xxx_tpg_attrib_cit */
940
941#define DEF_QLA_TPG_ATTRIB(name) \
942 \
943static ssize_t tcm_qla2xxx_tpg_attrib_show_##name( \
944 struct se_portal_group *se_tpg, \
945 char *page) \
946{ \
947 struct tcm_qla2xxx_tpg *tpg = container_of(se_tpg, \
948 struct tcm_qla2xxx_tpg, se_tpg); \
949 \
950 return sprintf(page, "%u\n", QLA_TPG_ATTRIB(tpg)->name); \
951} \
952 \
953static ssize_t tcm_qla2xxx_tpg_attrib_store_##name( \
954 struct se_portal_group *se_tpg, \
955 const char *page, \
956 size_t count) \
957{ \
958 struct tcm_qla2xxx_tpg *tpg = container_of(se_tpg, \
959 struct tcm_qla2xxx_tpg, se_tpg); \
960 unsigned long val; \
961 int ret; \
962 \
963 ret = kstrtoul(page, 0, &val); \
964 if (ret < 0) { \
965 pr_err("kstrtoul() failed with" \
966 " ret: %d\n", ret); \
967 return -EINVAL; \
968 } \
969 ret = tcm_qla2xxx_set_attrib_##name(tpg, val); \
970 \
971 return (!ret) ? count : -EINVAL; \
972}
973
974#define DEF_QLA_TPG_ATTR_BOOL(_name) \
975 \
976static int tcm_qla2xxx_set_attrib_##_name( \
977 struct tcm_qla2xxx_tpg *tpg, \
978 unsigned long val) \
979{ \
980 struct tcm_qla2xxx_tpg_attrib *a = &tpg->tpg_attrib; \
981 \
982 if ((val != 0) && (val != 1)) { \
983 pr_err("Illegal boolean value %lu\n", val); \
984 return -EINVAL; \
985 } \
986 \
987 a->_name = val; \
988 return 0; \
989}
990
991#define QLA_TPG_ATTR(_name, _mode) \
992 TF_TPG_ATTRIB_ATTR(tcm_qla2xxx, _name, _mode);
993
994/*
995 * Define tcm_qla2xxx_tpg_attrib_s_generate_node_acls
996 */
997DEF_QLA_TPG_ATTR_BOOL(generate_node_acls);
998DEF_QLA_TPG_ATTRIB(generate_node_acls);
999QLA_TPG_ATTR(generate_node_acls, S_IRUGO | S_IWUSR);
1000
1001/*
 1002 * Define tcm_qla2xxx_tpg_attrib_s_cache_dynamic_acls
1003 */
1004DEF_QLA_TPG_ATTR_BOOL(cache_dynamic_acls);
1005DEF_QLA_TPG_ATTRIB(cache_dynamic_acls);
1006QLA_TPG_ATTR(cache_dynamic_acls, S_IRUGO | S_IWUSR);
1007
1008/*
1009 * Define tcm_qla2xxx_tpg_attrib_s_demo_mode_write_protect
1010 */
1011DEF_QLA_TPG_ATTR_BOOL(demo_mode_write_protect);
1012DEF_QLA_TPG_ATTRIB(demo_mode_write_protect);
1013QLA_TPG_ATTR(demo_mode_write_protect, S_IRUGO | S_IWUSR);
1014
1015/*
1016 * Define tcm_qla2xxx_tpg_attrib_s_prod_mode_write_protect
1017 */
1018DEF_QLA_TPG_ATTR_BOOL(prod_mode_write_protect);
1019DEF_QLA_TPG_ATTRIB(prod_mode_write_protect);
1020QLA_TPG_ATTR(prod_mode_write_protect, S_IRUGO | S_IWUSR);
1021
1022static struct configfs_attribute *tcm_qla2xxx_tpg_attrib_attrs[] = {
1023 &tcm_qla2xxx_tpg_attrib_generate_node_acls.attr,
1024 &tcm_qla2xxx_tpg_attrib_cache_dynamic_acls.attr,
1025 &tcm_qla2xxx_tpg_attrib_demo_mode_write_protect.attr,
1026 &tcm_qla2xxx_tpg_attrib_prod_mode_write_protect.attr,
1027 NULL,
1028};
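/*
 * Illustrative note: TF_TPG_ATTRIB_ATTR() exposes each attribute above in
 * the TPG attrib group, which with the usual configfs mount ends up at e.g.
 *
 *   /sys/kernel/config/target/qla2xxx/<wwpn>/tpgt_1/attrib/generate_node_acls
 *
 * (hypothetical path; it depends on the WWPN and TPGT actually created).
 */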
1029
1030/* End items for tcm_qla2xxx_tpg_attrib_cit */
1031
1032static ssize_t tcm_qla2xxx_tpg_show_enable(
1033 struct se_portal_group *se_tpg,
1034 char *page)
1035{
1036 struct tcm_qla2xxx_tpg *tpg = container_of(se_tpg,
1037 struct tcm_qla2xxx_tpg, se_tpg);
1038
1039 return snprintf(page, PAGE_SIZE, "%d\n",
1040 atomic_read(&tpg->lport_tpg_enabled));
1041}
1042
1043static ssize_t tcm_qla2xxx_tpg_store_enable(
1044 struct se_portal_group *se_tpg,
1045 const char *page,
1046 size_t count)
1047{
1048 struct se_wwn *se_wwn = se_tpg->se_tpg_wwn;
1049 struct tcm_qla2xxx_lport *lport = container_of(se_wwn,
1050 struct tcm_qla2xxx_lport, lport_wwn);
1051 struct scsi_qla_host *vha = lport->qla_vha;
1052 struct qla_hw_data *ha = vha->hw;
1053 struct tcm_qla2xxx_tpg *tpg = container_of(se_tpg,
1054 struct tcm_qla2xxx_tpg, se_tpg);
1055 unsigned long op;
1056 int rc;
1057
1058 rc = kstrtoul(page, 0, &op);
1059 if (rc < 0) {
1060 pr_err("kstrtoul() returned %d\n", rc);
1061 return -EINVAL;
1062 }
1063 if ((op != 1) && (op != 0)) {
1064 pr_err("Illegal value for tpg_enable: %lu\n", op);
1065 return -EINVAL;
1066 }
1067
1068 if (op) {
1069 atomic_set(&tpg->lport_tpg_enabled, 1);
1070 qlt_enable_vha(vha);
1071 } else {
1072 if (!ha->tgt.qla_tgt) {
 1073 pr_err("struct qla_hw_data *ha->tgt.qla_tgt is NULL\n");
1074 return -ENODEV;
1075 }
1076 atomic_set(&tpg->lport_tpg_enabled, 0);
1077 qlt_stop_phase1(ha->tgt.qla_tgt);
1078 }
1079
1080 return count;
1081}
1082
1083TF_TPG_BASE_ATTR(tcm_qla2xxx, enable, S_IRUGO | S_IWUSR);
1084
1085static struct configfs_attribute *tcm_qla2xxx_tpg_attrs[] = {
1086 &tcm_qla2xxx_tpg_enable.attr,
1087 NULL,
1088};
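/*
 * Usage sketch (illustrative, standard configfs mount assumed): target mode
 * for a configured TPG is toggled from userspace roughly as
 *
 *   echo 1 > /sys/kernel/config/target/qla2xxx/<wwpn>/tpgt_1/enable
 *   echo 0 > /sys/kernel/config/target/qla2xxx/<wwpn>/tpgt_1/enable
 *
 * which reaches tcm_qla2xxx_tpg_store_enable() above and in turn calls
 * qlt_enable_vha() or qlt_stop_phase1().
 */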
1089
1090static struct se_portal_group *tcm_qla2xxx_make_tpg(
1091 struct se_wwn *wwn,
1092 struct config_group *group,
1093 const char *name)
1094{
1095 struct tcm_qla2xxx_lport *lport = container_of(wwn,
1096 struct tcm_qla2xxx_lport, lport_wwn);
1097 struct tcm_qla2xxx_tpg *tpg;
1098 unsigned long tpgt;
1099 int ret;
1100
1101 if (strstr(name, "tpgt_") != name)
1102 return ERR_PTR(-EINVAL);
1103 if (kstrtoul(name + 5, 10, &tpgt) || tpgt > USHRT_MAX)
1104 return ERR_PTR(-EINVAL);
1105
1106 if (!lport->qla_npiv_vp && (tpgt != 1)) {
1107 pr_err("In non NPIV mode, a single TPG=1 is used for HW port mappings\n");
1108 return ERR_PTR(-ENOSYS);
1109 }
1110
1111 tpg = kzalloc(sizeof(struct tcm_qla2xxx_tpg), GFP_KERNEL);
1112 if (!tpg) {
1113 pr_err("Unable to allocate struct tcm_qla2xxx_tpg\n");
1114 return ERR_PTR(-ENOMEM);
1115 }
1116 tpg->lport = lport;
1117 tpg->lport_tpgt = tpgt;
1118 /*
1119 * By default allow READ-ONLY TPG demo-mode access w/ cached dynamic
1120 * NodeACLs
1121 */
1122 QLA_TPG_ATTRIB(tpg)->generate_node_acls = 1;
1123 QLA_TPG_ATTRIB(tpg)->demo_mode_write_protect = 1;
1124 QLA_TPG_ATTRIB(tpg)->cache_dynamic_acls = 1;
1125
1126 ret = core_tpg_register(&tcm_qla2xxx_fabric_configfs->tf_ops, wwn,
1127 &tpg->se_tpg, tpg, TRANSPORT_TPG_TYPE_NORMAL);
1128 if (ret < 0) {
1129 kfree(tpg);
1130 return NULL;
1131 }
1132 /*
1133 * Setup local TPG=1 pointer for non NPIV mode.
1134 */
1135 if (lport->qla_npiv_vp == NULL)
1136 lport->tpg_1 = tpg;
1137
1138 return &tpg->se_tpg;
1139}
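/*
 * Illustrative note: this callback runs when userspace does something like
 *
 *   mkdir /sys/kernel/config/target/qla2xxx/<wwpn>/tpgt_1
 *
 * and, as enforced above, a non-NPIV physical port only accepts tpgt_1.
 */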
1140
1141static void tcm_qla2xxx_drop_tpg(struct se_portal_group *se_tpg)
1142{
1143 struct tcm_qla2xxx_tpg *tpg = container_of(se_tpg,
1144 struct tcm_qla2xxx_tpg, se_tpg);
1145 struct tcm_qla2xxx_lport *lport = tpg->lport;
1146 struct scsi_qla_host *vha = lport->qla_vha;
1147 struct qla_hw_data *ha = vha->hw;
1148 /*
1149 * Call into qla2x_target.c LLD logic to shutdown the active
1150 * FC Nexuses and disable target mode operation for this qla_hw_data
1151 */
1152 if (ha->tgt.qla_tgt && !ha->tgt.qla_tgt->tgt_stop)
1153 qlt_stop_phase1(ha->tgt.qla_tgt);
1154
1155 core_tpg_deregister(se_tpg);
1156 /*
1157 * Clear local TPG=1 pointer for non NPIV mode.
1158 */
1159 if (lport->qla_npiv_vp == NULL)
1160 lport->tpg_1 = NULL;
1161
1162 kfree(tpg);
1163}
1164
1165static struct se_portal_group *tcm_qla2xxx_npiv_make_tpg(
1166 struct se_wwn *wwn,
1167 struct config_group *group,
1168 const char *name)
1169{
1170 struct tcm_qla2xxx_lport *lport = container_of(wwn,
1171 struct tcm_qla2xxx_lport, lport_wwn);
1172 struct tcm_qla2xxx_tpg *tpg;
1173 unsigned long tpgt;
1174 int ret;
1175
1176 if (strstr(name, "tpgt_") != name)
1177 return ERR_PTR(-EINVAL);
1178 if (kstrtoul(name + 5, 10, &tpgt) || tpgt > USHRT_MAX)
1179 return ERR_PTR(-EINVAL);
1180
1181 tpg = kzalloc(sizeof(struct tcm_qla2xxx_tpg), GFP_KERNEL);
1182 if (!tpg) {
1183 pr_err("Unable to allocate struct tcm_qla2xxx_tpg\n");
1184 return ERR_PTR(-ENOMEM);
1185 }
1186 tpg->lport = lport;
1187 tpg->lport_tpgt = tpgt;
1188
1189 ret = core_tpg_register(&tcm_qla2xxx_npiv_fabric_configfs->tf_ops, wwn,
1190 &tpg->se_tpg, tpg, TRANSPORT_TPG_TYPE_NORMAL);
1191 if (ret < 0) {
1192 kfree(tpg);
1193 return NULL;
1194 }
1195 return &tpg->se_tpg;
1196}
1197
1198/*
1199 * Expected to be called with struct qla_hw_data->hardware_lock held
1200 */
1201static struct qla_tgt_sess *tcm_qla2xxx_find_sess_by_s_id(
1202 scsi_qla_host_t *vha,
1203 const uint8_t *s_id)
1204{
1205 struct qla_hw_data *ha = vha->hw;
1206 struct tcm_qla2xxx_lport *lport;
1207 struct se_node_acl *se_nacl;
1208 struct tcm_qla2xxx_nacl *nacl;
1209 u32 key;
1210
1211 lport = ha->tgt.target_lport_ptr;
1212 if (!lport) {
1213 pr_err("Unable to locate struct tcm_qla2xxx_lport\n");
1214 dump_stack();
1215 return NULL;
1216 }
1217
1218 key = (((unsigned long)s_id[0] << 16) |
1219 ((unsigned long)s_id[1] << 8) |
1220 (unsigned long)s_id[2]);
1221 pr_debug("find_sess_by_s_id: 0x%06x\n", key);
1222
1223 se_nacl = btree_lookup32(&lport->lport_fcport_map, key);
1224 if (!se_nacl) {
1225 pr_debug("Unable to locate s_id: 0x%06x\n", key);
1226 return NULL;
1227 }
1228 pr_debug("find_sess_by_s_id: located se_nacl: %p, initiatorname: %s\n",
1229 se_nacl, se_nacl->initiatorname);
1230
1231 nacl = container_of(se_nacl, struct tcm_qla2xxx_nacl, se_node_acl);
1232 if (!nacl->qla_tgt_sess) {
1233 pr_err("Unable to locate struct qla_tgt_sess\n");
1234 return NULL;
1235 }
1236
1237 return nacl->qla_tgt_sess;
1238}
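/*
 * Worked example (illustrative): an FC S_ID of domain/area/al_pa
 * 0x01/0x02/0x03 packs into key 0x010203, the same 24-bit key used when the
 * NodeACL was inserted into lport->lport_fcport_map.
 */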
1239
1240/*
1241 * Expected to be called with struct qla_hw_data->hardware_lock held
1242 */
1243static void tcm_qla2xxx_set_sess_by_s_id(
1244 struct tcm_qla2xxx_lport *lport,
1245 struct se_node_acl *new_se_nacl,
1246 struct tcm_qla2xxx_nacl *nacl,
1247 struct se_session *se_sess,
1248 struct qla_tgt_sess *qla_tgt_sess,
1249 uint8_t *s_id)
1250{
1251 u32 key;
1252 void *slot;
1253 int rc;
1254
1255 key = (((unsigned long)s_id[0] << 16) |
1256 ((unsigned long)s_id[1] << 8) |
1257 (unsigned long)s_id[2]);
1258 pr_debug("set_sess_by_s_id: %06x\n", key);
1259
1260 slot = btree_lookup32(&lport->lport_fcport_map, key);
1261 if (!slot) {
1262 if (new_se_nacl) {
1263 pr_debug("Setting up new fc_port entry to new_se_nacl\n");
1264 nacl->nport_id = key;
1265 rc = btree_insert32(&lport->lport_fcport_map, key,
1266 new_se_nacl, GFP_ATOMIC);
1267 if (rc)
1268 printk(KERN_ERR "Unable to insert s_id into fcport_map: %06x\n",
1269 (int)key);
1270 } else {
 1271 pr_debug("Wiping nonexistent fc_port entry\n");
1272 }
1273
1274 qla_tgt_sess->se_sess = se_sess;
1275 nacl->qla_tgt_sess = qla_tgt_sess;
1276 return;
1277 }
1278
1279 if (nacl->qla_tgt_sess) {
1280 if (new_se_nacl == NULL) {
1281 pr_debug("Clearing existing nacl->qla_tgt_sess and fc_port entry\n");
1282 btree_remove32(&lport->lport_fcport_map, key);
1283 nacl->qla_tgt_sess = NULL;
1284 return;
1285 }
1286 pr_debug("Replacing existing nacl->qla_tgt_sess and fc_port entry\n");
1287 btree_update32(&lport->lport_fcport_map, key, new_se_nacl);
1288 qla_tgt_sess->se_sess = se_sess;
1289 nacl->qla_tgt_sess = qla_tgt_sess;
1290 return;
1291 }
1292
1293 if (new_se_nacl == NULL) {
1294 pr_debug("Clearing existing fc_port entry\n");
1295 btree_remove32(&lport->lport_fcport_map, key);
1296 return;
1297 }
1298
1299 pr_debug("Replacing existing fc_port entry w/o active nacl->qla_tgt_sess\n");
1300 btree_update32(&lport->lport_fcport_map, key, new_se_nacl);
1301 qla_tgt_sess->se_sess = se_sess;
1302 nacl->qla_tgt_sess = qla_tgt_sess;
1303
1304 pr_debug("Setup nacl->qla_tgt_sess %p by s_id for se_nacl: %p, initiatorname: %s\n",
1305 nacl->qla_tgt_sess, new_se_nacl, new_se_nacl->initiatorname);
1306}
1307
1308/*
1309 * Expected to be called with struct qla_hw_data->hardware_lock held
1310 */
1311static struct qla_tgt_sess *tcm_qla2xxx_find_sess_by_loop_id(
1312 scsi_qla_host_t *vha,
1313 const uint16_t loop_id)
1314{
1315 struct qla_hw_data *ha = vha->hw;
1316 struct tcm_qla2xxx_lport *lport;
1317 struct se_node_acl *se_nacl;
1318 struct tcm_qla2xxx_nacl *nacl;
1319 struct tcm_qla2xxx_fc_loopid *fc_loopid;
1320
1321 lport = ha->tgt.target_lport_ptr;
1322 if (!lport) {
1323 pr_err("Unable to locate struct tcm_qla2xxx_lport\n");
1324 dump_stack();
1325 return NULL;
1326 }
1327
1328 pr_debug("find_sess_by_loop_id: Using loop_id: 0x%04x\n", loop_id);
1329
1330 fc_loopid = lport->lport_loopid_map + loop_id;
1331 se_nacl = fc_loopid->se_nacl;
1332 if (!se_nacl) {
1333 pr_debug("Unable to locate se_nacl by loop_id: 0x%04x\n",
1334 loop_id);
1335 return NULL;
1336 }
1337
1338 nacl = container_of(se_nacl, struct tcm_qla2xxx_nacl, se_node_acl);
1339
1340 if (!nacl->qla_tgt_sess) {
1341 pr_err("Unable to locate struct qla_tgt_sess\n");
1342 return NULL;
1343 }
1344
1345 return nacl->qla_tgt_sess;
1346}
1347
1348/*
1349 * Expected to be called with struct qla_hw_data->hardware_lock held
1350 */
1351static void tcm_qla2xxx_set_sess_by_loop_id(
1352 struct tcm_qla2xxx_lport *lport,
1353 struct se_node_acl *new_se_nacl,
1354 struct tcm_qla2xxx_nacl *nacl,
1355 struct se_session *se_sess,
1356 struct qla_tgt_sess *qla_tgt_sess,
1357 uint16_t loop_id)
1358{
1359 struct se_node_acl *saved_nacl;
1360 struct tcm_qla2xxx_fc_loopid *fc_loopid;
1361
1362 pr_debug("set_sess_by_loop_id: Using loop_id: 0x%04x\n", loop_id);
1363
1364 fc_loopid = &((struct tcm_qla2xxx_fc_loopid *)
1365 lport->lport_loopid_map)[loop_id];
1366
1367 saved_nacl = fc_loopid->se_nacl;
1368 if (!saved_nacl) {
1369 pr_debug("Setting up new fc_loopid->se_nacl to new_se_nacl\n");
1370 fc_loopid->se_nacl = new_se_nacl;
1371 if (qla_tgt_sess->se_sess != se_sess)
1372 qla_tgt_sess->se_sess = se_sess;
1373 if (nacl->qla_tgt_sess != qla_tgt_sess)
1374 nacl->qla_tgt_sess = qla_tgt_sess;
1375 return;
1376 }
1377
1378 if (nacl->qla_tgt_sess) {
1379 if (new_se_nacl == NULL) {
1380 pr_debug("Clearing nacl->qla_tgt_sess and fc_loopid->se_nacl\n");
1381 fc_loopid->se_nacl = NULL;
1382 nacl->qla_tgt_sess = NULL;
1383 return;
1384 }
1385
1386 pr_debug("Replacing existing nacl->qla_tgt_sess and fc_loopid->se_nacl\n");
1387 fc_loopid->se_nacl = new_se_nacl;
1388 if (qla_tgt_sess->se_sess != se_sess)
1389 qla_tgt_sess->se_sess = se_sess;
1390 if (nacl->qla_tgt_sess != qla_tgt_sess)
1391 nacl->qla_tgt_sess = qla_tgt_sess;
1392 return;
1393 }
1394
1395 if (new_se_nacl == NULL) {
1396 pr_debug("Clearing fc_loopid->se_nacl\n");
1397 fc_loopid->se_nacl = NULL;
1398 return;
1399 }
1400
1401 pr_debug("Replacing existing fc_loopid->se_nacl w/o active nacl->qla_tgt_sess\n");
1402 fc_loopid->se_nacl = new_se_nacl;
1403 if (qla_tgt_sess->se_sess != se_sess)
1404 qla_tgt_sess->se_sess = se_sess;
1405 if (nacl->qla_tgt_sess != qla_tgt_sess)
1406 nacl->qla_tgt_sess = qla_tgt_sess;
1407
1408 pr_debug("Setup nacl->qla_tgt_sess %p by loop_id for se_nacl: %p, initiatorname: %s\n",
1409 nacl->qla_tgt_sess, new_se_nacl, new_se_nacl->initiatorname);
1410}
1411
1412static void tcm_qla2xxx_free_session(struct qla_tgt_sess *sess)
1413{
1414 struct qla_tgt *tgt = sess->tgt;
1415 struct qla_hw_data *ha = tgt->ha;
1416 struct se_session *se_sess;
1417 struct se_node_acl *se_nacl;
1418 struct tcm_qla2xxx_lport *lport;
1419 struct tcm_qla2xxx_nacl *nacl;
1420 unsigned char be_sid[3];
1421 unsigned long flags;
1422
1423 BUG_ON(in_interrupt());
1424
1425 se_sess = sess->se_sess;
1426 if (!se_sess) {
1427 pr_err("struct qla_tgt_sess->se_sess is NULL\n");
1428 dump_stack();
1429 return;
1430 }
1431 se_nacl = se_sess->se_node_acl;
1432 nacl = container_of(se_nacl, struct tcm_qla2xxx_nacl, se_node_acl);
1433
1434 lport = ha->tgt.target_lport_ptr;
1435 if (!lport) {
1436 pr_err("Unable to locate struct tcm_qla2xxx_lport\n");
1437 dump_stack();
1438 return;
1439 }
1440 target_wait_for_sess_cmds(se_sess, 0);
1441 /*
1442 * And now clear the se_nacl and session pointers from our HW lport
1443 * mappings for fabric S_ID and LOOP_ID.
1444 */
1445 memset(&be_sid, 0, 3);
1446 be_sid[0] = sess->s_id.b.domain;
1447 be_sid[1] = sess->s_id.b.area;
1448 be_sid[2] = sess->s_id.b.al_pa;
1449
1450 spin_lock_irqsave(&ha->hardware_lock, flags);
1451 tcm_qla2xxx_set_sess_by_s_id(lport, NULL, nacl, se_sess,
1452 sess, be_sid);
1453 tcm_qla2xxx_set_sess_by_loop_id(lport, NULL, nacl, se_sess,
1454 sess, sess->loop_id);
1455 spin_unlock_irqrestore(&ha->hardware_lock, flags);
1456
1457 transport_deregister_session_configfs(sess->se_sess);
1458 transport_deregister_session(sess->se_sess);
1459}
1460
1461/*
1462 * Called via qlt_create_sess():ha->qla2x_tmpl->check_initiator_node_acl()
1463 * to locate struct se_node_acl
1464 */
1465static int tcm_qla2xxx_check_initiator_node_acl(
1466 scsi_qla_host_t *vha,
1467 unsigned char *fc_wwpn,
1468 void *qla_tgt_sess,
1469 uint8_t *s_id,
1470 uint16_t loop_id)
1471{
1472 struct qla_hw_data *ha = vha->hw;
1473 struct tcm_qla2xxx_lport *lport;
1474 struct tcm_qla2xxx_tpg *tpg;
1475 struct tcm_qla2xxx_nacl *nacl;
1476 struct se_portal_group *se_tpg;
1477 struct se_node_acl *se_nacl;
1478 struct se_session *se_sess;
1479 struct qla_tgt_sess *sess = qla_tgt_sess;
1480 unsigned char port_name[36];
1481 unsigned long flags;
1482
1483 lport = ha->tgt.target_lport_ptr;
1484 if (!lport) {
1485 pr_err("Unable to locate struct tcm_qla2xxx_lport\n");
1486 dump_stack();
1487 return -EINVAL;
1488 }
1489 /*
 1490 * Locate the TPG=1 reference.
1491 */
1492 tpg = lport->tpg_1;
1493 if (!tpg) {
 1494 pr_err("Unable to locate struct tcm_qla2xxx_lport->tpg_1\n");
1495 return -EINVAL;
1496 }
1497 se_tpg = &tpg->se_tpg;
1498
1499 se_sess = transport_init_session();
1500 if (IS_ERR(se_sess)) {
1501 pr_err("Unable to initialize struct se_session\n");
1502 return PTR_ERR(se_sess);
1503 }
1504 /*
 1505 * Format the FCP Initiator port_name into colon-separated values to
 1506 * match the format used by tcm_qla2xxx explicit ConfigFS NodeACLs.
1507 */
1508 memset(&port_name, 0, 36);
1509 snprintf(port_name, 36, "%02x:%02x:%02x:%02x:%02x:%02x:%02x:%02x",
1510 fc_wwpn[0], fc_wwpn[1], fc_wwpn[2], fc_wwpn[3], fc_wwpn[4],
1511 fc_wwpn[5], fc_wwpn[6], fc_wwpn[7]);
1512 /*
 1513 * Locate our struct se_node_acl either from an explicit NodeACL created
1514 * via ConfigFS, or via running in TPG demo mode.
1515 */
1516 se_sess->se_node_acl = core_tpg_check_initiator_node_acl(se_tpg,
1517 port_name);
1518 if (!se_sess->se_node_acl) {
1519 transport_free_session(se_sess);
1520 return -EINVAL;
1521 }
1522 se_nacl = se_sess->se_node_acl;
1523 nacl = container_of(se_nacl, struct tcm_qla2xxx_nacl, se_node_acl);
1524 /*
1525 * And now setup the new se_nacl and session pointers into our HW lport
1526 * mappings for fabric S_ID and LOOP_ID.
1527 */
1528 spin_lock_irqsave(&ha->hardware_lock, flags);
1529 tcm_qla2xxx_set_sess_by_s_id(lport, se_nacl, nacl, se_sess,
1530 qla_tgt_sess, s_id);
1531 tcm_qla2xxx_set_sess_by_loop_id(lport, se_nacl, nacl, se_sess,
1532 qla_tgt_sess, loop_id);
1533 spin_unlock_irqrestore(&ha->hardware_lock, flags);
1534 /*
1535 * Finally register the new FC Nexus with TCM
1536 */
1537 __transport_register_session(se_nacl->se_tpg, se_nacl, se_sess, sess);
1538
1539 return 0;
1540}
1541
1542/*
1543 * Calls into tcm_qla2xxx used by qla2xxx LLD I/O path.
1544 */
1545static struct qla_tgt_func_tmpl tcm_qla2xxx_template = {
1546 .handle_cmd = tcm_qla2xxx_handle_cmd,
1547 .handle_data = tcm_qla2xxx_handle_data,
1548 .handle_tmr = tcm_qla2xxx_handle_tmr,
1549 .free_cmd = tcm_qla2xxx_free_cmd,
1550 .free_mcmd = tcm_qla2xxx_free_mcmd,
1551 .free_session = tcm_qla2xxx_free_session,
1552 .check_initiator_node_acl = tcm_qla2xxx_check_initiator_node_acl,
1553 .find_sess_by_s_id = tcm_qla2xxx_find_sess_by_s_id,
1554 .find_sess_by_loop_id = tcm_qla2xxx_find_sess_by_loop_id,
1555 .clear_nacl_from_fcport_map = tcm_qla2xxx_clear_nacl_from_fcport_map,
1556 .put_sess = tcm_qla2xxx_put_sess,
1557 .shutdown_sess = tcm_qla2xxx_shutdown_sess,
1558};
1559
1560static int tcm_qla2xxx_init_lport(struct tcm_qla2xxx_lport *lport)
1561{
1562 int rc;
1563
1564 rc = btree_init32(&lport->lport_fcport_map);
1565 if (rc) {
1566 pr_err("Unable to initialize lport->lport_fcport_map btree\n");
1567 return rc;
1568 }
1569
1570 lport->lport_loopid_map = vmalloc(sizeof(struct tcm_qla2xxx_fc_loopid) *
1571 65536);
1572 if (!lport->lport_loopid_map) {
1573 pr_err("Unable to allocate lport->lport_loopid_map of %zu bytes\n",
1574 sizeof(struct tcm_qla2xxx_fc_loopid) * 65536);
1575 btree_destroy32(&lport->lport_fcport_map);
1576 return -ENOMEM;
1577 }
1578 memset(lport->lport_loopid_map, 0, sizeof(struct tcm_qla2xxx_fc_loopid)
1579 * 65536);
1580 pr_debug("qla2xxx: Allocated lport_loopid_map of %zu bytes\n",
1581 sizeof(struct tcm_qla2xxx_fc_loopid) * 65536);
1582 return 0;
1583}
1584
1585static int tcm_qla2xxx_lport_register_cb(struct scsi_qla_host *vha)
1586{
1587 struct qla_hw_data *ha = vha->hw;
1588 struct tcm_qla2xxx_lport *lport;
1589 /*
1590 * Setup local pointer to vha, NPIV VP pointer (if present) and
1591 * vha->tcm_lport pointer
1592 */
1593 lport = (struct tcm_qla2xxx_lport *)ha->tgt.target_lport_ptr;
1594 lport->qla_vha = vha;
1595
1596 return 0;
1597}
1598
1599static struct se_wwn *tcm_qla2xxx_make_lport(
1600 struct target_fabric_configfs *tf,
1601 struct config_group *group,
1602 const char *name)
1603{
1604 struct tcm_qla2xxx_lport *lport;
1605 u64 wwpn;
1606 int ret = -ENODEV;
1607
1608 if (tcm_qla2xxx_parse_wwn(name, &wwpn, 1) < 0)
1609 return ERR_PTR(-EINVAL);
1610
1611 lport = kzalloc(sizeof(struct tcm_qla2xxx_lport), GFP_KERNEL);
1612 if (!lport) {
1613 pr_err("Unable to allocate struct tcm_qla2xxx_lport\n");
1614 return ERR_PTR(-ENOMEM);
1615 }
1616 lport->lport_wwpn = wwpn;
1617 tcm_qla2xxx_format_wwn(&lport->lport_name[0], TCM_QLA2XXX_NAMELEN,
1618 wwpn);
1619
1620 ret = tcm_qla2xxx_init_lport(lport);
1621 if (ret != 0)
1622 goto out;
1623
1624 ret = qlt_lport_register(&tcm_qla2xxx_template, wwpn,
1625 tcm_qla2xxx_lport_register_cb, lport);
1626 if (ret != 0)
1627 goto out_lport;
1628
1629 return &lport->lport_wwn;
1630out_lport:
1631 vfree(lport->lport_loopid_map);
1632 btree_destroy32(&lport->lport_fcport_map);
1633out:
1634 kfree(lport);
1635 return ERR_PTR(ret);
1636}
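/*
 * Usage sketch (assumption: standard configfs layout): the lport is created
 * with e.g.
 *
 *   mkdir /sys/kernel/config/target/qla2xxx/21:00:00:24:ff:31:4c:48
 *
 * using a hypothetical WWPN that must match a physical qla2xxx port so that
 * qlt_lport_register() can locate the corresponding scsi_qla_host.
 */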
1637
1638static void tcm_qla2xxx_drop_lport(struct se_wwn *wwn)
1639{
1640 struct tcm_qla2xxx_lport *lport = container_of(wwn,
1641 struct tcm_qla2xxx_lport, lport_wwn);
1642 struct scsi_qla_host *vha = lport->qla_vha;
1643 struct qla_hw_data *ha = vha->hw;
1644 struct se_node_acl *node;
1645 u32 key = 0;
1646
1647 /*
1648 * Call into qla2x_target.c LLD logic to complete the
1649 * shutdown of struct qla_tgt after the call to
 1650 * qlt_stop_phase1() from tcm_qla2xxx_drop_tpg() above.
1651 */
1652 if (ha->tgt.qla_tgt && !ha->tgt.qla_tgt->tgt_stopped)
1653 qlt_stop_phase2(ha->tgt.qla_tgt);
1654
1655 qlt_lport_deregister(vha);
1656
1657 vfree(lport->lport_loopid_map);
1658 btree_for_each_safe32(&lport->lport_fcport_map, key, node)
1659 btree_remove32(&lport->lport_fcport_map, key);
1660 btree_destroy32(&lport->lport_fcport_map);
1661 kfree(lport);
1662}
1663
1664static struct se_wwn *tcm_qla2xxx_npiv_make_lport(
1665 struct target_fabric_configfs *tf,
1666 struct config_group *group,
1667 const char *name)
1668{
1669 struct tcm_qla2xxx_lport *lport;
1670 u64 npiv_wwpn, npiv_wwnn;
1671 int ret;
1672
1673 if (tcm_qla2xxx_npiv_parse_wwn(name, strlen(name)+1,
1674 &npiv_wwpn, &npiv_wwnn) < 0)
1675 return ERR_PTR(-EINVAL);
1676
1677 lport = kzalloc(sizeof(struct tcm_qla2xxx_lport), GFP_KERNEL);
1678 if (!lport) {
1679 pr_err("Unable to allocate struct tcm_qla2xxx_lport for NPIV\n");
1680 return ERR_PTR(-ENOMEM);
1681 }
1682 lport->lport_npiv_wwpn = npiv_wwpn;
1683 lport->lport_npiv_wwnn = npiv_wwnn;
1684 tcm_qla2xxx_npiv_format_wwn(&lport->lport_npiv_name[0],
1685 TCM_QLA2XXX_NAMELEN, npiv_wwpn, npiv_wwnn);
1686
1687/* FIXME: tcm_qla2xxx_npiv_make_lport */
1688 ret = -ENOSYS;
1689 if (ret != 0)
1690 goto out;
1691
1692 return &lport->lport_wwn;
1693out:
1694 kfree(lport);
1695 return ERR_PTR(ret);
1696}
1697
1698static void tcm_qla2xxx_npiv_drop_lport(struct se_wwn *wwn)
1699{
1700 struct tcm_qla2xxx_lport *lport = container_of(wwn,
1701 struct tcm_qla2xxx_lport, lport_wwn);
1702 struct scsi_qla_host *vha = lport->qla_vha;
1703 struct Scsi_Host *sh = vha->host;
1704 /*
1705 * Notify libfc that we want to release the lport->npiv_vport
1706 */
1707 fc_vport_terminate(lport->npiv_vport);
1708
1709 scsi_host_put(sh);
1710 kfree(lport);
1711}
1712
1713
1714static ssize_t tcm_qla2xxx_wwn_show_attr_version(
1715 struct target_fabric_configfs *tf,
1716 char *page)
1717{
1718 return sprintf(page,
1719 "TCM QLOGIC QLA2XXX NPIV capable fabric module %s on %s/%s on "
1720 UTS_RELEASE"\n", TCM_QLA2XXX_VERSION, utsname()->sysname,
1721 utsname()->machine);
1722}
1723
1724TF_WWN_ATTR_RO(tcm_qla2xxx, version);
1725
1726static struct configfs_attribute *tcm_qla2xxx_wwn_attrs[] = {
1727 &tcm_qla2xxx_wwn_version.attr,
1728 NULL,
1729};
1730
1731static struct target_core_fabric_ops tcm_qla2xxx_ops = {
1732 .get_fabric_name = tcm_qla2xxx_get_fabric_name,
1733 .get_fabric_proto_ident = tcm_qla2xxx_get_fabric_proto_ident,
1734 .tpg_get_wwn = tcm_qla2xxx_get_fabric_wwn,
1735 .tpg_get_tag = tcm_qla2xxx_get_tag,
1736 .tpg_get_default_depth = tcm_qla2xxx_get_default_depth,
1737 .tpg_get_pr_transport_id = tcm_qla2xxx_get_pr_transport_id,
1738 .tpg_get_pr_transport_id_len = tcm_qla2xxx_get_pr_transport_id_len,
1739 .tpg_parse_pr_out_transport_id = tcm_qla2xxx_parse_pr_out_transport_id,
1740 .tpg_check_demo_mode = tcm_qla2xxx_check_demo_mode,
1741 .tpg_check_demo_mode_cache = tcm_qla2xxx_check_demo_mode_cache,
1742 .tpg_check_demo_mode_write_protect =
1743 tcm_qla2xxx_check_demo_write_protect,
1744 .tpg_check_prod_mode_write_protect =
1745 tcm_qla2xxx_check_prod_write_protect,
1746 .tpg_check_demo_mode_login_only = tcm_qla2xxx_check_true,
1747 .tpg_alloc_fabric_acl = tcm_qla2xxx_alloc_fabric_acl,
1748 .tpg_release_fabric_acl = tcm_qla2xxx_release_fabric_acl,
1749 .tpg_get_inst_index = tcm_qla2xxx_tpg_get_inst_index,
1750 .new_cmd_map = NULL,
1751 .check_stop_free = tcm_qla2xxx_check_stop_free,
1752 .release_cmd = tcm_qla2xxx_release_cmd,
 1753 .put_session = tcm_qla2xxx_put_session,
1754 .shutdown_session = tcm_qla2xxx_shutdown_session,
1755 .close_session = tcm_qla2xxx_close_session,
1756 .sess_get_index = tcm_qla2xxx_sess_get_index,
1757 .sess_get_initiator_sid = NULL,
1758 .write_pending = tcm_qla2xxx_write_pending,
1759 .write_pending_status = tcm_qla2xxx_write_pending_status,
1760 .set_default_node_attributes = tcm_qla2xxx_set_default_node_attrs,
1761 .get_task_tag = tcm_qla2xxx_get_task_tag,
1762 .get_cmd_state = tcm_qla2xxx_get_cmd_state,
1763 .queue_data_in = tcm_qla2xxx_queue_data_in,
1764 .queue_status = tcm_qla2xxx_queue_status,
1765 .queue_tm_rsp = tcm_qla2xxx_queue_tm_rsp,
1766 .get_fabric_sense_len = tcm_qla2xxx_get_fabric_sense_len,
1767 .set_fabric_sense_len = tcm_qla2xxx_set_fabric_sense_len,
1768 /*
1769 * Setup function pointers for generic logic in
1770 * target_core_fabric_configfs.c
1771 */
1772 .fabric_make_wwn = tcm_qla2xxx_make_lport,
1773 .fabric_drop_wwn = tcm_qla2xxx_drop_lport,
1774 .fabric_make_tpg = tcm_qla2xxx_make_tpg,
1775 .fabric_drop_tpg = tcm_qla2xxx_drop_tpg,
1776 .fabric_post_link = NULL,
1777 .fabric_pre_unlink = NULL,
1778 .fabric_make_np = NULL,
1779 .fabric_drop_np = NULL,
1780 .fabric_make_nodeacl = tcm_qla2xxx_make_nodeacl,
1781 .fabric_drop_nodeacl = tcm_qla2xxx_drop_nodeacl,
1782};
1783
1784static struct target_core_fabric_ops tcm_qla2xxx_npiv_ops = {
1785 .get_fabric_name = tcm_qla2xxx_npiv_get_fabric_name,
1786 .get_fabric_proto_ident = tcm_qla2xxx_get_fabric_proto_ident,
1787 .tpg_get_wwn = tcm_qla2xxx_npiv_get_fabric_wwn,
1788 .tpg_get_tag = tcm_qla2xxx_get_tag,
1789 .tpg_get_default_depth = tcm_qla2xxx_get_default_depth,
1790 .tpg_get_pr_transport_id = tcm_qla2xxx_get_pr_transport_id,
1791 .tpg_get_pr_transport_id_len = tcm_qla2xxx_get_pr_transport_id_len,
1792 .tpg_parse_pr_out_transport_id = tcm_qla2xxx_parse_pr_out_transport_id,
1793 .tpg_check_demo_mode = tcm_qla2xxx_check_false,
1794 .tpg_check_demo_mode_cache = tcm_qla2xxx_check_true,
1795 .tpg_check_demo_mode_write_protect = tcm_qla2xxx_check_true,
1796 .tpg_check_prod_mode_write_protect = tcm_qla2xxx_check_false,
1797 .tpg_check_demo_mode_login_only = tcm_qla2xxx_check_true,
1798 .tpg_alloc_fabric_acl = tcm_qla2xxx_alloc_fabric_acl,
1799 .tpg_release_fabric_acl = tcm_qla2xxx_release_fabric_acl,
1800 .tpg_get_inst_index = tcm_qla2xxx_tpg_get_inst_index,
1801 .release_cmd = tcm_qla2xxx_release_cmd,
 1802 .put_session = tcm_qla2xxx_put_session,
1803 .shutdown_session = tcm_qla2xxx_shutdown_session,
1804 .close_session = tcm_qla2xxx_close_session,
1805 .sess_get_index = tcm_qla2xxx_sess_get_index,
1806 .sess_get_initiator_sid = NULL,
1807 .write_pending = tcm_qla2xxx_write_pending,
1808 .write_pending_status = tcm_qla2xxx_write_pending_status,
1809 .set_default_node_attributes = tcm_qla2xxx_set_default_node_attrs,
1810 .get_task_tag = tcm_qla2xxx_get_task_tag,
1811 .get_cmd_state = tcm_qla2xxx_get_cmd_state,
1812 .queue_data_in = tcm_qla2xxx_queue_data_in,
1813 .queue_status = tcm_qla2xxx_queue_status,
1814 .queue_tm_rsp = tcm_qla2xxx_queue_tm_rsp,
1815 .get_fabric_sense_len = tcm_qla2xxx_get_fabric_sense_len,
1816 .set_fabric_sense_len = tcm_qla2xxx_set_fabric_sense_len,
1817 /*
1818 * Setup function pointers for generic logic in
1819 * target_core_fabric_configfs.c
1820 */
1821 .fabric_make_wwn = tcm_qla2xxx_npiv_make_lport,
1822 .fabric_drop_wwn = tcm_qla2xxx_npiv_drop_lport,
1823 .fabric_make_tpg = tcm_qla2xxx_npiv_make_tpg,
1824 .fabric_drop_tpg = tcm_qla2xxx_drop_tpg,
1825 .fabric_post_link = NULL,
1826 .fabric_pre_unlink = NULL,
1827 .fabric_make_np = NULL,
1828 .fabric_drop_np = NULL,
1829 .fabric_make_nodeacl = tcm_qla2xxx_make_nodeacl,
1830 .fabric_drop_nodeacl = tcm_qla2xxx_drop_nodeacl,
1831};
1832
1833static int tcm_qla2xxx_register_configfs(void)
1834{
1835 struct target_fabric_configfs *fabric, *npiv_fabric;
1836 int ret;
1837
1838 pr_debug("TCM QLOGIC QLA2XXX fabric module %s on %s/%s on "
1839 UTS_RELEASE"\n", TCM_QLA2XXX_VERSION, utsname()->sysname,
1840 utsname()->machine);
1841 /*
1842 * Register the top level struct config_item_type with TCM core
1843 */
1844 fabric = target_fabric_configfs_init(THIS_MODULE, "qla2xxx");
1845 if (IS_ERR(fabric)) {
1846 pr_err("target_fabric_configfs_init() failed\n");
1847 return PTR_ERR(fabric);
1848 }
1849 /*
1850 * Setup fabric->tf_ops from our local tcm_qla2xxx_ops
1851 */
1852 fabric->tf_ops = tcm_qla2xxx_ops;
1853 /*
1854 * Setup default attribute lists for various fabric->tf_cit_tmpl
1855 */
1856 TF_CIT_TMPL(fabric)->tfc_wwn_cit.ct_attrs = tcm_qla2xxx_wwn_attrs;
1857 TF_CIT_TMPL(fabric)->tfc_tpg_base_cit.ct_attrs = tcm_qla2xxx_tpg_attrs;
1858 TF_CIT_TMPL(fabric)->tfc_tpg_attrib_cit.ct_attrs =
1859 tcm_qla2xxx_tpg_attrib_attrs;
1860 TF_CIT_TMPL(fabric)->tfc_tpg_param_cit.ct_attrs = NULL;
1861 TF_CIT_TMPL(fabric)->tfc_tpg_np_base_cit.ct_attrs = NULL;
1862 TF_CIT_TMPL(fabric)->tfc_tpg_nacl_base_cit.ct_attrs = NULL;
1863 TF_CIT_TMPL(fabric)->tfc_tpg_nacl_attrib_cit.ct_attrs = NULL;
1864 TF_CIT_TMPL(fabric)->tfc_tpg_nacl_auth_cit.ct_attrs = NULL;
1865 TF_CIT_TMPL(fabric)->tfc_tpg_nacl_param_cit.ct_attrs = NULL;
1866 /*
1867 * Register the fabric for use within TCM
1868 */
1869 ret = target_fabric_configfs_register(fabric);
1870 if (ret < 0) {
1871 pr_err("target_fabric_configfs_register() failed for TCM_QLA2XXX\n");
1872 return ret;
1873 }
1874 /*
1875 * Setup our local pointer to *fabric
1876 */
1877 tcm_qla2xxx_fabric_configfs = fabric;
1878 pr_debug("TCM_QLA2XXX[0] - Set fabric -> tcm_qla2xxx_fabric_configfs\n");
1879
1880 /*
1881 * Register the top level struct config_item_type for NPIV with TCM core
1882 */
1883 npiv_fabric = target_fabric_configfs_init(THIS_MODULE, "qla2xxx_npiv");
1884 if (IS_ERR(npiv_fabric)) {
1885 pr_err("target_fabric_configfs_init() failed\n");
1886 ret = PTR_ERR(npiv_fabric);
1887 goto out_fabric;
1888 }
1889 /*
1890 * Setup fabric->tf_ops from our local tcm_qla2xxx_npiv_ops
1891 */
1892 npiv_fabric->tf_ops = tcm_qla2xxx_npiv_ops;
1893 /*
1894 * Setup default attribute lists for various npiv_fabric->tf_cit_tmpl
1895 */
1896 TF_CIT_TMPL(npiv_fabric)->tfc_wwn_cit.ct_attrs = tcm_qla2xxx_wwn_attrs;
1897 TF_CIT_TMPL(npiv_fabric)->tfc_tpg_base_cit.ct_attrs = NULL;
1898 TF_CIT_TMPL(npiv_fabric)->tfc_tpg_attrib_cit.ct_attrs = NULL;
1899 TF_CIT_TMPL(npiv_fabric)->tfc_tpg_param_cit.ct_attrs = NULL;
1900 TF_CIT_TMPL(npiv_fabric)->tfc_tpg_np_base_cit.ct_attrs = NULL;
1901 TF_CIT_TMPL(npiv_fabric)->tfc_tpg_nacl_base_cit.ct_attrs = NULL;
1902 TF_CIT_TMPL(npiv_fabric)->tfc_tpg_nacl_attrib_cit.ct_attrs = NULL;
1903 TF_CIT_TMPL(npiv_fabric)->tfc_tpg_nacl_auth_cit.ct_attrs = NULL;
1904 TF_CIT_TMPL(npiv_fabric)->tfc_tpg_nacl_param_cit.ct_attrs = NULL;
1905 /*
1906 * Register the npiv_fabric for use within TCM
1907 */
1908 ret = target_fabric_configfs_register(npiv_fabric);
1909 if (ret < 0) {
1910 pr_err("target_fabric_configfs_register() failed for TCM_QLA2XXX\n");
1911 goto out_fabric;
1912 }
1913 /*
1914 * Setup our local pointer to *npiv_fabric
1915 */
1916 tcm_qla2xxx_npiv_fabric_configfs = npiv_fabric;
1917 pr_debug("TCM_QLA2XXX[0] - Set fabric -> tcm_qla2xxx_npiv_fabric_configfs\n");
1918
1919 tcm_qla2xxx_free_wq = alloc_workqueue("tcm_qla2xxx_free",
1920 WQ_MEM_RECLAIM, 0);
1921 if (!tcm_qla2xxx_free_wq) {
1922 ret = -ENOMEM;
1923 goto out_fabric_npiv;
1924 }
1925
1926 tcm_qla2xxx_cmd_wq = alloc_workqueue("tcm_qla2xxx_cmd", 0, 0);
1927 if (!tcm_qla2xxx_cmd_wq) {
1928 ret = -ENOMEM;
1929 goto out_free_wq;
1930 }
1931
1932 return 0;
1933
1934out_free_wq:
1935 destroy_workqueue(tcm_qla2xxx_free_wq);
1936out_fabric_npiv:
1937 target_fabric_configfs_deregister(tcm_qla2xxx_npiv_fabric_configfs);
1938out_fabric:
1939 target_fabric_configfs_deregister(tcm_qla2xxx_fabric_configfs);
1940 return ret;
1941}
1942
1943static void tcm_qla2xxx_deregister_configfs(void)
1944{
1945 destroy_workqueue(tcm_qla2xxx_cmd_wq);
1946 destroy_workqueue(tcm_qla2xxx_free_wq);
1947
1948 target_fabric_configfs_deregister(tcm_qla2xxx_fabric_configfs);
1949 tcm_qla2xxx_fabric_configfs = NULL;
1950 pr_debug("TCM_QLA2XXX[0] - Cleared tcm_qla2xxx_fabric_configfs\n");
1951
1952 target_fabric_configfs_deregister(tcm_qla2xxx_npiv_fabric_configfs);
1953 tcm_qla2xxx_npiv_fabric_configfs = NULL;
1954 pr_debug("TCM_QLA2XXX[0] - Cleared tcm_qla2xxx_npiv_fabric_configfs\n");
1955}
1956
1957static int __init tcm_qla2xxx_init(void)
1958{
1959 int ret;
1960
1961 ret = tcm_qla2xxx_register_configfs();
1962 if (ret < 0)
1963 return ret;
1964
1965 return 0;
1966}
1967
1968static void __exit tcm_qla2xxx_exit(void)
1969{
1970 tcm_qla2xxx_deregister_configfs();
1971}
1972
1973MODULE_DESCRIPTION("TCM QLA2XXX series NPIV enabled fabric driver");
1974MODULE_LICENSE("GPL");
1975module_init(tcm_qla2xxx_init);
1976module_exit(tcm_qla2xxx_exit);