1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3 * ibmvfc.c -- driver for IBM Power Virtual Fibre Channel Adapter
4 *
5 * Written By: Brian King <brking@linux.vnet.ibm.com>, IBM Corporation
6 *
7 * Copyright (C) IBM Corporation, 2008
8 */
9
10 #include <linux/module.h>
11 #include <linux/moduleparam.h>
12 #include <linux/dma-mapping.h>
13 #include <linux/dmapool.h>
14 #include <linux/delay.h>
15 #include <linux/interrupt.h>
16 #include <linux/kthread.h>
17 #include <linux/slab.h>
18 #include <linux/of.h>
19 #include <linux/pm.h>
20 #include <linux/stringify.h>
21 #include <linux/bsg-lib.h>
22 #include <asm/firmware.h>
23 #include <asm/irq.h>
24 #include <asm/vio.h>
25 #include <scsi/scsi.h>
26 #include <scsi/scsi_cmnd.h>
27 #include <scsi/scsi_host.h>
28 #include <scsi/scsi_device.h>
29 #include <scsi/scsi_tcq.h>
30 #include <scsi/scsi_transport_fc.h>
31 #include <scsi/scsi_bsg_fc.h>
32 #include "ibmvfc.h"
33
34 static unsigned int init_timeout = IBMVFC_INIT_TIMEOUT;
35 static unsigned int default_timeout = IBMVFC_DEFAULT_TIMEOUT;
36 static u64 max_lun = IBMVFC_MAX_LUN;
37 static unsigned int max_targets = IBMVFC_MAX_TARGETS;
38 static unsigned int max_requests = IBMVFC_MAX_REQUESTS_DEFAULT;
39 static unsigned int disc_threads = IBMVFC_MAX_DISC_THREADS;
40 static unsigned int ibmvfc_debug = IBMVFC_DEBUG;
41 static unsigned int log_level = IBMVFC_DEFAULT_LOG_LEVEL;
42 static unsigned int cls3_error = IBMVFC_CLS3_ERROR;
43 static LIST_HEAD(ibmvfc_head);
44 static DEFINE_SPINLOCK(ibmvfc_driver_lock);
45 static struct scsi_transport_template *ibmvfc_transport_template;
46
47 MODULE_DESCRIPTION("IBM Virtual Fibre Channel Driver");
48 MODULE_AUTHOR("Brian King <brking@linux.vnet.ibm.com>");
49 MODULE_LICENSE("GPL");
50 MODULE_VERSION(IBMVFC_DRIVER_VERSION);
51
52 module_param_named(init_timeout, init_timeout, uint, S_IRUGO | S_IWUSR);
53 MODULE_PARM_DESC(init_timeout, "Initialization timeout in seconds. "
54 "[Default=" __stringify(IBMVFC_INIT_TIMEOUT) "]");
55 module_param_named(default_timeout, default_timeout, uint, S_IRUGO | S_IWUSR);
56 MODULE_PARM_DESC(default_timeout,
57 "Default timeout in seconds for initialization and EH commands. "
58 "[Default=" __stringify(IBMVFC_DEFAULT_TIMEOUT) "]");
59 module_param_named(max_requests, max_requests, uint, S_IRUGO);
60 MODULE_PARM_DESC(max_requests, "Maximum requests for this adapter. "
61 "[Default=" __stringify(IBMVFC_MAX_REQUESTS_DEFAULT) "]");
62 module_param_named(max_lun, max_lun, ullong, S_IRUGO);
63 MODULE_PARM_DESC(max_lun, "Maximum allowed LUN. "
64 "[Default=" __stringify(IBMVFC_MAX_LUN) "]");
65 module_param_named(max_targets, max_targets, uint, S_IRUGO);
66 MODULE_PARM_DESC(max_targets, "Maximum allowed targets. "
67 "[Default=" __stringify(IBMVFC_MAX_TARGETS) "]");
68 module_param_named(disc_threads, disc_threads, uint, S_IRUGO);
69 MODULE_PARM_DESC(disc_threads, "Number of device discovery threads to use. "
70 "[Default=" __stringify(IBMVFC_MAX_DISC_THREADS) "]");
71 module_param_named(debug, ibmvfc_debug, uint, S_IRUGO | S_IWUSR);
72 MODULE_PARM_DESC(debug, "Enable driver debug information. "
73 "[Default=" __stringify(IBMVFC_DEBUG) "]");
74 module_param_named(log_level, log_level, uint, 0);
75 MODULE_PARM_DESC(log_level, "Set to 0 - 4 for increasing verbosity of device driver. "
76 "[Default=" __stringify(IBMVFC_DEFAULT_LOG_LEVEL) "]");
77 module_param_named(cls3_error, cls3_error, uint, 0);
78 MODULE_PARM_DESC(cls3_error, "Enable FC Class 3 Error Recovery. "
79 "[Default=" __stringify(IBMVFC_CLS3_ERROR) "]");
80
81 static const struct {
82 u16 status;
83 u16 error;
84 u8 result;
85 u8 retry;
86 int log;
87 char *name;
88 } cmd_status [] = {
89 { IBMVFC_FABRIC_MAPPED, IBMVFC_UNABLE_TO_ESTABLISH, DID_ERROR, 1, 1, "unable to establish" },
90 { IBMVFC_FABRIC_MAPPED, IBMVFC_XPORT_FAULT, DID_OK, 1, 0, "transport fault" },
91 { IBMVFC_FABRIC_MAPPED, IBMVFC_CMD_TIMEOUT, DID_TIME_OUT, 1, 1, "command timeout" },
92 { IBMVFC_FABRIC_MAPPED, IBMVFC_ENETDOWN, DID_TRANSPORT_DISRUPTED, 1, 1, "network down" },
93 { IBMVFC_FABRIC_MAPPED, IBMVFC_HW_FAILURE, DID_ERROR, 1, 1, "hardware failure" },
94 { IBMVFC_FABRIC_MAPPED, IBMVFC_LINK_DOWN_ERR, DID_REQUEUE, 0, 0, "link down" },
95 { IBMVFC_FABRIC_MAPPED, IBMVFC_LINK_DEAD_ERR, DID_ERROR, 0, 0, "link dead" },
96 { IBMVFC_FABRIC_MAPPED, IBMVFC_UNABLE_TO_REGISTER, DID_ERROR, 1, 1, "unable to register" },
97 { IBMVFC_FABRIC_MAPPED, IBMVFC_XPORT_BUSY, DID_BUS_BUSY, 1, 0, "transport busy" },
98 { IBMVFC_FABRIC_MAPPED, IBMVFC_XPORT_DEAD, DID_ERROR, 0, 1, "transport dead" },
99 { IBMVFC_FABRIC_MAPPED, IBMVFC_CONFIG_ERROR, DID_ERROR, 1, 1, "configuration error" },
100 { IBMVFC_FABRIC_MAPPED, IBMVFC_NAME_SERVER_FAIL, DID_ERROR, 1, 1, "name server failure" },
101 { IBMVFC_FABRIC_MAPPED, IBMVFC_LINK_HALTED, DID_REQUEUE, 1, 0, "link halted" },
102 { IBMVFC_FABRIC_MAPPED, IBMVFC_XPORT_GENERAL, DID_OK, 1, 0, "general transport error" },
103
104 { IBMVFC_VIOS_FAILURE, IBMVFC_CRQ_FAILURE, DID_REQUEUE, 1, 1, "CRQ failure" },
105 { IBMVFC_VIOS_FAILURE, IBMVFC_SW_FAILURE, DID_ERROR, 0, 1, "software failure" },
106 { IBMVFC_VIOS_FAILURE, IBMVFC_INVALID_PARAMETER, DID_ERROR, 0, 1, "invalid parameter" },
107 { IBMVFC_VIOS_FAILURE, IBMVFC_MISSING_PARAMETER, DID_ERROR, 0, 1, "missing parameter" },
108 { IBMVFC_VIOS_FAILURE, IBMVFC_HOST_IO_BUS, DID_ERROR, 1, 1, "host I/O bus failure" },
109 { IBMVFC_VIOS_FAILURE, IBMVFC_TRANS_CANCELLED, DID_ERROR, 0, 1, "transaction cancelled" },
110 { IBMVFC_VIOS_FAILURE, IBMVFC_TRANS_CANCELLED_IMPLICIT, DID_ERROR, 0, 1, "transaction cancelled implicit" },
111 { IBMVFC_VIOS_FAILURE, IBMVFC_INSUFFICIENT_RESOURCE, DID_REQUEUE, 1, 1, "insufficient resources" },
112 { IBMVFC_VIOS_FAILURE, IBMVFC_PLOGI_REQUIRED, DID_ERROR, 0, 1, "port login required" },
113 { IBMVFC_VIOS_FAILURE, IBMVFC_COMMAND_FAILED, DID_ERROR, 1, 1, "command failed" },
114
115 { IBMVFC_FC_FAILURE, IBMVFC_INVALID_ELS_CMD_CODE, DID_ERROR, 0, 1, "invalid ELS command code" },
116 { IBMVFC_FC_FAILURE, IBMVFC_INVALID_VERSION, DID_ERROR, 0, 1, "invalid version level" },
117 { IBMVFC_FC_FAILURE, IBMVFC_LOGICAL_ERROR, DID_ERROR, 1, 1, "logical error" },
118 { IBMVFC_FC_FAILURE, IBMVFC_INVALID_CT_IU_SIZE, DID_ERROR, 0, 1, "invalid CT_IU size" },
119 { IBMVFC_FC_FAILURE, IBMVFC_LOGICAL_BUSY, DID_REQUEUE, 1, 0, "logical busy" },
120 { IBMVFC_FC_FAILURE, IBMVFC_PROTOCOL_ERROR, DID_ERROR, 1, 1, "protocol error" },
121 { IBMVFC_FC_FAILURE, IBMVFC_UNABLE_TO_PERFORM_REQ, DID_ERROR, 1, 1, "unable to perform request" },
122 { IBMVFC_FC_FAILURE, IBMVFC_CMD_NOT_SUPPORTED, DID_ERROR, 0, 0, "command not supported" },
123 { IBMVFC_FC_FAILURE, IBMVFC_SERVER_NOT_AVAIL, DID_ERROR, 0, 1, "server not available" },
124 { IBMVFC_FC_FAILURE, IBMVFC_CMD_IN_PROGRESS, DID_ERROR, 0, 1, "command already in progress" },
125 { IBMVFC_FC_FAILURE, IBMVFC_VENDOR_SPECIFIC, DID_ERROR, 1, 1, "vendor specific" },
126
127 { IBMVFC_FC_SCSI_ERROR, 0, DID_OK, 1, 0, "SCSI error" },
128 { IBMVFC_FC_SCSI_ERROR, IBMVFC_COMMAND_FAILED, DID_ERROR, 0, 1, "PRLI to device failed." },
129 };
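/*
 * Lookup semantics (see ibmvfc_get_err_index() below): an entry matches a
 * completed command when all of the entry's status bits are set in the
 * reported status (the status field acts as a class mask) and the error
 * value matches exactly.
 */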
130
131 static void ibmvfc_npiv_login(struct ibmvfc_host *);
132 static void ibmvfc_tgt_send_prli(struct ibmvfc_target *);
133 static void ibmvfc_tgt_send_plogi(struct ibmvfc_target *);
134 static void ibmvfc_tgt_query_target(struct ibmvfc_target *);
135 static void ibmvfc_npiv_logout(struct ibmvfc_host *);
136 static void ibmvfc_tgt_implicit_logout_and_del(struct ibmvfc_target *);
137 static void ibmvfc_tgt_move_login(struct ibmvfc_target *);
138
139 static const char *unknown_error = "unknown error";
140
141 static int ibmvfc_check_caps(struct ibmvfc_host *vhost, unsigned long cap_flags)
142 {
143 u64 host_caps = be64_to_cpu(vhost->login_buf->resp.capabilities);
144
145 return (host_caps & cap_flags) ? 1 : 0;
146 }
147
148 static struct ibmvfc_fcp_cmd_iu *ibmvfc_get_fcp_iu(struct ibmvfc_host *vhost,
149 struct ibmvfc_cmd *vfc_cmd)
150 {
151 if (ibmvfc_check_caps(vhost, IBMVFC_HANDLE_VF_WWPN))
152 return &vfc_cmd->v2.iu;
153 else
154 return &vfc_cmd->v1.iu;
155 }
156
157 static struct ibmvfc_fcp_rsp *ibmvfc_get_fcp_rsp(struct ibmvfc_host *vhost,
158 struct ibmvfc_cmd *vfc_cmd)
159 {
160 if (ibmvfc_check_caps(vhost, IBMVFC_HANDLE_VF_WWPN))
161 return &vfc_cmd->v2.rsp;
162 else
163 return &vfc_cmd->v1.rsp;
164 }
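/*
 * The v2 command layout is used when the VIOS advertises the
 * IBMVFC_HANDLE_VF_WWPN capability in the NPIV login response; otherwise
 * the original v1 layout applies. These two accessors hide that split
 * from the rest of the driver.
 */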
165
166 #ifdef CONFIG_SCSI_IBMVFC_TRACE
167 /**
168 * ibmvfc_trc_start - Log a start trace entry
169 * @evt: ibmvfc event struct
170 *
171 **/
172 static void ibmvfc_trc_start(struct ibmvfc_event *evt)
173 {
174 struct ibmvfc_host *vhost = evt->vhost;
175 struct ibmvfc_cmd *vfc_cmd = &evt->iu.cmd;
176 struct ibmvfc_mad_common *mad = &evt->iu.mad_common;
177 struct ibmvfc_fcp_cmd_iu *iu = ibmvfc_get_fcp_iu(vhost, vfc_cmd);
178 struct ibmvfc_trace_entry *entry;
179 int index = atomic_inc_return(&vhost->trace_index) & IBMVFC_TRACE_INDEX_MASK;
180
181 entry = &vhost->trace[index];
182 entry->evt = evt;
183 entry->time = jiffies;
184 entry->fmt = evt->crq.format;
185 entry->type = IBMVFC_TRC_START;
186
187 switch (entry->fmt) {
188 case IBMVFC_CMD_FORMAT:
189 entry->op_code = iu->cdb[0];
190 entry->scsi_id = be64_to_cpu(vfc_cmd->tgt_scsi_id);
191 entry->lun = scsilun_to_int(&iu->lun);
192 entry->tmf_flags = iu->tmf_flags;
193 entry->u.start.xfer_len = be32_to_cpu(iu->xfer_len);
194 break;
195 case IBMVFC_MAD_FORMAT:
196 entry->op_code = be32_to_cpu(mad->opcode);
197 break;
198 default:
199 break;
200 }
201 }
202
203 /**
204 * ibmvfc_trc_end - Log an end trace entry
205 * @evt: ibmvfc event struct
206 *
207 **/
208 static void ibmvfc_trc_end(struct ibmvfc_event *evt)
209 {
210 struct ibmvfc_host *vhost = evt->vhost;
211 struct ibmvfc_cmd *vfc_cmd = &evt->xfer_iu->cmd;
212 struct ibmvfc_mad_common *mad = &evt->xfer_iu->mad_common;
213 struct ibmvfc_fcp_cmd_iu *iu = ibmvfc_get_fcp_iu(vhost, vfc_cmd);
214 struct ibmvfc_fcp_rsp *rsp = ibmvfc_get_fcp_rsp(vhost, vfc_cmd);
215 struct ibmvfc_trace_entry *entry;
216 int index = atomic_inc_return(&vhost->trace_index) & IBMVFC_TRACE_INDEX_MASK;
217
218 entry = &vhost->trace[index];
219 entry->evt = evt;
220 entry->time = jiffies;
221 entry->fmt = evt->crq.format;
222 entry->type = IBMVFC_TRC_END;
223
224 switch (entry->fmt) {
225 case IBMVFC_CMD_FORMAT:
226 entry->op_code = iu->cdb[0];
227 entry->scsi_id = be64_to_cpu(vfc_cmd->tgt_scsi_id);
228 entry->lun = scsilun_to_int(&iu->lun);
229 entry->tmf_flags = iu->tmf_flags;
230 entry->u.end.status = be16_to_cpu(vfc_cmd->status);
231 entry->u.end.error = be16_to_cpu(vfc_cmd->error);
232 entry->u.end.fcp_rsp_flags = rsp->flags;
233 entry->u.end.rsp_code = rsp->data.info.rsp_code;
234 entry->u.end.scsi_status = rsp->scsi_status;
235 break;
236 case IBMVFC_MAD_FORMAT:
237 entry->op_code = be32_to_cpu(mad->opcode);
238 entry->u.end.status = be16_to_cpu(mad->status);
239 break;
240 default:
241 break;
243 }
244 }
245
246 #else
247 #define ibmvfc_trc_start(evt) do { } while (0)
248 #define ibmvfc_trc_end(evt) do { } while (0)
249 #endif
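/*
 * With CONFIG_SCSI_IBMVFC_TRACE unset, the trace hooks compile away to
 * no-ops, so call sites never need to be guarded. When enabled, entries
 * land in a circular buffer indexed by trace_index masked with
 * IBMVFC_TRACE_INDEX_MASK.
 */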
250
251 /**
252 * ibmvfc_get_err_index - Find the index into cmd_status for the fcp response
253 * @status: status / error class
254 * @error: error
255 *
256 * Return value:
257 * index into cmd_status / -EINVAL on failure
258 **/
259 static int ibmvfc_get_err_index(u16 status, u16 error)
260 {
261 int i;
262
263 for (i = 0; i < ARRAY_SIZE(cmd_status); i++)
264 if ((cmd_status[i].status & status) == cmd_status[i].status &&
265 cmd_status[i].error == error)
266 return i;
267
268 return -EINVAL;
269 }
270
271 /**
272 * ibmvfc_get_cmd_error - Find the error description for the fcp response
273 * @status: status / error class
274 * @error: error
275 *
276 * Return value:
277 * error description string
278 **/
279 static const char *ibmvfc_get_cmd_error(u16 status, u16 error)
280 {
281 int rc = ibmvfc_get_err_index(status, error);
282 if (rc >= 0)
283 return cmd_status[rc].name;
284 return unknown_error;
285 }
286
287 /**
288 * ibmvfc_get_err_result - Find the scsi status to return for the fcp response
289 * @vhost: ibmvfc host struct
 * @vfc_cmd: ibmvfc command struct
290 *
291 * Return value:
292 * SCSI result value to return for completed command
293 **/
294 static int ibmvfc_get_err_result(struct ibmvfc_host *vhost, struct ibmvfc_cmd *vfc_cmd)
295 {
296 int err;
297 struct ibmvfc_fcp_rsp *rsp = ibmvfc_get_fcp_rsp(vhost, vfc_cmd);
298 int fc_rsp_len = be32_to_cpu(rsp->fcp_rsp_len);
299
300 if ((rsp->flags & FCP_RSP_LEN_VALID) &&
301 ((fc_rsp_len && fc_rsp_len != 4 && fc_rsp_len != 8) ||
302 rsp->data.info.rsp_code))
303 return DID_ERROR << 16;
304
305 err = ibmvfc_get_err_index(be16_to_cpu(vfc_cmd->status), be16_to_cpu(vfc_cmd->error));
306 if (err >= 0)
307 return rsp->scsi_status | (cmd_status[err].result << 16);
308 return rsp->scsi_status | (DID_ERROR << 16);
309 }
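/*
 * The returned value packs the host byte (DID_*) into bits 16-23 and keeps
 * the target's SCSI status byte in the low byte, matching the result
 * format the SCSI midlayer expects in scsi_cmnd->result.
 */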
310
311 /**
312 * ibmvfc_retry_cmd - Determine if error status is retryable
313 * @status: status / error class
314 * @error: error
315 *
316 * Return value:
317 * 1 if error should be retried / 0 if it should not
318 **/
319 static int ibmvfc_retry_cmd(u16 status, u16 error)
320 {
321 int rc = ibmvfc_get_err_index(status, error);
322
323 if (rc >= 0)
324 return cmd_status[rc].retry;
325 return 1;
326 }
327
328 static const char *unknown_fc_explain = "unknown fc explain";
329
330 static const struct {
331 u16 fc_explain;
332 char *name;
333 } ls_explain [] = {
334 { 0x00, "no additional explanation" },
335 { 0x01, "service parameter error - options" },
336 { 0x03, "service parameter error - initiator control" },
337 { 0x05, "service parameter error - recipient control" },
338 { 0x07, "service parameter error - received data field size" },
339 { 0x09, "service parameter error - concurrent seq" },
340 { 0x0B, "service parameter error - credit" },
341 { 0x0D, "invalid N_Port/F_Port_Name" },
342 { 0x0E, "invalid node/Fabric Name" },
343 { 0x0F, "invalid common service parameters" },
344 { 0x11, "invalid association header" },
345 { 0x13, "association header required" },
346 { 0x15, "invalid originator S_ID" },
347 { 0x17, "invalid OX_ID-RX-ID combination" },
348 { 0x19, "command (request) already in progress" },
349 { 0x1E, "N_Port Login requested" },
350 { 0x1F, "Invalid N_Port_ID" },
351 };
352
353 static const struct {
354 u16 fc_explain;
355 char *name;
356 } gs_explain [] = {
357 { 0x00, "no additional explanation" },
358 { 0x01, "port identifier not registered" },
359 { 0x02, "port name not registered" },
360 { 0x03, "node name not registered" },
361 { 0x04, "class of service not registered" },
362 { 0x06, "initial process associator not registered" },
363 { 0x07, "FC-4 TYPEs not registered" },
364 { 0x08, "symbolic port name not registered" },
365 { 0x09, "symbolic node name not registered" },
366 { 0x0A, "port type not registered" },
367 { 0xF0, "authorization exception" },
368 { 0xF1, "authentication exception" },
369 { 0xF2, "data base full" },
370 { 0xF3, "data base empty" },
371 { 0xF4, "processing request" },
372 { 0xF5, "unable to verify connection" },
373 { 0xF6, "devices not in a common zone" },
374 };
375
376 /**
377 * ibmvfc_get_ls_explain - Return the FC Explain description text
378 * @status: FC Explain status
379 *
380 * Returns:
381 * error string
382 **/
383 static const char *ibmvfc_get_ls_explain(u16 status)
384 {
385 int i;
386
387 for (i = 0; i < ARRAY_SIZE(ls_explain); i++)
388 if (ls_explain[i].fc_explain == status)
389 return ls_explain[i].name;
390
391 return unknown_fc_explain;
392 }
393
394 /**
395 * ibmvfc_get_gs_explain - Return the FC Explain description text
396 * @status: FC Explain status
397 *
398 * Returns:
399 * error string
400 **/
401 static const char *ibmvfc_get_gs_explain(u16 status)
402 {
403 int i;
404
405 for (i = 0; i < ARRAY_SIZE(gs_explain); i++)
406 if (gs_explain[i].fc_explain == status)
407 return gs_explain[i].name;
408
409 return unknown_fc_explain;
410 }
411
412 static const struct {
413 enum ibmvfc_fc_type fc_type;
414 char *name;
415 } fc_type [] = {
416 { IBMVFC_FABRIC_REJECT, "fabric reject" },
417 { IBMVFC_PORT_REJECT, "port reject" },
418 { IBMVFC_LS_REJECT, "ELS reject" },
419 { IBMVFC_FABRIC_BUSY, "fabric busy" },
420 { IBMVFC_PORT_BUSY, "port busy" },
421 { IBMVFC_BASIC_REJECT, "basic reject" },
422 };
423
424 static const char *unknown_fc_type = "unknown fc type";
425
426 /**
427 * ibmvfc_get_fc_type - Return the FC Type description text
428 * @status: FC Type error status
429 *
430 * Returns:
431 * error string
432 **/
433 static const char *ibmvfc_get_fc_type(u16 status)
434 {
435 int i;
436
437 for (i = 0; i < ARRAY_SIZE(fc_type); i++)
438 if (fc_type[i].fc_type == status)
439 return fc_type[i].name;
440
441 return unknown_fc_type;
442 }
443
444 /**
445 * ibmvfc_set_tgt_action - Set the next init action for the target
446 * @tgt: ibmvfc target struct
447 * @action: action to perform
448 *
449 * Returns:
450 * 0 if action changed / non-zero if not changed
451 **/
452 static int ibmvfc_set_tgt_action(struct ibmvfc_target *tgt,
453 enum ibmvfc_target_action action)
454 {
455 int rc = -EINVAL;
456
457 switch (tgt->action) {
458 case IBMVFC_TGT_ACTION_LOGOUT_RPORT:
459 if (action == IBMVFC_TGT_ACTION_LOGOUT_RPORT_WAIT ||
460 action == IBMVFC_TGT_ACTION_DEL_RPORT) {
461 tgt->action = action;
462 rc = 0;
463 }
464 break;
465 case IBMVFC_TGT_ACTION_LOGOUT_RPORT_WAIT:
466 if (action == IBMVFC_TGT_ACTION_DEL_RPORT ||
467 action == IBMVFC_TGT_ACTION_DEL_AND_LOGOUT_RPORT) {
468 tgt->action = action;
469 rc = 0;
470 }
471 break;
472 case IBMVFC_TGT_ACTION_LOGOUT_DELETED_RPORT:
473 if (action == IBMVFC_TGT_ACTION_LOGOUT_RPORT) {
474 tgt->action = action;
475 rc = 0;
476 }
477 break;
478 case IBMVFC_TGT_ACTION_DEL_AND_LOGOUT_RPORT:
479 if (action == IBMVFC_TGT_ACTION_LOGOUT_DELETED_RPORT) {
480 tgt->action = action;
481 rc = 0;
482 }
483 break;
484 case IBMVFC_TGT_ACTION_DEL_RPORT:
485 if (action == IBMVFC_TGT_ACTION_DELETED_RPORT) {
486 tgt->action = action;
487 rc = 0;
488 }
489 break;
490 case IBMVFC_TGT_ACTION_DELETED_RPORT:
491 break;
492 default:
493 tgt->action = action;
494 rc = 0;
495 break;
496 }
497
498 if (action >= IBMVFC_TGT_ACTION_LOGOUT_RPORT)
499 tgt->add_rport = 0;
500
501 return rc;
502 }
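/*
 * The switch above encodes the legal rport teardown transitions:
 *   LOGOUT_RPORT -> LOGOUT_RPORT_WAIT -> DEL_RPORT (or
 *   DEL_AND_LOGOUT_RPORT -> LOGOUT_DELETED_RPORT -> LOGOUT_RPORT)
 *   -> DELETED_RPORT.
 * DELETED_RPORT is terminal, and any transition at or past LOGOUT_RPORT
 * also clears add_rport so a dying target is never re-added.
 */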
503
504 /**
505 * ibmvfc_set_host_state - Set the state for the host
506 * @vhost: ibmvfc host struct
507 * @state: state to set host to
508 *
509 * Returns:
510 * 0 if state changed / non-zero if not changed
511 **/
512 static int ibmvfc_set_host_state(struct ibmvfc_host *vhost,
513 enum ibmvfc_host_state state)
514 {
515 int rc = 0;
516
517 switch (vhost->state) {
518 case IBMVFC_HOST_OFFLINE:
519 rc = -EINVAL;
520 break;
521 default:
522 vhost->state = state;
523 break;
524 }
525
526 return rc;
527 }
528
529 /**
530 * ibmvfc_set_host_action - Set the next init action for the host
531 * @vhost: ibmvfc host struct
532 * @action: action to perform
533 *
534 **/
535 static void ibmvfc_set_host_action(struct ibmvfc_host *vhost,
536 enum ibmvfc_host_action action)
537 {
538 switch (action) {
539 case IBMVFC_HOST_ACTION_ALLOC_TGTS:
540 if (vhost->action == IBMVFC_HOST_ACTION_INIT_WAIT)
541 vhost->action = action;
542 break;
543 case IBMVFC_HOST_ACTION_LOGO_WAIT:
544 if (vhost->action == IBMVFC_HOST_ACTION_LOGO)
545 vhost->action = action;
546 break;
547 case IBMVFC_HOST_ACTION_INIT_WAIT:
548 if (vhost->action == IBMVFC_HOST_ACTION_INIT)
549 vhost->action = action;
550 break;
551 case IBMVFC_HOST_ACTION_QUERY:
552 switch (vhost->action) {
553 case IBMVFC_HOST_ACTION_INIT_WAIT:
554 case IBMVFC_HOST_ACTION_NONE:
555 case IBMVFC_HOST_ACTION_TGT_DEL_FAILED:
556 vhost->action = action;
557 break;
558 default:
559 break;
560 }
561 break;
562 case IBMVFC_HOST_ACTION_TGT_INIT:
563 if (vhost->action == IBMVFC_HOST_ACTION_ALLOC_TGTS)
564 vhost->action = action;
565 break;
566 case IBMVFC_HOST_ACTION_INIT:
567 case IBMVFC_HOST_ACTION_TGT_DEL:
568 switch (vhost->action) {
569 case IBMVFC_HOST_ACTION_RESET:
570 case IBMVFC_HOST_ACTION_REENABLE:
571 break;
572 default:
573 vhost->action = action;
574 break;
575 }
576 break;
577 case IBMVFC_HOST_ACTION_LOGO:
578 case IBMVFC_HOST_ACTION_QUERY_TGTS:
579 case IBMVFC_HOST_ACTION_TGT_DEL_FAILED:
580 case IBMVFC_HOST_ACTION_NONE:
581 case IBMVFC_HOST_ACTION_RESET:
582 case IBMVFC_HOST_ACTION_REENABLE:
583 default:
584 vhost->action = action;
585 break;
586 }
587 }
588
589 /**
590 * ibmvfc_reinit_host - Re-start host initialization (no NPIV Login)
591 * @vhost: ibmvfc host struct
592 *
593 * Return value:
594 * nothing
595 **/
596 static void ibmvfc_reinit_host(struct ibmvfc_host *vhost)
597 {
598 if (vhost->action == IBMVFC_HOST_ACTION_NONE &&
599 vhost->state == IBMVFC_ACTIVE) {
600 if (!ibmvfc_set_host_state(vhost, IBMVFC_INITIALIZING)) {
601 scsi_block_requests(vhost->host);
602 ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_QUERY);
603 }
604 } else
605 vhost->reinit = 1;
606
607 wake_up(&vhost->work_wait_q);
608 }
609
610 /**
611 * ibmvfc_del_tgt - Schedule cleanup and removal of the target
612 * @tgt: ibmvfc target struct
614 *
615 **/
616 static void ibmvfc_del_tgt(struct ibmvfc_target *tgt)
617 {
618 if (!ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_LOGOUT_RPORT))
619 tgt->job_step = ibmvfc_tgt_implicit_logout_and_del;
620 wake_up(&tgt->vhost->work_wait_q);
621 }
622
623 /**
624 * ibmvfc_link_down - Handle a link down event from the adapter
625 * @vhost: ibmvfc host struct
626 * @state: ibmvfc host state to enter
627 *
628 **/
629 static void ibmvfc_link_down(struct ibmvfc_host *vhost,
630 enum ibmvfc_host_state state)
631 {
632 struct ibmvfc_target *tgt;
633
634 ENTER;
635 scsi_block_requests(vhost->host);
636 list_for_each_entry(tgt, &vhost->targets, queue)
637 ibmvfc_del_tgt(tgt);
638 ibmvfc_set_host_state(vhost, state);
639 ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_TGT_DEL);
640 vhost->events_to_log |= IBMVFC_AE_LINKDOWN;
641 wake_up(&vhost->work_wait_q);
642 LEAVE;
643 }
644
645 /**
646 * ibmvfc_init_host - Start host initialization
647 * @vhost: ibmvfc host struct
648 *
649 * Return value:
650 * nothing
651 **/
652 static void ibmvfc_init_host(struct ibmvfc_host *vhost)
653 {
654 struct ibmvfc_target *tgt;
655
656 if (vhost->action == IBMVFC_HOST_ACTION_INIT_WAIT) {
657 if (++vhost->init_retries > IBMVFC_MAX_HOST_INIT_RETRIES) {
658 dev_err(vhost->dev,
659 "Host initialization retries exceeded. Taking adapter offline\n");
660 ibmvfc_link_down(vhost, IBMVFC_HOST_OFFLINE);
661 return;
662 }
663 }
664
665 if (!ibmvfc_set_host_state(vhost, IBMVFC_INITIALIZING)) {
666 memset(vhost->async_crq.msgs.async, 0, PAGE_SIZE);
667 vhost->async_crq.cur = 0;
668
669 list_for_each_entry(tgt, &vhost->targets, queue)
670 ibmvfc_del_tgt(tgt);
671 scsi_block_requests(vhost->host);
672 ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_INIT);
673 vhost->job_step = ibmvfc_npiv_login;
674 wake_up(&vhost->work_wait_q);
675 }
676 }
677
678 /**
679 * ibmvfc_send_crq - Send a CRQ
680 * @vhost: ibmvfc host struct
681 * @word1: the first 64 bits of the data
682 * @word2: the second 64 bits of the data
683 *
684 * Return value:
685 * 0 on success / other on failure
686 **/
687 static int ibmvfc_send_crq(struct ibmvfc_host *vhost, u64 word1, u64 word2)
688 {
689 struct vio_dev *vdev = to_vio_dev(vhost->dev);
690 return plpar_hcall_norets(H_SEND_CRQ, vdev->unit_address, word1, word2);
691 }
692
693 /**
694 * ibmvfc_send_crq_init - Send a CRQ init message
695 * @vhost: ibmvfc host struct
696 *
697 * Return value:
698 * 0 on success / other on failure
699 **/
700 static int ibmvfc_send_crq_init(struct ibmvfc_host *vhost)
701 {
702 ibmvfc_dbg(vhost, "Sending CRQ init\n");
703 return ibmvfc_send_crq(vhost, 0xC001000000000000LL, 0);
704 }
705
706 /**
707 * ibmvfc_send_crq_init_complete - Send a CRQ init complete message
708 * @vhost: ibmvfc host struct
709 *
710 * Return value:
711 * 0 on success / other on failure
712 **/
713 static int ibmvfc_send_crq_init_complete(struct ibmvfc_host *vhost)
714 {
715 ibmvfc_dbg(vhost, "Sending CRQ init complete\n");
716 return ibmvfc_send_crq(vhost, 0xC002000000000000LL, 0);
717 }
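/*
 * Per the PAPR CRQ convention, a leading byte of 0xC0 marks an
 * initialization message; the second byte distinguishes an init request
 * (0x01) from an init complete response (0x02).
 */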
718
719 /**
720 * ibmvfc_init_event_pool - Allocates and initializes the event pool for a host
721 * @vhost: ibmvfc host who owns the event pool
 * @queue: ibmvfc queue struct
722 *
723 * Returns zero on success.
724 **/
725 static int ibmvfc_init_event_pool(struct ibmvfc_host *vhost,
726 struct ibmvfc_queue *queue)
727 {
728 int i;
729 struct ibmvfc_event_pool *pool = &queue->evt_pool;
730
731 ENTER;
732 pool->size = max_requests + IBMVFC_NUM_INTERNAL_REQ;
733 pool->events = kcalloc(pool->size, sizeof(*pool->events), GFP_KERNEL);
734 if (!pool->events)
735 return -ENOMEM;
736
737 pool->iu_storage = dma_alloc_coherent(vhost->dev,
738 pool->size * sizeof(*pool->iu_storage),
739 &pool->iu_token, 0);
740
741 if (!pool->iu_storage) {
742 kfree(pool->events);
743 return -ENOMEM;
744 }
745
746 INIT_LIST_HEAD(&queue->sent);
747 INIT_LIST_HEAD(&queue->free);
748 spin_lock_init(&queue->l_lock);
749
750 for (i = 0; i < pool->size; ++i) {
751 struct ibmvfc_event *evt = &pool->events[i];
752
753 atomic_set(&evt->free, 1);
754 evt->crq.valid = 0x80;
755 evt->crq.ioba = cpu_to_be64(pool->iu_token + (sizeof(*evt->xfer_iu) * i));
756 evt->xfer_iu = pool->iu_storage + i;
757 evt->vhost = vhost;
758 evt->queue = queue;
759 evt->ext_list = NULL;
760 list_add_tail(&evt->queue_list, &queue->free);
761 }
762
763 LEAVE;
764 return 0;
765 }
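/*
 * The pool is sized for max_requests plus IBMVFC_NUM_INTERNAL_REQ, so
 * driver-internal requests cannot be starved by the SCSI fast path, and
 * each event's CRQ ioba is precomputed to point at its slot in the
 * coherent iu_storage array.
 */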
766
767 /**
768 * ibmvfc_free_event_pool - Frees memory of the event pool of a host
769 * @vhost: ibmvfc host who owns the event pool
 * @queue: ibmvfc queue struct
770 *
771 **/
772 static void ibmvfc_free_event_pool(struct ibmvfc_host *vhost,
773 struct ibmvfc_queue *queue)
774 {
775 int i;
776 struct ibmvfc_event_pool *pool = &queue->evt_pool;
777
778 ENTER;
779 for (i = 0; i < pool->size; ++i) {
780 list_del(&pool->events[i].queue_list);
781 BUG_ON(atomic_read(&pool->events[i].free) != 1);
782 if (pool->events[i].ext_list)
783 dma_pool_free(vhost->sg_pool,
784 pool->events[i].ext_list,
785 pool->events[i].ext_list_token);
786 }
787
788 kfree(pool->events);
789 dma_free_coherent(vhost->dev,
790 pool->size * sizeof(*pool->iu_storage),
791 pool->iu_storage, pool->iu_token);
792 LEAVE;
793 }
794
795 /**
796 * ibmvfc_free_queue - Deallocate queue
797 * @vhost: ibmvfc host struct
798 * @queue: ibmvfc queue struct
799 *
800 * Unmaps dma and deallocates page for messages
801 **/
802 static void ibmvfc_free_queue(struct ibmvfc_host *vhost,
803 struct ibmvfc_queue *queue)
804 {
805 struct device *dev = vhost->dev;
806
807 dma_unmap_single(dev, queue->msg_token, PAGE_SIZE, DMA_BIDIRECTIONAL);
808 free_page((unsigned long)queue->msgs.handle);
809 queue->msgs.handle = NULL;
810 }
811
812 /**
813 * ibmvfc_release_crq_queue - Deallocates data and unregisters CRQ
814 * @vhost: ibmvfc host struct
815 *
816 * Frees irq, deallocates a page for messages, unmaps dma, and unregisters
817 * the crq with the hypervisor.
818 **/
819 static void ibmvfc_release_crq_queue(struct ibmvfc_host *vhost)
820 {
821 long rc = 0;
822 struct vio_dev *vdev = to_vio_dev(vhost->dev);
823 struct ibmvfc_queue *crq = &vhost->crq;
824
825 ibmvfc_dbg(vhost, "Releasing CRQ\n");
826 free_irq(vdev->irq, vhost);
827 tasklet_kill(&vhost->tasklet);
828 do {
829 if (rc)
830 msleep(100);
831 rc = plpar_hcall_norets(H_FREE_CRQ, vdev->unit_address);
832 } while (rc == H_BUSY || H_IS_LONG_BUSY(rc));
833
834 vhost->state = IBMVFC_NO_CRQ;
835 vhost->logged_in = 0;
836
837 ibmvfc_free_queue(vhost, crq);
838 }
839
840 /**
841 * ibmvfc_reenable_crq_queue - reenables the CRQ
842 * @vhost: ibmvfc host struct
843 *
844 * Return value:
845 * 0 on success / other on failure
846 **/
847 static int ibmvfc_reenable_crq_queue(struct ibmvfc_host *vhost)
848 {
849 int rc = 0;
850 struct vio_dev *vdev = to_vio_dev(vhost->dev);
851
852 /* Re-enable the CRQ */
853 do {
854 if (rc)
855 msleep(100);
856 rc = plpar_hcall_norets(H_ENABLE_CRQ, vdev->unit_address);
857 } while (rc == H_IN_PROGRESS || rc == H_BUSY || H_IS_LONG_BUSY(rc));
858
859 if (rc)
860 dev_err(vhost->dev, "Error enabling adapter (rc=%d)\n", rc);
861
862 return rc;
863 }
864
865 /**
866 * ibmvfc_reset_crq - resets a crq after a failure
867 * @vhost: ibmvfc host struct
868 *
869 * Return value:
870 * 0 on success / other on failure
871 **/
872 static int ibmvfc_reset_crq(struct ibmvfc_host *vhost)
873 {
874 int rc = 0;
875 unsigned long flags;
876 struct vio_dev *vdev = to_vio_dev(vhost->dev);
877 struct ibmvfc_queue *crq = &vhost->crq;
878
879 /* Close the CRQ */
880 do {
881 if (rc)
882 msleep(100);
883 rc = plpar_hcall_norets(H_FREE_CRQ, vdev->unit_address);
884 } while (rc == H_BUSY || H_IS_LONG_BUSY(rc));
885
886 spin_lock_irqsave(vhost->host->host_lock, flags);
887 spin_lock(vhost->crq.q_lock);
888 vhost->state = IBMVFC_NO_CRQ;
889 vhost->logged_in = 0;
890
891 /* Clean out the queue */
892 memset(crq->msgs.crq, 0, PAGE_SIZE);
893 crq->cur = 0;
894
895 /* And re-open it again */
896 rc = plpar_hcall_norets(H_REG_CRQ, vdev->unit_address,
897 crq->msg_token, PAGE_SIZE);
898
899 if (rc == H_CLOSED)
900 /* Adapter is good, but other end is not ready */
901 dev_warn(vhost->dev, "Partner adapter not ready\n");
902 else if (rc != 0)
903 dev_warn(vhost->dev, "Couldn't register crq (rc=%d)\n", rc);
904 spin_unlock(vhost->crq.q_lock);
905 spin_unlock_irqrestore(vhost->host->host_lock, flags);
906
907 return rc;
908 }
909
910 /**
911 * ibmvfc_valid_event - Determines if event is valid.
912 * @pool: event_pool that contains the event
913 * @evt: ibmvfc event to be checked for validity
914 *
915 * Return value:
916 * 1 if event is valid / 0 if event is not valid
917 **/
918 static int ibmvfc_valid_event(struct ibmvfc_event_pool *pool,
919 struct ibmvfc_event *evt)
920 {
921 int index = evt - pool->events;
922 if (index < 0 || index >= pool->size) /* outside of bounds */
923 return 0;
924 if (evt != pool->events + index) /* unaligned */
925 return 0;
926 return 1;
927 }
928
929 /**
930 * ibmvfc_free_event - Free the specified event
931 * @evt: ibmvfc_event to be freed
932 *
933 **/
934 static void ibmvfc_free_event(struct ibmvfc_event *evt)
935 {
936 struct ibmvfc_event_pool *pool = &evt->queue->evt_pool;
937 unsigned long flags;
938
939 BUG_ON(!ibmvfc_valid_event(pool, evt));
940 BUG_ON(atomic_inc_return(&evt->free) != 1);
941
942 spin_lock_irqsave(&evt->queue->l_lock, flags);
943 list_add_tail(&evt->queue_list, &evt->queue->free);
944 if (evt->eh_comp)
945 complete(evt->eh_comp);
946 spin_unlock_irqrestore(&evt->queue->l_lock, flags);
947 }
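/*
 * The atomic 'free' flag is a cheap double-free detector:
 * ibmvfc_get_event() sets it to 0 on allocation, and the BUG_ON above
 * fires if the increment here does not take it from 0 back to exactly 1.
 */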
948
949 /**
950 * ibmvfc_scsi_eh_done - EH done function for queuecommand commands
951 * @evt: ibmvfc event struct
952 *
953 * This function does not set up any error status; that must be done
954 * before this function gets called.
955 **/
956 static void ibmvfc_scsi_eh_done(struct ibmvfc_event *evt)
957 {
958 struct scsi_cmnd *cmnd = evt->cmnd;
959
960 if (cmnd) {
961 scsi_dma_unmap(cmnd);
962 cmnd->scsi_done(cmnd);
963 }
964
965 ibmvfc_free_event(evt);
966 }
967
968 /**
969 * ibmvfc_complete_purge - Complete failed command list
970 * @purge_list: list head of failed commands
971 *
972 * This function runs completions on commands to fail as a result of a
973 * host reset or platform migration.
974 **/
975 static void ibmvfc_complete_purge(struct list_head *purge_list)
976 {
977 struct ibmvfc_event *evt, *pos;
978
979 list_for_each_entry_safe(evt, pos, purge_list, queue_list) {
980 list_del(&evt->queue_list);
981 ibmvfc_trc_end(evt);
982 evt->done(evt);
983 }
984 }
985
986 /**
987 * ibmvfc_fail_request - Fail request with specified error code
988 * @evt: ibmvfc event struct
989 * @error_code: error code to fail request with
990 *
991 * Return value:
992 * none
993 **/
994 static void ibmvfc_fail_request(struct ibmvfc_event *evt, int error_code)
995 {
996 if (evt->cmnd) {
997 evt->cmnd->result = (error_code << 16);
998 evt->done = ibmvfc_scsi_eh_done;
999 } else
1000 evt->xfer_iu->mad_common.status = cpu_to_be16(IBMVFC_MAD_DRIVER_FAILED);
1001
1002 del_timer(&evt->timer);
1003 }
1004
1005 /**
1006 * ibmvfc_purge_requests - Our virtual adapter just shut down. Purge any sent requests
1007 * @vhost: ibmvfc host struct
1008 * @error_code: error code to fail requests with
1009 *
1010 * Return value:
1011 * none
1012 **/
1013 static void ibmvfc_purge_requests(struct ibmvfc_host *vhost, int error_code)
1014 {
1015 struct ibmvfc_event *evt, *pos;
1016 unsigned long flags;
1017
1018 ibmvfc_dbg(vhost, "Purging all requests\n");
1019 spin_lock_irqsave(&vhost->crq.l_lock, flags);
1020 list_for_each_entry_safe(evt, pos, &vhost->crq.sent, queue_list)
1021 ibmvfc_fail_request(evt, error_code);
1022 list_splice_init(&vhost->crq.sent, &vhost->purge);
1023 spin_unlock_irqrestore(&vhost->crq.l_lock, flags);
1024 }
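/*
 * The failed events are only marked here; they are spliced onto
 * vhost->purge and actually completed later via ibmvfc_complete_purge(),
 * once it is safe to run their done callbacks.
 */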
1025
1026 /**
1027 * ibmvfc_hard_reset_host - Reset the connection to the server by breaking the CRQ
1028 * @vhost: struct ibmvfc host to reset
1029 **/
1030 static void ibmvfc_hard_reset_host(struct ibmvfc_host *vhost)
1031 {
1032 ibmvfc_purge_requests(vhost, DID_ERROR);
1033 ibmvfc_link_down(vhost, IBMVFC_LINK_DOWN);
1034 ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_RESET);
1035 }
1036
1037 /**
1038 * __ibmvfc_reset_host - Reset the connection to the server (no locking)
1039 * @vhost: struct ibmvfc host to reset
1040 **/
1041 static void __ibmvfc_reset_host(struct ibmvfc_host *vhost)
1042 {
1043 if (vhost->logged_in && vhost->action != IBMVFC_HOST_ACTION_LOGO_WAIT &&
1044 !ibmvfc_set_host_state(vhost, IBMVFC_INITIALIZING)) {
1045 scsi_block_requests(vhost->host);
1046 ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_LOGO);
1047 vhost->job_step = ibmvfc_npiv_logout;
1048 wake_up(&vhost->work_wait_q);
1049 } else
1050 ibmvfc_hard_reset_host(vhost);
1051 }
1052
1053 /**
1054 * ibmvfc_reset_host - Reset the connection to the server
1055 * @vhost: ibmvfc host struct
1056 **/
1057 static void ibmvfc_reset_host(struct ibmvfc_host *vhost)
1058 {
1059 unsigned long flags;
1060
1061 spin_lock_irqsave(vhost->host->host_lock, flags);
1062 __ibmvfc_reset_host(vhost);
1063 spin_unlock_irqrestore(vhost->host->host_lock, flags);
1064 }
1065
1066 /**
1067 * ibmvfc_retry_host_init - Retry host initialization if allowed
1068 * @vhost: ibmvfc host struct
1069 *
1070 * Returns: 1 if init will be retried / 0 if not
1071 *
1072 **/
1073 static int ibmvfc_retry_host_init(struct ibmvfc_host *vhost)
1074 {
1075 int retry = 0;
1076
1077 if (vhost->action == IBMVFC_HOST_ACTION_INIT_WAIT) {
1078 vhost->delay_init = 1;
1079 if (++vhost->init_retries > IBMVFC_MAX_HOST_INIT_RETRIES) {
1080 dev_err(vhost->dev,
1081 "Host initialization retries exceeded. Taking adapter offline\n");
1082 ibmvfc_link_down(vhost, IBMVFC_HOST_OFFLINE);
1083 } else if (vhost->init_retries == IBMVFC_MAX_HOST_INIT_RETRIES)
1084 __ibmvfc_reset_host(vhost);
1085 else {
1086 ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_INIT);
1087 retry = 1;
1088 }
1089 }
1090
1091 wake_up(&vhost->work_wait_q);
1092 return retry;
1093 }
1094
1095 /**
1096 * __ibmvfc_get_target - Find the specified scsi_target (no locking)
1097 * @starget: scsi target struct
1098 *
1099 * Return value:
1100 * ibmvfc_target struct / NULL if not found
1101 **/
1102 static struct ibmvfc_target *__ibmvfc_get_target(struct scsi_target *starget)
1103 {
1104 struct Scsi_Host *shost = dev_to_shost(starget->dev.parent);
1105 struct ibmvfc_host *vhost = shost_priv(shost);
1106 struct ibmvfc_target *tgt;
1107
1108 list_for_each_entry(tgt, &vhost->targets, queue)
1109 if (tgt->target_id == starget->id) {
1110 kref_get(&tgt->kref);
1111 return tgt;
1112 }
1113 return NULL;
1114 }
1115
1116 /**
1117 * ibmvfc_get_target - Find the specified scsi_target
1118 * @starget: scsi target struct
1119 *
1120 * Return value:
1121 * ibmvfc_target struct / NULL if not found
1122 **/
1123 static struct ibmvfc_target *ibmvfc_get_target(struct scsi_target *starget)
1124 {
1125 struct Scsi_Host *shost = dev_to_shost(starget->dev.parent);
1126 struct ibmvfc_target *tgt;
1127 unsigned long flags;
1128
1129 spin_lock_irqsave(shost->host_lock, flags);
1130 tgt = __ibmvfc_get_target(starget);
1131 spin_unlock_irqrestore(shost->host_lock, flags);
1132 return tgt;
1133 }
1134
1135 /**
1136 * ibmvfc_get_host_speed - Get host port speed
1137 * @shost: scsi host struct
1138 *
1139 * Return value:
1140 * none
1141 **/
1142 static void ibmvfc_get_host_speed(struct Scsi_Host *shost)
1143 {
1144 struct ibmvfc_host *vhost = shost_priv(shost);
1145 unsigned long flags;
1146
1147 spin_lock_irqsave(shost->host_lock, flags);
1148 if (vhost->state == IBMVFC_ACTIVE) {
1149 switch (be64_to_cpu(vhost->login_buf->resp.link_speed) / 100) {
1150 case 1:
1151 fc_host_speed(shost) = FC_PORTSPEED_1GBIT;
1152 break;
1153 case 2:
1154 fc_host_speed(shost) = FC_PORTSPEED_2GBIT;
1155 break;
1156 case 4:
1157 fc_host_speed(shost) = FC_PORTSPEED_4GBIT;
1158 break;
1159 case 8:
1160 fc_host_speed(shost) = FC_PORTSPEED_8GBIT;
1161 break;
1162 case 10:
1163 fc_host_speed(shost) = FC_PORTSPEED_10GBIT;
1164 break;
1165 case 16:
1166 fc_host_speed(shost) = FC_PORTSPEED_16GBIT;
1167 break;
1168 default:
1169 ibmvfc_log(vhost, 3, "Unknown port speed: %lld Gbit\n",
1170 be64_to_cpu(vhost->login_buf->resp.link_speed) / 100);
1171 fc_host_speed(shost) = FC_PORTSPEED_UNKNOWN;
1172 break;
1173 }
1174 } else
1175 fc_host_speed(shost) = FC_PORTSPEED_UNKNOWN;
1176 spin_unlock_irqrestore(shost->host_lock, flags);
1177 }
1178
1179 /**
1180 * ibmvfc_get_host_port_state - Get host port state
1181 * @shost: scsi host struct
1182 *
1183 * Return value:
1184 * none
1185 **/
1186 static void ibmvfc_get_host_port_state(struct Scsi_Host *shost)
1187 {
1188 struct ibmvfc_host *vhost = shost_priv(shost);
1189 unsigned long flags;
1190
1191 spin_lock_irqsave(shost->host_lock, flags);
1192 switch (vhost->state) {
1193 case IBMVFC_INITIALIZING:
1194 case IBMVFC_ACTIVE:
1195 fc_host_port_state(shost) = FC_PORTSTATE_ONLINE;
1196 break;
1197 case IBMVFC_LINK_DOWN:
1198 fc_host_port_state(shost) = FC_PORTSTATE_LINKDOWN;
1199 break;
1200 case IBMVFC_LINK_DEAD:
1201 case IBMVFC_HOST_OFFLINE:
1202 fc_host_port_state(shost) = FC_PORTSTATE_OFFLINE;
1203 break;
1204 case IBMVFC_HALTED:
1205 fc_host_port_state(shost) = FC_PORTSTATE_BLOCKED;
1206 break;
1207 case IBMVFC_NO_CRQ:
1208 fc_host_port_state(shost) = FC_PORTSTATE_UNKNOWN;
1209 break;
1210 default:
1211 ibmvfc_log(vhost, 3, "Unknown port state: %d\n", vhost->state);
1212 fc_host_port_state(shost) = FC_PORTSTATE_UNKNOWN;
1213 break;
1214 }
1215 spin_unlock_irqrestore(shost->host_lock, flags);
1216 }
1217
1218 /**
1219 * ibmvfc_set_rport_dev_loss_tmo - Set rport's device loss timeout
1220 * @rport: rport struct
1221 * @timeout: timeout value
1222 *
1223 * Return value:
1224 * none
1225 **/
1226 static void ibmvfc_set_rport_dev_loss_tmo(struct fc_rport *rport, u32 timeout)
1227 {
1228 if (timeout)
1229 rport->dev_loss_tmo = timeout;
1230 else
1231 rport->dev_loss_tmo = 1;
1232 }
1233
1234 /**
1235 * ibmvfc_release_tgt - Free memory allocated for a target
1236 * @kref: kref struct
1237 *
1238 **/
1239 static void ibmvfc_release_tgt(struct kref *kref)
1240 {
1241 struct ibmvfc_target *tgt = container_of(kref, struct ibmvfc_target, kref);
1242 kfree(tgt);
1243 }
1244
1245 /**
1246 * ibmvfc_get_starget_node_name - Get SCSI target's node name
1247 * @starget: scsi target struct
1248 *
1249 * Return value:
1250 * none
1251 **/
1252 static void ibmvfc_get_starget_node_name(struct scsi_target *starget)
1253 {
1254 struct ibmvfc_target *tgt = ibmvfc_get_target(starget);
1255 fc_starget_node_name(starget) = tgt ? tgt->ids.node_name : 0;
1256 if (tgt)
1257 kref_put(&tgt->kref, ibmvfc_release_tgt);
1258 }
1259
1260 /**
1261 * ibmvfc_get_starget_port_name - Get SCSI target's port name
1262 * @starget: scsi target struct
1263 *
1264 * Return value:
1265 * none
1266 **/
1267 static void ibmvfc_get_starget_port_name(struct scsi_target *starget)
1268 {
1269 struct ibmvfc_target *tgt = ibmvfc_get_target(starget);
1270 fc_starget_port_name(starget) = tgt ? tgt->ids.port_name : 0;
1271 if (tgt)
1272 kref_put(&tgt->kref, ibmvfc_release_tgt);
1273 }
1274
1275 /**
1276 * ibmvfc_get_starget_port_id - Get SCSI target's port ID
1277 * @starget: scsi target struct
1278 *
1279 * Return value:
1280 * none
1281 **/
1282 static void ibmvfc_get_starget_port_id(struct scsi_target *starget)
1283 {
1284 struct ibmvfc_target *tgt = ibmvfc_get_target(starget);
1285 fc_starget_port_id(starget) = tgt ? tgt->scsi_id : -1;
1286 if (tgt)
1287 kref_put(&tgt->kref, ibmvfc_release_tgt);
1288 }
1289
1290 /**
1291 * ibmvfc_wait_while_resetting - Wait while the host resets
1292 * @vhost: ibmvfc host struct
1293 *
1294 * Return value:
1295 * 0 on success / other on failure
1296 **/
1297 static int ibmvfc_wait_while_resetting(struct ibmvfc_host *vhost)
1298 {
1299 long timeout = wait_event_timeout(vhost->init_wait_q,
1300 ((vhost->state == IBMVFC_ACTIVE ||
1301 vhost->state == IBMVFC_HOST_OFFLINE ||
1302 vhost->state == IBMVFC_LINK_DEAD) &&
1303 vhost->action == IBMVFC_HOST_ACTION_NONE),
1304 (init_timeout * HZ));
1305
1306 return timeout ? 0 : -EIO;
1307 }
1308
1309 /**
1310 * ibmvfc_issue_fc_host_lip - Re-initiate link initialization
1311 * @shost: scsi host struct
1312 *
1313 * Return value:
1314 * 0 on success / other on failure
1315 **/
1316 static int ibmvfc_issue_fc_host_lip(struct Scsi_Host *shost)
1317 {
1318 struct ibmvfc_host *vhost = shost_priv(shost);
1319
1320 dev_err(vhost->dev, "Initiating host LIP. Resetting connection\n");
1321 ibmvfc_reset_host(vhost);
1322 return ibmvfc_wait_while_resetting(vhost);
1323 }
1324
1325 /**
1326 * ibmvfc_gather_partition_info - Gather info about the LPAR
1327 * @vhost: ibmvfc host struct
 *
1328 * Return value:
1329 * none
1330 **/
1331 static void ibmvfc_gather_partition_info(struct ibmvfc_host *vhost)
1332 {
1333 struct device_node *rootdn;
1334 const char *name;
1335 const unsigned int *num;
1336
1337 rootdn = of_find_node_by_path("/");
1338 if (!rootdn)
1339 return;
1340
1341 name = of_get_property(rootdn, "ibm,partition-name", NULL);
1342 if (name)
1343 strncpy(vhost->partition_name, name, sizeof(vhost->partition_name));
1344 num = of_get_property(rootdn, "ibm,partition-no", NULL);
1345 if (num)
1346 vhost->partition_number = *num;
1347 of_node_put(rootdn);
1348 }
1349
1350 /**
1351 * ibmvfc_set_login_info - Setup info for NPIV login
1352 * @vhost: ibmvfc host struct
1353 *
1354 * Return value:
1355 * none
1356 **/
1357 static void ibmvfc_set_login_info(struct ibmvfc_host *vhost)
1358 {
1359 struct ibmvfc_npiv_login *login_info = &vhost->login_info;
1360 struct ibmvfc_queue *async_crq = &vhost->async_crq;
1361 struct device_node *of_node = vhost->dev->of_node;
1362 const char *location;
1363
1364 memset(login_info, 0, sizeof(*login_info));
1365
1366 login_info->ostype = cpu_to_be32(IBMVFC_OS_LINUX);
1367 login_info->max_dma_len = cpu_to_be64(IBMVFC_MAX_SECTORS << 9);
1368 login_info->max_payload = cpu_to_be32(sizeof(struct ibmvfc_fcp_cmd_iu));
1369 login_info->max_response = cpu_to_be32(sizeof(struct ibmvfc_fcp_rsp));
1370 login_info->partition_num = cpu_to_be32(vhost->partition_number);
1371 login_info->vfc_frame_version = cpu_to_be32(1);
1372 login_info->fcp_version = cpu_to_be16(3);
1373 login_info->flags = cpu_to_be16(IBMVFC_FLUSH_ON_HALT);
1374 if (vhost->client_migrated)
1375 login_info->flags |= cpu_to_be16(IBMVFC_CLIENT_MIGRATED);
1376
1377 login_info->max_cmds = cpu_to_be32(max_requests + IBMVFC_NUM_INTERNAL_REQ);
1378 login_info->capabilities = cpu_to_be64(IBMVFC_CAN_MIGRATE | IBMVFC_CAN_SEND_VF_WWPN);
1379 login_info->async.va = cpu_to_be64(vhost->async_crq.msg_token);
1380 login_info->async.len = cpu_to_be32(async_crq->size *
1381 sizeof(*async_crq->msgs.async));
1382 strncpy(login_info->partition_name, vhost->partition_name, IBMVFC_MAX_NAME);
1383 strncpy(login_info->device_name,
1384 dev_name(&vhost->host->shost_gendev), IBMVFC_MAX_NAME);
1385
1386 location = of_get_property(of_node, "ibm,loc-code", NULL);
1387 location = location ? location : dev_name(vhost->dev);
1388 strncpy(login_info->drc_name, location, IBMVFC_MAX_NAME);
1389 }
1390
1391 /**
1392 * ibmvfc_get_event - Gets the next free event in pool
1393 * @queue: ibmvfc queue struct
1394 *
1395 * Returns a free event from the pool.
1396 **/
1397 static struct ibmvfc_event *ibmvfc_get_event(struct ibmvfc_queue *queue)
1398 {
1399 struct ibmvfc_event *evt;
1400 unsigned long flags;
1401
1402 spin_lock_irqsave(&queue->l_lock, flags);
1403 BUG_ON(list_empty(&queue->free));
1404 evt = list_entry(queue->free.next, struct ibmvfc_event, queue_list);
1405 atomic_set(&evt->free, 0);
1406 list_del(&evt->queue_list);
1407 spin_unlock_irqrestore(&queue->l_lock, flags);
1408 return evt;
1409 }
1410
1411 /**
1412 * ibmvfc_locked_done - Calls evt completion with host_lock held
1413 * @evt: ibmvfc evt to complete
1414 *
1415 * All non-scsi command completion callbacks have the expectation that the
1416 * host_lock is held. This callback is used by ibmvfc_init_event to wrap a
1417 * MAD evt with the host_lock.
1418 **/
1419 static void ibmvfc_locked_done(struct ibmvfc_event *evt)
1420 {
1421 unsigned long flags;
1422
1423 spin_lock_irqsave(evt->vhost->host->host_lock, flags);
1424 evt->_done(evt);
1425 spin_unlock_irqrestore(evt->vhost->host->host_lock, flags);
1426 }
1427
1428 /**
1429 * ibmvfc_init_event - Initialize fields in an event struct that are always
1430 * required.
1431 * @evt: The event
1432 * @done: Routine to call when the event is responded to
1433 * @format: IBMVFC_CMD_FORMAT or IBMVFC_MAD_FORMAT
1434 **/
1435 static void ibmvfc_init_event(struct ibmvfc_event *evt,
1436 void (*done) (struct ibmvfc_event *), u8 format)
1437 {
1438 evt->cmnd = NULL;
1439 evt->sync_iu = NULL;
1440 evt->eh_comp = NULL;
1441 evt->crq.format = format;
1442 if (format == IBMVFC_CMD_FORMAT)
1443 evt->done = done;
1444 else {
1445 evt->_done = done;
1446 evt->done = ibmvfc_locked_done;
1447 }
1448 }
1449
1450 /**
1451 * ibmvfc_map_sg_list - Initialize scatterlist
1452 * @scmd: scsi command struct
1453 * @nseg: number of scatterlist segments
1454 * @md: memory descriptor list to initialize
1455 **/
1456 static void ibmvfc_map_sg_list(struct scsi_cmnd *scmd, int nseg,
1457 struct srp_direct_buf *md)
1458 {
1459 int i;
1460 struct scatterlist *sg;
1461
1462 scsi_for_each_sg(scmd, sg, nseg, i) {
1463 md[i].va = cpu_to_be64(sg_dma_address(sg));
1464 md[i].len = cpu_to_be32(sg_dma_len(sg));
1465 md[i].key = 0;
1466 }
1467 }
1468
1469 /**
1470 * ibmvfc_map_sg_data - Maps dma for a scatterlist and initializes descriptor fields
1471 * @scmd: struct scsi_cmnd with the scatterlist
1472 * @evt: ibmvfc event struct
1473 * @vfc_cmd: vfc_cmd that contains the memory descriptor
1474 * @dev: device for which to map dma memory
1475 *
1476 * Returns:
1477 * 0 on success / non-zero on failure
1478 **/
1479 static int ibmvfc_map_sg_data(struct scsi_cmnd *scmd,
1480 struct ibmvfc_event *evt,
1481 struct ibmvfc_cmd *vfc_cmd, struct device *dev)
1482 {
1484 int sg_mapped;
1485 struct srp_direct_buf *data = &vfc_cmd->ioba;
1486 struct ibmvfc_host *vhost = dev_get_drvdata(dev);
1487 struct ibmvfc_fcp_cmd_iu *iu = ibmvfc_get_fcp_iu(evt->vhost, vfc_cmd);
1488
1489 if (cls3_error)
1490 vfc_cmd->flags |= cpu_to_be16(IBMVFC_CLASS_3_ERR);
1491
1492 sg_mapped = scsi_dma_map(scmd);
1493 if (!sg_mapped) {
1494 vfc_cmd->flags |= cpu_to_be16(IBMVFC_NO_MEM_DESC);
1495 return 0;
1496 } else if (unlikely(sg_mapped < 0)) {
1497 if (vhost->log_level > IBMVFC_DEFAULT_LOG_LEVEL)
1498 scmd_printk(KERN_ERR, scmd, "Failed to map DMA buffer for command\n");
1499 return sg_mapped;
1500 }
1501
1502 if (scmd->sc_data_direction == DMA_TO_DEVICE) {
1503 vfc_cmd->flags |= cpu_to_be16(IBMVFC_WRITE);
1504 iu->add_cdb_len |= IBMVFC_WRDATA;
1505 } else {
1506 vfc_cmd->flags |= cpu_to_be16(IBMVFC_READ);
1507 iu->add_cdb_len |= IBMVFC_RDDATA;
1508 }
1509
1510 if (sg_mapped == 1) {
1511 ibmvfc_map_sg_list(scmd, sg_mapped, data);
1512 return 0;
1513 }
1514
1515 vfc_cmd->flags |= cpu_to_be16(IBMVFC_SCATTERLIST);
1516
1517 if (!evt->ext_list) {
1518 evt->ext_list = dma_pool_alloc(vhost->sg_pool, GFP_ATOMIC,
1519 &evt->ext_list_token);
1520
1521 if (!evt->ext_list) {
1522 scsi_dma_unmap(scmd);
1523 if (vhost->log_level > IBMVFC_DEFAULT_LOG_LEVEL)
1524 scmd_printk(KERN_ERR, scmd, "Can't allocate memory for scatterlist\n");
1525 return -ENOMEM;
1526 }
1527 }
1528
1529 ibmvfc_map_sg_list(scmd, sg_mapped, evt->ext_list);
1530
1531 data->va = cpu_to_be64(evt->ext_list_token);
1532 data->len = cpu_to_be32(sg_mapped * sizeof(struct srp_direct_buf));
1533 data->key = 0;
1534 return 0;
1535 }
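/*
 * Single-segment commands use the inline descriptor embedded in the
 * vfc_cmd; anything larger switches to IBMVFC_SCATTERLIST with an external
 * indirect list allocated from sg_pool, and the inline descriptor then
 * points at that list.
 */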
1536
1537 /**
1538 * ibmvfc_timeout - Internal command timeout handler
1539 * @t: timer that fired; the owning struct ibmvfc_event is recovered with from_timer()
1540 *
1541 * Called when an internally generated command times out
1542 **/
1543 static void ibmvfc_timeout(struct timer_list *t)
1544 {
1545 struct ibmvfc_event *evt = from_timer(evt, t, timer);
1546 struct ibmvfc_host *vhost = evt->vhost;
1547 dev_err(vhost->dev, "Command timed out (%p). Resetting connection\n", evt);
1548 ibmvfc_reset_host(vhost);
1549 }
1550
1551 /**
1552 * ibmvfc_send_event - Transforms event to u64 array and calls send_crq()
1553 * @evt: event to be sent
1554 * @vhost: ibmvfc host struct
1555 * @timeout: timeout in seconds - 0 means do not time command
1556 *
1557 * Returns zero on success, or SCSI_MLQUEUE_HOST_BUSY if the CRQ is
 * temporarily closed and the midlayer should retry the command.
1558 **/
1559 static int ibmvfc_send_event(struct ibmvfc_event *evt,
1560 struct ibmvfc_host *vhost, unsigned long timeout)
1561 {
1562 __be64 *crq_as_u64 = (__be64 *) &evt->crq;
1563 unsigned long flags;
1564 int rc;
1565
1566 /* Copy the IU into the transfer area */
1567 *evt->xfer_iu = evt->iu;
1568 if (evt->crq.format == IBMVFC_CMD_FORMAT)
1569 evt->xfer_iu->cmd.tag = cpu_to_be64((u64)evt);
1570 else if (evt->crq.format == IBMVFC_MAD_FORMAT)
1571 evt->xfer_iu->mad_common.tag = cpu_to_be64((u64)evt);
1572 else
1573 BUG();
1574
1575 timer_setup(&evt->timer, ibmvfc_timeout, 0);
1576
1577 if (timeout) {
1578 evt->timer.expires = jiffies + (timeout * HZ);
1579 add_timer(&evt->timer);
1580 }
1581
1582 spin_lock_irqsave(&evt->queue->l_lock, flags);
1583 list_add_tail(&evt->queue_list, &evt->queue->sent);
1584
1585 mb();
1586
1587 if ((rc = ibmvfc_send_crq(vhost, be64_to_cpu(crq_as_u64[0]),
1588 be64_to_cpu(crq_as_u64[1])))) {
1589 list_del(&evt->queue_list);
1590 spin_unlock_irqrestore(&evt->queue->l_lock, flags);
1591 del_timer(&evt->timer);
1592
1593 /* If send_crq returns H_CLOSED, return SCSI_MLQUEUE_HOST_BUSY.
1594 * Firmware will send a CRQ with a transport event (0xFF) to
1595 * tell this client what has happened to the transport. This
1596 * will be handled in ibmvfc_handle_crq()
1597 */
1598 if (rc == H_CLOSED) {
1599 if (printk_ratelimit())
1600 dev_warn(vhost->dev, "Send warning. Receive queue closed, will retry.\n");
1601 if (evt->cmnd)
1602 scsi_dma_unmap(evt->cmnd);
1603 ibmvfc_free_event(evt);
1604 return SCSI_MLQUEUE_HOST_BUSY;
1605 }
1606
1607 dev_err(vhost->dev, "Send error (rc=%d)\n", rc);
1608 if (evt->cmnd) {
1609 evt->cmnd->result = DID_ERROR << 16;
1610 evt->done = ibmvfc_scsi_eh_done;
1611 } else
1612 evt->xfer_iu->mad_common.status = cpu_to_be16(IBMVFC_MAD_CRQ_ERROR);
1613
1614 evt->done(evt);
1615 } else {
1616 spin_unlock_irqrestore(&evt->queue->l_lock, flags);
1617 ibmvfc_trc_start(evt);
1618 }
1619
1620 return 0;
1621 }
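/*
 * The mb() above is the crux of the submission path: the event must be
 * visible on the sent list before the H_SEND_CRQ hypercall hands the CRQ
 * entry to the VIOS, since the interrupt-driven completion path may find
 * it immediately.
 */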
1622
1623 /**
1624 * ibmvfc_log_error - Log an error for the failed command if appropriate
1625 * @evt: ibmvfc event to log
1626 *
1627 **/
1628 static void ibmvfc_log_error(struct ibmvfc_event *evt)
1629 {
1630 struct ibmvfc_cmd *vfc_cmd = &evt->xfer_iu->cmd;
1631 struct ibmvfc_host *vhost = evt->vhost;
1632 struct ibmvfc_fcp_rsp *rsp = ibmvfc_get_fcp_rsp(vhost, vfc_cmd);
1633 struct scsi_cmnd *cmnd = evt->cmnd;
1634 const char *err = unknown_error;
1635 int index = ibmvfc_get_err_index(be16_to_cpu(vfc_cmd->status), be16_to_cpu(vfc_cmd->error));
1636 int logerr = 0;
1637 int rsp_code = 0;
1638
1639 if (index >= 0) {
1640 logerr = cmd_status[index].log;
1641 err = cmd_status[index].name;
1642 }
1643
1644 if (!logerr && (vhost->log_level <= (IBMVFC_DEFAULT_LOG_LEVEL + 1)))
1645 return;
1646
1647 if (rsp->flags & FCP_RSP_LEN_VALID)
1648 rsp_code = rsp->data.info.rsp_code;
1649
1650 scmd_printk(KERN_ERR, cmnd, "Command (%02X) : %s (%x:%x) "
1651 "flags: %x fcp_rsp: %x, resid=%d, scsi_status: %x\n",
1652 cmnd->cmnd[0], err, be16_to_cpu(vfc_cmd->status), be16_to_cpu(vfc_cmd->error),
1653 rsp->flags, rsp_code, scsi_get_resid(cmnd), rsp->scsi_status);
1654 }
1655
1656 /**
1657 * ibmvfc_relogin - Log back into the specified device
1658 * @sdev: scsi device struct
1659 *
1660 **/
1661 static void ibmvfc_relogin(struct scsi_device *sdev)
1662 {
1663 struct ibmvfc_host *vhost = shost_priv(sdev->host);
1664 struct fc_rport *rport = starget_to_rport(scsi_target(sdev));
1665 struct ibmvfc_target *tgt;
1666 unsigned long flags;
1667
1668 spin_lock_irqsave(vhost->host->host_lock, flags);
1669 list_for_each_entry(tgt, &vhost->targets, queue) {
1670 if (rport == tgt->rport) {
1671 ibmvfc_del_tgt(tgt);
1672 break;
1673 }
1674 }
1675
1676 ibmvfc_reinit_host(vhost);
1677 spin_unlock_irqrestore(vhost->host->host_lock, flags);
1678 }
1679
1680 /**
1681 * ibmvfc_scsi_done - Handle responses from commands
1682 * @evt: ibmvfc event to be handled
1683 *
1684 * Used as a callback when sending scsi cmds.
1685 **/
1686 static void ibmvfc_scsi_done(struct ibmvfc_event *evt)
1687 {
1688 struct ibmvfc_cmd *vfc_cmd = &evt->xfer_iu->cmd;
1689 struct ibmvfc_fcp_rsp *rsp = ibmvfc_get_fcp_rsp(evt->vhost, vfc_cmd);
1690 struct scsi_cmnd *cmnd = evt->cmnd;
1691 u32 rsp_len = 0;
1692 u32 sense_len = be32_to_cpu(rsp->fcp_sense_len);
1693
1694 if (cmnd) {
1695 if (be16_to_cpu(vfc_cmd->response_flags) & IBMVFC_ADAPTER_RESID_VALID)
1696 scsi_set_resid(cmnd, be32_to_cpu(vfc_cmd->adapter_resid));
1697 else if (rsp->flags & FCP_RESID_UNDER)
1698 scsi_set_resid(cmnd, be32_to_cpu(rsp->fcp_resid));
1699 else
1700 scsi_set_resid(cmnd, 0);
1701
1702 if (vfc_cmd->status) {
1703 cmnd->result = ibmvfc_get_err_result(evt->vhost, vfc_cmd);
1704
1705 if (rsp->flags & FCP_RSP_LEN_VALID)
1706 rsp_len = be32_to_cpu(rsp->fcp_rsp_len);
1707 if ((sense_len + rsp_len) > SCSI_SENSE_BUFFERSIZE)
1708 sense_len = SCSI_SENSE_BUFFERSIZE - rsp_len;
1709 if ((rsp->flags & FCP_SNS_LEN_VALID) && rsp->fcp_sense_len && rsp_len <= 8)
1710 memcpy(cmnd->sense_buffer, rsp->data.sense + rsp_len, sense_len);
1711 if ((be16_to_cpu(vfc_cmd->status) & IBMVFC_VIOS_FAILURE) &&
1712 (be16_to_cpu(vfc_cmd->error) == IBMVFC_PLOGI_REQUIRED))
1713 ibmvfc_relogin(cmnd->device);
1714
1715 if (!cmnd->result && (!scsi_get_resid(cmnd) || (rsp->flags & FCP_RESID_OVER)))
1716 cmnd->result = (DID_ERROR << 16);
1717
1718 ibmvfc_log_error(evt);
1719 }
1720
1721 if (!cmnd->result &&
1722 (scsi_bufflen(cmnd) - scsi_get_resid(cmnd) < cmnd->underflow))
1723 cmnd->result = (DID_ERROR << 16);
1724
1725 scsi_dma_unmap(cmnd);
1726 cmnd->scsi_done(cmnd);
1727 }
1728
1729 ibmvfc_free_event(evt);
1730 }
1731
1732 /**
1733 * ibmvfc_host_chkready - Check if the host can accept commands
1734 * @vhost: struct ibmvfc host
1735 *
1736 * Returns:
1737 * 0 if the host can accept the command / a SCSI result (DID_* << 16)
 * to fail the command with if it cannot
1738 **/
1739 static inline int ibmvfc_host_chkready(struct ibmvfc_host *vhost)
1740 {
1741 int result = 0;
1742
1743 switch (vhost->state) {
1744 case IBMVFC_LINK_DEAD:
1745 case IBMVFC_HOST_OFFLINE:
1746 result = DID_NO_CONNECT << 16;
1747 break;
1748 case IBMVFC_NO_CRQ:
1749 case IBMVFC_INITIALIZING:
1750 case IBMVFC_HALTED:
1751 case IBMVFC_LINK_DOWN:
1752 result = DID_REQUEUE << 16;
1753 break;
1754 case IBMVFC_ACTIVE:
1755 result = 0;
1756 break;
1757 }
1758
1759 return result;
1760 }
1761
1762 static struct ibmvfc_cmd *ibmvfc_init_vfc_cmd(struct ibmvfc_event *evt, struct scsi_device *sdev)
1763 {
1764 struct fc_rport *rport = starget_to_rport(scsi_target(sdev));
1765 struct ibmvfc_host *vhost = evt->vhost;
1766 struct ibmvfc_cmd *vfc_cmd = &evt->iu.cmd;
1767 struct ibmvfc_fcp_cmd_iu *iu = ibmvfc_get_fcp_iu(vhost, vfc_cmd);
1768 struct ibmvfc_fcp_rsp *rsp = ibmvfc_get_fcp_rsp(vhost, vfc_cmd);
1769 size_t offset;
1770
1771 memset(vfc_cmd, 0, sizeof(*vfc_cmd));
1772 if (ibmvfc_check_caps(vhost, IBMVFC_HANDLE_VF_WWPN)) {
1773 offset = offsetof(struct ibmvfc_cmd, v2.rsp);
1774 vfc_cmd->target_wwpn = cpu_to_be64(rport->port_name);
1775 } else
1776 offset = offsetof(struct ibmvfc_cmd, v1.rsp);
1777 vfc_cmd->resp.va = cpu_to_be64(be64_to_cpu(evt->crq.ioba) + offset);
1778 vfc_cmd->resp.len = cpu_to_be32(sizeof(*rsp));
1779 vfc_cmd->frame_type = cpu_to_be32(IBMVFC_SCSI_FCP_TYPE);
1780 vfc_cmd->payload_len = cpu_to_be32(sizeof(*iu));
1781 vfc_cmd->resp_len = cpu_to_be32(sizeof(*rsp));
1782 vfc_cmd->cancel_key = cpu_to_be32((unsigned long)sdev->hostdata);
1783 vfc_cmd->tgt_scsi_id = cpu_to_be64(rport->port_id);
1784 int_to_scsilun(sdev->lun, &iu->lun);
1785
1786 return vfc_cmd;
1787 }
1788
1789 /**
1790 * ibmvfc_queuecommand - The queuecommand function of the scsi template
1791 * @shost: scsi host struct
1792 * @cmnd: struct scsi_cmnd to be executed
1793 *
1794 * Returns:
1795 * 0 on success / other on failure
1796 **/
1797 static int ibmvfc_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *cmnd)
1798 {
1799 struct ibmvfc_host *vhost = shost_priv(shost);
1800 struct fc_rport *rport = starget_to_rport(scsi_target(cmnd->device));
1801 struct ibmvfc_cmd *vfc_cmd;
1802 struct ibmvfc_fcp_cmd_iu *iu;
1803 struct ibmvfc_event *evt;
1804 int rc;
1805
1806 if (unlikely((rc = fc_remote_port_chkready(rport))) ||
1807 unlikely((rc = ibmvfc_host_chkready(vhost)))) {
1808 cmnd->result = rc;
1809 cmnd->scsi_done(cmnd);
1810 return 0;
1811 }
1812
1813 cmnd->result = (DID_OK << 16);
1814 evt = ibmvfc_get_event(&vhost->crq);
1815 ibmvfc_init_event(evt, ibmvfc_scsi_done, IBMVFC_CMD_FORMAT);
1816 evt->cmnd = cmnd;
1817
1818 vfc_cmd = ibmvfc_init_vfc_cmd(evt, cmnd->device);
1819 iu = ibmvfc_get_fcp_iu(vhost, vfc_cmd);
1820
1821 iu->xfer_len = cpu_to_be32(scsi_bufflen(cmnd));
1822 memcpy(iu->cdb, cmnd->cmnd, cmnd->cmd_len);
1823
1824 if (cmnd->flags & SCMD_TAGGED) {
1825 vfc_cmd->task_tag = cpu_to_be64(cmnd->tag);
1826 iu->pri_task_attr = IBMVFC_SIMPLE_TASK;
1827 }
1828
1829 vfc_cmd->correlation = cpu_to_be64((u64)evt);
1830
1831 if (likely(!(rc = ibmvfc_map_sg_data(cmnd, evt, vfc_cmd, vhost->dev))))
1832 return ibmvfc_send_event(evt, vhost, 0);
1833
1834 ibmvfc_free_event(evt);
1835 if (rc == -ENOMEM)
1836 return SCSI_MLQUEUE_HOST_BUSY;
1837
1838 if (vhost->log_level > IBMVFC_DEFAULT_LOG_LEVEL)
1839 scmd_printk(KERN_ERR, cmnd,
1840 "Failed to map DMA buffer for command. rc=%d\n", rc);
1841
1842 cmnd->result = DID_ERROR << 16;
1843 cmnd->scsi_done(cmnd);
1844 return 0;
1845 }
1846
1847 /**
1848 * ibmvfc_sync_completion - Signal that a synchronous command has completed
1849 * @evt: ibmvfc event struct
1850 *
1851 **/
1852 static void ibmvfc_sync_completion(struct ibmvfc_event *evt)
1853 {
1854 /* copy the response back */
1855 if (evt->sync_iu)
1856 *evt->sync_iu = *evt->xfer_iu;
1857
1858 complete(&evt->comp);
1859 }
1860
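/*
 * Editorial sketch (illustrative, not part of the driver): this is the
 * done handler for the driver's synchronous command idiom, used by the
 * BSG, reset, and cancel paths below. The caller points evt->sync_iu at
 * a stack response buffer, sends the event under the host lock, then
 * sleeps on evt->comp until this handler copies the response and
 * completes. Schematically (error handling omitted):
 */
#if 0
	evt = ibmvfc_get_event(&vhost->crq);
	ibmvfc_init_event(evt, ibmvfc_sync_completion, IBMVFC_MAD_FORMAT);
	evt->sync_iu = &rsp_iu;			/* response lands here */
	init_completion(&evt->comp);
	rc = ibmvfc_send_event(evt, vhost, default_timeout);
	spin_unlock_irqrestore(vhost->host->host_lock, flags);
	if (!rc)
		wait_for_completion(&evt->comp);	/* woken by this handler */
#endif
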
1861 /**
1862 * ibmvfc_bsg_timeout_done - Completion handler for cancelling BSG commands
1863 * @evt: struct ibmvfc_event
1864 *
1865 **/
1866 static void ibmvfc_bsg_timeout_done(struct ibmvfc_event *evt)
1867 {
1868 struct ibmvfc_host *vhost = evt->vhost;
1869
1870 ibmvfc_free_event(evt);
1871 vhost->aborting_passthru = 0;
1872 dev_info(vhost->dev, "Passthru command cancelled\n");
1873 }
1874
1875 /**
1876 * ibmvfc_bsg_timeout - Handle a BSG timeout
1877 * @job: struct bsg_job that timed out
1878 *
1879 * Returns:
1880 * 0 on success / other on failure
1881 **/
1882 static int ibmvfc_bsg_timeout(struct bsg_job *job)
1883 {
1884 struct ibmvfc_host *vhost = shost_priv(fc_bsg_to_shost(job));
1885 unsigned long port_id = (unsigned long)job->dd_data;
1886 struct ibmvfc_event *evt;
1887 struct ibmvfc_tmf *tmf;
1888 unsigned long flags;
1889 int rc;
1890
1891 ENTER;
1892 spin_lock_irqsave(vhost->host->host_lock, flags);
1893 if (vhost->aborting_passthru || vhost->state != IBMVFC_ACTIVE) {
1894 __ibmvfc_reset_host(vhost);
1895 spin_unlock_irqrestore(vhost->host->host_lock, flags);
1896 return 0;
1897 }
1898
1899 vhost->aborting_passthru = 1;
1900 evt = ibmvfc_get_event(&vhost->crq);
1901 ibmvfc_init_event(evt, ibmvfc_bsg_timeout_done, IBMVFC_MAD_FORMAT);
1902
1903 tmf = &evt->iu.tmf;
1904 memset(tmf, 0, sizeof(*tmf));
1905 tmf->common.version = cpu_to_be32(1);
1906 tmf->common.opcode = cpu_to_be32(IBMVFC_TMF_MAD);
1907 tmf->common.length = cpu_to_be16(sizeof(*tmf));
1908 tmf->scsi_id = cpu_to_be64(port_id);
1909 tmf->cancel_key = cpu_to_be32(IBMVFC_PASSTHRU_CANCEL_KEY);
1910 tmf->my_cancel_key = cpu_to_be32(IBMVFC_INTERNAL_CANCEL_KEY);
1911 rc = ibmvfc_send_event(evt, vhost, default_timeout);
1912
1913 if (rc != 0) {
1914 vhost->aborting_passthru = 0;
1915 dev_err(vhost->dev, "Failed to send cancel event. rc=%d\n", rc);
1916 rc = -EIO;
1917 } else
1918 dev_info(vhost->dev, "Cancelling passthru command to port id 0x%lx\n",
1919 port_id);
1920
1921 spin_unlock_irqrestore(vhost->host->host_lock, flags);
1922
1923 LEAVE;
1924 return rc;
1925 }
1926
1927 /**
1928 * ibmvfc_bsg_plogi - PLOGI into a target to handle a BSG command
1929 * @vhost: struct ibmvfc_host to send command
1930 * @port_id: port ID to send command
1931 *
1932 * Returns:
1933 * 0 on success / other on failure
1934 **/
1935 static int ibmvfc_bsg_plogi(struct ibmvfc_host *vhost, unsigned int port_id)
1936 {
1937 struct ibmvfc_port_login *plogi;
1938 struct ibmvfc_target *tgt;
1939 struct ibmvfc_event *evt;
1940 union ibmvfc_iu rsp_iu;
1941 unsigned long flags;
1942 int rc = 0, issue_login = 1;
1943
1944 ENTER;
1945 spin_lock_irqsave(vhost->host->host_lock, flags);
1946 list_for_each_entry(tgt, &vhost->targets, queue) {
1947 if (tgt->scsi_id == port_id) {
1948 issue_login = 0;
1949 break;
1950 }
1951 }
1952
1953 if (!issue_login)
1954 goto unlock_out;
1955 if (unlikely((rc = ibmvfc_host_chkready(vhost))))
1956 goto unlock_out;
1957
1958 evt = ibmvfc_get_event(&vhost->crq);
1959 ibmvfc_init_event(evt, ibmvfc_sync_completion, IBMVFC_MAD_FORMAT);
1960 plogi = &evt->iu.plogi;
1961 memset(plogi, 0, sizeof(*plogi));
1962 plogi->common.version = cpu_to_be32(1);
1963 plogi->common.opcode = cpu_to_be32(IBMVFC_PORT_LOGIN);
1964 plogi->common.length = cpu_to_be16(sizeof(*plogi));
1965 plogi->scsi_id = cpu_to_be64(port_id);
1966 evt->sync_iu = &rsp_iu;
1967 init_completion(&evt->comp);
1968
1969 rc = ibmvfc_send_event(evt, vhost, default_timeout);
1970 spin_unlock_irqrestore(vhost->host->host_lock, flags);
1971
1972 if (rc)
1973 return -EIO;
1974
1975 wait_for_completion(&evt->comp);
1976
1977 if (rsp_iu.plogi.common.status)
1978 rc = -EIO;
1979
1980 spin_lock_irqsave(vhost->host->host_lock, flags);
1981 ibmvfc_free_event(evt);
1982 unlock_out:
1983 spin_unlock_irqrestore(vhost->host->host_lock, flags);
1984 LEAVE;
1985 return rc;
1986 }
1987
1988 /**
1989 * ibmvfc_bsg_request - Handle a BSG request
1990 * @job: struct bsg_job to be executed
1991 *
1992 * Returns:
1993 * 0 on success / other on failure
1994 **/
1995 static int ibmvfc_bsg_request(struct bsg_job *job)
1996 {
1997 struct ibmvfc_host *vhost = shost_priv(fc_bsg_to_shost(job));
1998 struct fc_rport *rport = fc_bsg_to_rport(job);
1999 struct ibmvfc_passthru_mad *mad;
2000 struct ibmvfc_event *evt;
2001 union ibmvfc_iu rsp_iu;
2002 unsigned long flags, port_id = -1;
2003 struct fc_bsg_request *bsg_request = job->request;
2004 struct fc_bsg_reply *bsg_reply = job->reply;
2005 unsigned int code = bsg_request->msgcode;
2006 int rc = 0, req_seg, rsp_seg, issue_login = 0;
2007 u32 fc_flags, rsp_len;
2008
2009 ENTER;
2010 bsg_reply->reply_payload_rcv_len = 0;
2011 if (rport)
2012 port_id = rport->port_id;
2013
2014 switch (code) {
2015 case FC_BSG_HST_ELS_NOLOGIN:
2016 port_id = (bsg_request->rqst_data.h_els.port_id[0] << 16) |
2017 (bsg_request->rqst_data.h_els.port_id[1] << 8) |
2018 bsg_request->rqst_data.h_els.port_id[2];
2019 fallthrough;
2020 case FC_BSG_RPT_ELS:
2021 fc_flags = IBMVFC_FC_ELS;
2022 break;
2023 case FC_BSG_HST_CT:
2024 issue_login = 1;
2025 port_id = (bsg_request->rqst_data.h_ct.port_id[0] << 16) |
2026 (bsg_request->rqst_data.h_ct.port_id[1] << 8) |
2027 bsg_request->rqst_data.h_ct.port_id[2];
2028 fallthrough;
2029 case FC_BSG_RPT_CT:
2030 fc_flags = IBMVFC_FC_CT_IU;
2031 break;
2032 default:
2033 return -ENOTSUPP;
2034 }
2035
2036 if (port_id == -1)
2037 return -EINVAL;
2038 if (!mutex_trylock(&vhost->passthru_mutex))
2039 return -EBUSY;
2040
2041 job->dd_data = (void *)port_id;
2042 req_seg = dma_map_sg(vhost->dev, job->request_payload.sg_list,
2043 job->request_payload.sg_cnt, DMA_TO_DEVICE);
2044
2045 if (!req_seg) {
2046 mutex_unlock(&vhost->passthru_mutex);
2047 return -ENOMEM;
2048 }
2049
2050 rsp_seg = dma_map_sg(vhost->dev, job->reply_payload.sg_list,
2051 job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
2052
2053 if (!rsp_seg) {
2054 dma_unmap_sg(vhost->dev, job->request_payload.sg_list,
2055 job->request_payload.sg_cnt, DMA_TO_DEVICE);
2056 mutex_unlock(&vhost->passthru_mutex);
2057 return -ENOMEM;
2058 }
2059
2060 if (req_seg > 1 || rsp_seg > 1) {
2061 rc = -EINVAL;
2062 goto out;
2063 }
2064
2065 if (issue_login)
2066 rc = ibmvfc_bsg_plogi(vhost, port_id);
2067
2068 spin_lock_irqsave(vhost->host->host_lock, flags);
2069
2070 if (unlikely(rc || (rport && (rc = fc_remote_port_chkready(rport)))) ||
2071 unlikely((rc = ibmvfc_host_chkready(vhost)))) {
2072 spin_unlock_irqrestore(vhost->host->host_lock, flags);
2073 goto out;
2074 }
2075
2076 evt = ibmvfc_get_event(&vhost->crq);
2077 ibmvfc_init_event(evt, ibmvfc_sync_completion, IBMVFC_MAD_FORMAT);
2078 mad = &evt->iu.passthru;
2079
2080 memset(mad, 0, sizeof(*mad));
2081 mad->common.version = cpu_to_be32(1);
2082 mad->common.opcode = cpu_to_be32(IBMVFC_PASSTHRU);
2083 mad->common.length = cpu_to_be16(sizeof(*mad) - sizeof(mad->fc_iu) - sizeof(mad->iu));
2084
2085 mad->cmd_ioba.va = cpu_to_be64(be64_to_cpu(evt->crq.ioba) +
2086 offsetof(struct ibmvfc_passthru_mad, iu));
2087 mad->cmd_ioba.len = cpu_to_be32(sizeof(mad->iu));
2088
2089 mad->iu.cmd_len = cpu_to_be32(job->request_payload.payload_len);
2090 mad->iu.rsp_len = cpu_to_be32(job->reply_payload.payload_len);
2091 mad->iu.flags = cpu_to_be32(fc_flags);
2092 mad->iu.cancel_key = cpu_to_be32(IBMVFC_PASSTHRU_CANCEL_KEY);
2093
2094 mad->iu.cmd.va = cpu_to_be64(sg_dma_address(job->request_payload.sg_list));
2095 mad->iu.cmd.len = cpu_to_be32(sg_dma_len(job->request_payload.sg_list));
2096 mad->iu.rsp.va = cpu_to_be64(sg_dma_address(job->reply_payload.sg_list));
2097 mad->iu.rsp.len = cpu_to_be32(sg_dma_len(job->reply_payload.sg_list));
2098 mad->iu.scsi_id = cpu_to_be64(port_id);
2099 mad->iu.tag = cpu_to_be64((u64)evt);
2100 rsp_len = be32_to_cpu(mad->iu.rsp.len);
2101
2102 evt->sync_iu = &rsp_iu;
2103 init_completion(&evt->comp);
2104 rc = ibmvfc_send_event(evt, vhost, 0);
2105 spin_unlock_irqrestore(vhost->host->host_lock, flags);
2106
2107 if (rc) {
2108 rc = -EIO;
2109 goto out;
2110 }
2111
2112 wait_for_completion(&evt->comp);
2113
2114 if (rsp_iu.passthru.common.status)
2115 rc = -EIO;
2116 else
2117 bsg_reply->reply_payload_rcv_len = rsp_len;
2118
2119 spin_lock_irqsave(vhost->host->host_lock, flags);
2120 ibmvfc_free_event(evt);
2121 spin_unlock_irqrestore(vhost->host->host_lock, flags);
2122 bsg_reply->result = rc;
2123 bsg_job_done(job, bsg_reply->result,
2124 bsg_reply->reply_payload_rcv_len);
2125 rc = 0;
2126 out:
2127 dma_unmap_sg(vhost->dev, job->request_payload.sg_list,
2128 job->request_payload.sg_cnt, DMA_TO_DEVICE);
2129 dma_unmap_sg(vhost->dev, job->reply_payload.sg_list,
2130 job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
2131 mutex_unlock(&vhost->passthru_mutex);
2132 LEAVE;
2133 return rc;
2134 }
2135
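/*
 * Editorial note (illustrative, not part of the driver): FC destination
 * IDs arrive from the BSG request as three bytes of the 24-bit D_ID.
 * The shifts in ibmvfc_bsg_request() above reassemble them
 * most-significant byte first, so {0x01, 0x02, 0x03} becomes 0x010203.
 * A hypothetical helper form of the same arithmetic:
 */
#if 0
static u32 example_port_id(const u8 id[3])
{
	return (id[0] << 16) | (id[1] << 8) | id[2];
}
#endif
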
2136 /**
2137 * ibmvfc_reset_device - Reset the device with the specified reset type
2138 * @sdev: scsi device to reset
2139 * @type: reset type
2140 * @desc: reset type description for log messages
2141 *
2142 * Returns:
2143 * 0 on success / other on failure
2144 **/
2145 static int ibmvfc_reset_device(struct scsi_device *sdev, int type, char *desc)
2146 {
2147 struct ibmvfc_host *vhost = shost_priv(sdev->host);
2148 struct fc_rport *rport = starget_to_rport(scsi_target(sdev));
2149 struct ibmvfc_cmd *tmf;
2150 struct ibmvfc_event *evt = NULL;
2151 union ibmvfc_iu rsp_iu;
2152 struct ibmvfc_fcp_cmd_iu *iu;
2153 struct ibmvfc_fcp_rsp *fc_rsp = ibmvfc_get_fcp_rsp(vhost, &rsp_iu.cmd);
2154 int rsp_rc = -EBUSY;
2155 unsigned long flags;
2156 int rsp_code = 0;
2157
2158 spin_lock_irqsave(vhost->host->host_lock, flags);
2159 if (vhost->state == IBMVFC_ACTIVE) {
2160 evt = ibmvfc_get_event(&vhost->crq);
2161 ibmvfc_init_event(evt, ibmvfc_sync_completion, IBMVFC_CMD_FORMAT);
2162 tmf = ibmvfc_init_vfc_cmd(evt, sdev);
2163 iu = ibmvfc_get_fcp_iu(vhost, tmf);
2164
2165 tmf->flags = cpu_to_be16((IBMVFC_NO_MEM_DESC | IBMVFC_TMF));
2166 if (ibmvfc_check_caps(vhost, IBMVFC_HANDLE_VF_WWPN))
2167 tmf->target_wwpn = cpu_to_be64(rport->port_name);
2168 iu->tmf_flags = type;
2169 evt->sync_iu = &rsp_iu;
2170
2171 init_completion(&evt->comp);
2172 rsp_rc = ibmvfc_send_event(evt, vhost, default_timeout);
2173 }
2174 spin_unlock_irqrestore(vhost->host->host_lock, flags);
2175
2176 if (rsp_rc != 0) {
2177 sdev_printk(KERN_ERR, sdev, "Failed to send %s reset event. rc=%d\n",
2178 desc, rsp_rc);
2179 return -EIO;
2180 }
2181
2182 sdev_printk(KERN_INFO, sdev, "Resetting %s\n", desc);
2183 wait_for_completion(&evt->comp);
2184
2185 if (rsp_iu.cmd.status)
2186 rsp_code = ibmvfc_get_err_result(vhost, &rsp_iu.cmd);
2187
2188 if (rsp_code) {
2189 if (fc_rsp->flags & FCP_RSP_LEN_VALID)
2190 rsp_code = fc_rsp->data.info.rsp_code;
2191
2192 sdev_printk(KERN_ERR, sdev, "%s reset failed: %s (%x:%x) "
2193 "flags: %x fcp_rsp: %x, scsi_status: %x\n", desc,
2194 ibmvfc_get_cmd_error(be16_to_cpu(rsp_iu.cmd.status), be16_to_cpu(rsp_iu.cmd.error)),
2195 be16_to_cpu(rsp_iu.cmd.status), be16_to_cpu(rsp_iu.cmd.error), fc_rsp->flags, rsp_code,
2196 fc_rsp->scsi_status);
2197 rsp_rc = -EIO;
2198 } else
2199 sdev_printk(KERN_INFO, sdev, "%s reset successful\n", desc);
2200
2201 spin_lock_irqsave(vhost->host->host_lock, flags);
2202 ibmvfc_free_event(evt);
2203 spin_unlock_irqrestore(vhost->host->host_lock, flags);
2204 return rsp_rc;
2205 }
2206
2207 /**
2208 * ibmvfc_match_rport - Match function for specified remote port
2209 * @evt: ibmvfc event struct
2210 * @rport: rport to match
2211 *
2212 * Returns:
2213 * 1 if event matches rport / 0 if event does not match rport
2214 **/
2215 static int ibmvfc_match_rport(struct ibmvfc_event *evt, void *rport)
2216 {
2217 struct fc_rport *cmd_rport;
2218
2219 if (evt->cmnd) {
2220 cmd_rport = starget_to_rport(scsi_target(evt->cmnd->device));
2221 if (cmd_rport == rport)
2222 return 1;
2223 }
2224 return 0;
2225 }
2226
2227 /**
2228 * ibmvfc_match_target - Match function for specified target
2229 * @evt: ibmvfc event struct
2230 * @device: device to match (starget)
2231 *
2232 * Returns:
2233 * 1 if event matches starget / 0 if event does not match starget
2234 **/
2235 static int ibmvfc_match_target(struct ibmvfc_event *evt, void *device)
2236 {
2237 if (evt->cmnd && scsi_target(evt->cmnd->device) == device)
2238 return 1;
2239 return 0;
2240 }
2241
2242 /**
2243 * ibmvfc_match_lun - Match function for specified LUN
2244 * @evt: ibmvfc event struct
2245 * @device: device to match (sdev)
2246 *
2247 * Returns:
2248 * 1 if event matches sdev / 0 if event does not match sdev
2249 **/
2250 static int ibmvfc_match_lun(struct ibmvfc_event *evt, void *device)
2251 {
2252 if (evt->cmnd && evt->cmnd->device == device)
2253 return 1;
2254 return 0;
2255 }
2256
2257 /**
2258 * ibmvfc_wait_for_ops - Wait for ops to complete
2259 * @vhost: ibmvfc host struct
2260 * @device: match data passed to @match (e.g. sdev, starget, rport, or cancel key)
2261 * @match: match function
2262 *
2263 * Returns:
2264 * SUCCESS / FAILED
2265 **/
2266 static int ibmvfc_wait_for_ops(struct ibmvfc_host *vhost, void *device,
2267 int (*match) (struct ibmvfc_event *, void *))
2268 {
2269 struct ibmvfc_event *evt;
2270 DECLARE_COMPLETION_ONSTACK(comp);
2271 int wait;
2272 unsigned long flags;
2273 signed long timeout = IBMVFC_ABORT_WAIT_TIMEOUT * HZ;
2274
2275 ENTER;
2276 do {
2277 wait = 0;
2278 spin_lock_irqsave(&vhost->crq.l_lock, flags);
2279 list_for_each_entry(evt, &vhost->crq.sent, queue_list) {
2280 if (match(evt, device)) {
2281 evt->eh_comp = &comp;
2282 wait++;
2283 }
2284 }
2285 spin_unlock_irqrestore(&vhost->crq.l_lock, flags);
2286
2287 if (wait) {
2288 timeout = wait_for_completion_timeout(&comp, timeout);
2289
2290 if (!timeout) {
2291 wait = 0;
2292 spin_lock_irqsave(&vhost->crq.l_lock, flags);
2293 list_for_each_entry(evt, &vhost->crq.sent, queue_list) {
2294 if (match(evt, device)) {
2295 evt->eh_comp = NULL;
2296 wait++;
2297 }
2298 }
2299 spin_unlock_irqrestore(&vhost->crq.l_lock, flags);
2300 if (wait)
2301 dev_err(vhost->dev, "Timed out waiting for aborted commands\n");
2302 LEAVE;
2303 return wait ? FAILED : SUCCESS;
2304 }
2305 }
2306 } while (wait);
2307
2308 LEAVE;
2309 return SUCCESS;
2310 }
2311
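/*
 * Editorial note (illustrative, not part of the driver): the error
 * handlers below pair ibmvfc_wait_for_ops() with a match callback to
 * block until every outstanding event in a given scope completes, e.g.:
 */
#if 0
	rc = ibmvfc_wait_for_ops(vhost, sdev, ibmvfc_match_lun);	/* one LUN */
	rc = ibmvfc_wait_for_ops(vhost, starget, ibmvfc_match_target);	/* one target */
	rc = ibmvfc_wait_for_ops(vhost, sdev->hostdata, ibmvfc_match_key); /* by cancel key */
#endif
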
2312 /**
2313 * ibmvfc_cancel_all - Cancel all outstanding commands to the device
2314 * @sdev: scsi device to cancel commands
2315 * @type: type of error recovery being performed
2316 *
2317 * This sends a cancel to the VIOS for the specified device. This does
2318 * NOT send any abort to the actual device. That must be done separately.
2319 *
2320 * Returns:
2321 * 0 on success / other on failure
2322 **/
2323 static int ibmvfc_cancel_all(struct scsi_device *sdev, int type)
2324 {
2325 struct ibmvfc_host *vhost = shost_priv(sdev->host);
2326 struct scsi_target *starget = scsi_target(sdev);
2327 struct fc_rport *rport = starget_to_rport(starget);
2328 struct ibmvfc_tmf *tmf;
2329 struct ibmvfc_event *evt, *found_evt;
2330 union ibmvfc_iu rsp;
2331 int rsp_rc = -EBUSY;
2332 unsigned long flags;
2333 u16 status;
2334
2335 ENTER;
2336 found_evt = NULL;
2337 spin_lock_irqsave(vhost->host->host_lock, flags);
2338 spin_lock(&vhost->crq.l_lock);
2339 list_for_each_entry(evt, &vhost->crq.sent, queue_list) {
2340 if (evt->cmnd && evt->cmnd->device == sdev) {
2341 found_evt = evt;
2342 break;
2343 }
2344 }
2345 spin_unlock(&vhost->crq.l_lock);
2346
2347 if (!found_evt) {
2348 if (vhost->log_level > IBMVFC_DEFAULT_LOG_LEVEL)
2349 sdev_printk(KERN_INFO, sdev, "No events found to cancel\n");
2350 spin_unlock_irqrestore(vhost->host->host_lock, flags);
2351 return 0;
2352 }
2353
2354 if (vhost->logged_in) {
2355 evt = ibmvfc_get_event(&vhost->crq);
2356 ibmvfc_init_event(evt, ibmvfc_sync_completion, IBMVFC_MAD_FORMAT);
2357
2358 tmf = &evt->iu.tmf;
2359 memset(tmf, 0, sizeof(*tmf));
2360 if (ibmvfc_check_caps(vhost, IBMVFC_HANDLE_VF_WWPN)) {
2361 tmf->common.version = cpu_to_be32(2);
2362 tmf->target_wwpn = cpu_to_be64(rport->port_name);
2363 } else {
2364 tmf->common.version = cpu_to_be32(1);
2365 }
2366 tmf->common.opcode = cpu_to_be32(IBMVFC_TMF_MAD);
2367 tmf->common.length = cpu_to_be16(sizeof(*tmf));
2368 tmf->scsi_id = cpu_to_be64(rport->port_id);
2369 int_to_scsilun(sdev->lun, &tmf->lun);
2370 if (!ibmvfc_check_caps(vhost, IBMVFC_CAN_SUPPRESS_ABTS))
2371 type &= ~IBMVFC_TMF_SUPPRESS_ABTS;
2372 if (vhost->state == IBMVFC_ACTIVE)
2373 tmf->flags = cpu_to_be32((type | IBMVFC_TMF_LUA_VALID));
2374 else
2375 tmf->flags = cpu_to_be32(((type & IBMVFC_TMF_SUPPRESS_ABTS) | IBMVFC_TMF_LUA_VALID));
2376 tmf->cancel_key = cpu_to_be32((unsigned long)sdev->hostdata);
2377 tmf->my_cancel_key = cpu_to_be32((unsigned long)starget->hostdata);
2378
2379 evt->sync_iu = &rsp;
2380 init_completion(&evt->comp);
2381 rsp_rc = ibmvfc_send_event(evt, vhost, default_timeout);
2382 }
2383
2384 spin_unlock_irqrestore(vhost->host->host_lock, flags);
2385
2386 if (rsp_rc != 0) {
2387 sdev_printk(KERN_ERR, sdev, "Failed to send cancel event. rc=%d\n", rsp_rc);
2388 /* If the send fails, the host adapter is most likely going through
2389 a reset. Return success so the caller will wait for the command
2390 being cancelled to be returned */
2391 return 0;
2392 }
2393
2394 sdev_printk(KERN_INFO, sdev, "Cancelling outstanding commands.\n");
2395
2396 wait_for_completion(&evt->comp);
2397 status = be16_to_cpu(rsp.mad_common.status);
2398 spin_lock_irqsave(vhost->host->host_lock, flags);
2399 ibmvfc_free_event(evt);
2400 spin_unlock_irqrestore(vhost->host->host_lock, flags);
2401
2402 if (status != IBMVFC_MAD_SUCCESS) {
2403 sdev_printk(KERN_WARNING, sdev, "Cancel failed with rc=%x\n", status);
2404 switch (status) {
2405 case IBMVFC_MAD_DRIVER_FAILED:
2406 case IBMVFC_MAD_CRQ_ERROR:
2407 /* Host adapter most likely going through reset, return success so
2408 the caller will wait for the command being cancelled to be returned */
2409 return 0;
2410 default:
2411 return -EIO;
2412 }
2413 }
2414
2415 sdev_printk(KERN_INFO, sdev, "Successfully cancelled outstanding commands\n");
2416 return 0;
2417 }
2418
2419 /**
2420 * ibmvfc_match_key - Match function for specified cancel key
2421 * @evt: ibmvfc event struct
2422 * @key: cancel key to match
2423 *
2424 * Returns:
2425 * 1 if event matches key / 0 if event does not match key
2426 **/
2427 static int ibmvfc_match_key(struct ibmvfc_event *evt, void *key)
2428 {
2429 unsigned long cancel_key = (unsigned long)key;
2430
2431 if (evt->crq.format == IBMVFC_CMD_FORMAT &&
2432 be32_to_cpu(evt->iu.cmd.cancel_key) == cancel_key)
2433 return 1;
2434 return 0;
2435 }
2436
2437 /**
2438 * ibmvfc_match_evt - Match function for specified event
2439 * @evt: ibmvfc event struct
2440 * @match: event to match
2441 *
2442 * Returns:
2443 * 1 if event matches key / 0 if event does not match key
2444 **/
2445 static int ibmvfc_match_evt(struct ibmvfc_event *evt, void *match)
2446 {
2447 if (evt == match)
2448 return 1;
2449 return 0;
2450 }
2451
2452 /**
2453 * ibmvfc_abort_task_set - Abort outstanding commands to the device
2454 * @sdev: scsi device to abort commands
2455 *
2456 * This sends an Abort Task Set to the VIOS for the specified device. This does
2457 * NOT send any cancel to the VIOS. That must be done separately.
2458 *
2459 * Returns:
2460 * 0 on success / other on failure
2461 **/
2462 static int ibmvfc_abort_task_set(struct scsi_device *sdev)
2463 {
2464 struct ibmvfc_host *vhost = shost_priv(sdev->host);
2465 struct fc_rport *rport = starget_to_rport(scsi_target(sdev));
2466 struct ibmvfc_cmd *tmf;
2467 struct ibmvfc_event *evt, *found_evt;
2468 union ibmvfc_iu rsp_iu;
2469 struct ibmvfc_fcp_cmd_iu *iu;
2470 struct ibmvfc_fcp_rsp *fc_rsp = ibmvfc_get_fcp_rsp(vhost, &rsp_iu.cmd);
2471 int rc, rsp_rc = -EBUSY;
2472 unsigned long flags, timeout = IBMVFC_ABORT_TIMEOUT;
2473 int rsp_code = 0;
2474
2475 found_evt = NULL;
2476 spin_lock_irqsave(vhost->host->host_lock, flags);
2477 spin_lock(&vhost->crq.l_lock);
2478 list_for_each_entry(evt, &vhost->crq.sent, queue_list) {
2479 if (evt->cmnd && evt->cmnd->device == sdev) {
2480 found_evt = evt;
2481 break;
2482 }
2483 }
2484 spin_unlock(&vhost->crq.l_lock);
2485
2486 if (!found_evt) {
2487 if (vhost->log_level > IBMVFC_DEFAULT_LOG_LEVEL)
2488 sdev_printk(KERN_INFO, sdev, "No events found to abort\n");
2489 spin_unlock_irqrestore(vhost->host->host_lock, flags);
2490 return 0;
2491 }
2492
2493 if (vhost->state == IBMVFC_ACTIVE) {
2494 evt = ibmvfc_get_event(&vhost->crq);
2495 ibmvfc_init_event(evt, ibmvfc_sync_completion, IBMVFC_CMD_FORMAT);
2496 tmf = ibmvfc_init_vfc_cmd(evt, sdev);
2497 iu = ibmvfc_get_fcp_iu(vhost, tmf);
2498
2499 if (ibmvfc_check_caps(vhost, IBMVFC_HANDLE_VF_WWPN))
2500 tmf->target_wwpn = cpu_to_be64(rport->port_name);
2501 iu->tmf_flags = IBMVFC_ABORT_TASK_SET;
2502 tmf->flags = cpu_to_be16((IBMVFC_NO_MEM_DESC | IBMVFC_TMF));
2503 evt->sync_iu = &rsp_iu;
2504
2505 tmf->correlation = cpu_to_be64((u64)evt);
2506
2507 init_completion(&evt->comp);
2508 rsp_rc = ibmvfc_send_event(evt, vhost, default_timeout);
2509 }
2510
2511 spin_unlock_irqrestore(vhost->host->host_lock, flags);
2512
2513 if (rsp_rc != 0) {
2514 sdev_printk(KERN_ERR, sdev, "Failed to send abort. rc=%d\n", rsp_rc);
2515 return -EIO;
2516 }
2517
2518 sdev_printk(KERN_INFO, sdev, "Aborting outstanding commands\n");
2519 timeout = wait_for_completion_timeout(&evt->comp, timeout);
2520
2521 if (!timeout) {
2522 rc = ibmvfc_cancel_all(sdev, 0);
2523 if (!rc) {
2524 rc = ibmvfc_wait_for_ops(vhost, sdev->hostdata, ibmvfc_match_key);
2525 if (rc == SUCCESS)
2526 rc = 0;
2527 }
2528
2529 if (rc) {
2530 sdev_printk(KERN_INFO, sdev, "Cancel failed, resetting host\n");
2531 ibmvfc_reset_host(vhost);
2532 rsp_rc = -EIO;
2533 rc = ibmvfc_wait_for_ops(vhost, sdev->hostdata, ibmvfc_match_key);
2534
2535 if (rc == SUCCESS)
2536 rsp_rc = 0;
2537
2538 rc = ibmvfc_wait_for_ops(vhost, evt, ibmvfc_match_evt);
2539 if (rc != SUCCESS) {
2540 spin_lock_irqsave(vhost->host->host_lock, flags);
2541 ibmvfc_hard_reset_host(vhost);
2542 spin_unlock_irqrestore(vhost->host->host_lock, flags);
2543 rsp_rc = 0;
2544 }
2545
2546 goto out;
2547 }
2548 }
2549
2550 if (rsp_iu.cmd.status)
2551 rsp_code = ibmvfc_get_err_result(vhost, &rsp_iu.cmd);
2552
2553 if (rsp_code) {
2554 if (fc_rsp->flags & FCP_RSP_LEN_VALID)
2555 rsp_code = fc_rsp->data.info.rsp_code;
2556
2557 sdev_printk(KERN_ERR, sdev, "Abort failed: %s (%x:%x) "
2558 "flags: %x fcp_rsp: %x, scsi_status: %x\n",
2559 ibmvfc_get_cmd_error(be16_to_cpu(rsp_iu.cmd.status), be16_to_cpu(rsp_iu.cmd.error)),
2560 be16_to_cpu(rsp_iu.cmd.status), be16_to_cpu(rsp_iu.cmd.error), fc_rsp->flags, rsp_code,
2561 fc_rsp->scsi_status);
2562 rsp_rc = -EIO;
2563 } else
2564 sdev_printk(KERN_INFO, sdev, "Abort successful\n");
2565
2566 out:
2567 spin_lock_irqsave(vhost->host->host_lock, flags);
2568 ibmvfc_free_event(evt);
2569 spin_unlock_irqrestore(vhost->host->host_lock, flags);
2570 return rsp_rc;
2571 }
2572
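/*
 * Editorial note: when the abort above times out, the function
 * escalates in stages -- cancel all commands at the VIOS, wait for the
 * cancel key, reset the host if that fails, and finally hard reset the
 * host if the aborted event itself still never completes -- waiting via
 * ibmvfc_wait_for_ops() between steps.
 */
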
2573 /**
2574 * ibmvfc_eh_abort_handler - Abort a command
2575 * @cmd: scsi command to abort
2576 *
2577 * Returns:
2578 * SUCCESS / FAST_IO_FAIL / FAILED
2579 **/
2580 static int ibmvfc_eh_abort_handler(struct scsi_cmnd *cmd)
2581 {
2582 struct scsi_device *sdev = cmd->device;
2583 struct ibmvfc_host *vhost = shost_priv(sdev->host);
2584 int cancel_rc, block_rc;
2585 int rc = FAILED;
2586
2587 ENTER;
2588 block_rc = fc_block_scsi_eh(cmd);
2589 ibmvfc_wait_while_resetting(vhost);
2590 if (block_rc != FAST_IO_FAIL) {
2591 cancel_rc = ibmvfc_cancel_all(sdev, IBMVFC_TMF_ABORT_TASK_SET);
2592 ibmvfc_abort_task_set(sdev);
2593 } else
2594 cancel_rc = ibmvfc_cancel_all(sdev, IBMVFC_TMF_SUPPRESS_ABTS);
2595
2596 if (!cancel_rc)
2597 rc = ibmvfc_wait_for_ops(vhost, sdev, ibmvfc_match_lun);
2598
2599 if (block_rc == FAST_IO_FAIL && rc != FAILED)
2600 rc = FAST_IO_FAIL;
2601
2602 LEAVE;
2603 return rc;
2604 }
2605
2606 /**
2607 * ibmvfc_eh_device_reset_handler - Reset a single LUN
2608 * @cmd: scsi command struct
2609 *
2610 * Returns:
2611 * SUCCESS / FAST_IO_FAIL / FAILED
2612 **/
2613 static int ibmvfc_eh_device_reset_handler(struct scsi_cmnd *cmd)
2614 {
2615 struct scsi_device *sdev = cmd->device;
2616 struct ibmvfc_host *vhost = shost_priv(sdev->host);
2617 int cancel_rc, block_rc, reset_rc = 0;
2618 int rc = FAILED;
2619
2620 ENTER;
2621 block_rc = fc_block_scsi_eh(cmd);
2622 ibmvfc_wait_while_resetting(vhost);
2623 if (block_rc != FAST_IO_FAIL) {
2624 cancel_rc = ibmvfc_cancel_all(sdev, IBMVFC_TMF_LUN_RESET);
2625 reset_rc = ibmvfc_reset_device(sdev, IBMVFC_LUN_RESET, "LUN");
2626 } else
2627 cancel_rc = ibmvfc_cancel_all(sdev, IBMVFC_TMF_SUPPRESS_ABTS);
2628
2629 if (!cancel_rc && !reset_rc)
2630 rc = ibmvfc_wait_for_ops(vhost, sdev, ibmvfc_match_lun);
2631
2632 if (block_rc == FAST_IO_FAIL && rc != FAILED)
2633 rc = FAST_IO_FAIL;
2634
2635 LEAVE;
2636 return rc;
2637 }
2638
2639 /**
2640 * ibmvfc_dev_cancel_all_noreset - Device iterated cancel all function
2641 * @sdev: scsi device struct
2642 * @data: return code
2643 *
2644 **/
2645 static void ibmvfc_dev_cancel_all_noreset(struct scsi_device *sdev, void *data)
2646 {
2647 unsigned long *rc = data;
2648 *rc |= ibmvfc_cancel_all(sdev, IBMVFC_TMF_SUPPRESS_ABTS);
2649 }
2650
2651 /**
2652 * ibmvfc_dev_cancel_all_reset - Device iterated cancel all function
2653 * @sdev: scsi device struct
2654 * @data: return code
2655 *
2656 **/
2657 static void ibmvfc_dev_cancel_all_reset(struct scsi_device *sdev, void *data)
2658 {
2659 unsigned long *rc = data;
2660 *rc |= ibmvfc_cancel_all(sdev, IBMVFC_TMF_TGT_RESET);
2661 }
2662
2663 /**
2664 * ibmvfc_eh_target_reset_handler - Reset the target
2665 * @cmd: scsi command struct
2666 *
2667 * Returns:
2668 * SUCCESS / FAST_IO_FAIL / FAILED
2669 **/
2670 static int ibmvfc_eh_target_reset_handler(struct scsi_cmnd *cmd)
2671 {
2672 struct scsi_device *sdev = cmd->device;
2673 struct ibmvfc_host *vhost = shost_priv(sdev->host);
2674 struct scsi_target *starget = scsi_target(sdev);
2675 int block_rc;
2676 int reset_rc = 0;
2677 int rc = FAILED;
2678 unsigned long cancel_rc = 0;
2679
2680 ENTER;
2681 block_rc = fc_block_scsi_eh(cmd);
2682 ibmvfc_wait_while_resetting(vhost);
2683 if (block_rc != FAST_IO_FAIL) {
2684 starget_for_each_device(starget, &cancel_rc, ibmvfc_dev_cancel_all_reset);
2685 reset_rc = ibmvfc_reset_device(sdev, IBMVFC_TARGET_RESET, "target");
2686 } else
2687 starget_for_each_device(starget, &cancel_rc, ibmvfc_dev_cancel_all_noreset);
2688
2689 if (!cancel_rc && !reset_rc)
2690 rc = ibmvfc_wait_for_ops(vhost, starget, ibmvfc_match_target);
2691
2692 if (block_rc == FAST_IO_FAIL && rc != FAILED)
2693 rc = FAST_IO_FAIL;
2694
2695 LEAVE;
2696 return rc;
2697 }
2698
2699 /**
2700 * ibmvfc_eh_host_reset_handler - Reset the connection to the server
2701 * @cmd: struct scsi_cmnd having problems
2702 * Returns: SUCCESS / FAILED
2703 **/
2704 static int ibmvfc_eh_host_reset_handler(struct scsi_cmnd *cmd)
2705 {
2706 int rc;
2707 struct ibmvfc_host *vhost = shost_priv(cmd->device->host);
2708
2709 dev_err(vhost->dev, "Resetting connection due to error recovery\n");
2710 rc = ibmvfc_issue_fc_host_lip(vhost->host);
2711
2712 return rc ? FAILED : SUCCESS;
2713 }
2714
2715 /**
2716 * ibmvfc_terminate_rport_io - Terminate all pending I/O to the rport.
2717 * @rport: rport struct
2718 *
2719 * Return value:
2720 * none
2721 **/
2722 static void ibmvfc_terminate_rport_io(struct fc_rport *rport)
2723 {
2724 struct Scsi_Host *shost = rport_to_shost(rport);
2725 struct ibmvfc_host *vhost = shost_priv(shost);
2726 struct fc_rport *dev_rport;
2727 struct scsi_device *sdev;
2728 struct ibmvfc_target *tgt;
2729 unsigned long rc, flags;
2730 unsigned int found;
2731
2732 ENTER;
2733 shost_for_each_device(sdev, shost) {
2734 dev_rport = starget_to_rport(scsi_target(sdev));
2735 if (dev_rport != rport)
2736 continue;
2737 ibmvfc_cancel_all(sdev, IBMVFC_TMF_SUPPRESS_ABTS);
2738 }
2739
2740 rc = ibmvfc_wait_for_ops(vhost, rport, ibmvfc_match_rport);
2741
2742 if (rc == FAILED)
2743 ibmvfc_issue_fc_host_lip(shost);
2744
2745 spin_lock_irqsave(shost->host_lock, flags);
2746 found = 0;
2747 list_for_each_entry(tgt, &vhost->targets, queue) {
2748 if (tgt->scsi_id == rport->port_id) {
2749 found++;
2750 break;
2751 }
2752 }
2753
2754 if (found && tgt->action == IBMVFC_TGT_ACTION_LOGOUT_DELETED_RPORT) {
2755 /*
2756 * If we get here, that means we previously attempted to send
2757 * an implicit logout to the target but it failed, most likely
2758 * due to I/O being pending, so we need to send it again
2759 */
2760 ibmvfc_del_tgt(tgt);
2761 ibmvfc_reinit_host(vhost);
2762 }
2763
2764 spin_unlock_irqrestore(shost->host_lock, flags);
2765 LEAVE;
2766 }
2767
2768 static const struct ibmvfc_async_desc ae_desc [] = {
2769 { "PLOGI", IBMVFC_AE_ELS_PLOGI, IBMVFC_DEFAULT_LOG_LEVEL + 1 },
2770 { "LOGO", IBMVFC_AE_ELS_LOGO, IBMVFC_DEFAULT_LOG_LEVEL + 1 },
2771 { "PRLO", IBMVFC_AE_ELS_PRLO, IBMVFC_DEFAULT_LOG_LEVEL + 1 },
2772 { "N-Port SCN", IBMVFC_AE_SCN_NPORT, IBMVFC_DEFAULT_LOG_LEVEL + 1 },
2773 { "Group SCN", IBMVFC_AE_SCN_GROUP, IBMVFC_DEFAULT_LOG_LEVEL + 1 },
2774 { "Domain SCN", IBMVFC_AE_SCN_DOMAIN, IBMVFC_DEFAULT_LOG_LEVEL },
2775 { "Fabric SCN", IBMVFC_AE_SCN_FABRIC, IBMVFC_DEFAULT_LOG_LEVEL },
2776 { "Link Up", IBMVFC_AE_LINK_UP, IBMVFC_DEFAULT_LOG_LEVEL },
2777 { "Link Down", IBMVFC_AE_LINK_DOWN, IBMVFC_DEFAULT_LOG_LEVEL },
2778 { "Link Dead", IBMVFC_AE_LINK_DEAD, IBMVFC_DEFAULT_LOG_LEVEL },
2779 { "Halt", IBMVFC_AE_HALT, IBMVFC_DEFAULT_LOG_LEVEL },
2780 { "Resume", IBMVFC_AE_RESUME, IBMVFC_DEFAULT_LOG_LEVEL },
2781 { "Adapter Failed", IBMVFC_AE_ADAPTER_FAILED, IBMVFC_DEFAULT_LOG_LEVEL },
2782 };
2783
2784 static const struct ibmvfc_async_desc unknown_ae = {
2785 "Unknown async", 0, IBMVFC_DEFAULT_LOG_LEVEL
2786 };
2787
2788 /**
2789 * ibmvfc_get_ae_desc - Get text description for async event
2790 * @ae: async event
2791 *
2792 **/
2793 static const struct ibmvfc_async_desc *ibmvfc_get_ae_desc(u64 ae)
2794 {
2795 int i;
2796
2797 for (i = 0; i < ARRAY_SIZE(ae_desc); i++)
2798 if (ae_desc[i].ae == ae)
2799 return &ae_desc[i];
2800
2801 return &unknown_ae;
2802 }
2803
2804 static const struct {
2805 enum ibmvfc_ae_link_state state;
2806 const char *desc;
2807 } link_desc [] = {
2808 { IBMVFC_AE_LS_LINK_UP, " link up" },
2809 { IBMVFC_AE_LS_LINK_BOUNCED, " link bounced" },
2810 { IBMVFC_AE_LS_LINK_DOWN, " link down" },
2811 { IBMVFC_AE_LS_LINK_DEAD, " link dead" },
2812 };
2813
2814 /**
2815 * ibmvfc_get_link_state - Get text description for link state
2816 * @state: link state
2817 *
2818 **/
2819 static const char *ibmvfc_get_link_state(enum ibmvfc_ae_link_state state)
2820 {
2821 int i;
2822
2823 for (i = 0; i < ARRAY_SIZE(link_desc); i++)
2824 if (link_desc[i].state == state)
2825 return link_desc[i].desc;
2826
2827 return "";
2828 }
2829
2830 /**
2831 * ibmvfc_handle_async - Handle an async event from the adapter
2832 * @crq: crq to process
2833 * @vhost: ibmvfc host struct
2834 *
2835 **/
2836 static void ibmvfc_handle_async(struct ibmvfc_async_crq *crq,
2837 struct ibmvfc_host *vhost)
2838 {
2839 const struct ibmvfc_async_desc *desc = ibmvfc_get_ae_desc(be64_to_cpu(crq->event));
2840 struct ibmvfc_target *tgt;
2841
2842 ibmvfc_log(vhost, desc->log_level, "%s event received. scsi_id: %llx, wwpn: %llx,"
2843 " node_name: %llx%s\n", desc->desc, be64_to_cpu(crq->scsi_id),
2844 be64_to_cpu(crq->wwpn), be64_to_cpu(crq->node_name),
2845 ibmvfc_get_link_state(crq->link_state));
2846
2847 switch (be64_to_cpu(crq->event)) {
2848 case IBMVFC_AE_RESUME:
2849 switch (crq->link_state) {
2850 case IBMVFC_AE_LS_LINK_DOWN:
2851 ibmvfc_link_down(vhost, IBMVFC_LINK_DOWN);
2852 break;
2853 case IBMVFC_AE_LS_LINK_DEAD:
2854 ibmvfc_link_down(vhost, IBMVFC_LINK_DEAD);
2855 break;
2856 case IBMVFC_AE_LS_LINK_UP:
2857 case IBMVFC_AE_LS_LINK_BOUNCED:
2858 default:
2859 vhost->events_to_log |= IBMVFC_AE_LINKUP;
2860 vhost->delay_init = 1;
2861 __ibmvfc_reset_host(vhost);
2862 break;
2863 }
2864
2865 break;
2866 case IBMVFC_AE_LINK_UP:
2867 vhost->events_to_log |= IBMVFC_AE_LINKUP;
2868 vhost->delay_init = 1;
2869 __ibmvfc_reset_host(vhost);
2870 break;
2871 case IBMVFC_AE_SCN_FABRIC:
2872 case IBMVFC_AE_SCN_DOMAIN:
2873 vhost->events_to_log |= IBMVFC_AE_RSCN;
2874 if (vhost->state < IBMVFC_HALTED) {
2875 vhost->delay_init = 1;
2876 __ibmvfc_reset_host(vhost);
2877 }
2878 break;
2879 case IBMVFC_AE_SCN_NPORT:
2880 case IBMVFC_AE_SCN_GROUP:
2881 vhost->events_to_log |= IBMVFC_AE_RSCN;
2882 ibmvfc_reinit_host(vhost);
2883 break;
2884 case IBMVFC_AE_ELS_LOGO:
2885 case IBMVFC_AE_ELS_PRLO:
2886 case IBMVFC_AE_ELS_PLOGI:
2887 list_for_each_entry(tgt, &vhost->targets, queue) {
2888 if (!crq->scsi_id && !crq->wwpn && !crq->node_name)
2889 break;
2890 if (crq->scsi_id && cpu_to_be64(tgt->scsi_id) != crq->scsi_id)
2891 continue;
2892 if (crq->wwpn && cpu_to_be64(tgt->ids.port_name) != crq->wwpn)
2893 continue;
2894 if (crq->node_name && cpu_to_be64(tgt->ids.node_name) != crq->node_name)
2895 continue;
2896 if (tgt->need_login && be64_to_cpu(crq->event) == IBMVFC_AE_ELS_LOGO)
2897 tgt->logo_rcvd = 1;
2898 if (!tgt->need_login || be64_to_cpu(crq->event) == IBMVFC_AE_ELS_PLOGI) {
2899 ibmvfc_del_tgt(tgt);
2900 ibmvfc_reinit_host(vhost);
2901 }
2902 }
2903 break;
2904 case IBMVFC_AE_LINK_DOWN:
2905 case IBMVFC_AE_ADAPTER_FAILED:
2906 ibmvfc_link_down(vhost, IBMVFC_LINK_DOWN);
2907 break;
2908 case IBMVFC_AE_LINK_DEAD:
2909 ibmvfc_link_down(vhost, IBMVFC_LINK_DEAD);
2910 break;
2911 case IBMVFC_AE_HALT:
2912 ibmvfc_link_down(vhost, IBMVFC_HALTED);
2913 break;
2914 default:
2915 dev_err(vhost->dev, "Unknown async event received: %lld\n", be64_to_cpu(crq->event));
2916 break;
2917 }
2918 }
2919
2920 /**
2921 * ibmvfc_handle_crq - Handles and frees received events in the CRQ
2922 * @crq: Command/Response queue
2923 * @vhost: ibmvfc host struct
2924 * @evt_doneq: Event done queue
2925 **/
2926 static void ibmvfc_handle_crq(struct ibmvfc_crq *crq, struct ibmvfc_host *vhost,
2927 struct list_head *evt_doneq)
2928 {
2929 long rc;
2930 struct ibmvfc_event *evt = (struct ibmvfc_event *)be64_to_cpu(crq->ioba);
2931
2932 switch (crq->valid) {
2933 case IBMVFC_CRQ_INIT_RSP:
2934 switch (crq->format) {
2935 case IBMVFC_CRQ_INIT:
2936 dev_info(vhost->dev, "Partner initialized\n");
2937 /* Send back a response */
2938 rc = ibmvfc_send_crq_init_complete(vhost);
2939 if (rc == 0)
2940 ibmvfc_init_host(vhost);
2941 else
2942 dev_err(vhost->dev, "Unable to send init rsp. rc=%ld\n", rc);
2943 break;
2944 case IBMVFC_CRQ_INIT_COMPLETE:
2945 dev_info(vhost->dev, "Partner initialization complete\n");
2946 ibmvfc_init_host(vhost);
2947 break;
2948 default:
2949 dev_err(vhost->dev, "Unknown crq message type: %d\n", crq->format);
2950 }
2951 return;
2952 case IBMVFC_CRQ_XPORT_EVENT:
2953 vhost->state = IBMVFC_NO_CRQ;
2954 vhost->logged_in = 0;
2955 ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_NONE);
2956 if (crq->format == IBMVFC_PARTITION_MIGRATED) {
2957 /* We need to re-setup the interpartition connection */
2958 dev_info(vhost->dev, "Partition migrated, Re-enabling adapter\n");
2959 vhost->client_migrated = 1;
2960 ibmvfc_purge_requests(vhost, DID_REQUEUE);
2961 ibmvfc_link_down(vhost, IBMVFC_LINK_DOWN);
2962 ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_REENABLE);
2963 } else if (crq->format == IBMVFC_PARTNER_FAILED || crq->format == IBMVFC_PARTNER_DEREGISTER) {
2964 dev_err(vhost->dev, "Host partner adapter deregistered or failed (rc=%d)\n", crq->format);
2965 ibmvfc_purge_requests(vhost, DID_ERROR);
2966 ibmvfc_link_down(vhost, IBMVFC_LINK_DOWN);
2967 ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_RESET);
2968 } else {
2969 dev_err(vhost->dev, "Received unknown transport event from partner (rc=%d)\n", crq->format);
2970 }
2971 return;
2972 case IBMVFC_CRQ_CMD_RSP:
2973 break;
2974 default:
2975 dev_err(vhost->dev, "Got an invalid message type 0x%02x\n", crq->valid);
2976 return;
2977 }
2978
2979 if (crq->format == IBMVFC_ASYNC_EVENT)
2980 return;
2981
2982 /* The only kind of payload CRQs we should get are responses to
2983 * things we send. Make sure this response is to something we
2984 * actually sent.
2985 */
2986 if (unlikely(!ibmvfc_valid_event(&vhost->crq.evt_pool, evt))) {
2987 dev_err(vhost->dev, "Returned correlation_token 0x%08llx is invalid!\n",
2988 crq->ioba);
2989 return;
2990 }
2991
2992 if (unlikely(atomic_read(&evt->free))) {
2993 dev_err(vhost->dev, "Received duplicate correlation_token 0x%08llx!\n",
2994 crq->ioba);
2995 return;
2996 }
2997
2998 spin_lock(&evt->queue->l_lock);
2999 list_move_tail(&evt->queue_list, evt_doneq);
3000 spin_unlock(&evt->queue->l_lock);
3001 }
3002
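/*
 * Editorial note: for command responses, the ioba field of the CRQ
 * entry carries back the correlation token the driver placed there at
 * submission time -- the event pointer itself -- so the two sanity
 * checks above (event-pool membership and the free flag) are what stand
 * between a corrupted token and a use-after-free.
 */
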
3003 /**
3004 * ibmvfc_scan_finished - Check if the device scan is done.
3005 * @shost: scsi host struct
3006 * @time: current elapsed time
3007 *
3008 * Returns:
3009 * 0 if scan is not done / 1 if scan is done
3010 **/
3011 static int ibmvfc_scan_finished(struct Scsi_Host *shost, unsigned long time)
3012 {
3013 unsigned long flags;
3014 struct ibmvfc_host *vhost = shost_priv(shost);
3015 int done = 0;
3016
3017 spin_lock_irqsave(shost->host_lock, flags);
3018 if (time >= (init_timeout * HZ)) {
3019 dev_info(vhost->dev, "Scan taking longer than %d seconds, "
3020 "continuing initialization\n", init_timeout);
3021 done = 1;
3022 }
3023
3024 if (vhost->scan_complete)
3025 done = 1;
3026 spin_unlock_irqrestore(shost->host_lock, flags);
3027 return done;
3028 }
3029
3030 /**
3031 * ibmvfc_slave_alloc - Setup the device's task set value
3032 * @sdev: struct scsi_device device to configure
3033 *
3034 * Set the device's task set value so that error handling works as
3035 * expected.
3036 *
3037 * Returns:
3038 * 0 on success / -ENXIO if device does not exist
3039 **/
3040 static int ibmvfc_slave_alloc(struct scsi_device *sdev)
3041 {
3042 struct Scsi_Host *shost = sdev->host;
3043 struct fc_rport *rport = starget_to_rport(scsi_target(sdev));
3044 struct ibmvfc_host *vhost = shost_priv(shost);
3045 unsigned long flags = 0;
3046
3047 if (!rport || fc_remote_port_chkready(rport))
3048 return -ENXIO;
3049
3050 spin_lock_irqsave(shost->host_lock, flags);
3051 sdev->hostdata = (void *)(unsigned long)vhost->task_set++;
3052 spin_unlock_irqrestore(shost->host_lock, flags);
3053 return 0;
3054 }
3055
3056 /**
3057 * ibmvfc_target_alloc - Setup the target's task set value
3058 * @starget: struct scsi_target
3059 *
3060 * Set the target's task set value so that error handling works as
3061 * expected.
3062 *
3063 * Returns:
3064 * 0 on success / -ENXIO if device does not exist
3065 **/
3066 static int ibmvfc_target_alloc(struct scsi_target *starget)
3067 {
3068 struct Scsi_Host *shost = dev_to_shost(starget->dev.parent);
3069 struct ibmvfc_host *vhost = shost_priv(shost);
3070 unsigned long flags = 0;
3071
3072 spin_lock_irqsave(shost->host_lock, flags);
3073 starget->hostdata = (void *)(unsigned long)vhost->task_set++;
3074 spin_unlock_irqrestore(shost->host_lock, flags);
3075 return 0;
3076 }
3077
3078 /**
3079 * ibmvfc_slave_configure - Configure the device
3080 * @sdev: struct scsi_device device to configure
3081 *
3082 * Enable allow_restart for a device if it is a disk. Adjust the
3083 * queue_depth here also.
3084 *
3085 * Returns:
3086 * 0
3087 **/
3088 static int ibmvfc_slave_configure(struct scsi_device *sdev)
3089 {
3090 struct Scsi_Host *shost = sdev->host;
3091 unsigned long flags = 0;
3092
3093 spin_lock_irqsave(shost->host_lock, flags);
3094 if (sdev->type == TYPE_DISK)
3095 sdev->allow_restart = 1;
3096 spin_unlock_irqrestore(shost->host_lock, flags);
3097 return 0;
3098 }
3099
3100 /**
3101 * ibmvfc_change_queue_depth - Change the device's queue depth
3102 * @sdev: scsi device struct
3103 * @qdepth: depth to set
3105 *
3106 * Return value:
3107 * actual depth set
3108 **/
3109 static int ibmvfc_change_queue_depth(struct scsi_device *sdev, int qdepth)
3110 {
3111 if (qdepth > IBMVFC_MAX_CMDS_PER_LUN)
3112 qdepth = IBMVFC_MAX_CMDS_PER_LUN;
3113
3114 return scsi_change_queue_depth(sdev, qdepth);
3115 }
3116
3117 static ssize_t ibmvfc_show_host_partition_name(struct device *dev,
3118 struct device_attribute *attr, char *buf)
3119 {
3120 struct Scsi_Host *shost = class_to_shost(dev);
3121 struct ibmvfc_host *vhost = shost_priv(shost);
3122
3123 return snprintf(buf, PAGE_SIZE, "%s\n",
3124 vhost->login_buf->resp.partition_name);
3125 }
3126
3127 static ssize_t ibmvfc_show_host_device_name(struct device *dev,
3128 struct device_attribute *attr, char *buf)
3129 {
3130 struct Scsi_Host *shost = class_to_shost(dev);
3131 struct ibmvfc_host *vhost = shost_priv(shost);
3132
3133 return snprintf(buf, PAGE_SIZE, "%s\n",
3134 vhost->login_buf->resp.device_name);
3135 }
3136
3137 static ssize_t ibmvfc_show_host_loc_code(struct device *dev,
3138 struct device_attribute *attr, char *buf)
3139 {
3140 struct Scsi_Host *shost = class_to_shost(dev);
3141 struct ibmvfc_host *vhost = shost_priv(shost);
3142
3143 return snprintf(buf, PAGE_SIZE, "%s\n",
3144 vhost->login_buf->resp.port_loc_code);
3145 }
3146
3147 static ssize_t ibmvfc_show_host_drc_name(struct device *dev,
3148 struct device_attribute *attr, char *buf)
3149 {
3150 struct Scsi_Host *shost = class_to_shost(dev);
3151 struct ibmvfc_host *vhost = shost_priv(shost);
3152
3153 return snprintf(buf, PAGE_SIZE, "%s\n",
3154 vhost->login_buf->resp.drc_name);
3155 }
3156
3157 static ssize_t ibmvfc_show_host_npiv_version(struct device *dev,
3158 struct device_attribute *attr, char *buf)
3159 {
3160 struct Scsi_Host *shost = class_to_shost(dev);
3161 struct ibmvfc_host *vhost = shost_priv(shost);
3162 return snprintf(buf, PAGE_SIZE, "%d\n", be32_to_cpu(vhost->login_buf->resp.version));
3163 }
3164
3165 static ssize_t ibmvfc_show_host_capabilities(struct device *dev,
3166 struct device_attribute *attr, char *buf)
3167 {
3168 struct Scsi_Host *shost = class_to_shost(dev);
3169 struct ibmvfc_host *vhost = shost_priv(shost);
3170 return snprintf(buf, PAGE_SIZE, "%llx\n", be64_to_cpu(vhost->login_buf->resp.capabilities));
3171 }
3172
3173 /**
3174 * ibmvfc_show_log_level - Show the adapter's error logging level
3175 * @dev: class device struct
3176 * @buf: buffer
3177 *
3178 * Return value:
3179 * number of bytes printed to buffer
3180 **/
3181 static ssize_t ibmvfc_show_log_level(struct device *dev,
3182 struct device_attribute *attr, char *buf)
3183 {
3184 struct Scsi_Host *shost = class_to_shost(dev);
3185 struct ibmvfc_host *vhost = shost_priv(shost);
3186 unsigned long flags = 0;
3187 int len;
3188
3189 spin_lock_irqsave(shost->host_lock, flags);
3190 len = snprintf(buf, PAGE_SIZE, "%d\n", vhost->log_level);
3191 spin_unlock_irqrestore(shost->host_lock, flags);
3192 return len;
3193 }
3194
3195 /**
3196 * ibmvfc_store_log_level - Change the adapter's error logging level
3197 * @dev: class device struct
3198 * @buf: buffer
3199 *
3200 * Return value:
3201 * number of bytes consumed from the buffer
3202 **/
3203 static ssize_t ibmvfc_store_log_level(struct device *dev,
3204 struct device_attribute *attr,
3205 const char *buf, size_t count)
3206 {
3207 struct Scsi_Host *shost = class_to_shost(dev);
3208 struct ibmvfc_host *vhost = shost_priv(shost);
3209 unsigned long flags = 0;
3210
3211 spin_lock_irqsave(shost->host_lock, flags);
3212 vhost->log_level = simple_strtoul(buf, NULL, 10);
3213 spin_unlock_irqrestore(shost->host_lock, flags);
3214 return strlen(buf);
3215 }
3216
3217 static DEVICE_ATTR(partition_name, S_IRUGO, ibmvfc_show_host_partition_name, NULL);
3218 static DEVICE_ATTR(device_name, S_IRUGO, ibmvfc_show_host_device_name, NULL);
3219 static DEVICE_ATTR(port_loc_code, S_IRUGO, ibmvfc_show_host_loc_code, NULL);
3220 static DEVICE_ATTR(drc_name, S_IRUGO, ibmvfc_show_host_drc_name, NULL);
3221 static DEVICE_ATTR(npiv_version, S_IRUGO, ibmvfc_show_host_npiv_version, NULL);
3222 static DEVICE_ATTR(capabilities, S_IRUGO, ibmvfc_show_host_capabilities, NULL);
3223 static DEVICE_ATTR(log_level, S_IRUGO | S_IWUSR,
3224 ibmvfc_show_log_level, ibmvfc_store_log_level);
3225
3226 #ifdef CONFIG_SCSI_IBMVFC_TRACE
3227 /**
3228 * ibmvfc_read_trace - Dump the adapter trace
3229 * @filp: open sysfs file
3230 * @kobj: kobject struct
3231 * @bin_attr: bin_attribute struct
3232 * @buf: buffer
3233 * @off: offset
3234 * @count: buffer size
3235 *
3236 * Return value:
3237 * number of bytes printed to buffer
3238 **/
3239 static ssize_t ibmvfc_read_trace(struct file *filp, struct kobject *kobj,
3240 struct bin_attribute *bin_attr,
3241 char *buf, loff_t off, size_t count)
3242 {
3243 struct device *dev = container_of(kobj, struct device, kobj);
3244 struct Scsi_Host *shost = class_to_shost(dev);
3245 struct ibmvfc_host *vhost = shost_priv(shost);
3246 unsigned long flags = 0;
3247 int size = IBMVFC_TRACE_SIZE;
3248 char *src = (char *)vhost->trace;
3249
3250 if (off > size)
3251 return 0;
3252 if (off + count > size) {
3253 size -= off;
3254 count = size;
3255 }
3256
3257 spin_lock_irqsave(shost->host_lock, flags);
3258 memcpy(buf, &src[off], count);
3259 spin_unlock_irqrestore(shost->host_lock, flags);
3260 return count;
3261 }
3262
3263 static struct bin_attribute ibmvfc_trace_attr = {
3264 .attr = {
3265 .name = "trace",
3266 .mode = S_IRUGO,
3267 },
3268 .size = 0,
3269 .read = ibmvfc_read_trace,
3270 };
3271 #endif
3272
3273 static struct device_attribute *ibmvfc_attrs[] = {
3274 &dev_attr_partition_name,
3275 &dev_attr_device_name,
3276 &dev_attr_port_loc_code,
3277 &dev_attr_drc_name,
3278 &dev_attr_npiv_version,
3279 &dev_attr_capabilities,
3280 &dev_attr_log_level,
3281 NULL
3282 };
3283
3284 static struct scsi_host_template driver_template = {
3285 .module = THIS_MODULE,
3286 .name = "IBM POWER Virtual FC Adapter",
3287 .proc_name = IBMVFC_NAME,
3288 .queuecommand = ibmvfc_queuecommand,
3289 .eh_timed_out = fc_eh_timed_out,
3290 .eh_abort_handler = ibmvfc_eh_abort_handler,
3291 .eh_device_reset_handler = ibmvfc_eh_device_reset_handler,
3292 .eh_target_reset_handler = ibmvfc_eh_target_reset_handler,
3293 .eh_host_reset_handler = ibmvfc_eh_host_reset_handler,
3294 .slave_alloc = ibmvfc_slave_alloc,
3295 .slave_configure = ibmvfc_slave_configure,
3296 .target_alloc = ibmvfc_target_alloc,
3297 .scan_finished = ibmvfc_scan_finished,
3298 .change_queue_depth = ibmvfc_change_queue_depth,
3299 .cmd_per_lun = 16,
3300 .can_queue = IBMVFC_MAX_REQUESTS_DEFAULT,
3301 .this_id = -1,
3302 .sg_tablesize = SG_ALL,
3303 .max_sectors = IBMVFC_MAX_SECTORS,
3304 .shost_attrs = ibmvfc_attrs,
3305 .track_queue_depth = 1,
3306 .host_tagset = 1,
3307 };
3308
3309 /**
3310 * ibmvfc_next_async_crq - Returns the next entry in async queue
3311 * @vhost: ibmvfc host struct
3312 *
3313 * Returns:
3314 * Pointer to next entry in queue / NULL if empty
3315 **/
3316 static struct ibmvfc_async_crq *ibmvfc_next_async_crq(struct ibmvfc_host *vhost)
3317 {
3318 struct ibmvfc_queue *async_crq = &vhost->async_crq;
3319 struct ibmvfc_async_crq *crq;
3320
3321 crq = &async_crq->msgs.async[async_crq->cur];
3322 if (crq->valid & 0x80) {
3323 if (++async_crq->cur == async_crq->size)
3324 async_crq->cur = 0;
3325 rmb();
3326 } else
3327 crq = NULL;
3328
3329 return crq;
3330 }
3331
3332 /**
3333 * ibmvfc_next_crq - Returns the next entry in message queue
3334 * @vhost: ibmvfc host struct
3335 *
3336 * Returns:
3337 * Pointer to next entry in queue / NULL if empty
3338 **/
3339 static struct ibmvfc_crq *ibmvfc_next_crq(struct ibmvfc_host *vhost)
3340 {
3341 struct ibmvfc_queue *queue = &vhost->crq;
3342 struct ibmvfc_crq *crq;
3343
3344 crq = &queue->msgs.crq[queue->cur];
3345 if (crq->valid & 0x80) {
3346 if (++queue->cur == queue->size)
3347 queue->cur = 0;
3348 rmb();
3349 } else
3350 crq = NULL;
3351
3352 return crq;
3353 }
3354
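/*
 * Editorial note (illustrative, not part of the driver): both queue
 * walkers above implement the usual pSeries CRQ consumer. The top bit
 * (0x80) of an entry's first byte is set when a message has been
 * delivered into the slot, the cursor wraps modulo the ring size, and
 * rmb() keeps the payload from being read before the valid flag. A
 * consumer loop over a hypothetical process() handler then mirrors the
 * tasklet below:
 */
#if 0
	while ((crq = ibmvfc_next_crq(vhost)) != NULL) {
		process(crq);		/* hypothetical handler */
		crq->valid = 0;		/* return the slot */
		wmb();
	}
#endif
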
3355 /**
3356 * ibmvfc_interrupt - Interrupt handler
3357 * @irq: number of irq to handle, not used
3358 * @dev_instance: ibmvfc_host that received interrupt
3359 *
3360 * Returns:
3361 * IRQ_HANDLED
3362 **/
3363 static irqreturn_t ibmvfc_interrupt(int irq, void *dev_instance)
3364 {
3365 struct ibmvfc_host *vhost = (struct ibmvfc_host *)dev_instance;
3366 unsigned long flags;
3367
3368 spin_lock_irqsave(vhost->host->host_lock, flags);
3369 vio_disable_interrupts(to_vio_dev(vhost->dev));
3370 tasklet_schedule(&vhost->tasklet);
3371 spin_unlock_irqrestore(vhost->host->host_lock, flags);
3372 return IRQ_HANDLED;
3373 }
3374
3375 /**
3376 * ibmvfc_tasklet - Interrupt handler tasklet
3377 * @data: ibmvfc host struct
3378 *
3379 * Returns:
3380 * Nothing
3381 **/
3382 static void ibmvfc_tasklet(void *data)
3383 {
3384 struct ibmvfc_host *vhost = data;
3385 struct vio_dev *vdev = to_vio_dev(vhost->dev);
3386 struct ibmvfc_crq *crq;
3387 struct ibmvfc_async_crq *async;
3388 struct ibmvfc_event *evt, *temp;
3389 unsigned long flags;
3390 int done = 0;
3391 LIST_HEAD(evt_doneq);
3392
3393 spin_lock_irqsave(vhost->host->host_lock, flags);
3394 spin_lock(vhost->crq.q_lock);
3395 while (!done) {
3396 /* Pull all the valid messages off the async CRQ */
3397 while ((async = ibmvfc_next_async_crq(vhost)) != NULL) {
3398 ibmvfc_handle_async(async, vhost);
3399 async->valid = 0;
3400 wmb();
3401 }
3402
3403 /* Pull all the valid messages off the CRQ */
3404 while ((crq = ibmvfc_next_crq(vhost)) != NULL) {
3405 ibmvfc_handle_crq(crq, vhost, &evt_doneq);
3406 crq->valid = 0;
3407 wmb();
3408 }
3409
3410 vio_enable_interrupts(vdev);
3411 if ((async = ibmvfc_next_async_crq(vhost)) != NULL) {
3412 vio_disable_interrupts(vdev);
3413 ibmvfc_handle_async(async, vhost);
3414 async->valid = 0;
3415 wmb();
3416 } else if ((crq = ibmvfc_next_crq(vhost)) != NULL) {
3417 vio_disable_interrupts(vdev);
3418 ibmvfc_handle_crq(crq, vhost, &evt_doneq);
3419 crq->valid = 0;
3420 wmb();
3421 } else
3422 done = 1;
3423 }
3424
3425 spin_unlock(vhost->crq.q_lock);
3426 spin_unlock_irqrestore(vhost->host->host_lock, flags);
3427
3428 list_for_each_entry_safe(evt, temp, &evt_doneq, queue_list) {
3429 del_timer(&evt->timer);
3430 list_del(&evt->queue_list);
3431 ibmvfc_trc_end(evt);
3432 evt->done(evt);
3433 }
3434 }
3435
3436 /**
3437 * ibmvfc_init_tgt - Set the next init job step for the target
3438 * @tgt: ibmvfc target struct
3439 * @job_step: job step to perform
3440 *
3441 **/
3442 static void ibmvfc_init_tgt(struct ibmvfc_target *tgt,
3443 void (*job_step) (struct ibmvfc_target *))
3444 {
3445 if (!ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_INIT))
3446 tgt->job_step = job_step;
3447 wake_up(&tgt->vhost->work_wait_q);
3448 }
3449
3450 /**
3451 * ibmvfc_retry_tgt_init - Attempt to retry a step in target initialization
3452 * @tgt: ibmvfc target struct
3453 * @job_step: initialization job step
3454 *
3455 * Returns: 1 if step will be retried / 0 if not
3456 *
3457 **/
3458 static int ibmvfc_retry_tgt_init(struct ibmvfc_target *tgt,
3459 void (*job_step) (struct ibmvfc_target *))
3460 {
3461 if (++tgt->init_retries > IBMVFC_MAX_TGT_INIT_RETRIES) {
3462 ibmvfc_del_tgt(tgt);
3463 wake_up(&tgt->vhost->work_wait_q);
3464 return 0;
3465 } else
3466 ibmvfc_init_tgt(tgt, job_step);
3467 return 1;
3468 }
3469
3470 /* Defined in FC-LS */
3471 static const struct {
3472 int code;
3473 int retry;
3474 int logged_in;
3475 } prli_rsp [] = {
3476 { 0, 1, 0 },
3477 { 1, 0, 1 },
3478 { 2, 1, 0 },
3479 { 3, 1, 0 },
3480 { 4, 0, 0 },
3481 { 5, 0, 0 },
3482 { 6, 0, 1 },
3483 { 7, 0, 0 },
3484 { 8, 1, 0 },
3485 };
3486
3487 /**
3488 * ibmvfc_get_prli_rsp - Find PRLI response index
3489 * @flags: PRLI response flags
3490 *
3491 **/
3492 static int ibmvfc_get_prli_rsp(u16 flags)
3493 {
3494 int i;
3495 int code = (flags & 0x0f00) >> 8;
3496
3497 for (i = 0; i < ARRAY_SIZE(prli_rsp); i++)
3498 if (prli_rsp[i].code == code)
3499 return i;
3500
3501 return 0;
3502 }
3503
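/*
 * Editorial note (illustrative, not part of the driver): the PRLI
 * response code lives in bits 11:8 of the service-parameter flags word,
 * so flags of 0x0520 yield code 5, which the prli_rsp table above maps
 * to retry=0, logged_in=0. Schematic fragment:
 */
#if 0
	int code = (be16_to_cpu(parms->flags) & 0x0f00) >> 8;
	int index = ibmvfc_get_prli_rsp(be16_to_cpu(parms->flags));
#endif
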
3504 /**
3505 * ibmvfc_tgt_prli_done - Completion handler for Process Login
3506 * @evt: ibmvfc event struct
3507 *
3508 **/
3509 static void ibmvfc_tgt_prli_done(struct ibmvfc_event *evt)
3510 {
3511 struct ibmvfc_target *tgt = evt->tgt;
3512 struct ibmvfc_host *vhost = evt->vhost;
3513 struct ibmvfc_process_login *rsp = &evt->xfer_iu->prli;
3514 struct ibmvfc_prli_svc_parms *parms = &rsp->parms;
3515 u32 status = be16_to_cpu(rsp->common.status);
3516 int index, level = IBMVFC_DEFAULT_LOG_LEVEL;
3517
3518 vhost->discovery_threads--;
3519 ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_NONE);
3520 switch (status) {
3521 case IBMVFC_MAD_SUCCESS:
3522 tgt_dbg(tgt, "Process Login succeeded: %X %02X %04X\n",
3523 parms->type, parms->flags, parms->service_parms);
3524
3525 if (parms->type == IBMVFC_SCSI_FCP_TYPE) {
3526 index = ibmvfc_get_prli_rsp(be16_to_cpu(parms->flags));
3527 if (prli_rsp[index].logged_in) {
3528 if (be16_to_cpu(parms->flags) & IBMVFC_PRLI_EST_IMG_PAIR) {
3529 tgt->need_login = 0;
3530 tgt->ids.roles = 0;
3531 if (be32_to_cpu(parms->service_parms) & IBMVFC_PRLI_TARGET_FUNC)
3532 tgt->ids.roles |= FC_PORT_ROLE_FCP_TARGET;
3533 if (be32_to_cpu(parms->service_parms) & IBMVFC_PRLI_INITIATOR_FUNC)
3534 tgt->ids.roles |= FC_PORT_ROLE_FCP_INITIATOR;
3535 tgt->add_rport = 1;
3536 } else
3537 ibmvfc_del_tgt(tgt);
3538 } else if (prli_rsp[index].retry)
3539 ibmvfc_retry_tgt_init(tgt, ibmvfc_tgt_send_prli);
3540 else
3541 ibmvfc_del_tgt(tgt);
3542 } else
3543 ibmvfc_del_tgt(tgt);
3544 break;
3545 case IBMVFC_MAD_DRIVER_FAILED:
3546 break;
3547 case IBMVFC_MAD_CRQ_ERROR:
3548 ibmvfc_retry_tgt_init(tgt, ibmvfc_tgt_send_prli);
3549 break;
3550 case IBMVFC_MAD_FAILED:
3551 default:
3552 if ((be16_to_cpu(rsp->status) & IBMVFC_VIOS_FAILURE) &&
3553 be16_to_cpu(rsp->error) == IBMVFC_PLOGI_REQUIRED)
3554 level += ibmvfc_retry_tgt_init(tgt, ibmvfc_tgt_send_plogi);
3555 else if (tgt->logo_rcvd)
3556 level += ibmvfc_retry_tgt_init(tgt, ibmvfc_tgt_send_plogi);
3557 else if (ibmvfc_retry_cmd(be16_to_cpu(rsp->status), be16_to_cpu(rsp->error)))
3558 level += ibmvfc_retry_tgt_init(tgt, ibmvfc_tgt_send_prli);
3559 else
3560 ibmvfc_del_tgt(tgt);
3561
3562 tgt_log(tgt, level, "Process Login failed: %s (%x:%x) rc=0x%02X\n",
3563 ibmvfc_get_cmd_error(be16_to_cpu(rsp->status), be16_to_cpu(rsp->error)),
3564 be16_to_cpu(rsp->status), be16_to_cpu(rsp->error), status);
3565 break;
3566 }
3567
3568 kref_put(&tgt->kref, ibmvfc_release_tgt);
3569 ibmvfc_free_event(evt);
3570 wake_up(&vhost->work_wait_q);
3571 }
3572
3573 /**
3574 * ibmvfc_tgt_send_prli - Send a process login
3575 * @tgt: ibmvfc target struct
3576 *
3577 **/
3578 static void ibmvfc_tgt_send_prli(struct ibmvfc_target *tgt)
3579 {
3580 struct ibmvfc_process_login *prli;
3581 struct ibmvfc_host *vhost = tgt->vhost;
3582 struct ibmvfc_event *evt;
3583
3584 if (vhost->discovery_threads >= disc_threads)
3585 return;
3586
3587 kref_get(&tgt->kref);
3588 evt = ibmvfc_get_event(&vhost->crq);
3589 vhost->discovery_threads++;
3590 ibmvfc_init_event(evt, ibmvfc_tgt_prli_done, IBMVFC_MAD_FORMAT);
3591 evt->tgt = tgt;
3592 prli = &evt->iu.prli;
3593 memset(prli, 0, sizeof(*prli));
3594 if (ibmvfc_check_caps(vhost, IBMVFC_HANDLE_VF_WWPN)) {
3595 prli->common.version = cpu_to_be32(2);
3596 prli->target_wwpn = cpu_to_be64(tgt->wwpn);
3597 } else {
3598 prli->common.version = cpu_to_be32(1);
3599 }
3600 prli->common.opcode = cpu_to_be32(IBMVFC_PROCESS_LOGIN);
3601 prli->common.length = cpu_to_be16(sizeof(*prli));
3602 prli->scsi_id = cpu_to_be64(tgt->scsi_id);
3603
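/*
 * Request an FCP image pair with this port acting as an initiator.
 * READ FCP_XFER_RDY disabled is also requested, since XFER_RDY is
 * not used for reads in modern FCP.
 */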
3604 prli->parms.type = IBMVFC_SCSI_FCP_TYPE;
3605 prli->parms.flags = cpu_to_be16(IBMVFC_PRLI_EST_IMG_PAIR);
3606 prli->parms.service_parms = cpu_to_be32(IBMVFC_PRLI_INITIATOR_FUNC);
3607 prli->parms.service_parms |= cpu_to_be32(IBMVFC_PRLI_READ_FCP_XFER_RDY_DISABLED);
3608
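/* The PRLI retry service parameter enables class 3 error recovery */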
3609 if (cls3_error)
3610 prli->parms.service_parms |= cpu_to_be32(IBMVFC_PRLI_RETRY);
3611
3612 ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_INIT_WAIT);
3613 if (ibmvfc_send_event(evt, vhost, default_timeout)) {
3614 vhost->discovery_threads--;
3615 ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_NONE);
3616 kref_put(&tgt->kref, ibmvfc_release_tgt);
3617 } else
3618 tgt_dbg(tgt, "Sent process login\n");
3619 }
3620
3621 /**
3622 * ibmvfc_tgt_plogi_done - Completion handler for Port Login
3623 * @evt: ibmvfc event struct
3624 *
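* If the fabric reports a WWPN different from the one already cached
* for this target, the port has moved, so a full re-init is requested
* rather than updating the target in place.
*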
3625 **/
3626 static void ibmvfc_tgt_plogi_done(struct ibmvfc_event *evt)
3627 {
3628 struct ibmvfc_target *tgt = evt->tgt;
3629 struct ibmvfc_host *vhost = evt->vhost;
3630 struct ibmvfc_port_login *rsp = &evt->xfer_iu->plogi;
3631 u32 status = be16_to_cpu(rsp->common.status);
3632 int level = IBMVFC_DEFAULT_LOG_LEVEL;
3633
3634 vhost->discovery_threads--;
3635 ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_NONE);
3636 switch (status) {
3637 case IBMVFC_MAD_SUCCESS:
3638 tgt_dbg(tgt, "Port Login succeeded\n");
3639 if (tgt->ids.port_name &&
3640 tgt->ids.port_name != wwn_to_u64(rsp->service_parms.port_name)) {
3641 vhost->reinit = 1;
3642 tgt_dbg(tgt, "Port re-init required\n");
3643 break;
3644 }
3645 tgt->ids.node_name = wwn_to_u64(rsp->service_parms.node_name);
3646 tgt->ids.port_name = wwn_to_u64(rsp->service_parms.port_name);
3647 tgt->ids.port_id = tgt->scsi_id;
3648 memcpy(&tgt->service_parms, &rsp->service_parms,
3649 sizeof(tgt->service_parms));
3650 memcpy(&tgt->service_parms_change, &rsp->service_parms_change,
3651 sizeof(tgt->service_parms_change));
3652 ibmvfc_init_tgt(tgt, ibmvfc_tgt_send_prli);
3653 break;
3654 case IBMVFC_MAD_DRIVER_FAILED:
3655 break;
3656 case IBMVFC_MAD_CRQ_ERROR:
3657 ibmvfc_retry_tgt_init(tgt, ibmvfc_tgt_send_plogi);
3658 break;
3659 case IBMVFC_MAD_FAILED:
3660 default:
3661 if (ibmvfc_retry_cmd(be16_to_cpu(rsp->status), be16_to_cpu(rsp->error)))
3662 level += ibmvfc_retry_tgt_init(tgt, ibmvfc_tgt_send_plogi);
3663 else
3664 ibmvfc_del_tgt(tgt);
3665
3666 tgt_log(tgt, level, "Port Login failed: %s (%x:%x) %s (%x) %s (%x) rc=0x%02X\n",
3667 ibmvfc_get_cmd_error(be16_to_cpu(rsp->status), be16_to_cpu(rsp->error)),
3668 be16_to_cpu(rsp->status), be16_to_cpu(rsp->error),
3669 ibmvfc_get_fc_type(be16_to_cpu(rsp->fc_type)), be16_to_cpu(rsp->fc_type),
3670 ibmvfc_get_ls_explain(be16_to_cpu(rsp->fc_explain)), be16_to_cpu(rsp->fc_explain), status);
3671 break;
3672 }
3673
3674 kref_put(&tgt->kref, ibmvfc_release_tgt);
3675 ibmvfc_free_event(evt);
3676 wake_up(&vhost->work_wait_q);
3677 }
3678
3679 /**
3680 * ibmvfc_tgt_send_plogi - Send PLOGI to the specified target
3681 * @tgt: ibmvfc target struct
3682 *
3683 **/
3684 static void ibmvfc_tgt_send_plogi(struct ibmvfc_target *tgt)
3685 {
3686 struct ibmvfc_port_login *plogi;
3687 struct ibmvfc_host *vhost = tgt->vhost;
3688 struct ibmvfc_event *evt;
3689
3690 if (vhost->discovery_threads >= disc_threads)
3691 return;
3692
3693 kref_get(&tgt->kref);
3694 tgt->logo_rcvd = 0;
3695 evt = ibmvfc_get_event(&vhost->crq);
3696 vhost->discovery_threads++;
3697 ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_INIT_WAIT);
3698 ibmvfc_init_event(evt, ibmvfc_tgt_plogi_done, IBMVFC_MAD_FORMAT);
3699 evt->tgt = tgt;
3700 plogi = &evt->iu.plogi;
3701 memset(plogi, 0, sizeof(*plogi));
3702 if (ibmvfc_check_caps(vhost, IBMVFC_HANDLE_VF_WWPN)) {
3703 plogi->common.version = cpu_to_be32(2);
3704 plogi->target_wwpn = cpu_to_be64(tgt->wwpn);
3705 } else {
3706 plogi->common.version = cpu_to_be32(1);
3707 }
3708 plogi->common.opcode = cpu_to_be32(IBMVFC_PORT_LOGIN);
3709 plogi->common.length = cpu_to_be16(sizeof(*plogi));
3710 plogi->scsi_id = cpu_to_be64(tgt->scsi_id);
3711
3712 if (ibmvfc_send_event(evt, vhost, default_timeout)) {
3713 vhost->discovery_threads--;
3714 ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_NONE);
3715 kref_put(&tgt->kref, ibmvfc_release_tgt);
3716 } else
3717 tgt_dbg(tgt, "Sent port login\n");
3718 }
3719
3720 /**
3721 * ibmvfc_tgt_implicit_logout_done - Completion handler for Implicit Logout MAD
3722 * @evt: ibmvfc event struct
3723 *
3724 **/
3725 static void ibmvfc_tgt_implicit_logout_done(struct ibmvfc_event *evt)
3726 {
3727 struct ibmvfc_target *tgt = evt->tgt;
3728 struct ibmvfc_host *vhost = evt->vhost;
3729 struct ibmvfc_implicit_logout *rsp = &evt->xfer_iu->implicit_logout;
3730 u32 status = be16_to_cpu(rsp->common.status);
3731
3732 vhost->discovery_threads--;
3733 ibmvfc_free_event(evt);
3734 ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_NONE);
3735
3736 switch (status) {
3737 case IBMVFC_MAD_SUCCESS:
3738 tgt_dbg(tgt, "Implicit Logout succeeded\n");
3739 break;
3740 case IBMVFC_MAD_DRIVER_FAILED:
3741 kref_put(&tgt->kref, ibmvfc_release_tgt);
3742 wake_up(&vhost->work_wait_q);
3743 return;
3744 case IBMVFC_MAD_FAILED:
3745 default:
3746 tgt_err(tgt, "Implicit Logout failed: rc=0x%02X\n", status);
3747 break;
3748 }
3749
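/* Whether or not the logout succeeded, follow up with a fresh PLOGI */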
3750 ibmvfc_init_tgt(tgt, ibmvfc_tgt_send_plogi);
3751 kref_put(&tgt->kref, ibmvfc_release_tgt);
3752 wake_up(&vhost->work_wait_q);
3753 }
3754
3755 /**
3756 * __ibmvfc_tgt_get_implicit_logout_evt - Allocate and init an event for implicit logout
3757 * @tgt: ibmvfc target struct
* @done: routine to call when the event is responded to
3758 *
3759 * Returns:
3760 * Allocated and initialized ibmvfc_event struct
3761 **/
3762 static struct ibmvfc_event *__ibmvfc_tgt_get_implicit_logout_evt(struct ibmvfc_target *tgt,
3763 void (*done) (struct ibmvfc_event *))
3764 {
3765 struct ibmvfc_implicit_logout *mad;
3766 struct ibmvfc_host *vhost = tgt->vhost;
3767 struct ibmvfc_event *evt;
3768
3769 kref_get(&tgt->kref);
3770 evt = ibmvfc_get_event(&vhost->crq);
3771 ibmvfc_init_event(evt, done, IBMVFC_MAD_FORMAT);
3772 evt->tgt = tgt;
3773 mad = &evt->iu.implicit_logout;
3774 memset(mad, 0, sizeof(*mad));
3775 mad->common.version = cpu_to_be32(1);
3776 mad->common.opcode = cpu_to_be32(IBMVFC_IMPLICIT_LOGOUT);
3777 mad->common.length = cpu_to_be16(sizeof(*mad));
3778 mad->old_scsi_id = cpu_to_be64(tgt->scsi_id);
3779 return evt;
3780 }
3781
3782 /**
3783 * ibmvfc_tgt_implicit_logout - Initiate an Implicit Logout for specified target
3784 * @tgt: ibmvfc target struct
3785 *
3786 **/
3787 static void ibmvfc_tgt_implicit_logout(struct ibmvfc_target *tgt)
3788 {
3789 struct ibmvfc_host *vhost = tgt->vhost;
3790 struct ibmvfc_event *evt;
3791
3792 if (vhost->discovery_threads >= disc_threads)
3793 return;
3794
3795 vhost->discovery_threads++;
3796 evt = __ibmvfc_tgt_get_implicit_logout_evt(tgt,
3797 ibmvfc_tgt_implicit_logout_done);
3798
3799 ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_INIT_WAIT);
3800 if (ibmvfc_send_event(evt, vhost, default_timeout)) {
3801 vhost->discovery_threads--;
3802 ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_NONE);
3803 kref_put(&tgt->kref, ibmvfc_release_tgt);
3804 } else
3805 tgt_dbg(tgt, "Sent Implicit Logout\n");
3806 }
3807
3808 /**
3809 * ibmvfc_tgt_implicit_logout_and_del_done - Completion handler for Implicit Logout MAD
3810 * @evt: ibmvfc event struct
3811 *
3812 **/
3813 static void ibmvfc_tgt_implicit_logout_and_del_done(struct ibmvfc_event *evt)
3814 {
3815 struct ibmvfc_target *tgt = evt->tgt;
3816 struct ibmvfc_host *vhost = evt->vhost;
3817 struct ibmvfc_passthru_mad *mad = &evt->xfer_iu->passthru;
3818 u32 status = be16_to_cpu(mad->common.status);
3819
3820 vhost->discovery_threads--;
3821 ibmvfc_free_event(evt);
3822
3823 /*
3824 * If our state is IBMVFC_HOST_OFFLINE, we could be unloading the
3825 * driver in which case we need to free up all the targets. If we are
3826 * not unloading, we will still go through a hard reset to get out of
3827 * offline state, so there is no need to track the old targets in that
3828 * case.
3829 */
3830 if (status == IBMVFC_MAD_SUCCESS || vhost->state == IBMVFC_HOST_OFFLINE)
3831 ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_DEL_RPORT);
3832 else
3833 ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_DEL_AND_LOGOUT_RPORT);
3834
3835 tgt_dbg(tgt, "Implicit Logout %s\n", (status == IBMVFC_MAD_SUCCESS) ? "succeeded" : "failed");
3836 kref_put(&tgt->kref, ibmvfc_release_tgt);
3837 wake_up(&vhost->work_wait_q);
3838 }
3839
3840 /**
3841 * ibmvfc_tgt_implicit_logout_and_del - Initiate an Implicit Logout for specified target
3842 * @tgt: ibmvfc target struct
3843 *
3844 **/
3845 static void ibmvfc_tgt_implicit_logout_and_del(struct ibmvfc_target *tgt)
3846 {
3847 struct ibmvfc_host *vhost = tgt->vhost;
3848 struct ibmvfc_event *evt;
3849
3850 if (!vhost->logged_in) {
3851 ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_DEL_RPORT);
3852 return;
3853 }
3854
3855 if (vhost->discovery_threads >= disc_threads)
3856 return;
3857
3858 vhost->discovery_threads++;
3859 evt = __ibmvfc_tgt_get_implicit_logout_evt(tgt,
3860 ibmvfc_tgt_implicit_logout_and_del_done);
3861
3862 ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_LOGOUT_RPORT_WAIT);
3863 if (ibmvfc_send_event(evt, vhost, default_timeout)) {
3864 vhost->discovery_threads--;
3865 ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_DEL_RPORT);
3866 kref_put(&tgt->kref, ibmvfc_release_tgt);
3867 } else
3868 tgt_dbg(tgt, "Sent Implicit Logout\n");
3869 }
3870
3871 /**
3872 * ibmvfc_tgt_move_login_done - Completion handler for Move Login
3873 * @evt: ibmvfc event struct
3874 *
3875 **/
3876 static void ibmvfc_tgt_move_login_done(struct ibmvfc_event *evt)
3877 {
3878 struct ibmvfc_target *tgt = evt->tgt;
3879 struct ibmvfc_host *vhost = evt->vhost;
3880 struct ibmvfc_move_login *rsp = &evt->xfer_iu->move_login;
3881 u32 status = be16_to_cpu(rsp->common.status);
3882 int level = IBMVFC_DEFAULT_LOG_LEVEL;
3883
3884 vhost->discovery_threads--;
3885 ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_NONE);
3886 switch (status) {
3887 case IBMVFC_MAD_SUCCESS:
3888 tgt_dbg(tgt, "Move Login succeeded for old scsi_id: %llX\n", tgt->old_scsi_id);
3889 tgt->ids.node_name = wwn_to_u64(rsp->service_parms.node_name);
3890 tgt->ids.port_name = wwn_to_u64(rsp->service_parms.port_name);
3891 tgt->ids.port_id = tgt->scsi_id;
3892 memcpy(&tgt->service_parms, &rsp->service_parms,
3893 sizeof(tgt->service_parms));
3894 memcpy(&tgt->service_parms_change, &rsp->service_parms_change,
3895 sizeof(tgt->service_parms_change));
3896 ibmvfc_init_tgt(tgt, ibmvfc_tgt_send_prli);
3897 break;
3898 case IBMVFC_MAD_DRIVER_FAILED:
3899 break;
3900 case IBMVFC_MAD_CRQ_ERROR:
3901 ibmvfc_retry_tgt_init(tgt, ibmvfc_tgt_move_login);
3902 break;
3903 case IBMVFC_MAD_FAILED:
3904 default:
3905 level += ibmvfc_retry_tgt_init(tgt, ibmvfc_tgt_move_login);
3906
3907 tgt_log(tgt, level,
3908 "Move Login failed: old scsi_id: %llX, flags:%x, vios_flags:%x, rc=0x%02X\n",
3909 tgt->old_scsi_id, be32_to_cpu(rsp->flags), be16_to_cpu(rsp->vios_flags),
3910 status);
3911 break;
3912 }
3913
3914 kref_put(&tgt->kref, ibmvfc_release_tgt);
3915 ibmvfc_free_event(evt);
3916 wake_up(&vhost->work_wait_q);
3917 }
3918
3919
3920 /**
3921 * ibmvfc_tgt_move_login - Initiate a move login for specified target
3922 * @tgt: ibmvfc target struct
3923 *
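* A move login re-establishes the session after the target's N_Port ID
* has changed. Unlike an implicit logout, it works even with I/O still
* outstanding, since any commands active against the old SCSI ID get
* cancelled.
*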
3924 **/
3925 static void ibmvfc_tgt_move_login(struct ibmvfc_target *tgt)
3926 {
3927 struct ibmvfc_host *vhost = tgt->vhost;
3928 struct ibmvfc_move_login *move;
3929 struct ibmvfc_event *evt;
3930
3931 if (vhost->discovery_threads >= disc_threads)
3932 return;
3933
3934 kref_get(&tgt->kref);
3935 evt = ibmvfc_get_event(&vhost->crq);
3936 vhost->discovery_threads++;
3937 ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_INIT_WAIT);
3938 ibmvfc_init_event(evt, ibmvfc_tgt_move_login_done, IBMVFC_MAD_FORMAT);
3939 evt->tgt = tgt;
3940 move = &evt->iu.move_login;
3941 memset(move, 0, sizeof(*move));
3942 move->common.version = cpu_to_be32(1);
3943 move->common.opcode = cpu_to_be32(IBMVFC_MOVE_LOGIN);
3944 move->common.length = cpu_to_be16(sizeof(*move));
3945
3946 move->old_scsi_id = cpu_to_be64(tgt->old_scsi_id);
3947 move->new_scsi_id = cpu_to_be64(tgt->scsi_id);
3948 move->wwpn = cpu_to_be64(tgt->wwpn);
3949 move->node_name = cpu_to_be64(tgt->ids.node_name);
3950
3951 if (ibmvfc_send_event(evt, vhost, default_timeout)) {
3952 vhost->discovery_threads--;
3953 ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_DEL_RPORT);
3954 kref_put(&tgt->kref, ibmvfc_release_tgt);
3955 } else
3956 tgt_dbg(tgt, "Sent Move Login for old scsi_id: %llX\n", tgt->old_scsi_id);
3957 }
3958
3959 /**
3960 * ibmvfc_adisc_needs_plogi - Does device need PLOGI?
3961 * @mad: ibmvfc passthru mad struct
3962 * @tgt: ibmvfc target struct
3963 *
3964 * Returns:
3965 * 1 if PLOGI needed / 0 if PLOGI not needed
3966 **/
3967 static int ibmvfc_adisc_needs_plogi(struct ibmvfc_passthru_mad *mad,
3968 struct ibmvfc_target *tgt)
3969 {
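/*
 * Words 2-5 of the ADISC response hold the remote port and node names
 * and word 6 the N_Port ID. A mismatch with the cached identifiers
 * means the existing login is stale and a new PLOGI is needed.
 */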
3970 if (wwn_to_u64((u8 *)&mad->fc_iu.response[2]) != tgt->ids.port_name)
3971 return 1;
3972 if (wwn_to_u64((u8 *)&mad->fc_iu.response[4]) != tgt->ids.node_name)
3973 return 1;
3974 if (be32_to_cpu(mad->fc_iu.response[6]) != tgt->scsi_id)
3975 return 1;
3976 return 0;
3977 }
3978
3979 /**
3980 * ibmvfc_tgt_adisc_done - Completion handler for ADISC
3981 * @evt: ibmvfc event struct
3982 *
3983 **/
3984 static void ibmvfc_tgt_adisc_done(struct ibmvfc_event *evt)
3985 {
3986 struct ibmvfc_target *tgt = evt->tgt;
3987 struct ibmvfc_host *vhost = evt->vhost;
3988 struct ibmvfc_passthru_mad *mad = &evt->xfer_iu->passthru;
3989 u32 status = be16_to_cpu(mad->common.status);
3990 u8 fc_reason, fc_explain;
3991
3992 vhost->discovery_threads--;
3993 ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_NONE);
3994 del_timer(&tgt->timer);
3995
3996 switch (status) {
3997 case IBMVFC_MAD_SUCCESS:
3998 tgt_dbg(tgt, "ADISC succeeded\n");
3999 if (ibmvfc_adisc_needs_plogi(mad, tgt))
4000 ibmvfc_del_tgt(tgt);
4001 break;
4002 case IBMVFC_MAD_DRIVER_FAILED:
4003 break;
4004 case IBMVFC_MAD_FAILED:
4005 default:
4006 ibmvfc_del_tgt(tgt);
4007 fc_reason = (be32_to_cpu(mad->fc_iu.response[1]) & 0x00ff0000) >> 16;
4008 fc_explain = (be32_to_cpu(mad->fc_iu.response[1]) & 0x0000ff00) >> 8;
4009 tgt_info(tgt, "ADISC failed: %s (%x:%x) %s (%x) %s (%x) rc=0x%02X\n",
4010 ibmvfc_get_cmd_error(be16_to_cpu(mad->iu.status), be16_to_cpu(mad->iu.error)),
4011 be16_to_cpu(mad->iu.status), be16_to_cpu(mad->iu.error),
4012 ibmvfc_get_fc_type(fc_reason), fc_reason,
4013 ibmvfc_get_ls_explain(fc_explain), fc_explain, status);
4014 break;
4015 }
4016
4017 kref_put(&tgt->kref, ibmvfc_release_tgt);
4018 ibmvfc_free_event(evt);
4019 wake_up(&vhost->work_wait_q);
4020 }
4021
4022 /**
4023 * ibmvfc_init_passthru - Initialize an event struct for FC passthru
4024 * @evt: ibmvfc event struct
4025 *
4026 **/
4027 static void ibmvfc_init_passthru(struct ibmvfc_event *evt)
4028 {
4029 struct ibmvfc_passthru_mad *mad = &evt->iu.passthru;
4030
4031 memset(mad, 0, sizeof(*mad));
4032 mad->common.version = cpu_to_be32(1);
4033 mad->common.opcode = cpu_to_be32(IBMVFC_PASSTHRU);
4034 mad->common.length = cpu_to_be16(sizeof(*mad) - sizeof(mad->fc_iu) - sizeof(mad->iu));
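/*
 * The fc_iu and iu areas are excluded from the MAD length above; they
 * are instead referenced through the cmd/rsp DMA descriptors below,
 * expressed as offsets from the event's own mapped address.
 */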
4035 mad->cmd_ioba.va = cpu_to_be64((u64)be64_to_cpu(evt->crq.ioba) +
4036 offsetof(struct ibmvfc_passthru_mad, iu));
4037 mad->cmd_ioba.len = cpu_to_be32(sizeof(mad->iu));
4038 mad->iu.cmd_len = cpu_to_be32(sizeof(mad->fc_iu.payload));
4039 mad->iu.rsp_len = cpu_to_be32(sizeof(mad->fc_iu.response));
4040 mad->iu.cmd.va = cpu_to_be64((u64)be64_to_cpu(evt->crq.ioba) +
4041 offsetof(struct ibmvfc_passthru_mad, fc_iu) +
4042 offsetof(struct ibmvfc_passthru_fc_iu, payload));
4043 mad->iu.cmd.len = cpu_to_be32(sizeof(mad->fc_iu.payload));
4044 mad->iu.rsp.va = cpu_to_be64((u64)be64_to_cpu(evt->crq.ioba) +
4045 offsetof(struct ibmvfc_passthru_mad, fc_iu) +
4046 offsetof(struct ibmvfc_passthru_fc_iu, response));
4047 mad->iu.rsp.len = cpu_to_be32(sizeof(mad->fc_iu.response));
4048 }
4049
4050 /**
4051 * ibmvfc_tgt_adisc_cancel_done - Completion handler when cancelling an ADISC
4052 * @evt: ibmvfc event struct
4053 *
4054 * Just clean up this event struct. Everything else is handled by
4055 * the ADISC completion handler. If the ADISC never actually comes
4056 * back, we still have the timer running on the ADISC event struct
4057 * which will fire and cause the CRQ to get reset.
4058 *
4059 **/
4060 static void ibmvfc_tgt_adisc_cancel_done(struct ibmvfc_event *evt)
4061 {
4062 struct ibmvfc_host *vhost = evt->vhost;
4063 struct ibmvfc_target *tgt = evt->tgt;
4064
4065 tgt_dbg(tgt, "ADISC cancel complete\n");
4066 vhost->abort_threads--;
4067 ibmvfc_free_event(evt);
4068 kref_put(&tgt->kref, ibmvfc_release_tgt);
4069 wake_up(&vhost->work_wait_q);
4070 }
4071
4072 /**
4073 * ibmvfc_adisc_timeout - Handle an ADISC timeout
4074 * @t: timer context; used to fetch the ibmvfc target struct
4075 *
4076 * If an ADISC times out, send a cancel. If the cancel times
4077 * out, reset the CRQ. When the ADISC comes back as cancelled,
4078 * log back into the target.
4079 **/
4080 static void ibmvfc_adisc_timeout(struct timer_list *t)
4081 {
4082 struct ibmvfc_target *tgt = from_timer(tgt, t, timer);
4083 struct ibmvfc_host *vhost = tgt->vhost;
4084 struct ibmvfc_event *evt;
4085 struct ibmvfc_tmf *tmf;
4086 unsigned long flags;
4087 int rc;
4088
4089 tgt_dbg(tgt, "ADISC timeout\n");
4090 spin_lock_irqsave(vhost->host->host_lock, flags);
4091 if (vhost->abort_threads >= disc_threads ||
4092 tgt->action != IBMVFC_TGT_ACTION_INIT_WAIT ||
4093 vhost->state != IBMVFC_INITIALIZING ||
4094 vhost->action != IBMVFC_HOST_ACTION_QUERY_TGTS) {
4095 spin_unlock_irqrestore(vhost->host->host_lock, flags);
4096 return;
4097 }
4098
4099 vhost->abort_threads++;
4100 kref_get(&tgt->kref);
4101 evt = ibmvfc_get_event(&vhost->crq);
4102 ibmvfc_init_event(evt, ibmvfc_tgt_adisc_cancel_done, IBMVFC_MAD_FORMAT);
4103
4104 evt->tgt = tgt;
4105 tmf = &evt->iu.tmf;
4106 memset(tmf, 0, sizeof(*tmf));
4107 if (ibmvfc_check_caps(vhost, IBMVFC_HANDLE_VF_WWPN)) {
4108 tmf->common.version = cpu_to_be32(2);
4109 tmf->target_wwpn = cpu_to_be64(tgt->wwpn);
4110 } else {
4111 tmf->common.version = cpu_to_be32(1);
4112 }
4113 tmf->common.opcode = cpu_to_be32(IBMVFC_TMF_MAD);
4114 tmf->common.length = cpu_to_be16(sizeof(*tmf));
4115 tmf->scsi_id = cpu_to_be64(tgt->scsi_id);
4116 tmf->cancel_key = cpu_to_be32(tgt->cancel_key);
4117
4118 rc = ibmvfc_send_event(evt, vhost, default_timeout);
4119
4120 if (rc) {
4121 tgt_err(tgt, "Failed to send cancel event for ADISC. rc=%d\n", rc);
4122 vhost->abort_threads--;
4123 kref_put(&tgt->kref, ibmvfc_release_tgt);
4124 __ibmvfc_reset_host(vhost);
4125 } else
4126 tgt_dbg(tgt, "Attempting to cancel ADISC\n");
4127 spin_unlock_irqrestore(vhost->host->host_lock, flags);
4128 }
4129
4130 /**
4131 * ibmvfc_tgt_adisc - Initiate an ADISC for specified target
4132 * @tgt: ibmvfc target struct
4133 *
4134 * When sending an ADISC we end up with two timers running. The
4135 * first timer is the timer in the ibmvfc target struct. If this
4136 * fires, we send a cancel to the target. The second timer is the
4137 * timer on the ibmvfc event for the ADISC, which is longer. If that
4138 * fires, it means the ADISC timed out and our attempt to cancel it
4139 * also failed, so we need to reset the CRQ.
4140 **/
4141 static void ibmvfc_tgt_adisc(struct ibmvfc_target *tgt)
4142 {
4143 struct ibmvfc_passthru_mad *mad;
4144 struct ibmvfc_host *vhost = tgt->vhost;
4145 struct ibmvfc_event *evt;
4146
4147 if (vhost->discovery_threads >= disc_threads)
4148 return;
4149
4150 kref_get(&tgt->kref);
4151 evt = ibmvfc_get_event(&vhost->crq);
4152 vhost->discovery_threads++;
4153 ibmvfc_init_event(evt, ibmvfc_tgt_adisc_done, IBMVFC_MAD_FORMAT);
4154 evt->tgt = tgt;
4155
4156 ibmvfc_init_passthru(evt);
4157 mad = &evt->iu.passthru;
4158 mad->iu.flags = cpu_to_be32(IBMVFC_FC_ELS);
4159 mad->iu.scsi_id = cpu_to_be64(tgt->scsi_id);
4160 mad->iu.cancel_key = cpu_to_be32(tgt->cancel_key);
4161
4162 mad->fc_iu.payload[0] = cpu_to_be32(IBMVFC_ADISC);
4163 memcpy(&mad->fc_iu.payload[2], &vhost->login_buf->resp.port_name,
4164 sizeof(vhost->login_buf->resp.port_name));
4165 memcpy(&mad->fc_iu.payload[4], &vhost->login_buf->resp.node_name,
4166 sizeof(vhost->login_buf->resp.node_name));
4167 mad->fc_iu.payload[6] = cpu_to_be32(be64_to_cpu(vhost->login_buf->resp.scsi_id) & 0x00ffffff);
4168
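/*
 * Arm the short per-target timer; if the ADISC does not complete in
 * time, ibmvfc_adisc_timeout() will attempt to cancel it.
 */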
4169 if (timer_pending(&tgt->timer))
4170 mod_timer(&tgt->timer, jiffies + (IBMVFC_ADISC_TIMEOUT * HZ));
4171 else {
4172 tgt->timer.expires = jiffies + (IBMVFC_ADISC_TIMEOUT * HZ);
4173 add_timer(&tgt->timer);
4174 }
4175
4176 ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_INIT_WAIT);
4177 if (ibmvfc_send_event(evt, vhost, IBMVFC_ADISC_PLUS_CANCEL_TIMEOUT)) {
4178 vhost->discovery_threads--;
4179 del_timer(&tgt->timer);
4180 ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_NONE);
4181 kref_put(&tgt->kref, ibmvfc_release_tgt);
4182 } else
4183 tgt_dbg(tgt, "Sent ADISC\n");
4184 }
4185
4186 /**
4187 * ibmvfc_tgt_query_target_done - Completion handler for Query Target MAD
4188 * @evt: ibmvfc event struct
4189 *
4190 **/
4191 static void ibmvfc_tgt_query_target_done(struct ibmvfc_event *evt)
4192 {
4193 struct ibmvfc_target *tgt = evt->tgt;
4194 struct ibmvfc_host *vhost = evt->vhost;
4195 struct ibmvfc_query_tgt *rsp = &evt->xfer_iu->query_tgt;
4196 u32 status = be16_to_cpu(rsp->common.status);
4197 int level = IBMVFC_DEFAULT_LOG_LEVEL;
4198
4199 vhost->discovery_threads--;
4200 ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_NONE);
4201 switch (status) {
4202 case IBMVFC_MAD_SUCCESS:
4203 tgt_dbg(tgt, "Query Target succeeded\n");
4204 if (be64_to_cpu(rsp->scsi_id) != tgt->scsi_id)
4205 ibmvfc_del_tgt(tgt);
4206 else
4207 ibmvfc_init_tgt(tgt, ibmvfc_tgt_adisc);
4208 break;
4209 case IBMVFC_MAD_DRIVER_FAILED:
4210 break;
4211 case IBMVFC_MAD_CRQ_ERROR:
4212 ibmvfc_retry_tgt_init(tgt, ibmvfc_tgt_query_target);
4213 break;
4214 case IBMVFC_MAD_FAILED:
4215 default:
4216 if ((be16_to_cpu(rsp->status) & IBMVFC_FABRIC_MAPPED) == IBMVFC_FABRIC_MAPPED &&
4217 be16_to_cpu(rsp->error) == IBMVFC_UNABLE_TO_PERFORM_REQ &&
4218 be16_to_cpu(rsp->fc_explain) == IBMVFC_PORT_NAME_NOT_REG)
4219 ibmvfc_del_tgt(tgt);
4220 else if (ibmvfc_retry_cmd(be16_to_cpu(rsp->status), be16_to_cpu(rsp->error)))
4221 level += ibmvfc_retry_tgt_init(tgt, ibmvfc_tgt_query_target);
4222 else
4223 ibmvfc_del_tgt(tgt);
4224
4225 tgt_log(tgt, level, "Query Target failed: %s (%x:%x) %s (%x) %s (%x) rc=0x%02X\n",
4226 ibmvfc_get_cmd_error(be16_to_cpu(rsp->status), be16_to_cpu(rsp->error)),
4227 be16_to_cpu(rsp->status), be16_to_cpu(rsp->error),
4228 ibmvfc_get_fc_type(be16_to_cpu(rsp->fc_type)), be16_to_cpu(rsp->fc_type),
4229 ibmvfc_get_gs_explain(be16_to_cpu(rsp->fc_explain)), be16_to_cpu(rsp->fc_explain),
4230 status);
4231 break;
4232 }
4233
4234 kref_put(&tgt->kref, ibmvfc_release_tgt);
4235 ibmvfc_free_event(evt);
4236 wake_up(&vhost->work_wait_q);
4237 }
4238
4239 /**
4240 * ibmvfc_tgt_query_target - Initiate a Query Target for specified target
4241 * @tgt: ibmvfc target struct
4242 *
4243 **/
4244 static void ibmvfc_tgt_query_target(struct ibmvfc_target *tgt)
4245 {
4246 struct ibmvfc_query_tgt *query_tgt;
4247 struct ibmvfc_host *vhost = tgt->vhost;
4248 struct ibmvfc_event *evt;
4249
4250 if (vhost->discovery_threads >= disc_threads)
4251 return;
4252
4253 kref_get(&tgt->kref);
4254 evt = ibmvfc_get_event(&vhost->crq);
4255 vhost->discovery_threads++;
4256 evt->tgt = tgt;
4257 ibmvfc_init_event(evt, ibmvfc_tgt_query_target_done, IBMVFC_MAD_FORMAT);
4258 query_tgt = &evt->iu.query_tgt;
4259 memset(query_tgt, 0, sizeof(*query_tgt));
4260 query_tgt->common.version = cpu_to_be32(1);
4261 query_tgt->common.opcode = cpu_to_be32(IBMVFC_QUERY_TARGET);
4262 query_tgt->common.length = cpu_to_be16(sizeof(*query_tgt));
4263 query_tgt->wwpn = cpu_to_be64(tgt->ids.port_name);
4264
4265 ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_INIT_WAIT);
4266 if (ibmvfc_send_event(evt, vhost, default_timeout)) {
4267 vhost->discovery_threads--;
4268 ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_NONE);
4269 kref_put(&tgt->kref, ibmvfc_release_tgt);
4270 } else
4271 tgt_dbg(tgt, "Sent Query Target\n");
4272 }
4273
4274 /**
4275 * ibmvfc_alloc_target - Allocate and initialize an ibmvfc target
4276 * @vhost: ibmvfc host struct
4277 * @target: Discover Targets entry describing the target to allocate
4278 *
4279 * Returns:
4280 * 0 on success / other on failure
4281 **/
4282 static int ibmvfc_alloc_target(struct ibmvfc_host *vhost,
4283 struct ibmvfc_discover_targets_entry *target)
4284 {
4285 struct ibmvfc_target *stgt = NULL;
4286 struct ibmvfc_target *wtgt = NULL;
4287 struct ibmvfc_target *tgt;
4288 unsigned long flags;
4289 u64 scsi_id = be32_to_cpu(target->scsi_id) & IBMVFC_DISC_TGT_SCSI_ID_MASK;
4290 u64 wwpn = be64_to_cpu(target->wwpn);
4291
4292 /* Look to see if we already have a target allocated for this SCSI ID or WWPN */
4293 spin_lock_irqsave(vhost->host->host_lock, flags);
4294 list_for_each_entry(tgt, &vhost->targets, queue) {
4295 if (tgt->wwpn == wwpn) {
4296 wtgt = tgt;
4297 break;
4298 }
4299 }
4300
4301 list_for_each_entry(tgt, &vhost->targets, queue) {
4302 if (tgt->scsi_id == scsi_id) {
4303 stgt = tgt;
4304 break;
4305 }
4306 }
4307
4308 if (wtgt && !stgt) {
4309 /*
4310 * A WWPN target has moved and we still are tracking the old
4311 * SCSI ID. The only way we should be able to get here is if
4312 * we attempted to send an implicit logout for the old SCSI ID
4313 * and it failed for some reason, such as there being I/O
4314 * pending to the target. In this case, we will have already
4315 * deleted the rport from the FC transport so we do a move
4316 * login, which works even with I/O pending, as it will cancel
4317 * any active commands.
4318 */
4319 if (wtgt->action == IBMVFC_TGT_ACTION_LOGOUT_DELETED_RPORT) {
4320 /*
4321 * Do a move login here. The old target is no longer
4322 * known to the transport layer. We don't use the
4323 * normal ibmvfc_set_tgt_action to set this, as we
4324 * don't normally want to allow this state change.
4325 */
4326 wtgt->old_scsi_id = wtgt->scsi_id;
4327 wtgt->scsi_id = scsi_id;
4328 wtgt->action = IBMVFC_TGT_ACTION_INIT;
4329 ibmvfc_init_tgt(wtgt, ibmvfc_tgt_move_login);
4330 goto unlock_out;
4331 } else {
4332 tgt_err(wtgt, "Unexpected target state: %d, %p\n",
4333 wtgt->action, wtgt->rport);
4334 }
4335 } else if (stgt) {
4336 if (stgt->need_login)
4337 ibmvfc_init_tgt(stgt, ibmvfc_tgt_implicit_logout);
4338 goto unlock_out;
4339 }
4340 spin_unlock_irqrestore(vhost->host->host_lock, flags);
4341
4342 tgt = mempool_alloc(vhost->tgt_pool, GFP_NOIO);
4343 memset(tgt, 0, sizeof(*tgt));
4344 tgt->scsi_id = scsi_id;
4345 tgt->wwpn = wwpn;
4346 tgt->vhost = vhost;
4347 tgt->need_login = 1;
4348 timer_setup(&tgt->timer, ibmvfc_adisc_timeout, 0);
4349 kref_init(&tgt->kref);
4350 ibmvfc_init_tgt(tgt, ibmvfc_tgt_implicit_logout);
4351 spin_lock_irqsave(vhost->host->host_lock, flags);
4352 tgt->cancel_key = vhost->task_set++;
4353 list_add_tail(&tgt->queue, &vhost->targets);
4354
4355 unlock_out:
4356 spin_unlock_irqrestore(vhost->host->host_lock, flags);
4357 return 0;
4358 }
4359
4360 /**
4361 * ibmvfc_alloc_targets - Allocate and initialize ibmvfc targets
4362 * @vhost: ibmvfc host struct
4363 *
4364 * Returns:
4365 * 0 on success / other on failure
4366 **/
4367 static int ibmvfc_alloc_targets(struct ibmvfc_host *vhost)
4368 {
4369 int i, rc;
4370
4371 for (i = 0, rc = 0; !rc && i < vhost->num_targets; i++)
4372 rc = ibmvfc_alloc_target(vhost, &vhost->disc_buf[i]);
4373
4374 return rc;
4375 }
4376
4377 /**
4378 * ibmvfc_discover_targets_done - Completion handler for discover targets MAD
4379 * @evt: ibmvfc event struct
4380 *
4381 **/
4382 static void ibmvfc_discover_targets_done(struct ibmvfc_event *evt)
4383 {
4384 struct ibmvfc_host *vhost = evt->vhost;
4385 struct ibmvfc_discover_targets *rsp = &evt->xfer_iu->discover_targets;
4386 u32 mad_status = be16_to_cpu(rsp->common.status);
4387 int level = IBMVFC_DEFAULT_LOG_LEVEL;
4388
4389 switch (mad_status) {
4390 case IBMVFC_MAD_SUCCESS:
4391 ibmvfc_dbg(vhost, "Discover Targets succeeded\n");
4392 vhost->num_targets = be32_to_cpu(rsp->num_written);
4393 ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_ALLOC_TGTS);
4394 break;
4395 case IBMVFC_MAD_FAILED:
4396 level += ibmvfc_retry_host_init(vhost);
4397 ibmvfc_log(vhost, level, "Discover Targets failed: %s (%x:%x)\n",
4398 ibmvfc_get_cmd_error(be16_to_cpu(rsp->status), be16_to_cpu(rsp->error)),
4399 be16_to_cpu(rsp->status), be16_to_cpu(rsp->error));
4400 break;
4401 case IBMVFC_MAD_DRIVER_FAILED:
4402 break;
4403 default:
4404 dev_err(vhost->dev, "Invalid Discover Targets response: 0x%x\n", mad_status);
4405 ibmvfc_link_down(vhost, IBMVFC_LINK_DEAD);
4406 break;
4407 }
4408
4409 ibmvfc_free_event(evt);
4410 wake_up(&vhost->work_wait_q);
4411 }
4412
4413 /**
4414 * ibmvfc_discover_targets - Send Discover Targets MAD
4415 * @vhost: ibmvfc host struct
4416 *
4417 **/
4418 static void ibmvfc_discover_targets(struct ibmvfc_host *vhost)
4419 {
4420 struct ibmvfc_discover_targets *mad;
4421 struct ibmvfc_event *evt = ibmvfc_get_event(&vhost->crq);
4422
4423 ibmvfc_init_event(evt, ibmvfc_discover_targets_done, IBMVFC_MAD_FORMAT);
4424 mad = &evt->iu.discover_targets;
4425 memset(mad, 0, sizeof(*mad));
4426 mad->common.version = cpu_to_be32(1);
4427 mad->common.opcode = cpu_to_be32(IBMVFC_DISC_TARGETS);
4428 mad->common.length = cpu_to_be16(sizeof(*mad));
4429 mad->bufflen = cpu_to_be32(vhost->disc_buf_sz);
4430 mad->buffer.va = cpu_to_be64(vhost->disc_buf_dma);
4431 mad->buffer.len = cpu_to_be32(vhost->disc_buf_sz);
4432 mad->flags = cpu_to_be32(IBMVFC_DISC_TGT_PORT_ID_WWPN_LIST);
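/*
 * Request the port ID and WWPN list format; num_written in the
 * response indicates how many entries the VIOS actually filled in.
 */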
4433 ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_INIT_WAIT);
4434
4435 if (!ibmvfc_send_event(evt, vhost, default_timeout))
4436 ibmvfc_dbg(vhost, "Sent discover targets\n");
4437 else
4438 ibmvfc_link_down(vhost, IBMVFC_LINK_DEAD);
4439 }
4440
4441 /**
4442 * ibmvfc_npiv_login_done - Completion handler for NPIV Login
4443 * @evt: ibmvfc event struct
4444 *
4445 **/
4446 static void ibmvfc_npiv_login_done(struct ibmvfc_event *evt)
4447 {
4448 struct ibmvfc_host *vhost = evt->vhost;
4449 u32 mad_status = be16_to_cpu(evt->xfer_iu->npiv_login.common.status);
4450 struct ibmvfc_npiv_login_resp *rsp = &vhost->login_buf->resp;
4451 unsigned int npiv_max_sectors;
4452 int level = IBMVFC_DEFAULT_LOG_LEVEL;
4453
4454 switch (mad_status) {
4455 case IBMVFC_MAD_SUCCESS:
4456 ibmvfc_free_event(evt);
4457 break;
4458 case IBMVFC_MAD_FAILED:
4459 if (ibmvfc_retry_cmd(be16_to_cpu(rsp->status), be16_to_cpu(rsp->error)))
4460 level += ibmvfc_retry_host_init(vhost);
4461 else
4462 ibmvfc_link_down(vhost, IBMVFC_LINK_DEAD);
4463 ibmvfc_log(vhost, level, "NPIV Login failed: %s (%x:%x)\n",
4464 ibmvfc_get_cmd_error(be16_to_cpu(rsp->status), be16_to_cpu(rsp->error)),
4465 be16_to_cpu(rsp->status), be16_to_cpu(rsp->error));
4466 ibmvfc_free_event(evt);
4467 return;
4468 case IBMVFC_MAD_CRQ_ERROR:
4469 ibmvfc_retry_host_init(vhost);
4470 fallthrough;
4471 case IBMVFC_MAD_DRIVER_FAILED:
4472 ibmvfc_free_event(evt);
4473 return;
4474 default:
4475 dev_err(vhost->dev, "Invalid NPIV Login response: 0x%x\n", mad_status);
4476 ibmvfc_link_down(vhost, IBMVFC_LINK_DEAD);
4477 ibmvfc_free_event(evt);
4478 return;
4479 }
4480
4481 vhost->client_migrated = 0;
4482
4483 if (!(be32_to_cpu(rsp->flags) & IBMVFC_NATIVE_FC)) {
4484 dev_err(vhost->dev, "Virtual adapter does not support FC. %x\n",
4485 be32_to_cpu(rsp->flags));
4486 ibmvfc_link_down(vhost, IBMVFC_LINK_DEAD);
4487 wake_up(&vhost->work_wait_q);
4488 return;
4489 }
4490
4491 if (be32_to_cpu(rsp->max_cmds) <= IBMVFC_NUM_INTERNAL_REQ) {
4492 dev_err(vhost->dev, "Virtual adapter supported queue depth too small: %d\n",
4493 be32_to_cpu(rsp->max_cmds));
4494 ibmvfc_link_down(vhost, IBMVFC_LINK_DEAD);
4495 wake_up(&vhost->work_wait_q);
4496 return;
4497 }
4498
4499 vhost->logged_in = 1;
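/* max_dma_len is in bytes; shifting by 9 converts to 512-byte sectors */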
4500 npiv_max_sectors = min((uint)(be64_to_cpu(rsp->max_dma_len) >> 9), IBMVFC_MAX_SECTORS);
4501 dev_info(vhost->dev, "Host partition: %s, device: %s %s %s max sectors %u\n",
4502 rsp->partition_name, rsp->device_name, rsp->port_loc_code,
4503 rsp->drc_name, npiv_max_sectors);
4504
4505 fc_host_fabric_name(vhost->host) = be64_to_cpu(rsp->node_name);
4506 fc_host_node_name(vhost->host) = be64_to_cpu(rsp->node_name);
4507 fc_host_port_name(vhost->host) = be64_to_cpu(rsp->port_name);
4508 fc_host_port_id(vhost->host) = be64_to_cpu(rsp->scsi_id);
4509 fc_host_port_type(vhost->host) = FC_PORTTYPE_NPIV;
4510 fc_host_supported_classes(vhost->host) = 0;
4511 if (be32_to_cpu(rsp->service_parms.class1_parms[0]) & 0x80000000)
4512 fc_host_supported_classes(vhost->host) |= FC_COS_CLASS1;
4513 if (be32_to_cpu(rsp->service_parms.class2_parms[0]) & 0x80000000)
4514 fc_host_supported_classes(vhost->host) |= FC_COS_CLASS2;
4515 if (be32_to_cpu(rsp->service_parms.class3_parms[0]) & 0x80000000)
4516 fc_host_supported_classes(vhost->host) |= FC_COS_CLASS3;
4517 fc_host_maxframe_size(vhost->host) =
4518 be16_to_cpu(rsp->service_parms.common.bb_rcv_sz) & 0x0fff;
4519
4520 vhost->host->can_queue = be32_to_cpu(rsp->max_cmds) - IBMVFC_NUM_INTERNAL_REQ;
4521 vhost->host->max_sectors = npiv_max_sectors;
4522 ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_QUERY);
4523 wake_up(&vhost->work_wait_q);
4524 }
4525
4526 /**
4527 * ibmvfc_npiv_login - Sends NPIV login
4528 * @vhost: ibmvfc host struct
4529 *
4530 **/
4531 static void ibmvfc_npiv_login(struct ibmvfc_host *vhost)
4532 {
4533 struct ibmvfc_npiv_login_mad *mad;
4534 struct ibmvfc_event *evt = ibmvfc_get_event(&vhost->crq);
4535
4536 ibmvfc_gather_partition_info(vhost);
4537 ibmvfc_set_login_info(vhost);
4538 ibmvfc_init_event(evt, ibmvfc_npiv_login_done, IBMVFC_MAD_FORMAT);
4539
4540 memcpy(vhost->login_buf, &vhost->login_info, sizeof(vhost->login_info));
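/* The same DMA buffer carries the login request out and the response back */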
4541 mad = &evt->iu.npiv_login;
4542 memset(mad, 0, sizeof(struct ibmvfc_npiv_login_mad));
4543 mad->common.version = cpu_to_be32(1);
4544 mad->common.opcode = cpu_to_be32(IBMVFC_NPIV_LOGIN);
4545 mad->common.length = cpu_to_be16(sizeof(struct ibmvfc_npiv_login_mad));
4546 mad->buffer.va = cpu_to_be64(vhost->login_buf_dma);
4547 mad->buffer.len = cpu_to_be32(sizeof(*vhost->login_buf));
4548
4549 ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_INIT_WAIT);
4550
4551 if (!ibmvfc_send_event(evt, vhost, default_timeout))
4552 ibmvfc_dbg(vhost, "Sent NPIV login\n");
4553 else
4554 ibmvfc_link_down(vhost, IBMVFC_LINK_DEAD);
4555 }
4556
4557 /**
4558 * ibmvfc_npiv_logout_done - Completion handler for NPIV Logout
4559 * @evt: ibmvfc event struct
4560 *
4561 **/
4562 static void ibmvfc_npiv_logout_done(struct ibmvfc_event *evt)
4563 {
4564 struct ibmvfc_host *vhost = evt->vhost;
4565 u32 mad_status = be16_to_cpu(evt->xfer_iu->npiv_logout.common.status);
4566
4567 ibmvfc_free_event(evt);
4568
4569 switch (mad_status) {
4570 case IBMVFC_MAD_SUCCESS:
4571 if (list_empty(&vhost->crq.sent) &&
4572 vhost->action == IBMVFC_HOST_ACTION_LOGO_WAIT) {
4573 ibmvfc_init_host(vhost);
4574 return;
4575 }
4576 break;
4577 case IBMVFC_MAD_FAILED:
4578 case IBMVFC_MAD_NOT_SUPPORTED:
4579 case IBMVFC_MAD_CRQ_ERROR:
4580 case IBMVFC_MAD_DRIVER_FAILED:
4581 default:
4582 ibmvfc_dbg(vhost, "NPIV Logout failed. 0x%X\n", mad_status);
4583 break;
4584 }
4585
4586 ibmvfc_hard_reset_host(vhost);
4587 }
4588
4589 /**
4590 * ibmvfc_npiv_logout - Issue an NPIV Logout
4591 * @vhost: ibmvfc host struct
4592 *
4593 **/
4594 static void ibmvfc_npiv_logout(struct ibmvfc_host *vhost)
4595 {
4596 struct ibmvfc_npiv_logout_mad *mad;
4597 struct ibmvfc_event *evt;
4598
4599 evt = ibmvfc_get_event(&vhost->crq);
4600 ibmvfc_init_event(evt, ibmvfc_npiv_logout_done, IBMVFC_MAD_FORMAT);
4601
4602 mad = &evt->iu.npiv_logout;
4603 memset(mad, 0, sizeof(*mad));
4604 mad->common.version = cpu_to_be32(1);
4605 mad->common.opcode = cpu_to_be32(IBMVFC_NPIV_LOGOUT);
4606 mad->common.length = cpu_to_be16(sizeof(struct ibmvfc_npiv_logout_mad));
4607
4608 ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_LOGO_WAIT);
4609
4610 if (!ibmvfc_send_event(evt, vhost, default_timeout))
4611 ibmvfc_dbg(vhost, "Sent NPIV logout\n");
4612 else
4613 ibmvfc_link_down(vhost, IBMVFC_LINK_DEAD);
4614 }
4615
4616 /**
4617 * ibmvfc_dev_init_to_do - Is there target initialization work to do?
4618 * @vhost: ibmvfc host struct
4619 *
4620 * Returns:
4621 * 1 if work to do / 0 if not
4622 **/
4623 static int ibmvfc_dev_init_to_do(struct ibmvfc_host *vhost)
4624 {
4625 struct ibmvfc_target *tgt;
4626
4627 list_for_each_entry(tgt, &vhost->targets, queue) {
4628 if (tgt->action == IBMVFC_TGT_ACTION_INIT ||
4629 tgt->action == IBMVFC_TGT_ACTION_INIT_WAIT)
4630 return 1;
4631 }
4632
4633 return 0;
4634 }
4635
4636 /**
4637 * ibmvfc_dev_logo_to_do - Is there target logout work to do?
4638 * @vhost: ibmvfc host struct
4639 *
4640 * Returns:
4641 * 1 if work to do / 0 if not
4642 **/
4643 static int ibmvfc_dev_logo_to_do(struct ibmvfc_host *vhost)
4644 {
4645 struct ibmvfc_target *tgt;
4646
4647 list_for_each_entry(tgt, &vhost->targets, queue) {
4648 if (tgt->action == IBMVFC_TGT_ACTION_LOGOUT_RPORT ||
4649 tgt->action == IBMVFC_TGT_ACTION_LOGOUT_RPORT_WAIT)
4650 return 1;
4651 }
4652 return 0;
4653 }
4654
4655 /**
4656 * __ibmvfc_work_to_do - Is there task level work to do? (no locking)
4657 * @vhost: ibmvfc host struct
4658 *
4659 * Returns:
4660 * 1 if work to do / 0 if not
4661 **/
4662 static int __ibmvfc_work_to_do(struct ibmvfc_host *vhost)
4663 {
4664 struct ibmvfc_target *tgt;
4665
4666 if (kthread_should_stop())
4667 return 1;
4668 switch (vhost->action) {
4669 case IBMVFC_HOST_ACTION_NONE:
4670 case IBMVFC_HOST_ACTION_INIT_WAIT:
4671 case IBMVFC_HOST_ACTION_LOGO_WAIT:
4672 return 0;
4673 case IBMVFC_HOST_ACTION_TGT_INIT:
4674 case IBMVFC_HOST_ACTION_QUERY_TGTS:
4675 if (vhost->discovery_threads == disc_threads)
4676 return 0;
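/*
 * There is work if any target still needs its init step started; if
 * every remaining target is just waiting on a response, the work
 * thread can sleep until a completion wakes it.
 */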
4677 list_for_each_entry(tgt, &vhost->targets, queue)
4678 if (tgt->action == IBMVFC_TGT_ACTION_INIT)
4679 return 1;
4680 list_for_each_entry(tgt, &vhost->targets, queue)
4681 if (tgt->action == IBMVFC_TGT_ACTION_INIT_WAIT)
4682 return 0;
4683 return 1;
4684 case IBMVFC_HOST_ACTION_TGT_DEL:
4685 case IBMVFC_HOST_ACTION_TGT_DEL_FAILED:
4686 if (vhost->discovery_threads == disc_threads)
4687 return 0;
4688 list_for_each_entry(tgt, &vhost->targets, queue)
4689 if (tgt->action == IBMVFC_TGT_ACTION_LOGOUT_RPORT)
4690 return 1;
4691 list_for_each_entry(tgt, &vhost->targets, queue)
4692 if (tgt->action == IBMVFC_TGT_ACTION_LOGOUT_RPORT_WAIT)
4693 return 0;
4694 return 1;
4695 case IBMVFC_HOST_ACTION_LOGO:
4696 case IBMVFC_HOST_ACTION_INIT:
4697 case IBMVFC_HOST_ACTION_ALLOC_TGTS:
4698 case IBMVFC_HOST_ACTION_QUERY:
4699 case IBMVFC_HOST_ACTION_RESET:
4700 case IBMVFC_HOST_ACTION_REENABLE:
4701 default:
4702 break;
4703 }
4704
4705 return 1;
4706 }
4707
4708 /**
4709 * ibmvfc_work_to_do - Is there task level work to do?
4710 * @vhost: ibmvfc host struct
4711 *
4712 * Returns:
4713 * 1 if work to do / 0 if not
4714 **/
4715 static int ibmvfc_work_to_do(struct ibmvfc_host *vhost)
4716 {
4717 unsigned long flags;
4718 int rc;
4719
4720 spin_lock_irqsave(vhost->host->host_lock, flags);
4721 rc = __ibmvfc_work_to_do(vhost);
4722 spin_unlock_irqrestore(vhost->host->host_lock, flags);
4723 return rc;
4724 }
4725
4726 /**
4727 * ibmvfc_log_ae - Log async events if necessary
4728 * @vhost: ibmvfc host struct
4729 * @events: events to log
4730 *
4731 **/
4732 static void ibmvfc_log_ae(struct ibmvfc_host *vhost, int events)
4733 {
4734 if (events & IBMVFC_AE_RSCN)
4735 fc_host_post_event(vhost->host, fc_get_event_number(), FCH_EVT_RSCN, 0);
4736 if ((events & IBMVFC_AE_LINKDOWN) &&
4737 vhost->state >= IBMVFC_HALTED)
4738 fc_host_post_event(vhost->host, fc_get_event_number(), FCH_EVT_LINKDOWN, 0);
4739 if ((events & IBMVFC_AE_LINKUP) &&
4740 vhost->state == IBMVFC_INITIALIZING)
4741 fc_host_post_event(vhost->host, fc_get_event_number(), FCH_EVT_LINKUP, 0);
4742 }
4743
4744 /**
4745 * ibmvfc_tgt_add_rport - Tell the FC transport about a new remote port
4746 * @tgt: ibmvfc target struct
4747 *
4748 **/
4749 static void ibmvfc_tgt_add_rport(struct ibmvfc_target *tgt)
4750 {
4751 struct ibmvfc_host *vhost = tgt->vhost;
4752 struct fc_rport *rport;
4753 unsigned long flags;
4754
4755 tgt_dbg(tgt, "Adding rport\n");
4756 rport = fc_remote_port_add(vhost->host, 0, &tgt->ids);
4757 spin_lock_irqsave(vhost->host->host_lock, flags);
4758
4759 if (rport && tgt->action == IBMVFC_TGT_ACTION_DEL_RPORT) {
4760 tgt_dbg(tgt, "Deleting rport\n");
4761 list_del(&tgt->queue);
4762 ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_DELETED_RPORT);
4763 spin_unlock_irqrestore(vhost->host->host_lock, flags);
4764 fc_remote_port_delete(rport);
4765 del_timer_sync(&tgt->timer);
4766 kref_put(&tgt->kref, ibmvfc_release_tgt);
4767 return;
4768 } else if (rport && tgt->action == IBMVFC_TGT_ACTION_DEL_AND_LOGOUT_RPORT) {
4769 tgt_dbg(tgt, "Deleting rport with outstanding I/O\n");
4770 ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_LOGOUT_DELETED_RPORT);
4771 tgt->rport = NULL;
4772 spin_unlock_irqrestore(vhost->host->host_lock, flags);
4773 fc_remote_port_delete(rport);
4774 return;
4775 } else if (rport && tgt->action == IBMVFC_TGT_ACTION_DELETED_RPORT) {
4776 spin_unlock_irqrestore(vhost->host->host_lock, flags);
4777 return;
4778 }
4779
4780 if (rport) {
4781 tgt_dbg(tgt, "rport add succeeded\n");
4782 tgt->rport = rport;
4783 rport->maxframe_size = be16_to_cpu(tgt->service_parms.common.bb_rcv_sz) & 0x0fff;
4784 rport->supported_classes = 0;
4785 tgt->target_id = rport->scsi_target_id;
4786 if (be32_to_cpu(tgt->service_parms.class1_parms[0]) & 0x80000000)
4787 rport->supported_classes |= FC_COS_CLASS1;
4788 if (be32_to_cpu(tgt->service_parms.class2_parms[0]) & 0x80000000)
4789 rport->supported_classes |= FC_COS_CLASS2;
4790 if (be32_to_cpu(tgt->service_parms.class3_parms[0]) & 0x80000000)
4791 rport->supported_classes |= FC_COS_CLASS3;
4792 if (rport->rqst_q)
4793 blk_queue_max_segments(rport->rqst_q, 1);
4794 } else
4795 tgt_dbg(tgt, "rport add failed\n");
4796 spin_unlock_irqrestore(vhost->host->host_lock, flags);
4797 }
4798
4799 /**
4800 * ibmvfc_do_work - Do task level work
4801 * @vhost: ibmvfc host struct
4802 *
4803 **/
4804 static void ibmvfc_do_work(struct ibmvfc_host *vhost)
4805 {
4806 struct ibmvfc_target *tgt;
4807 unsigned long flags;
4808 struct fc_rport *rport;
4809 LIST_HEAD(purge);
4810 int rc;
4811
4812 ibmvfc_log_ae(vhost, vhost->events_to_log);
4813 spin_lock_irqsave(vhost->host->host_lock, flags);
4814 vhost->events_to_log = 0;
4815 switch (vhost->action) {
4816 case IBMVFC_HOST_ACTION_NONE:
4817 case IBMVFC_HOST_ACTION_LOGO_WAIT:
4818 case IBMVFC_HOST_ACTION_INIT_WAIT:
4819 break;
4820 case IBMVFC_HOST_ACTION_RESET:
4821 vhost->action = IBMVFC_HOST_ACTION_TGT_DEL;
4822 list_splice_init(&vhost->purge, &purge);
4823 spin_unlock_irqrestore(vhost->host->host_lock, flags);
4824 ibmvfc_complete_purge(&purge);
4825 rc = ibmvfc_reset_crq(vhost);
4826 spin_lock_irqsave(vhost->host->host_lock, flags);
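/*
 * H_CLOSED from the reset means the partner is not ready yet;
 * re-enable interrupts so its init message is seen when it arrives.
 */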
4827 if (rc == H_CLOSED)
4828 vio_enable_interrupts(to_vio_dev(vhost->dev));
4829 if (rc || (rc = ibmvfc_send_crq_init(vhost)) ||
4830 (rc = vio_enable_interrupts(to_vio_dev(vhost->dev)))) {
4831 ibmvfc_link_down(vhost, IBMVFC_LINK_DEAD);
4832 dev_err(vhost->dev, "Error after reset (rc=%d)\n", rc);
4833 }
4834 break;
4835 case IBMVFC_HOST_ACTION_REENABLE:
4836 vhost->action = IBMVFC_HOST_ACTION_TGT_DEL;
4837 list_splice_init(&vhost->purge, &purge);
4838 spin_unlock_irqrestore(vhost->host->host_lock, flags);
4839 ibmvfc_complete_purge(&purge);
4840 rc = ibmvfc_reenable_crq_queue(vhost);
4841 spin_lock_irqsave(vhost->host->host_lock, flags);
4842 if (rc || (rc = ibmvfc_send_crq_init(vhost))) {
4843 ibmvfc_link_down(vhost, IBMVFC_LINK_DEAD);
4844 dev_err(vhost->dev, "Error after enable (rc=%d)\n", rc);
4845 }
4846 break;
4847 case IBMVFC_HOST_ACTION_LOGO:
4848 vhost->job_step(vhost);
4849 break;
4850 case IBMVFC_HOST_ACTION_INIT:
4851 BUG_ON(vhost->state != IBMVFC_INITIALIZING);
4852 if (vhost->delay_init) {
4853 vhost->delay_init = 0;
4854 spin_unlock_irqrestore(vhost->host->host_lock, flags);
4855 ssleep(15);
4856 return;
4857 } else
4858 vhost->job_step(vhost);
4859 break;
4860 case IBMVFC_HOST_ACTION_QUERY:
4861 list_for_each_entry(tgt, &vhost->targets, queue)
4862 ibmvfc_init_tgt(tgt, ibmvfc_tgt_query_target);
4863 ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_QUERY_TGTS);
4864 break;
4865 case IBMVFC_HOST_ACTION_QUERY_TGTS:
4866 list_for_each_entry(tgt, &vhost->targets, queue) {
4867 if (tgt->action == IBMVFC_TGT_ACTION_INIT) {
4868 tgt->job_step(tgt);
4869 break;
4870 }
4871 }
4872
4873 if (!ibmvfc_dev_init_to_do(vhost))
4874 ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_TGT_DEL);
4875 break;
4876 case IBMVFC_HOST_ACTION_TGT_DEL:
4877 case IBMVFC_HOST_ACTION_TGT_DEL_FAILED:
4878 list_for_each_entry(tgt, &vhost->targets, queue) {
4879 if (tgt->action == IBMVFC_TGT_ACTION_LOGOUT_RPORT) {
4880 tgt->job_step(tgt);
4881 break;
4882 }
4883 }
4884
4885 if (ibmvfc_dev_logo_to_do(vhost)) {
4886 spin_unlock_irqrestore(vhost->host->host_lock, flags);
4887 return;
4888 }
4889
4890 list_for_each_entry(tgt, &vhost->targets, queue) {
4891 if (tgt->action == IBMVFC_TGT_ACTION_DEL_RPORT) {
4892 tgt_dbg(tgt, "Deleting rport\n");
4893 rport = tgt->rport;
4894 tgt->rport = NULL;
4895 list_del(&tgt->queue);
4896 ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_DELETED_RPORT);
4897 spin_unlock_irqrestore(vhost->host->host_lock, flags);
4898 if (rport)
4899 fc_remote_port_delete(rport);
4900 del_timer_sync(&tgt->timer);
4901 kref_put(&tgt->kref, ibmvfc_release_tgt);
4902 return;
4903 } else if (tgt->action == IBMVFC_TGT_ACTION_DEL_AND_LOGOUT_RPORT) {
4904 tgt_dbg(tgt, "Deleting rport with I/O outstanding\n");
4905 rport = tgt->rport;
4906 tgt->rport = NULL;
4907 ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_LOGOUT_DELETED_RPORT);
4908 spin_unlock_irqrestore(vhost->host->host_lock, flags);
4909 if (rport)
4910 fc_remote_port_delete(rport);
4911 return;
4912 }
4913 }
4914
4915 if (vhost->state == IBMVFC_INITIALIZING) {
4916 if (vhost->action == IBMVFC_HOST_ACTION_TGT_DEL_FAILED) {
4917 if (vhost->reinit) {
4918 vhost->reinit = 0;
4919 scsi_block_requests(vhost->host);
4920 ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_QUERY);
4921 spin_unlock_irqrestore(vhost->host->host_lock, flags);
4922 } else {
4923 ibmvfc_set_host_state(vhost, IBMVFC_ACTIVE);
4924 ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_NONE);
4925 wake_up(&vhost->init_wait_q);
4926 schedule_work(&vhost->rport_add_work_q);
4927 vhost->init_retries = 0;
4928 spin_unlock_irqrestore(vhost->host->host_lock, flags);
4929 scsi_unblock_requests(vhost->host);
4930 }
4931
4932 return;
4933 } else {
4934 ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_INIT);
4935 vhost->job_step = ibmvfc_discover_targets;
4936 }
4937 } else {
4938 ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_NONE);
4939 spin_unlock_irqrestore(vhost->host->host_lock, flags);
4940 scsi_unblock_requests(vhost->host);
4941 wake_up(&vhost->init_wait_q);
4942 return;
4943 }
4944 break;
4945 case IBMVFC_HOST_ACTION_ALLOC_TGTS:
4946 ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_TGT_INIT);
4947 spin_unlock_irqrestore(vhost->host->host_lock, flags);
4948 ibmvfc_alloc_targets(vhost);
4949 spin_lock_irqsave(vhost->host->host_lock, flags);
4950 break;
4951 case IBMVFC_HOST_ACTION_TGT_INIT:
4952 list_for_each_entry(tgt, &vhost->targets, queue) {
4953 if (tgt->action == IBMVFC_TGT_ACTION_INIT) {
4954 tgt->job_step(tgt);
4955 break;
4956 }
4957 }
4958
4959 if (!ibmvfc_dev_init_to_do(vhost))
4960 ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_TGT_DEL_FAILED);
4961 break;
4962 default:
4963 break;
4964 }
4965
4966 spin_unlock_irqrestore(vhost->host->host_lock, flags);
4967 }
4968
4969 /**
4970 * ibmvfc_work - Do task level work
4971 * @data: ibmvfc host struct
4972 *
4973 * Returns:
4974 * zero
4975 **/
4976 static int ibmvfc_work(void *data)
4977 {
4978 struct ibmvfc_host *vhost = data;
4979 int rc;
4980
4981 set_user_nice(current, MIN_NICE);
4982
4983 while (1) {
4984 rc = wait_event_interruptible(vhost->work_wait_q,
4985 ibmvfc_work_to_do(vhost));
4986
4987 BUG_ON(rc);
4988
4989 if (kthread_should_stop())
4990 break;
4991
4992 ibmvfc_do_work(vhost);
4993 }
4994
4995 ibmvfc_dbg(vhost, "ibmvfc kthread exiting...\n");
4996 return 0;
4997 }
4998
4999 /**
5000 * ibmvfc_alloc_queue - Allocate queue
5001 * @vhost: ibmvfc host struct
5002 * @queue: ibmvfc queue to allocate
5003 * @fmt: queue format to allocate
5004 *
5005 * Returns:
5006 * 0 on success / non-zero on failure
5007 **/
5008 static int ibmvfc_alloc_queue(struct ibmvfc_host *vhost,
5009 struct ibmvfc_queue *queue,
5010 enum ibmvfc_msg_fmt fmt)
5011 {
5012 struct device *dev = vhost->dev;
5013 size_t fmt_size;
5014
5015 ENTER;
5016 spin_lock_init(&queue->_lock);
5017 queue->q_lock = &queue->_lock;
5018
5019 switch (fmt) {
5020 case IBMVFC_CRQ_FMT:
5021 fmt_size = sizeof(*queue->msgs.crq);
5022 break;
5023 case IBMVFC_ASYNC_FMT:
5024 fmt_size = sizeof(*queue->msgs.async);
5025 break;
5026 default:
5027 dev_warn(dev, "Unknown command/response queue message format: %d\n", fmt);
5028 return -EINVAL;
5029 }
5030
5031 queue->msgs.handle = (void *)get_zeroed_page(GFP_KERNEL);
5032 if (!queue->msgs.handle)
5033 return -ENOMEM;
5034
5035 queue->msg_token = dma_map_single(dev, queue->msgs.handle, PAGE_SIZE,
5036 DMA_BIDIRECTIONAL);
5037
5038 if (dma_mapping_error(dev, queue->msg_token)) {
5039 free_page((unsigned long)queue->msgs.handle);
5040 queue->msgs.handle = NULL;
5041 return -ENOMEM;
5042 }
5043
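/* A single zeroed page backs the queue; size is how many messages fit */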
5044 queue->cur = 0;
5045 queue->fmt = fmt;
5046 queue->size = PAGE_SIZE / fmt_size;
5047 return 0;
5048 }
5049
5050 /**
5051 * ibmvfc_init_crq - Initializes and registers CRQ with hypervisor
5052 * @vhost: ibmvfc host struct
5053 *
5054 * Allocates a page for messages, maps it for dma, and registers
5055 * the crq with the hypervisor.
5056 *
5057 * Return value:
5058 * zero on success / other on failure
5059 **/
5060 static int ibmvfc_init_crq(struct ibmvfc_host *vhost)
5061 {
5062 int rc, retrc = -ENOMEM;
5063 struct device *dev = vhost->dev;
5064 struct vio_dev *vdev = to_vio_dev(dev);
5065 struct ibmvfc_queue *crq = &vhost->crq;
5066
5067 ENTER;
5068 if (ibmvfc_alloc_queue(vhost, crq, IBMVFC_CRQ_FMT))
5069 return -ENOMEM;
5070
5071 retrc = rc = plpar_hcall_norets(H_REG_CRQ, vdev->unit_address,
5072 crq->msg_token, PAGE_SIZE);
5073
5074 if (rc == H_RESOURCE)
5075 /* maybe kexecing and resource is busy. try a reset */
5076 retrc = rc = ibmvfc_reset_crq(vhost);
5077
5078 if (rc == H_CLOSED)
5079 dev_warn(dev, "Partner adapter not ready\n");
5080 else if (rc) {
5081 dev_warn(dev, "Error %d opening adapter\n", rc);
5082 goto reg_crq_failed;
5083 }
5084
5085 retrc = 0;
5086
5087 tasklet_init(&vhost->tasklet, (void *)ibmvfc_tasklet, (unsigned long)vhost);
5088
5089 if ((rc = request_irq(vdev->irq, ibmvfc_interrupt, 0, IBMVFC_NAME, vhost))) {
5090 dev_err(dev, "Couldn't register irq 0x%x. rc=%d\n", vdev->irq, rc);
5091 goto req_irq_failed;
5092 }
5093
5094 if ((rc = vio_enable_interrupts(vdev))) {
5095 dev_err(dev, "Error %d enabling interrupts\n", rc);
5096 goto req_irq_failed;
5097 }
5098
5099 LEAVE;
5100 return retrc;
5101
5102 req_irq_failed:
5103 tasklet_kill(&vhost->tasklet);
5104 do {
5105 rc = plpar_hcall_norets(H_FREE_CRQ, vdev->unit_address);
5106 } while (rc == H_BUSY || H_IS_LONG_BUSY(rc));
5107 reg_crq_failed:
5108 ibmvfc_free_queue(vhost, crq);
5109 return retrc;
5110 }
5111
5112 /**
5113 * ibmvfc_free_mem - Free memory for vhost
5114 * @vhost: ibmvfc host struct
5115 *
5116 * Return value:
5117 * none
5118 **/
5119 static void ibmvfc_free_mem(struct ibmvfc_host *vhost)
5120 {
5121 struct ibmvfc_queue *async_q = &vhost->async_crq;
5122
5123 ENTER;
5124 mempool_destroy(vhost->tgt_pool);
5125 kfree(vhost->trace);
5126 dma_free_coherent(vhost->dev, vhost->disc_buf_sz, vhost->disc_buf,
5127 vhost->disc_buf_dma);
5128 dma_free_coherent(vhost->dev, sizeof(*vhost->login_buf),
5129 vhost->login_buf, vhost->login_buf_dma);
5130 dma_pool_destroy(vhost->sg_pool);
5131 ibmvfc_free_queue(vhost, async_q);
5132 LEAVE;
5133 }
5134
5135 /**
5136 * ibmvfc_alloc_mem - Allocate memory for vhost
5137 * @vhost: ibmvfc host struct
5138 *
5139 * Return value:
5140 * 0 on success / non-zero on failure
5141 **/
5142 static int ibmvfc_alloc_mem(struct ibmvfc_host *vhost)
5143 {
5144 struct ibmvfc_queue *async_q = &vhost->async_crq;
5145 struct device *dev = vhost->dev;
5146
5147 ENTER;
5148 if (ibmvfc_alloc_queue(vhost, async_q, IBMVFC_ASYNC_FMT)) {
5149 dev_err(dev, "Couldn't allocate/map async queue.\n");
5150 goto nomem;
5151 }
5152
5153 vhost->sg_pool = dma_pool_create(IBMVFC_NAME, dev,
5154 SG_ALL * sizeof(struct srp_direct_buf),
5155 sizeof(struct srp_direct_buf), 0);
5156
5157 if (!vhost->sg_pool) {
5158 dev_err(dev, "Failed to allocate sg pool\n");
5159 goto unmap_async_crq;
5160 }
5161
5162 vhost->login_buf = dma_alloc_coherent(dev, sizeof(*vhost->login_buf),
5163 &vhost->login_buf_dma, GFP_KERNEL);
5164
5165 if (!vhost->login_buf) {
5166 dev_err(dev, "Couldn't allocate NPIV login buffer\n");
5167 goto free_sg_pool;
5168 }
5169
5170 vhost->disc_buf_sz = sizeof(*vhost->disc_buf) * max_targets;
5171 vhost->disc_buf = dma_alloc_coherent(dev, vhost->disc_buf_sz,
5172 &vhost->disc_buf_dma, GFP_KERNEL);
5173
5174 if (!vhost->disc_buf) {
5175 dev_err(dev, "Couldn't allocate Discover Targets buffer\n");
5176 goto free_login_buffer;
5177 }
5178
5179 vhost->trace = kcalloc(IBMVFC_NUM_TRACE_ENTRIES,
5180 sizeof(struct ibmvfc_trace_entry), GFP_KERNEL);
5181 atomic_set(&vhost->trace_index, -1);
5182
5183 if (!vhost->trace)
5184 goto free_disc_buffer;
5185
5186 vhost->tgt_pool = mempool_create_kmalloc_pool(IBMVFC_TGT_MEMPOOL_SZ,
5187 sizeof(struct ibmvfc_target));
5188
5189 if (!vhost->tgt_pool) {
5190 dev_err(dev, "Couldn't allocate target memory pool\n");
5191 goto free_trace;
5192 }
5193
5194 LEAVE;
5195 return 0;
5196
5197 free_trace:
5198 kfree(vhost->trace);
5199 free_disc_buffer:
5200 dma_free_coherent(dev, vhost->disc_buf_sz, vhost->disc_buf,
5201 vhost->disc_buf_dma);
5202 free_login_buffer:
5203 dma_free_coherent(dev, sizeof(*vhost->login_buf),
5204 vhost->login_buf, vhost->login_buf_dma);
5205 free_sg_pool:
5206 dma_pool_destroy(vhost->sg_pool);
5207 unmap_async_crq:
5208 ibmvfc_free_queue(vhost, async_q);
5209 nomem:
5210 LEAVE;
5211 return -ENOMEM;
5212 }
5213
5214 /**
5215 * ibmvfc_rport_add_thread - Worker thread for rport adds
5216 * @work: work struct
5217 *
5218 **/
5219 static void ibmvfc_rport_add_thread(struct work_struct *work)
5220 {
5221 struct ibmvfc_host *vhost = container_of(work, struct ibmvfc_host,
5222 rport_add_work_q);
5223 struct ibmvfc_target *tgt;
5224 struct fc_rport *rport;
5225 unsigned long flags;
5226 int did_work;
5227
5228 ENTER;
5229 spin_lock_irqsave(vhost->host->host_lock, flags);
5230 do {
5231 did_work = 0;
5232 if (vhost->state != IBMVFC_ACTIVE)
5233 break;
5234
5235 list_for_each_entry(tgt, &vhost->targets, queue) {
5236 if (tgt->add_rport) {
5237 did_work = 1;
5238 tgt->add_rport = 0;
5239 kref_get(&tgt->kref);
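/*
 * The host lock is dropped around the FC transport calls below,
 * since adding an rport or changing its roles can sleep.
 */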
5240 rport = tgt->rport;
5241 if (!rport) {
5242 spin_unlock_irqrestore(vhost->host->host_lock, flags);
5243 ibmvfc_tgt_add_rport(tgt);
5244 } else if (get_device(&rport->dev)) {
5245 spin_unlock_irqrestore(vhost->host->host_lock, flags);
5246 tgt_dbg(tgt, "Setting rport roles\n");
5247 fc_remote_port_rolechg(rport, tgt->ids.roles);
5248 put_device(&rport->dev);
5249 } else {
5250 spin_unlock_irqrestore(vhost->host->host_lock, flags);
5251 }
5252
5253 kref_put(&tgt->kref, ibmvfc_release_tgt);
5254 spin_lock_irqsave(vhost->host->host_lock, flags);
5255 break;
5256 }
5257 }
5258 } while (did_work);
5259
5260 if (vhost->state == IBMVFC_ACTIVE)
5261 vhost->scan_complete = 1;
5262 spin_unlock_irqrestore(vhost->host->host_lock, flags);
5263 LEAVE;
5264 }
5265
5266 /**
5267 * ibmvfc_probe - Adapter hot plug add entry point
5268 * @vdev: vio device struct
5269 * @id: vio device id struct
5270 *
5271 * Return value:
5272 * 0 on success / non-zero on failure
5273 **/
5274 static int ibmvfc_probe(struct vio_dev *vdev, const struct vio_device_id *id)
5275 {
5276 struct ibmvfc_host *vhost;
5277 struct Scsi_Host *shost;
5278 struct device *dev = &vdev->dev;
5279 int rc = -ENOMEM;
5280
5281 ENTER;
5282 shost = scsi_host_alloc(&driver_template, sizeof(*vhost));
5283 if (!shost) {
5284 dev_err(dev, "Couldn't allocate host data\n");
5285 goto out;
5286 }
5287
5288 shost->transportt = ibmvfc_transport_template;
5289 shost->can_queue = max_requests;
5290 shost->max_lun = max_lun;
5291 shost->max_id = max_targets;
5292 shost->max_sectors = IBMVFC_MAX_SECTORS;
5293 shost->max_cmd_len = IBMVFC_MAX_CDB_LEN;
5294 shost->unique_id = shost->host_no;
5295 shost->nr_hw_queues = IBMVFC_MQ ? IBMVFC_SCSI_HW_QUEUES : 1;
5296
5297 vhost = shost_priv(shost);
5298 INIT_LIST_HEAD(&vhost->targets);
5299 INIT_LIST_HEAD(&vhost->purge);
5300 sprintf(vhost->name, IBMVFC_NAME);
5301 vhost->host = shost;
5302 vhost->dev = dev;
5303 vhost->partition_number = -1;
5304 vhost->log_level = log_level;
5305 vhost->task_set = 1;
5306
5307 vhost->mq_enabled = IBMVFC_MQ;
5308 vhost->client_scsi_channels = IBMVFC_SCSI_CHANNELS;
5309 vhost->using_channels = 0;
5310 vhost->do_enquiry = 1;
5311
5312 strcpy(vhost->partition_name, "UNKNOWN");
5313 init_waitqueue_head(&vhost->work_wait_q);
5314 init_waitqueue_head(&vhost->init_wait_q);
5315 INIT_WORK(&vhost->rport_add_work_q, ibmvfc_rport_add_thread);
5316 mutex_init(&vhost->passthru_mutex);
5317
5318 if ((rc = ibmvfc_alloc_mem(vhost)))
5319 goto free_scsi_host;
5320
5321 vhost->work_thread = kthread_run(ibmvfc_work, vhost, "%s_%d", IBMVFC_NAME,
5322 shost->host_no);
5323
5324 if (IS_ERR(vhost->work_thread)) {
5325 dev_err(dev, "Couldn't create kernel thread: %ld\n",
5326 PTR_ERR(vhost->work_thread));
5327 rc = PTR_ERR(vhost->work_thread);
5328 goto free_host_mem;
5329 }
5330
5331 if ((rc = ibmvfc_init_crq(vhost))) {
5332 dev_err(dev, "Couldn't initialize crq. rc=%d\n", rc);
5333 goto kill_kthread;
5334 }
5335
5336 if ((rc = ibmvfc_init_event_pool(vhost, &vhost->crq))) {
5337 dev_err(dev, "Couldn't initialize event pool. rc=%d\n", rc);
5338 goto release_crq;
5339 }
5340
5341 if ((rc = scsi_add_host(shost, dev)))
5342 goto release_event_pool;
5343
5344 fc_host_dev_loss_tmo(shost) = IBMVFC_DEV_LOSS_TMO;
5345
5346 if ((rc = ibmvfc_create_trace_file(&shost->shost_dev.kobj,
5347 &ibmvfc_trace_attr))) {
5348 dev_err(dev, "Failed to create trace file. rc=%d\n", rc);
5349 goto remove_shost;
5350 }
5351
5352 if (shost_to_fc_host(shost)->rqst_q)
5353 blk_queue_max_segments(shost_to_fc_host(shost)->rqst_q, 1);
5354 dev_set_drvdata(dev, vhost);
5355 spin_lock(&ibmvfc_driver_lock);
5356 list_add_tail(&vhost->queue, &ibmvfc_head);
5357 spin_unlock(&ibmvfc_driver_lock);
5358
5359 ibmvfc_send_crq_init(vhost);
5360 scsi_scan_host(shost);
5361 return 0;
5362
5363 remove_shost:
5364 scsi_remove_host(shost);
5365 release_event_pool:
5366 ibmvfc_free_event_pool(vhost, &vhost->crq);
5367 release_crq:
5368 ibmvfc_release_crq_queue(vhost);
5369 kill_kthread:
5370 kthread_stop(vhost->work_thread);
5371 free_host_mem:
5372 ibmvfc_free_mem(vhost);
5373 free_scsi_host:
5374 scsi_host_put(shost);
5375 out:
5376 LEAVE;
5377 return rc;
5378 }
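
/*
 * Layout note: scsi_host_alloc() is passed sizeof(*vhost), which reserves
 * the driver-private ibmvfc_host directly behind the Scsi_Host in the same
 * allocation; shost_priv() simply returns that trailing hostdata region,
 * so no separate allocation or lookup is needed.  The error labels again
 * mirror the setup sequence in reverse -- on the earliest failure,
 * scsi_host_put() alone suffices because the host was never added.
 * Sketch (struct my_priv is hypothetical):
 *
 *	shost = scsi_host_alloc(&tmpl, sizeof(struct my_priv));
 *	priv = shost_priv(shost);	// == (void *)shost->hostdata
 */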
5379
5380 /**
5381 * ibmvfc_remove - Adapter hot plug remove entry point
5382 * @vdev: vio device struct
5383 *
5384 * Return value:
5385 * 0
5386 **/
5387 static int ibmvfc_remove(struct vio_dev *vdev)
5388 {
5389 struct ibmvfc_host *vhost = dev_get_drvdata(&vdev->dev);
5390 LIST_HEAD(purge);
5391 unsigned long flags;
5392
5393 ENTER;
5394 ibmvfc_remove_trace_file(&vhost->host->shost_dev.kobj, &ibmvfc_trace_attr);
5395
5396 spin_lock_irqsave(vhost->host->host_lock, flags);
5397 ibmvfc_link_down(vhost, IBMVFC_HOST_OFFLINE);
5398 spin_unlock_irqrestore(vhost->host->host_lock, flags);
5399
5400 ibmvfc_wait_while_resetting(vhost);
5401 ibmvfc_release_crq_queue(vhost);
5402 kthread_stop(vhost->work_thread);
5403 fc_remove_host(vhost->host);
5404 scsi_remove_host(vhost->host);
5405
5406 spin_lock_irqsave(vhost->host->host_lock, flags);
5407 ibmvfc_purge_requests(vhost, DID_ERROR);
5408 list_splice_init(&vhost->purge, &purge);
5409 spin_unlock_irqrestore(vhost->host->host_lock, flags);
5410 ibmvfc_complete_purge(&purge);
5411 ibmvfc_free_event_pool(vhost, &vhost->crq);
5412
5413 ibmvfc_free_mem(vhost);
5414 spin_lock(&ibmvfc_driver_lock);
5415 list_del(&vhost->queue);
5416 spin_unlock(&ibmvfc_driver_lock);
5417 scsi_host_put(vhost->host);
5418 LEAVE;
5419 return 0;
5420 }
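
/*
 * The purge above uses the "splice under lock, complete outside the lock"
 * idiom: entries are stolen onto a private on-stack list while host_lock
 * is held, then completed after it is dropped, so completion work that may
 * sleep or take other locks never runs under host_lock:
 *
 *	LIST_HEAD(purge);
 *	spin_lock_irqsave(lock, flags);
 *	list_splice_init(&vhost->purge, &purge);	// steal entries atomically
 *	spin_unlock_irqrestore(lock, flags);
 *	ibmvfc_complete_purge(&purge);			// safe to sleep now
 */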
5421
5422 /**
5423 * ibmvfc_resume - Resume from suspend
5424 * @dev: device struct
5425 *
5426 * We may have lost an interrupt across suspend/resume, so kick the
5427 * interrupt handler.
5428 *
5429 */
5430 static int ibmvfc_resume(struct device *dev)
5431 {
5432 unsigned long flags;
5433 struct ibmvfc_host *vhost = dev_get_drvdata(dev);
5434 struct vio_dev *vdev = to_vio_dev(dev);
5435
5436 spin_lock_irqsave(vhost->host->host_lock, flags);
5437 vio_disable_interrupts(vdev);
5438 tasklet_schedule(&vhost->tasklet);
5439 spin_unlock_irqrestore(vhost->host->host_lock, flags);
5440 return 0;
5441 }
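
/*
 * Resume never touches queue state directly: it masks the VIO interrupt
 * and schedules the driver's existing tasklet, which drains any CRQ
 * entries that arrived while the interrupt was lost and re-enables
 * interrupts once the queues are empty -- the same path a genuine
 * interrupt takes, so no event can be missed across suspend/resume.
 */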
5442
5443 /**
5444 * ibmvfc_get_desired_dma - Calculate DMA resources needed by the driver
5445 * @vdev: vio device struct
5446 *
5447 * Return value:
5448 * Number of bytes the driver will need to DMA map at the same time in
5449 * order to perform well.
5450 */
5451 static unsigned long ibmvfc_get_desired_dma(struct vio_dev *vdev)
5452 {
5453 unsigned long pool_dma = max_requests * sizeof(union ibmvfc_iu);
5454 return pool_dma + ((512 * 1024) * driver_template.cmd_per_lun);
5455 }
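
/*
 * This hook feeds the pseries Cooperative Memory Overcommit (CMO) support
 * in the vio bus code, which uses the returned figure to size the
 * adapter's IO entitlement.  The first term covers one interface unit per
 * possible outstanding request; the second budgets 512 KiB of mapped data
 * per cmd_per_lun slot.  Illustrative arithmetic (values assumed, not
 * quoted from the headers): with max_requests = 100 and cmd_per_lun = 16,
 *
 *	pool_dma = 100 * sizeof(union ibmvfc_iu)
 *	data	 = 16 * 512 KiB = 8 MiB
 *	desired	 = pool_dma + 8 MiB
 */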
5456
5457 static const struct vio_device_id ibmvfc_device_table[] = {
5458 {"fcp", "IBM,vfc-client"},
5459 { "", "" }
5460 };
5461 MODULE_DEVICE_TABLE(vio, ibmvfc_device_table);
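
/*
 * Each vio_device_id pairs an Open Firmware device_type with a
 * "compatible" string: a device tree node of type "fcp" whose compatible
 * property contains "IBM,vfc-client" binds to this driver.
 * MODULE_DEVICE_TABLE() exports the table as a module alias so modprobe
 * can autoload the module when such a node is discovered.
 */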
5462
5463 static const struct dev_pm_ops ibmvfc_pm_ops = {
5464 .resume = ibmvfc_resume,
5465 };
5466
5467 static struct vio_driver ibmvfc_driver = {
5468 .id_table = ibmvfc_device_table,
5469 .probe = ibmvfc_probe,
5470 .remove = ibmvfc_remove,
5471 .get_desired_dma = ibmvfc_get_desired_dma,
5472 .name = IBMVFC_NAME,
5473 .pm = &ibmvfc_pm_ops,
5474 };
5475
5476 static struct fc_function_template ibmvfc_transport_functions = {
5477 .show_host_fabric_name = 1,
5478 .show_host_node_name = 1,
5479 .show_host_port_name = 1,
5480 .show_host_supported_classes = 1,
5481 .show_host_port_type = 1,
5482 .show_host_port_id = 1,
5483 .show_host_maxframe_size = 1,
5484
5485 .get_host_port_state = ibmvfc_get_host_port_state,
5486 .show_host_port_state = 1,
5487
5488 .get_host_speed = ibmvfc_get_host_speed,
5489 .show_host_speed = 1,
5490
5491 .issue_fc_host_lip = ibmvfc_issue_fc_host_lip,
5492 .terminate_rport_io = ibmvfc_terminate_rport_io,
5493
5494 .show_rport_maxframe_size = 1,
5495 .show_rport_supported_classes = 1,
5496
5497 .set_rport_dev_loss_tmo = ibmvfc_set_rport_dev_loss_tmo,
5498 .show_rport_dev_loss_tmo = 1,
5499
5500 .get_starget_node_name = ibmvfc_get_starget_node_name,
5501 .show_starget_node_name = 1,
5502
5503 .get_starget_port_name = ibmvfc_get_starget_port_name,
5504 .show_starget_port_name = 1,
5505
5506 .get_starget_port_id = ibmvfc_get_starget_port_id,
5507 .show_starget_port_id = 1,
5508
5509 .bsg_request = ibmvfc_bsg_request,
5510 .bsg_timeout = ibmvfc_bsg_timeout,
5511 };
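
/*
 * In an fc_function_template, each show_* = 1 flag asks the FC transport
 * class to create the matching sysfs attribute (e.g.
 * /sys/class/fc_host/hostN/port_state).  Where a get_* callback is also
 * supplied, the transport invokes it to refresh the value on every read,
 * so dynamic properties (speed, port state, starget IDs) stay current;
 * flag-only attributes report whatever the driver last stored through the
 * fc_host/fc_starget accessor macros.
 */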
5512
5513 /**
5514 * ibmvfc_module_init - Initialize the ibmvfc module
5515 *
5516 * Return value:
5517 * 0 on success / other on failure
5518 **/
5519 static int __init ibmvfc_module_init(void)
5520 {
5521 int rc;
5522
5523 if (!firmware_has_feature(FW_FEATURE_VIO))
5524 return -ENODEV;
5525
5526 printk(KERN_INFO IBMVFC_NAME": IBM Virtual Fibre Channel Driver version: %s %s\n",
5527 IBMVFC_DRIVER_VERSION, IBMVFC_DRIVER_DATE);
5528
5529 ibmvfc_transport_template = fc_attach_transport(&ibmvfc_transport_functions);
5530 if (!ibmvfc_transport_template)
5531 return -ENOMEM;
5532
5533 rc = vio_register_driver(&ibmvfc_driver);
5534 if (rc)
5535 fc_release_transport(ibmvfc_transport_template);
5536 return rc;
5537 }
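
/*
 * Registration runs in dependency order: the transport template is
 * attached first because ibmvfc_probe() assigns it to every host, and a
 * vio_register_driver() failure releases only what had already succeeded.
 * ibmvfc_module_exit() below reverses this exactly, unregistering the
 * driver (which removes all adapters) before releasing the transport
 * template they reference.
 */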
5538
5539 /**
5540 * ibmvfc_module_exit - Teardown the ibmvfc module
5541 *
5542 * Return value:
5543 * nothing
5544 **/
5545 static void __exit ibmvfc_module_exit(void)
5546 {
5547 vio_unregister_driver(&ibmvfc_driver);
5548 fc_release_transport(ibmvfc_transport_template);
5549 }
5550
5551 module_init(ibmvfc_module_init);
5552 module_exit(ibmvfc_module_exit);