1 /*
2 * zfcp device driver
3 *
4 * Implementation of FSF commands.
5 *
6 * Copyright IBM Corporation 2002, 2008
7 */
8
9 #include <linux/blktrace_api.h>
10 #include "zfcp_ext.h"
11
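/* timer callback: an FSF request timed out, recover by reopening the adapter */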
12 static void zfcp_fsf_request_timeout_handler(unsigned long data)
13 {
14 struct zfcp_adapter *adapter = (struct zfcp_adapter *) data;
15 zfcp_erp_adapter_reopen(adapter, ZFCP_STATUS_COMMON_ERP_FAILED, 62,
16 NULL);
17 }
18
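/* arm the per-request timer that triggers adapter recovery if the request times out */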
19 static void zfcp_fsf_start_timer(struct zfcp_fsf_req *fsf_req,
20 unsigned long timeout)
21 {
22 fsf_req->timer.function = zfcp_fsf_request_timeout_handler;
23 fsf_req->timer.data = (unsigned long) fsf_req->adapter;
24 fsf_req->timer.expires = jiffies + timeout;
25 add_timer(&fsf_req->timer);
26 }
27
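/* arm a fixed 30 second timer for a request issued on behalf of an ERP action */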
28 static void zfcp_fsf_start_erp_timer(struct zfcp_fsf_req *fsf_req)
29 {
30 BUG_ON(!fsf_req->erp_action);
31 fsf_req->timer.function = zfcp_erp_timeout_handler;
32 fsf_req->timer.data = (unsigned long) fsf_req->erp_action;
33 fsf_req->timer.expires = jiffies + 30 * HZ;
34 add_timer(&fsf_req->timer);
35 }
36
37 /* association between FSF command and FSF QTCB type */
38 static u32 fsf_qtcb_type[] = {
39 [FSF_QTCB_FCP_CMND] = FSF_IO_COMMAND,
40 [FSF_QTCB_ABORT_FCP_CMND] = FSF_SUPPORT_COMMAND,
41 [FSF_QTCB_OPEN_PORT_WITH_DID] = FSF_SUPPORT_COMMAND,
42 [FSF_QTCB_OPEN_LUN] = FSF_SUPPORT_COMMAND,
43 [FSF_QTCB_CLOSE_LUN] = FSF_SUPPORT_COMMAND,
44 [FSF_QTCB_CLOSE_PORT] = FSF_SUPPORT_COMMAND,
45 [FSF_QTCB_CLOSE_PHYSICAL_PORT] = FSF_SUPPORT_COMMAND,
46 [FSF_QTCB_SEND_ELS] = FSF_SUPPORT_COMMAND,
47 [FSF_QTCB_SEND_GENERIC] = FSF_SUPPORT_COMMAND,
48 [FSF_QTCB_EXCHANGE_CONFIG_DATA] = FSF_CONFIG_COMMAND,
49 [FSF_QTCB_EXCHANGE_PORT_DATA] = FSF_PORT_COMMAND,
50 [FSF_QTCB_DOWNLOAD_CONTROL_FILE] = FSF_SUPPORT_COMMAND,
51 [FSF_QTCB_UPLOAD_CONTROL_FILE] = FSF_SUPPORT_COMMAND
52 };
53
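/* decode an access control table (ACT) status qualifier and report the rule that denied access */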
54 static void zfcp_act_eval_err(struct zfcp_adapter *adapter, u32 table)
55 {
56 u16 subtable = table >> 16;
57 u16 rule = table & 0xffff;
58 const char *act_type[] = { "unknown", "OS", "WWPN", "DID", "LUN" };
59
60 if (subtable && subtable < ARRAY_SIZE(act_type))
61 dev_warn(&adapter->ccw_device->dev,
62 "Access denied according to ACT rule type %s, "
63 "rule %d\n", act_type[subtable], rule);
64 }
65
66 static void zfcp_fsf_access_denied_port(struct zfcp_fsf_req *req,
67 struct zfcp_port *port)
68 {
69 struct fsf_qtcb_header *header = &req->qtcb->header;
70 dev_warn(&req->adapter->ccw_device->dev,
71 "Access denied to port 0x%016Lx\n",
72 (unsigned long long)port->wwpn);
73 zfcp_act_eval_err(req->adapter, header->fsf_status_qual.halfword[0]);
74 zfcp_act_eval_err(req->adapter, header->fsf_status_qual.halfword[1]);
75 zfcp_erp_port_access_denied(port, 55, req);
76 req->status |= ZFCP_STATUS_FSFREQ_ERROR;
77 }
78
79 static void zfcp_fsf_access_denied_unit(struct zfcp_fsf_req *req,
80 struct zfcp_unit *unit)
81 {
82 struct fsf_qtcb_header *header = &req->qtcb->header;
83 dev_warn(&req->adapter->ccw_device->dev,
84 "Access denied to unit 0x%016Lx on port 0x%016Lx\n",
85 (unsigned long long)unit->fcp_lun,
86 (unsigned long long)unit->port->wwpn);
87 zfcp_act_eval_err(req->adapter, header->fsf_status_qual.halfword[0]);
88 zfcp_act_eval_err(req->adapter, header->fsf_status_qual.halfword[1]);
89 zfcp_erp_unit_access_denied(unit, 59, req);
90 req->status |= ZFCP_STATUS_FSFREQ_ERROR;
91 }
92
93 static void zfcp_fsf_class_not_supp(struct zfcp_fsf_req *req)
94 {
95 dev_err(&req->adapter->ccw_device->dev, "FCP device not "
96 "operational because of an unsupported FC class\n");
97 zfcp_erp_adapter_shutdown(req->adapter, 0, 123, req);
98 req->status |= ZFCP_STATUS_FSFREQ_ERROR;
99 }
100
101 /**
102 * zfcp_fsf_req_free - free memory used by fsf request
103  * @req: pointer to struct zfcp_fsf_req
104 */
105 void zfcp_fsf_req_free(struct zfcp_fsf_req *req)
106 {
107 if (likely(req->pool)) {
108 mempool_free(req, req->pool);
109 return;
110 }
111
112 if (req->qtcb) {
113 kmem_cache_free(zfcp_data.fsf_req_qtcb_cache, req);
114 return;
115 }
116 }
117
118 /**
119 * zfcp_fsf_req_dismiss_all - dismiss all fsf requests
120 * @adapter: pointer to struct zfcp_adapter
121 *
122 * Never ever call this without shutting down the adapter first.
123 * Otherwise the adapter would continue using and corrupting s390 storage.
124  * A BUG_ON() call is included to ensure this is done.
125 * ERP is supposed to be the only user of this function.
126 */
127 void zfcp_fsf_req_dismiss_all(struct zfcp_adapter *adapter)
128 {
129 struct zfcp_fsf_req *req, *tmp;
130 unsigned long flags;
131 LIST_HEAD(remove_queue);
132 unsigned int i;
133
134 BUG_ON(atomic_read(&adapter->status) & ZFCP_STATUS_ADAPTER_QDIOUP);
135 spin_lock_irqsave(&adapter->req_list_lock, flags);
136 for (i = 0; i < REQUEST_LIST_SIZE; i++)
137 list_splice_init(&adapter->req_list[i], &remove_queue);
138 spin_unlock_irqrestore(&adapter->req_list_lock, flags);
139
140 list_for_each_entry_safe(req, tmp, &remove_queue, list) {
141 list_del(&req->list);
142 req->status |= ZFCP_STATUS_FSFREQ_DISMISSED;
143 zfcp_fsf_req_complete(req);
144 }
145 }
146
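/* a port was closed by the FCP channel: look it up by D_ID and trigger reopen or shutdown */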
147 static void zfcp_fsf_status_read_port_closed(struct zfcp_fsf_req *req)
148 {
149 struct fsf_status_read_buffer *sr_buf = req->data;
150 struct zfcp_adapter *adapter = req->adapter;
151 struct zfcp_port *port;
152 int d_id = sr_buf->d_id & ZFCP_DID_MASK;
153 unsigned long flags;
154
155 read_lock_irqsave(&zfcp_data.config_lock, flags);
156 list_for_each_entry(port, &adapter->port_list_head, list)
157 if (port->d_id == d_id) {
158 read_unlock_irqrestore(&zfcp_data.config_lock, flags);
159 switch (sr_buf->status_subtype) {
160 case FSF_STATUS_READ_SUB_CLOSE_PHYS_PORT:
161 zfcp_erp_port_reopen(port, 0, 101, req);
162 break;
163 case FSF_STATUS_READ_SUB_ERROR_PORT:
164 zfcp_erp_port_shutdown(port, 0, 122, req);
165 break;
166 }
167 return;
168 }
169 read_unlock_irqrestore(&zfcp_data.config_lock, flags);
170 }
171
172 static void zfcp_fsf_link_down_info_eval(struct zfcp_fsf_req *req, u8 id,
173 struct fsf_link_down_info *link_down)
174 {
175 struct zfcp_adapter *adapter = req->adapter;
176
177 if (atomic_read(&adapter->status) & ZFCP_STATUS_ADAPTER_LINK_UNPLUGGED)
178 return;
179
180 atomic_set_mask(ZFCP_STATUS_ADAPTER_LINK_UNPLUGGED, &adapter->status);
181
182 if (!link_down)
183 goto out;
184
185 switch (link_down->error_code) {
186 case FSF_PSQ_LINK_NO_LIGHT:
187 dev_warn(&req->adapter->ccw_device->dev,
188 "There is no light signal from the local "
189 "fibre channel cable\n");
190 break;
191 case FSF_PSQ_LINK_WRAP_PLUG:
192 dev_warn(&req->adapter->ccw_device->dev,
193 "There is a wrap plug instead of a fibre "
194 "channel cable\n");
195 break;
196 case FSF_PSQ_LINK_NO_FCP:
197 dev_warn(&req->adapter->ccw_device->dev,
198 "The adjacent fibre channel node does not "
199 "support FCP\n");
200 break;
201 case FSF_PSQ_LINK_FIRMWARE_UPDATE:
202 dev_warn(&req->adapter->ccw_device->dev,
203 "The FCP device is suspended because of a "
204 "firmware update\n");
205 break;
206 case FSF_PSQ_LINK_INVALID_WWPN:
207 dev_warn(&req->adapter->ccw_device->dev,
208 "The FCP device detected a WWPN that is "
209 "duplicate or not valid\n");
210 break;
211 case FSF_PSQ_LINK_NO_NPIV_SUPPORT:
212 dev_warn(&req->adapter->ccw_device->dev,
213 "The fibre channel fabric does not support NPIV\n");
214 break;
215 case FSF_PSQ_LINK_NO_FCP_RESOURCES:
216 dev_warn(&req->adapter->ccw_device->dev,
217 "The FCP adapter cannot support more NPIV ports\n");
218 break;
219 case FSF_PSQ_LINK_NO_FABRIC_RESOURCES:
220 dev_warn(&req->adapter->ccw_device->dev,
221 "The adjacent switch cannot support "
222 "more NPIV ports\n");
223 break;
224 case FSF_PSQ_LINK_FABRIC_LOGIN_UNABLE:
225 dev_warn(&req->adapter->ccw_device->dev,
226 "The FCP adapter could not log in to the "
227 "fibre channel fabric\n");
228 break;
229 case FSF_PSQ_LINK_WWPN_ASSIGNMENT_CORRUPTED:
230 dev_warn(&req->adapter->ccw_device->dev,
231 "The WWPN assignment file on the FCP adapter "
232 "has been damaged\n");
233 break;
234 case FSF_PSQ_LINK_MODE_TABLE_CURRUPTED:
235 dev_warn(&req->adapter->ccw_device->dev,
236 "The mode table on the FCP adapter "
237 "has been damaged\n");
238 break;
239 case FSF_PSQ_LINK_NO_WWPN_ASSIGNMENT:
240 dev_warn(&req->adapter->ccw_device->dev,
241 "All NPIV ports on the FCP adapter have "
242 "been assigned\n");
243 break;
244 default:
245 dev_warn(&req->adapter->ccw_device->dev,
246 "The link between the FCP adapter and "
247 "the FC fabric is down\n");
248 }
249 out:
250 zfcp_erp_adapter_failed(adapter, id, req);
251 }
252
253 static void zfcp_fsf_status_read_link_down(struct zfcp_fsf_req *req)
254 {
255 struct fsf_status_read_buffer *sr_buf = req->data;
256 struct fsf_link_down_info *ldi =
257 (struct fsf_link_down_info *) &sr_buf->payload;
258
259 switch (sr_buf->status_subtype) {
260 case FSF_STATUS_READ_SUB_NO_PHYSICAL_LINK:
261 zfcp_fsf_link_down_info_eval(req, 38, ldi);
262 break;
263 case FSF_STATUS_READ_SUB_FDISC_FAILED:
264 zfcp_fsf_link_down_info_eval(req, 39, ldi);
265 break;
266 case FSF_STATUS_READ_SUB_FIRMWARE_UPDATE:
267 zfcp_fsf_link_down_info_eval(req, 40, NULL);
268 	}
269 }
270
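/* process one unsolicited status buffer, free it and the request, and queue stat_work to replenish status reads */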
271 static void zfcp_fsf_status_read_handler(struct zfcp_fsf_req *req)
272 {
273 struct zfcp_adapter *adapter = req->adapter;
274 struct fsf_status_read_buffer *sr_buf = req->data;
275
276 if (req->status & ZFCP_STATUS_FSFREQ_DISMISSED) {
277 zfcp_hba_dbf_event_fsf_unsol("dism", adapter, sr_buf);
278 mempool_free(sr_buf, adapter->pool.data_status_read);
279 zfcp_fsf_req_free(req);
280 return;
281 }
282
283 zfcp_hba_dbf_event_fsf_unsol("read", adapter, sr_buf);
284
285 switch (sr_buf->status_type) {
286 case FSF_STATUS_READ_PORT_CLOSED:
287 zfcp_fsf_status_read_port_closed(req);
288 break;
289 case FSF_STATUS_READ_INCOMING_ELS:
290 zfcp_fc_incoming_els(req);
291 break;
292 case FSF_STATUS_READ_SENSE_DATA_AVAIL:
293 break;
294 case FSF_STATUS_READ_BIT_ERROR_THRESHOLD:
295 dev_warn(&adapter->ccw_device->dev,
296 "The error threshold for checksum statistics "
297 "has been exceeded\n");
298 zfcp_hba_dbf_event_berr(adapter, req);
299 break;
300 case FSF_STATUS_READ_LINK_DOWN:
301 zfcp_fsf_status_read_link_down(req);
302 break;
303 case FSF_STATUS_READ_LINK_UP:
304 dev_info(&adapter->ccw_device->dev,
305 "The local link has been restored\n");
306 /* All ports should be marked as ready to run again */
307 zfcp_erp_modify_adapter_status(adapter, 30, NULL,
308 ZFCP_STATUS_COMMON_RUNNING,
309 ZFCP_SET);
310 zfcp_erp_adapter_reopen(adapter,
311 ZFCP_STATUS_ADAPTER_LINK_UNPLUGGED |
312 ZFCP_STATUS_COMMON_ERP_FAILED,
313 102, req);
314 break;
315 case FSF_STATUS_READ_NOTIFICATION_LOST:
316 if (sr_buf->status_subtype & FSF_STATUS_READ_SUB_ACT_UPDATED)
317 zfcp_erp_adapter_access_changed(adapter, 135, req);
318 if (sr_buf->status_subtype & FSF_STATUS_READ_SUB_INCOMING_ELS)
319 schedule_work(&adapter->scan_work);
320 break;
321 case FSF_STATUS_READ_CFDC_UPDATED:
322 zfcp_erp_adapter_access_changed(adapter, 136, req);
323 break;
324 case FSF_STATUS_READ_FEATURE_UPDATE_ALERT:
325 adapter->adapter_features = sr_buf->payload.word[0];
326 break;
327 }
328
329 mempool_free(sr_buf, adapter->pool.data_status_read);
330 zfcp_fsf_req_free(req);
331
332 atomic_inc(&adapter->stat_miss);
333 queue_work(zfcp_data.work_queue, &adapter->stat_work);
334 }
335
336 static void zfcp_fsf_fsfstatus_qual_eval(struct zfcp_fsf_req *req)
337 {
338 switch (req->qtcb->header.fsf_status_qual.word[0]) {
339 case FSF_SQ_FCP_RSP_AVAILABLE:
340 case FSF_SQ_INVOKE_LINK_TEST_PROCEDURE:
341 case FSF_SQ_NO_RETRY_POSSIBLE:
342 case FSF_SQ_ULP_DEPENDENT_ERP_REQUIRED:
343 return;
344 case FSF_SQ_COMMAND_ABORTED:
345 req->status |= ZFCP_STATUS_FSFREQ_ABORTED;
346 break;
347 case FSF_SQ_NO_RECOM:
348 dev_err(&req->adapter->ccw_device->dev,
349 "The FCP adapter reported a problem "
350 "that cannot be recovered\n");
351 zfcp_erp_adapter_shutdown(req->adapter, 0, 121, req);
352 break;
353 }
354 	/* all non-return statuses set FSFREQ_ERROR */
355 req->status |= ZFCP_STATUS_FSFREQ_ERROR;
356 }
357
358 static void zfcp_fsf_fsfstatus_eval(struct zfcp_fsf_req *req)
359 {
360 if (unlikely(req->status & ZFCP_STATUS_FSFREQ_ERROR))
361 return;
362
363 switch (req->qtcb->header.fsf_status) {
364 case FSF_UNKNOWN_COMMAND:
365 dev_err(&req->adapter->ccw_device->dev,
366 "The FCP adapter does not recognize the command 0x%x\n",
367 req->qtcb->header.fsf_command);
368 zfcp_erp_adapter_shutdown(req->adapter, 0, 120, req);
369 req->status |= ZFCP_STATUS_FSFREQ_ERROR;
370 break;
371 case FSF_ADAPTER_STATUS_AVAILABLE:
372 zfcp_fsf_fsfstatus_qual_eval(req);
373 break;
374 }
375 }
376
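/* evaluate the protocol status in the QTCB prefix; anything but GOOD or STATUS_PRESENTED triggers recovery and marks the request failed */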
377 static void zfcp_fsf_protstatus_eval(struct zfcp_fsf_req *req)
378 {
379 struct zfcp_adapter *adapter = req->adapter;
380 struct fsf_qtcb *qtcb = req->qtcb;
381 union fsf_prot_status_qual *psq = &qtcb->prefix.prot_status_qual;
382
383 zfcp_hba_dbf_event_fsf_response(req);
384
385 if (req->status & ZFCP_STATUS_FSFREQ_DISMISSED) {
386 req->status |= ZFCP_STATUS_FSFREQ_ERROR |
387 ZFCP_STATUS_FSFREQ_RETRY; /* only for SCSI cmnds. */
388 return;
389 }
390
391 switch (qtcb->prefix.prot_status) {
392 case FSF_PROT_GOOD:
393 case FSF_PROT_FSF_STATUS_PRESENTED:
394 return;
395 case FSF_PROT_QTCB_VERSION_ERROR:
396 dev_err(&adapter->ccw_device->dev,
397 "QTCB version 0x%x not supported by FCP adapter "
398 "(0x%x to 0x%x)\n", FSF_QTCB_CURRENT_VERSION,
399 psq->word[0], psq->word[1]);
400 zfcp_erp_adapter_shutdown(adapter, 0, 117, req);
401 break;
402 case FSF_PROT_ERROR_STATE:
403 case FSF_PROT_SEQ_NUMB_ERROR:
404 zfcp_erp_adapter_reopen(adapter, 0, 98, req);
405 req->status |= ZFCP_STATUS_FSFREQ_RETRY;
406 break;
407 case FSF_PROT_UNSUPP_QTCB_TYPE:
408 dev_err(&adapter->ccw_device->dev,
409 "The QTCB type is not supported by the FCP adapter\n");
410 zfcp_erp_adapter_shutdown(adapter, 0, 118, req);
411 break;
412 case FSF_PROT_HOST_CONNECTION_INITIALIZING:
413 atomic_set_mask(ZFCP_STATUS_ADAPTER_HOST_CON_INIT,
414 &adapter->status);
415 break;
416 case FSF_PROT_DUPLICATE_REQUEST_ID:
417 dev_err(&adapter->ccw_device->dev,
418 "0x%Lx is an ambiguous request identifier\n",
419 (unsigned long long)qtcb->bottom.support.req_handle);
420 zfcp_erp_adapter_shutdown(adapter, 0, 78, req);
421 break;
422 case FSF_PROT_LINK_DOWN:
423 zfcp_fsf_link_down_info_eval(req, 37, &psq->link_down_info);
424 /* FIXME: reopening adapter now? better wait for link up */
425 zfcp_erp_adapter_reopen(adapter, 0, 79, req);
426 break;
427 case FSF_PROT_REEST_QUEUE:
428 /* All ports should be marked as ready to run again */
429 zfcp_erp_modify_adapter_status(adapter, 28, NULL,
430 ZFCP_STATUS_COMMON_RUNNING,
431 ZFCP_SET);
432 zfcp_erp_adapter_reopen(adapter,
433 ZFCP_STATUS_ADAPTER_LINK_UNPLUGGED |
434 ZFCP_STATUS_COMMON_ERP_FAILED, 99, req);
435 break;
436 default:
437 dev_err(&adapter->ccw_device->dev,
438 "0x%x is not a valid transfer protocol status\n",
439 qtcb->prefix.prot_status);
440 zfcp_erp_adapter_shutdown(adapter, 0, 119, req);
441 }
442 req->status |= ZFCP_STATUS_FSFREQ_ERROR;
443 }
444
445 /**
446 * zfcp_fsf_req_complete - process completion of a FSF request
447  * @req: The FSF request that has been completed.
448 *
449 * When a request has been completed either from the FCP adapter,
450 * or it has been dismissed due to a queue shutdown, this function
451 * is called to process the completion status and trigger further
452 * events related to the FSF request.
453 */
454 void zfcp_fsf_req_complete(struct zfcp_fsf_req *req)
455 {
456 if (unlikely(req->fsf_command == FSF_QTCB_UNSOLICITED_STATUS)) {
457 zfcp_fsf_status_read_handler(req);
458 return;
459 }
460
461 del_timer(&req->timer);
462 zfcp_fsf_protstatus_eval(req);
463 zfcp_fsf_fsfstatus_eval(req);
464 req->handler(req);
465
466 if (req->erp_action)
467 zfcp_erp_notify(req->erp_action, 0);
468 req->status |= ZFCP_STATUS_FSFREQ_COMPLETED;
469
470 if (likely(req->status & ZFCP_STATUS_FSFREQ_CLEANUP))
471 zfcp_fsf_req_free(req);
472 else
473 		/* notify initiator waiting for the request's completion */
474 /*
475 * FIXME: Race! We must not access fsf_req here as it might have been
476 * cleaned up already due to the set ZFCP_STATUS_FSFREQ_COMPLETED
477 	 * flag. It's an improbable case. But we have the same paranoia for
478 * the cleanup flag already.
479 * Might better be handled using complete()?
480 * (setting the flag and doing wakeup ought to be atomic
481 * with regard to checking the flag as long as waitqueue is
482 * part of the to be released structure)
483 */
484 wake_up(&req->completion_wq);
485 }
486
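/* copy exchange-config results into the fc_host and adapter structures; returns -EIO for an unsupported topology */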
487 static int zfcp_fsf_exchange_config_evaluate(struct zfcp_fsf_req *req)
488 {
489 struct fsf_qtcb_bottom_config *bottom;
490 struct zfcp_adapter *adapter = req->adapter;
491 struct Scsi_Host *shost = adapter->scsi_host;
492
493 bottom = &req->qtcb->bottom.config;
494
495 if (req->data)
496 memcpy(req->data, bottom, sizeof(*bottom));
497
498 fc_host_node_name(shost) = bottom->nport_serv_param.wwnn;
499 fc_host_port_name(shost) = bottom->nport_serv_param.wwpn;
500 fc_host_port_id(shost) = bottom->s_id & ZFCP_DID_MASK;
501 fc_host_speed(shost) = bottom->fc_link_speed;
502 fc_host_supported_classes(shost) = FC_COS_CLASS2 | FC_COS_CLASS3;
503
504 adapter->hydra_version = bottom->adapter_type;
505 adapter->timer_ticks = bottom->timer_interval;
506
507 if (fc_host_permanent_port_name(shost) == -1)
508 fc_host_permanent_port_name(shost) = fc_host_port_name(shost);
509
510 switch (bottom->fc_topology) {
511 case FSF_TOPO_P2P:
512 adapter->peer_d_id = bottom->peer_d_id & ZFCP_DID_MASK;
513 adapter->peer_wwpn = bottom->plogi_payload.wwpn;
514 adapter->peer_wwnn = bottom->plogi_payload.wwnn;
515 fc_host_port_type(shost) = FC_PORTTYPE_PTP;
516 break;
517 case FSF_TOPO_FABRIC:
518 fc_host_port_type(shost) = FC_PORTTYPE_NPORT;
519 break;
520 case FSF_TOPO_AL:
521 fc_host_port_type(shost) = FC_PORTTYPE_NLPORT;
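		/* fall through: arbitrated loop is not supported */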
522 default:
523 dev_err(&adapter->ccw_device->dev,
524 "Unknown or unsupported arbitrated loop "
525 "fibre channel topology detected\n");
526 zfcp_erp_adapter_shutdown(adapter, 0, 127, req);
527 return -EIO;
528 }
529
530 return 0;
531 }
532
533 static void zfcp_fsf_exchange_config_data_handler(struct zfcp_fsf_req *req)
534 {
535 struct zfcp_adapter *adapter = req->adapter;
536 struct fsf_qtcb *qtcb = req->qtcb;
537 struct fsf_qtcb_bottom_config *bottom = &qtcb->bottom.config;
538 struct Scsi_Host *shost = adapter->scsi_host;
539
540 if (req->status & ZFCP_STATUS_FSFREQ_ERROR)
541 return;
542
543 adapter->fsf_lic_version = bottom->lic_version;
544 adapter->adapter_features = bottom->adapter_features;
545 adapter->connection_features = bottom->connection_features;
546 adapter->peer_wwpn = 0;
547 adapter->peer_wwnn = 0;
548 adapter->peer_d_id = 0;
549
550 switch (qtcb->header.fsf_status) {
551 case FSF_GOOD:
552 if (zfcp_fsf_exchange_config_evaluate(req))
553 return;
554
555 if (bottom->max_qtcb_size < sizeof(struct fsf_qtcb)) {
556 dev_err(&adapter->ccw_device->dev,
557 "FCP adapter maximum QTCB size (%d bytes) "
558 "is too small\n",
559 bottom->max_qtcb_size);
560 zfcp_erp_adapter_shutdown(adapter, 0, 129, req);
561 return;
562 }
563 atomic_set_mask(ZFCP_STATUS_ADAPTER_XCONFIG_OK,
564 &adapter->status);
565 break;
566 case FSF_EXCHANGE_CONFIG_DATA_INCOMPLETE:
567 fc_host_node_name(shost) = 0;
568 fc_host_port_name(shost) = 0;
569 fc_host_port_id(shost) = 0;
570 fc_host_speed(shost) = FC_PORTSPEED_UNKNOWN;
571 fc_host_port_type(shost) = FC_PORTTYPE_UNKNOWN;
572 adapter->hydra_version = 0;
573
574 atomic_set_mask(ZFCP_STATUS_ADAPTER_XCONFIG_OK,
575 &adapter->status);
576
577 zfcp_fsf_link_down_info_eval(req, 42,
578 &qtcb->header.fsf_status_qual.link_down_info);
579 break;
580 default:
581 zfcp_erp_adapter_shutdown(adapter, 0, 130, req);
582 return;
583 }
584
585 if (adapter->adapter_features & FSF_FEATURE_HBAAPI_MANAGEMENT) {
586 adapter->hardware_version = bottom->hardware_version;
587 memcpy(fc_host_serial_number(shost), bottom->serial_number,
588 min(FC_SERIAL_NUMBER_SIZE, 17));
589 EBCASC(fc_host_serial_number(shost),
590 min(FC_SERIAL_NUMBER_SIZE, 17));
591 }
592
593 if (FSF_QTCB_CURRENT_VERSION < bottom->low_qtcb_version) {
594 dev_err(&adapter->ccw_device->dev,
595 "The FCP adapter only supports newer "
596 "control block versions\n");
597 zfcp_erp_adapter_shutdown(adapter, 0, 125, req);
598 return;
599 }
600 if (FSF_QTCB_CURRENT_VERSION > bottom->high_qtcb_version) {
601 dev_err(&adapter->ccw_device->dev,
602 "The FCP adapter only supports older "
603 "control block versions\n");
604 zfcp_erp_adapter_shutdown(adapter, 0, 126, req);
605 }
606 }
607
608 static void zfcp_fsf_exchange_port_evaluate(struct zfcp_fsf_req *req)
609 {
610 struct zfcp_adapter *adapter = req->adapter;
611 struct fsf_qtcb_bottom_port *bottom = &req->qtcb->bottom.port;
612 struct Scsi_Host *shost = adapter->scsi_host;
613
614 if (req->data)
615 memcpy(req->data, bottom, sizeof(*bottom));
616
617 if (adapter->connection_features & FSF_FEATURE_NPIV_MODE)
618 fc_host_permanent_port_name(shost) = bottom->wwpn;
619 else
620 fc_host_permanent_port_name(shost) = fc_host_port_name(shost);
621 fc_host_maxframe_size(shost) = bottom->maximum_frame_size;
622 fc_host_supported_speeds(shost) = bottom->supported_speed;
623 }
624
625 static void zfcp_fsf_exchange_port_data_handler(struct zfcp_fsf_req *req)
626 {
627 struct fsf_qtcb *qtcb = req->qtcb;
628
629 if (req->status & ZFCP_STATUS_FSFREQ_ERROR)
630 return;
631
632 switch (qtcb->header.fsf_status) {
633 case FSF_GOOD:
634 zfcp_fsf_exchange_port_evaluate(req);
635 break;
636 case FSF_EXCHANGE_CONFIG_DATA_INCOMPLETE:
637 zfcp_fsf_exchange_port_evaluate(req);
638 zfcp_fsf_link_down_info_eval(req, 43,
639 &qtcb->header.fsf_status_qual.link_down_info);
640 break;
641 }
642 }
643
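/* check for a free SBAL in the request queue; returns 1 with req_q_lock held, otherwise 0 with the lock released */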
644 static int zfcp_fsf_sbal_check(struct zfcp_adapter *adapter)
645 {
646 struct zfcp_qdio_queue *req_q = &adapter->req_q;
647
648 spin_lock_bh(&adapter->req_q_lock);
649 if (atomic_read(&req_q->count))
650 return 1;
651 spin_unlock_bh(&adapter->req_q_lock);
652 return 0;
653 }
654
655 static int zfcp_fsf_sbal_available(struct zfcp_adapter *adapter)
656 {
657 unsigned int count = atomic_read(&adapter->req_q.count);
658 if (!count)
659 atomic_inc(&adapter->qdio_outb_full);
660 return count > 0;
661 }
662
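/* wait up to five seconds for a free SBAL; temporarily drops req_q_lock and returns 0 with the lock held on success, -EIO otherwise */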
663 static int zfcp_fsf_req_sbal_get(struct zfcp_adapter *adapter)
664 {
665 long ret;
666
667 spin_unlock_bh(&adapter->req_q_lock);
668 ret = wait_event_interruptible_timeout(adapter->request_wq,
669 zfcp_fsf_sbal_check(adapter), 5 * HZ);
670 if (ret > 0)
671 return 0;
672 if (!ret)
673 atomic_inc(&adapter->qdio_outb_full);
674
675 spin_lock_bh(&adapter->req_q_lock);
676 return -EIO;
677 }
678
679 static struct zfcp_fsf_req *zfcp_fsf_alloc_noqtcb(mempool_t *pool)
680 {
681 struct zfcp_fsf_req *req;
682 req = mempool_alloc(pool, GFP_ATOMIC);
683 if (!req)
684 return NULL;
685 memset(req, 0, sizeof(*req));
686 req->pool = pool;
687 return req;
688 }
689
690 static struct zfcp_fsf_req *zfcp_fsf_alloc_qtcb(mempool_t *pool)
691 {
692 struct zfcp_fsf_req_qtcb *qtcb;
693
694 if (likely(pool))
695 qtcb = mempool_alloc(pool, GFP_ATOMIC);
696 else
697 qtcb = kmem_cache_alloc(zfcp_data.fsf_req_qtcb_cache,
698 GFP_ATOMIC);
699 if (unlikely(!qtcb))
700 return NULL;
701
702 memset(qtcb, 0, sizeof(*qtcb));
703 qtcb->fsf_req.qtcb = &qtcb->qtcb;
704 qtcb->fsf_req.pool = pool;
705
706 return &qtcb->fsf_req;
707 }
708
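/* allocate an FSF request (with or without QTCB), assign a request id and set up its first SBAL entry and QTCB prefix */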
709 static struct zfcp_fsf_req *zfcp_fsf_req_create(struct zfcp_adapter *adapter,
710 u32 fsf_cmd, int req_flags,
711 mempool_t *pool)
712 {
713 struct qdio_buffer_element *sbale;
714
715 struct zfcp_fsf_req *req;
716 struct zfcp_qdio_queue *req_q = &adapter->req_q;
717
718 if (req_flags & ZFCP_REQ_NO_QTCB)
719 req = zfcp_fsf_alloc_noqtcb(pool);
720 else
721 req = zfcp_fsf_alloc_qtcb(pool);
722
723 if (unlikely(!req))
724 return ERR_PTR(-EIO);
725
726 if (adapter->req_no == 0)
727 adapter->req_no++;
728
729 INIT_LIST_HEAD(&req->list);
730 init_timer(&req->timer);
731 init_waitqueue_head(&req->completion_wq);
732
733 req->adapter = adapter;
734 req->fsf_command = fsf_cmd;
735 req->req_id = adapter->req_no++;
736 req->sbal_number = 1;
737 req->sbal_first = req_q->first;
738 req->sbal_last = req_q->first;
739 req->sbale_curr = 1;
740
741 sbale = zfcp_qdio_sbale_req(req);
742 sbale[0].addr = (void *) req->req_id;
743 sbale[0].flags |= SBAL_FLAGS0_COMMAND;
744
745 if (likely(req->qtcb)) {
746 req->qtcb->prefix.req_seq_no = req->adapter->fsf_req_seq_no;
747 req->qtcb->prefix.req_id = req->req_id;
748 req->qtcb->prefix.ulp_info = 26;
749 req->qtcb->prefix.qtcb_type = fsf_qtcb_type[req->fsf_command];
750 req->qtcb->prefix.qtcb_version = FSF_QTCB_CURRENT_VERSION;
751 req->qtcb->header.req_handle = req->req_id;
752 req->qtcb->header.fsf_command = req->fsf_command;
753 req->seq_no = adapter->fsf_req_seq_no;
754 req->qtcb->prefix.req_seq_no = adapter->fsf_req_seq_no;
755 sbale[1].addr = (void *) req->qtcb;
756 sbale[1].length = sizeof(struct fsf_qtcb);
757 }
758
759 if (!(atomic_read(&adapter->status) & ZFCP_STATUS_ADAPTER_QDIOUP)) {
760 zfcp_fsf_req_free(req);
761 return ERR_PTR(-EIO);
762 }
763
764 if (likely(req_flags & ZFCP_REQ_AUTO_CLEANUP))
765 req->status |= ZFCP_STATUS_FSFREQ_CLEANUP;
766
767 return req;
768 }
769
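/* register the request in the outstanding-request hash and hand it to QDIO; on failure unregister it again and reopen the adapter */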
770 static int zfcp_fsf_req_send(struct zfcp_fsf_req *req)
771 {
772 struct zfcp_adapter *adapter = req->adapter;
773 unsigned long flags;
774 int idx;
775
776 /* put allocated FSF request into hash table */
777 spin_lock_irqsave(&adapter->req_list_lock, flags);
778 idx = zfcp_reqlist_hash(req->req_id);
779 list_add_tail(&req->list, &adapter->req_list[idx]);
780 spin_unlock_irqrestore(&adapter->req_list_lock, flags);
781
782 req->qdio_outb_usage = atomic_read(&adapter->req_q.count);
783 req->issued = get_clock();
784 if (zfcp_qdio_send(req)) {
785 del_timer(&req->timer);
786 spin_lock_irqsave(&adapter->req_list_lock, flags);
787 /* lookup request again, list might have changed */
788 if (zfcp_reqlist_find_safe(adapter, req))
789 zfcp_reqlist_remove(adapter, req);
790 spin_unlock_irqrestore(&adapter->req_list_lock, flags);
791 zfcp_erp_adapter_reopen(adapter, 0, 116, req);
792 return -EIO;
793 }
794
795 /* Don't increase for unsolicited status */
796 if (req->qtcb)
797 adapter->fsf_req_seq_no++;
798
799 return 0;
800 }
801
802 /**
803 * zfcp_fsf_status_read - send status read request
804 * @adapter: pointer to struct zfcp_adapter
806 * Returns: 0 on success, ERROR otherwise
807 */
808 int zfcp_fsf_status_read(struct zfcp_adapter *adapter)
809 {
810 struct zfcp_fsf_req *req;
811 struct fsf_status_read_buffer *sr_buf;
812 struct qdio_buffer_element *sbale;
813 int retval = -EIO;
814
815 spin_lock_bh(&adapter->req_q_lock);
816 if (zfcp_fsf_req_sbal_get(adapter))
817 goto out;
818
819 req = zfcp_fsf_req_create(adapter, FSF_QTCB_UNSOLICITED_STATUS,
820 ZFCP_REQ_NO_QTCB,
821 adapter->pool.fsf_req_status_read);
822 if (IS_ERR(req)) {
823 retval = PTR_ERR(req);
824 goto out;
825 }
826
827 sbale = zfcp_qdio_sbale_req(req);
828 sbale[0].flags |= SBAL_FLAGS0_TYPE_STATUS;
829 sbale[2].flags |= SBAL_FLAGS_LAST_ENTRY;
830 req->sbale_curr = 2;
831
832 sr_buf = mempool_alloc(adapter->pool.data_status_read, GFP_ATOMIC);
833 if (!sr_buf) {
834 retval = -ENOMEM;
835 goto failed_buf;
836 }
837 memset(sr_buf, 0, sizeof(*sr_buf));
838 req->data = sr_buf;
839 sbale = zfcp_qdio_sbale_curr(req);
840 sbale->addr = (void *) sr_buf;
841 sbale->length = sizeof(*sr_buf);
842
843 retval = zfcp_fsf_req_send(req);
844 if (retval)
845 goto failed_req_send;
846
847 goto out;
848
849 failed_req_send:
850 mempool_free(sr_buf, adapter->pool.data_status_read);
851 failed_buf:
852 zfcp_fsf_req_free(req);
853 zfcp_hba_dbf_event_fsf_unsol("fail", adapter, NULL);
854 out:
855 spin_unlock_bh(&adapter->req_q_lock);
856 return retval;
857 }
858
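/* evaluate the result of an abort request and set ABORTSUCCEEDED, ABORTNOTNEEDED or ERROR in the request status */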
859 static void zfcp_fsf_abort_fcp_command_handler(struct zfcp_fsf_req *req)
860 {
861 struct zfcp_unit *unit = req->data;
862 union fsf_status_qual *fsq = &req->qtcb->header.fsf_status_qual;
863
864 if (req->status & ZFCP_STATUS_FSFREQ_ERROR)
865 return;
866
867 switch (req->qtcb->header.fsf_status) {
868 case FSF_PORT_HANDLE_NOT_VALID:
869 if (fsq->word[0] == fsq->word[1]) {
870 zfcp_erp_adapter_reopen(unit->port->adapter, 0, 104,
871 req);
872 req->status |= ZFCP_STATUS_FSFREQ_ERROR;
873 }
874 break;
875 case FSF_LUN_HANDLE_NOT_VALID:
876 if (fsq->word[0] == fsq->word[1]) {
877 zfcp_erp_port_reopen(unit->port, 0, 105, req);
878 req->status |= ZFCP_STATUS_FSFREQ_ERROR;
879 }
880 break;
881 case FSF_FCP_COMMAND_DOES_NOT_EXIST:
882 req->status |= ZFCP_STATUS_FSFREQ_ABORTNOTNEEDED;
883 break;
884 case FSF_PORT_BOXED:
885 zfcp_erp_port_boxed(unit->port, 47, req);
886 req->status |= ZFCP_STATUS_FSFREQ_ERROR |
887 ZFCP_STATUS_FSFREQ_RETRY;
888 break;
889 case FSF_LUN_BOXED:
890 zfcp_erp_unit_boxed(unit, 48, req);
891 req->status |= ZFCP_STATUS_FSFREQ_ERROR |
892 ZFCP_STATUS_FSFREQ_RETRY;
893 break;
894 case FSF_ADAPTER_STATUS_AVAILABLE:
895 switch (fsq->word[0]) {
896 case FSF_SQ_INVOKE_LINK_TEST_PROCEDURE:
897 zfcp_test_link(unit->port);
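			/* fall through */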
898 case FSF_SQ_ULP_DEPENDENT_ERP_REQUIRED:
899 req->status |= ZFCP_STATUS_FSFREQ_ERROR;
900 break;
901 }
902 break;
903 case FSF_GOOD:
904 req->status |= ZFCP_STATUS_FSFREQ_ABORTSUCCEEDED;
905 break;
906 }
907 }
908
909 /**
910 * zfcp_fsf_abort_fcp_command - abort running SCSI command
911 * @old_req_id: unsigned long
912 * @adapter: pointer to struct zfcp_adapter
913 * @unit: pointer to struct zfcp_unit
914 * @req_flags: integer specifying the request flags
915 * Returns: pointer to struct zfcp_fsf_req
916 *
917 * FIXME(design): should be watched by a timeout !!!
918 */
919
920 struct zfcp_fsf_req *zfcp_fsf_abort_fcp_command(unsigned long old_req_id,
921 struct zfcp_adapter *adapter,
922 struct zfcp_unit *unit,
923 int req_flags)
924 {
925 struct qdio_buffer_element *sbale;
926 struct zfcp_fsf_req *req = NULL;
927
928 spin_lock(&adapter->req_q_lock);
929 if (!zfcp_fsf_sbal_available(adapter))
930 goto out;
931 req = zfcp_fsf_req_create(adapter, FSF_QTCB_ABORT_FCP_CMND,
932 req_flags, adapter->pool.fsf_req_abort);
933 if (IS_ERR(req))
934 goto out;
935
936 if (unlikely(!(atomic_read(&unit->status) &
937 ZFCP_STATUS_COMMON_UNBLOCKED)))
938 goto out_error_free;
939
940 sbale = zfcp_qdio_sbale_req(req);
941 sbale[0].flags |= SBAL_FLAGS0_TYPE_READ;
942 sbale[1].flags |= SBAL_FLAGS_LAST_ENTRY;
943
944 req->data = unit;
945 req->handler = zfcp_fsf_abort_fcp_command_handler;
946 req->qtcb->header.lun_handle = unit->handle;
947 req->qtcb->header.port_handle = unit->port->handle;
948 req->qtcb->bottom.support.req_handle = (u64) old_req_id;
949
950 zfcp_fsf_start_timer(req, ZFCP_SCSI_ER_TIMEOUT);
951 if (!zfcp_fsf_req_send(req))
952 goto out;
953
954 out_error_free:
955 zfcp_fsf_req_free(req);
956 req = NULL;
957 out:
958 spin_unlock(&adapter->req_q_lock);
959 return req;
960 }
961
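/* completion handler for a Generic Service (CT) request: set send_ct->status and invoke the caller's handler */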
962 static void zfcp_fsf_send_ct_handler(struct zfcp_fsf_req *req)
963 {
964 struct zfcp_adapter *adapter = req->adapter;
965 struct zfcp_send_ct *send_ct = req->data;
966 struct fsf_qtcb_header *header = &req->qtcb->header;
967
968 send_ct->status = -EINVAL;
969
970 if (req->status & ZFCP_STATUS_FSFREQ_ERROR)
971 goto skip_fsfstatus;
972
973 switch (header->fsf_status) {
974 case FSF_GOOD:
975 zfcp_san_dbf_event_ct_response(req);
976 send_ct->status = 0;
977 break;
978 case FSF_SERVICE_CLASS_NOT_SUPPORTED:
979 zfcp_fsf_class_not_supp(req);
980 break;
981 case FSF_ADAPTER_STATUS_AVAILABLE:
982 switch (header->fsf_status_qual.word[0]){
983 case FSF_SQ_INVOKE_LINK_TEST_PROCEDURE:
984 case FSF_SQ_ULP_DEPENDENT_ERP_REQUIRED:
985 req->status |= ZFCP_STATUS_FSFREQ_ERROR;
986 break;
987 }
988 break;
989 case FSF_ACCESS_DENIED:
990 break;
991 case FSF_PORT_BOXED:
992 req->status |= ZFCP_STATUS_FSFREQ_ERROR |
993 ZFCP_STATUS_FSFREQ_RETRY;
994 break;
995 case FSF_PORT_HANDLE_NOT_VALID:
996 zfcp_erp_adapter_reopen(adapter, 0, 106, req);
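		/* fall through */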
997 case FSF_GENERIC_COMMAND_REJECTED:
998 case FSF_PAYLOAD_SIZE_MISMATCH:
999 case FSF_REQUEST_SIZE_TOO_LARGE:
1000 case FSF_RESPONSE_SIZE_TOO_LARGE:
1001 case FSF_SBAL_MISMATCH:
1002 req->status |= ZFCP_STATUS_FSFREQ_ERROR;
1003 break;
1004 }
1005
1006 skip_fsfstatus:
1007 if (send_ct->handler)
1008 send_ct->handler(send_ct->handler_data);
1009 }
1010
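/* map the request and response scatterlists into SBALs and record the resulting buffer lengths in the QTCB */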
1011 static int zfcp_fsf_setup_sbals(struct zfcp_fsf_req *req,
1012 struct scatterlist *sg_req,
1013 struct scatterlist *sg_resp, int max_sbals)
1014 {
1015 int bytes;
1016
1017 bytes = zfcp_qdio_sbals_from_sg(req, SBAL_FLAGS0_TYPE_WRITE_READ,
1018 sg_req, max_sbals);
1019 if (bytes <= 0)
1020 return -ENOMEM;
1021 req->qtcb->bottom.support.req_buf_length = bytes;
1022 req->sbale_curr = ZFCP_LAST_SBALE_PER_SBAL;
1023
1024 bytes = zfcp_qdio_sbals_from_sg(req, SBAL_FLAGS0_TYPE_WRITE_READ,
1025 sg_resp, max_sbals);
1026 if (bytes <= 0)
1027 return -ENOMEM;
1028 req->qtcb->bottom.support.resp_buf_length = bytes;
1029
1030 return 0;
1031 }
1032
1033 /**
1034 * zfcp_fsf_send_ct - initiate a Generic Service request (FC-GS)
1035 * @ct: pointer to struct zfcp_send_ct with data for request
1036 * @pool: if non-null this mempool is used to allocate struct zfcp_fsf_req
1037  * @erp_action: if non-null the Generic Service request is sent within ERP
1038 */
1039 int zfcp_fsf_send_ct(struct zfcp_send_ct *ct, mempool_t *pool,
1040 struct zfcp_erp_action *erp_action)
1041 {
1042 struct zfcp_wka_port *wka_port = ct->wka_port;
1043 struct zfcp_adapter *adapter = wka_port->adapter;
1044 struct zfcp_fsf_req *req;
1045 int ret = -EIO;
1046
1047 spin_lock_bh(&adapter->req_q_lock);
1048 if (zfcp_fsf_req_sbal_get(adapter))
1049 goto out;
1050
1051 req = zfcp_fsf_req_create(adapter, FSF_QTCB_SEND_GENERIC,
1052 ZFCP_REQ_AUTO_CLEANUP, pool);
1053 if (IS_ERR(req)) {
1054 ret = PTR_ERR(req);
1055 goto out;
1056 }
1057
1058 ret = zfcp_fsf_setup_sbals(req, ct->req, ct->resp,
1059 FSF_MAX_SBALS_PER_REQ);
1060 if (ret)
1061 goto failed_send;
1062
1063 req->handler = zfcp_fsf_send_ct_handler;
1064 req->qtcb->header.port_handle = wka_port->handle;
1065 req->qtcb->bottom.support.service_class = FSF_CLASS_3;
1066 req->qtcb->bottom.support.timeout = ct->timeout;
1067 req->data = ct;
1068
1069 zfcp_san_dbf_event_ct_request(req);
1070
1071 if (erp_action) {
1072 erp_action->fsf_req = req;
1073 req->erp_action = erp_action;
1074 zfcp_fsf_start_erp_timer(req);
1075 } else
1076 zfcp_fsf_start_timer(req, ZFCP_FSF_REQUEST_TIMEOUT);
1077
1078 ret = zfcp_fsf_req_send(req);
1079 if (ret)
1080 goto failed_send;
1081
1082 goto out;
1083
1084 failed_send:
1085 zfcp_fsf_req_free(req);
1086 if (erp_action)
1087 erp_action->fsf_req = NULL;
1088 out:
1089 spin_unlock_bh(&adapter->req_q_lock);
1090 return ret;
1091 }
1092
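/* completion handler for an ELS request: set send_els->status and invoke the caller's handler */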
1093 static void zfcp_fsf_send_els_handler(struct zfcp_fsf_req *req)
1094 {
1095 struct zfcp_send_els *send_els = req->data;
1096 struct zfcp_port *port = send_els->port;
1097 struct fsf_qtcb_header *header = &req->qtcb->header;
1098
1099 send_els->status = -EINVAL;
1100
1101 if (req->status & ZFCP_STATUS_FSFREQ_ERROR)
1102 goto skip_fsfstatus;
1103
1104 switch (header->fsf_status) {
1105 case FSF_GOOD:
1106 zfcp_san_dbf_event_els_response(req);
1107 send_els->status = 0;
1108 break;
1109 case FSF_SERVICE_CLASS_NOT_SUPPORTED:
1110 zfcp_fsf_class_not_supp(req);
1111 break;
1112 case FSF_ADAPTER_STATUS_AVAILABLE:
1113 switch (header->fsf_status_qual.word[0]){
1114 case FSF_SQ_INVOKE_LINK_TEST_PROCEDURE:
1115 if (port && (send_els->ls_code != ZFCP_LS_ADISC))
1116 zfcp_test_link(port);
1117 		/* fall through */
1118 case FSF_SQ_ULP_DEPENDENT_ERP_REQUIRED:
1119 case FSF_SQ_RETRY_IF_POSSIBLE:
1120 req->status |= ZFCP_STATUS_FSFREQ_ERROR;
1121 break;
1122 }
1123 break;
1124 case FSF_ELS_COMMAND_REJECTED:
1125 case FSF_PAYLOAD_SIZE_MISMATCH:
1126 case FSF_REQUEST_SIZE_TOO_LARGE:
1127 case FSF_RESPONSE_SIZE_TOO_LARGE:
1128 break;
1129 case FSF_ACCESS_DENIED:
1130 zfcp_fsf_access_denied_port(req, port);
1131 break;
1132 case FSF_SBAL_MISMATCH:
1133 		/* should never occur, avoided in zfcp_fsf_send_els */
1134 /* fall through */
1135 default:
1136 req->status |= ZFCP_STATUS_FSFREQ_ERROR;
1137 break;
1138 }
1139 skip_fsfstatus:
1140 if (send_els->handler)
1141 send_els->handler(send_els->handler_data);
1142 }
1143
1144 /**
1145 * zfcp_fsf_send_els - initiate an ELS command (FC-FS)
1146 * @els: pointer to struct zfcp_send_els with data for the command
1147 */
1148 int zfcp_fsf_send_els(struct zfcp_send_els *els)
1149 {
1150 struct zfcp_fsf_req *req;
1151 struct zfcp_adapter *adapter = els->adapter;
1152 struct fsf_qtcb_bottom_support *bottom;
1153 int ret = -EIO;
1154
1155 if (unlikely(!(atomic_read(&els->port->status) &
1156 ZFCP_STATUS_COMMON_UNBLOCKED)))
1157 return -EBUSY;
1158
1159 spin_lock(&adapter->req_q_lock);
1160 if (!zfcp_fsf_sbal_available(adapter))
1161 goto out;
1162 req = zfcp_fsf_req_create(adapter, FSF_QTCB_SEND_ELS,
1163 ZFCP_REQ_AUTO_CLEANUP, NULL);
1164 if (IS_ERR(req)) {
1165 ret = PTR_ERR(req);
1166 goto out;
1167 }
1168
1169 ret = zfcp_fsf_setup_sbals(req, els->req, els->resp, 2);
1170
1171 if (ret)
1172 goto failed_send;
1173
1174 bottom = &req->qtcb->bottom.support;
1175 req->handler = zfcp_fsf_send_els_handler;
1176 bottom->d_id = els->d_id;
1177 bottom->service_class = FSF_CLASS_3;
1178 bottom->timeout = 2 * R_A_TOV;
1179 req->data = els;
1180
1181 zfcp_san_dbf_event_els_request(req);
1182
1183 zfcp_fsf_start_timer(req, ZFCP_FSF_REQUEST_TIMEOUT);
1184 ret = zfcp_fsf_req_send(req);
1185 if (ret)
1186 goto failed_send;
1187
1188 goto out;
1189
1190 failed_send:
1191 zfcp_fsf_req_free(req);
1192 out:
1193 spin_unlock(&adapter->req_q_lock);
1194 return ret;
1195 }
1196
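/**
 * zfcp_fsf_exchange_config_data - exchange configuration data with the FCP adapter
 * @erp_action: ERP action for the adapter whose configuration data is requested
 * Returns: 0 on success, error otherwise
 */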
1197 int zfcp_fsf_exchange_config_data(struct zfcp_erp_action *erp_action)
1198 {
1199 struct qdio_buffer_element *sbale;
1200 struct zfcp_fsf_req *req;
1201 struct zfcp_adapter *adapter = erp_action->adapter;
1202 int retval = -EIO;
1203
1204 spin_lock_bh(&adapter->req_q_lock);
1205 if (!zfcp_fsf_sbal_available(adapter))
1206 goto out;
1207 req = zfcp_fsf_req_create(adapter,
1208 FSF_QTCB_EXCHANGE_CONFIG_DATA,
1209 ZFCP_REQ_AUTO_CLEANUP,
1210 adapter->pool.fsf_req_erp);
1211 if (IS_ERR(req)) {
1212 retval = PTR_ERR(req);
1213 goto out;
1214 }
1215
1216 sbale = zfcp_qdio_sbale_req(req);
1217 sbale[0].flags |= SBAL_FLAGS0_TYPE_READ;
1218 sbale[1].flags |= SBAL_FLAGS_LAST_ENTRY;
1219
1220 req->qtcb->bottom.config.feature_selection =
1221 FSF_FEATURE_CFDC |
1222 FSF_FEATURE_LUN_SHARING |
1223 FSF_FEATURE_NOTIFICATION_LOST |
1224 FSF_FEATURE_UPDATE_ALERT;
1225 req->erp_action = erp_action;
1226 req->handler = zfcp_fsf_exchange_config_data_handler;
1227 erp_action->fsf_req = req;
1228
1229 zfcp_fsf_start_erp_timer(req);
1230 retval = zfcp_fsf_req_send(req);
1231 if (retval) {
1232 zfcp_fsf_req_free(req);
1233 erp_action->fsf_req = NULL;
1234 }
1235 out:
1236 spin_unlock_bh(&adapter->req_q_lock);
1237 return retval;
1238 }
1239
1240 int zfcp_fsf_exchange_config_data_sync(struct zfcp_adapter *adapter,
1241 struct fsf_qtcb_bottom_config *data)
1242 {
1243 struct qdio_buffer_element *sbale;
1244 struct zfcp_fsf_req *req = NULL;
1245 int retval = -EIO;
1246
1247 spin_lock_bh(&adapter->req_q_lock);
1248 if (zfcp_fsf_req_sbal_get(adapter))
1249 goto out;
1250
1251 req = zfcp_fsf_req_create(adapter, FSF_QTCB_EXCHANGE_CONFIG_DATA,
1252 0, NULL);
1253 if (IS_ERR(req)) {
1254 retval = PTR_ERR(req);
1255 goto out;
1256 }
1257
1258 sbale = zfcp_qdio_sbale_req(req);
1259 sbale[0].flags |= SBAL_FLAGS0_TYPE_READ;
1260 sbale[1].flags |= SBAL_FLAGS_LAST_ENTRY;
1261 req->handler = zfcp_fsf_exchange_config_data_handler;
1262
1263 req->qtcb->bottom.config.feature_selection =
1264 FSF_FEATURE_CFDC |
1265 FSF_FEATURE_LUN_SHARING |
1266 FSF_FEATURE_NOTIFICATION_LOST |
1267 FSF_FEATURE_UPDATE_ALERT;
1268
1269 if (data)
1270 req->data = data;
1271
1272 zfcp_fsf_start_timer(req, ZFCP_FSF_REQUEST_TIMEOUT);
1273 retval = zfcp_fsf_req_send(req);
1274 out:
1275 spin_unlock_bh(&adapter->req_q_lock);
1276 if (!retval)
1277 wait_event(req->completion_wq,
1278 req->status & ZFCP_STATUS_FSFREQ_COMPLETED);
1279
1280 	/* req may be NULL or an ERR_PTR() if the request could not be created */
	if (req && !IS_ERR(req))
		zfcp_fsf_req_free(req);
1281
1282 return retval;
1283 }
1284
1285 /**
1286 * zfcp_fsf_exchange_port_data - request information about local port
1287 * @erp_action: ERP action for the adapter for which port data is requested
1288 * Returns: 0 on success, error otherwise
1289 */
1290 int zfcp_fsf_exchange_port_data(struct zfcp_erp_action *erp_action)
1291 {
1292 struct qdio_buffer_element *sbale;
1293 struct zfcp_fsf_req *req;
1294 struct zfcp_adapter *adapter = erp_action->adapter;
1295 int retval = -EIO;
1296
1297 if (!(adapter->adapter_features & FSF_FEATURE_HBAAPI_MANAGEMENT))
1298 return -EOPNOTSUPP;
1299
1300 spin_lock_bh(&adapter->req_q_lock);
1301 if (!zfcp_fsf_sbal_available(adapter))
1302 goto out;
1303 req = zfcp_fsf_req_create(adapter, FSF_QTCB_EXCHANGE_PORT_DATA,
1304 ZFCP_REQ_AUTO_CLEANUP,
1305 adapter->pool.fsf_req_erp);
1306 if (IS_ERR(req)) {
1307 retval = PTR_ERR(req);
1308 goto out;
1309 }
1310
1311 sbale = zfcp_qdio_sbale_req(req);
1312 sbale[0].flags |= SBAL_FLAGS0_TYPE_READ;
1313 sbale[1].flags |= SBAL_FLAGS_LAST_ENTRY;
1314
1315 req->handler = zfcp_fsf_exchange_port_data_handler;
1316 req->erp_action = erp_action;
1317 erp_action->fsf_req = req;
1318
1319 zfcp_fsf_start_erp_timer(req);
1320 retval = zfcp_fsf_req_send(req);
1321 if (retval) {
1322 zfcp_fsf_req_free(req);
1323 erp_action->fsf_req = NULL;
1324 }
1325 out:
1326 spin_unlock_bh(&adapter->req_q_lock);
1327 return retval;
1328 }
1329
1330 /**
1331 * zfcp_fsf_exchange_port_data_sync - request information about local port
1332 * @adapter: pointer to struct zfcp_adapter
1333 * @data: pointer to struct fsf_qtcb_bottom_port
1334 * Returns: 0 on success, error otherwise
1335 */
1336 int zfcp_fsf_exchange_port_data_sync(struct zfcp_adapter *adapter,
1337 struct fsf_qtcb_bottom_port *data)
1338 {
1339 struct qdio_buffer_element *sbale;
1340 struct zfcp_fsf_req *req = NULL;
1341 int retval = -EIO;
1342
1343 if (!(adapter->adapter_features & FSF_FEATURE_HBAAPI_MANAGEMENT))
1344 return -EOPNOTSUPP;
1345
1346 spin_lock_bh(&adapter->req_q_lock);
1347 if (!zfcp_fsf_sbal_available(adapter))
1348 goto out;
1349
1350 req = zfcp_fsf_req_create(adapter, FSF_QTCB_EXCHANGE_PORT_DATA, 0,
1351 NULL);
1352 if (IS_ERR(req)) {
1353 retval = PTR_ERR(req);
1354 goto out;
1355 }
1356
1357 if (data)
1358 req->data = data;
1359
1360 sbale = zfcp_qdio_sbale_req(req);
1361 sbale[0].flags |= SBAL_FLAGS0_TYPE_READ;
1362 sbale[1].flags |= SBAL_FLAGS_LAST_ENTRY;
1363
1364 req->handler = zfcp_fsf_exchange_port_data_handler;
1365 zfcp_fsf_start_timer(req, ZFCP_FSF_REQUEST_TIMEOUT);
1366 retval = zfcp_fsf_req_send(req);
1367 out:
1368 spin_unlock_bh(&adapter->req_q_lock);
1369 if (!retval)
1370 wait_event(req->completion_wq,
1371 req->status & ZFCP_STATUS_FSFREQ_COMPLETED);
1372 	/* req may be NULL or an ERR_PTR() if the request could not be created */
	if (req && !IS_ERR(req))
		zfcp_fsf_req_free(req);
1373
1374 return retval;
1375 }
1376
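/* completion handler for open port: store the port handle and evaluate the PLOGI payload returned by the channel */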
1377 static void zfcp_fsf_open_port_handler(struct zfcp_fsf_req *req)
1378 {
1379 struct zfcp_port *port = req->data;
1380 struct fsf_qtcb_header *header = &req->qtcb->header;
1381 struct fsf_plogi *plogi;
1382
1383 if (req->status & ZFCP_STATUS_FSFREQ_ERROR)
1384 return;
1385
1386 switch (header->fsf_status) {
1387 case FSF_PORT_ALREADY_OPEN:
1388 break;
1389 case FSF_ACCESS_DENIED:
1390 zfcp_fsf_access_denied_port(req, port);
1391 break;
1392 case FSF_MAXIMUM_NUMBER_OF_PORTS_EXCEEDED:
1393 dev_warn(&req->adapter->ccw_device->dev,
1394 "Not enough FCP adapter resources to open "
1395 "remote port 0x%016Lx\n",
1396 (unsigned long long)port->wwpn);
1397 zfcp_erp_port_failed(port, 31, req);
1398 req->status |= ZFCP_STATUS_FSFREQ_ERROR;
1399 break;
1400 case FSF_ADAPTER_STATUS_AVAILABLE:
1401 switch (header->fsf_status_qual.word[0]) {
1402 case FSF_SQ_INVOKE_LINK_TEST_PROCEDURE:
1403 case FSF_SQ_ULP_DEPENDENT_ERP_REQUIRED:
1404 req->status |= ZFCP_STATUS_FSFREQ_ERROR;
1405 break;
1406 case FSF_SQ_NO_RETRY_POSSIBLE:
1407 dev_warn(&req->adapter->ccw_device->dev,
1408 "Remote port 0x%016Lx could not be opened\n",
1409 (unsigned long long)port->wwpn);
1410 zfcp_erp_port_failed(port, 32, req);
1411 req->status |= ZFCP_STATUS_FSFREQ_ERROR;
1412 break;
1413 }
1414 break;
1415 case FSF_GOOD:
1416 port->handle = header->port_handle;
1417 atomic_set_mask(ZFCP_STATUS_COMMON_OPEN |
1418 ZFCP_STATUS_PORT_PHYS_OPEN, &port->status);
1419 atomic_clear_mask(ZFCP_STATUS_COMMON_ACCESS_DENIED |
1420 ZFCP_STATUS_COMMON_ACCESS_BOXED,
1421 &port->status);
1422 /* check whether D_ID has changed during open */
1423 /*
1424 * FIXME: This check is not airtight, as the FCP channel does
1425 * not monitor closures of target port connections caused on
1426 		 * the remote side. Thus, it might miss out on invalidating
1427 * locally cached WWPNs (and other N_Port parameters) of gone
1428 * target ports. So, our heroic attempt to make things safe
1429 * could be undermined by 'open port' response data tagged with
1430 * obsolete WWPNs. Another reason to monitor potential
1431 		 * connection closures ourselves at least (by interpreting
1432 * incoming ELS' and unsolicited status). It just crosses my
1433 * mind that one should be able to cross-check by means of
1434 * another GID_PN straight after a port has been opened.
1435 		 * Alternatively, an ADISC/PDISC ELS should suffice, as well.
1436 */
1437 plogi = (struct fsf_plogi *) req->qtcb->bottom.support.els;
1438 if (req->qtcb->bottom.support.els1_length >= sizeof(*plogi)) {
1439 if (plogi->serv_param.wwpn != port->wwpn)
1440 atomic_clear_mask(ZFCP_STATUS_PORT_DID_DID,
1441 &port->status);
1442 else {
1443 port->wwnn = plogi->serv_param.wwnn;
1444 zfcp_fc_plogi_evaluate(port, plogi);
1445 }
1446 }
1447 break;
1448 case FSF_UNKNOWN_OP_SUBTYPE:
1449 req->status |= ZFCP_STATUS_FSFREQ_ERROR;
1450 break;
1451 }
1452 }
1453
1454 /**
1455 * zfcp_fsf_open_port - create and send open port request
1456 * @erp_action: pointer to struct zfcp_erp_action
1457 * Returns: 0 on success, error otherwise
1458 */
1459 int zfcp_fsf_open_port(struct zfcp_erp_action *erp_action)
1460 {
1461 struct qdio_buffer_element *sbale;
1462 struct zfcp_adapter *adapter = erp_action->adapter;
1463 struct zfcp_fsf_req *req;
1464 int retval = -EIO;
1465
1466 spin_lock_bh(&adapter->req_q_lock);
1467 if (zfcp_fsf_req_sbal_get(adapter))
1468 goto out;
1469
1470 req = zfcp_fsf_req_create(adapter,
1471 FSF_QTCB_OPEN_PORT_WITH_DID,
1472 ZFCP_REQ_AUTO_CLEANUP,
1473 adapter->pool.fsf_req_erp);
1474 if (IS_ERR(req)) {
1475 retval = PTR_ERR(req);
1476 goto out;
1477 }
1478
1479 sbale = zfcp_qdio_sbale_req(req);
1480 sbale[0].flags |= SBAL_FLAGS0_TYPE_READ;
1481 sbale[1].flags |= SBAL_FLAGS_LAST_ENTRY;
1482
1483 req->handler = zfcp_fsf_open_port_handler;
1484 req->qtcb->bottom.support.d_id = erp_action->port->d_id;
1485 req->data = erp_action->port;
1486 req->erp_action = erp_action;
1487 erp_action->fsf_req = req;
1488
1489 zfcp_fsf_start_erp_timer(req);
1490 retval = zfcp_fsf_req_send(req);
1491 if (retval) {
1492 zfcp_fsf_req_free(req);
1493 erp_action->fsf_req = NULL;
1494 }
1495 out:
1496 spin_unlock_bh(&adapter->req_q_lock);
1497 return retval;
1498 }
1499
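/* completion handler for close port: clear the port's open status on success */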
1500 static void zfcp_fsf_close_port_handler(struct zfcp_fsf_req *req)
1501 {
1502 struct zfcp_port *port = req->data;
1503
1504 if (req->status & ZFCP_STATUS_FSFREQ_ERROR)
1505 return;
1506
1507 switch (req->qtcb->header.fsf_status) {
1508 case FSF_PORT_HANDLE_NOT_VALID:
1509 zfcp_erp_adapter_reopen(port->adapter, 0, 107, req);
1510 req->status |= ZFCP_STATUS_FSFREQ_ERROR;
1511 break;
1512 case FSF_ADAPTER_STATUS_AVAILABLE:
1513 break;
1514 case FSF_GOOD:
1515 zfcp_erp_modify_port_status(port, 33, req,
1516 ZFCP_STATUS_COMMON_OPEN,
1517 ZFCP_CLEAR);
1518 break;
1519 }
1520 }
1521
1522 /**
1523 * zfcp_fsf_close_port - create and send close port request
1524 * @erp_action: pointer to struct zfcp_erp_action
1525 * Returns: 0 on success, error otherwise
1526 */
1527 int zfcp_fsf_close_port(struct zfcp_erp_action *erp_action)
1528 {
1529 struct qdio_buffer_element *sbale;
1530 struct zfcp_adapter *adapter = erp_action->adapter;
1531 struct zfcp_fsf_req *req;
1532 int retval = -EIO;
1533
1534 spin_lock_bh(&adapter->req_q_lock);
1535 if (zfcp_fsf_req_sbal_get(adapter))
1536 goto out;
1537
1538 req = zfcp_fsf_req_create(adapter, FSF_QTCB_CLOSE_PORT,
1539 ZFCP_REQ_AUTO_CLEANUP,
1540 adapter->pool.fsf_req_erp);
1541 if (IS_ERR(req)) {
1542 retval = PTR_ERR(req);
1543 goto out;
1544 }
1545
1546 sbale = zfcp_qdio_sbale_req(req);
1547 sbale[0].flags |= SBAL_FLAGS0_TYPE_READ;
1548 sbale[1].flags |= SBAL_FLAGS_LAST_ENTRY;
1549
1550 req->handler = zfcp_fsf_close_port_handler;
1551 req->data = erp_action->port;
1552 req->erp_action = erp_action;
1553 req->qtcb->header.port_handle = erp_action->port->handle;
1554 erp_action->fsf_req = req;
1555
1556 zfcp_fsf_start_erp_timer(req);
1557 retval = zfcp_fsf_req_send(req);
1558 if (retval) {
1559 zfcp_fsf_req_free(req);
1560 erp_action->fsf_req = NULL;
1561 }
1562 out:
1563 spin_unlock_bh(&adapter->req_q_lock);
1564 return retval;
1565 }
1566
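/* completion handler for opening a well-known address (WKA) port: record the handle and wake up the waiter */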
1567 static void zfcp_fsf_open_wka_port_handler(struct zfcp_fsf_req *req)
1568 {
1569 struct zfcp_wka_port *wka_port = req->data;
1570 struct fsf_qtcb_header *header = &req->qtcb->header;
1571
1572 if (req->status & ZFCP_STATUS_FSFREQ_ERROR) {
1573 wka_port->status = ZFCP_WKA_PORT_OFFLINE;
1574 goto out;
1575 }
1576
1577 switch (header->fsf_status) {
1578 case FSF_MAXIMUM_NUMBER_OF_PORTS_EXCEEDED:
1579 dev_warn(&req->adapter->ccw_device->dev,
1580 "Opening WKA port 0x%x failed\n", wka_port->d_id);
1581 case FSF_ADAPTER_STATUS_AVAILABLE:
1582 req->status |= ZFCP_STATUS_FSFREQ_ERROR;
1583 case FSF_ACCESS_DENIED:
1584 wka_port->status = ZFCP_WKA_PORT_OFFLINE;
1585 break;
1586 case FSF_PORT_ALREADY_OPEN:
1587 case FSF_GOOD:
1588 wka_port->handle = header->port_handle;
1589 wka_port->status = ZFCP_WKA_PORT_ONLINE;
1590 }
1591 out:
1592 wake_up(&wka_port->completion_wq);
1593 }
1594
1595 /**
1596 * zfcp_fsf_open_wka_port - create and send open wka-port request
1597 * @wka_port: pointer to struct zfcp_wka_port
1598 * Returns: 0 on success, error otherwise
1599 */
1600 int zfcp_fsf_open_wka_port(struct zfcp_wka_port *wka_port)
1601 {
1602 struct qdio_buffer_element *sbale;
1603 struct zfcp_adapter *adapter = wka_port->adapter;
1604 struct zfcp_fsf_req *req;
1605 int retval = -EIO;
1606
1607 spin_lock_bh(&adapter->req_q_lock);
1608 if (zfcp_fsf_req_sbal_get(adapter))
1609 goto out;
1610
1611 req = zfcp_fsf_req_create(adapter,
1612 FSF_QTCB_OPEN_PORT_WITH_DID,
1613 ZFCP_REQ_AUTO_CLEANUP,
1614 adapter->pool.fsf_req_erp);
1615 if (unlikely(IS_ERR(req))) {
1616 retval = PTR_ERR(req);
1617 goto out;
1618 }
1619
1620 sbale = zfcp_qdio_sbale_req(req);
1621 sbale[0].flags |= SBAL_FLAGS0_TYPE_READ;
1622 sbale[1].flags |= SBAL_FLAGS_LAST_ENTRY;
1623
1624 req->handler = zfcp_fsf_open_wka_port_handler;
1625 req->qtcb->bottom.support.d_id = wka_port->d_id;
1626 req->data = wka_port;
1627
1628 zfcp_fsf_start_timer(req, ZFCP_FSF_REQUEST_TIMEOUT);
1629 retval = zfcp_fsf_req_send(req);
1630 if (retval)
1631 zfcp_fsf_req_free(req);
1632 out:
1633 spin_unlock_bh(&adapter->req_q_lock);
1634 return retval;
1635 }
1636
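/* completion handler for closing a WKA port: mark it offline and wake up the waiter */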
1637 static void zfcp_fsf_close_wka_port_handler(struct zfcp_fsf_req *req)
1638 {
1639 struct zfcp_wka_port *wka_port = req->data;
1640
1641 if (req->qtcb->header.fsf_status == FSF_PORT_HANDLE_NOT_VALID) {
1642 req->status |= ZFCP_STATUS_FSFREQ_ERROR;
1643 zfcp_erp_adapter_reopen(wka_port->adapter, 0, 84, req);
1644 }
1645
1646 wka_port->status = ZFCP_WKA_PORT_OFFLINE;
1647 wake_up(&wka_port->completion_wq);
1648 }
1649
1650 /**
1651 * zfcp_fsf_close_wka_port - create and send close wka port request
1652  * @wka_port: pointer to struct zfcp_wka_port
1653 * Returns: 0 on success, error otherwise
1654 */
1655 int zfcp_fsf_close_wka_port(struct zfcp_wka_port *wka_port)
1656 {
1657 struct qdio_buffer_element *sbale;
1658 struct zfcp_adapter *adapter = wka_port->adapter;
1659 struct zfcp_fsf_req *req;
1660 int retval = -EIO;
1661
1662 spin_lock_bh(&adapter->req_q_lock);
1663 if (zfcp_fsf_req_sbal_get(adapter))
1664 goto out;
1665
1666 req = zfcp_fsf_req_create(adapter, FSF_QTCB_CLOSE_PORT,
1667 ZFCP_REQ_AUTO_CLEANUP,
1668 adapter->pool.fsf_req_erp);
1669 if (unlikely(IS_ERR(req))) {
1670 retval = PTR_ERR(req);
1671 goto out;
1672 }
1673
1674 sbale = zfcp_qdio_sbale_req(req);
1675 sbale[0].flags |= SBAL_FLAGS0_TYPE_READ;
1676 sbale[1].flags |= SBAL_FLAGS_LAST_ENTRY;
1677
1678 req->handler = zfcp_fsf_close_wka_port_handler;
1679 req->data = wka_port;
1680 req->qtcb->header.port_handle = wka_port->handle;
1681
1682 zfcp_fsf_start_timer(req, ZFCP_FSF_REQUEST_TIMEOUT);
1683 retval = zfcp_fsf_req_send(req);
1684 if (retval)
1685 zfcp_fsf_req_free(req);
1686 out:
1687 spin_unlock_bh(&adapter->req_q_lock);
1688 return retval;
1689 }
1690
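/* completion handler for close physical port: clear PHYS_OPEN on the port and the open status of all its units */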
1691 static void zfcp_fsf_close_physical_port_handler(struct zfcp_fsf_req *req)
1692 {
1693 struct zfcp_port *port = req->data;
1694 struct fsf_qtcb_header *header = &req->qtcb->header;
1695 struct zfcp_unit *unit;
1696
1697 if (req->status & ZFCP_STATUS_FSFREQ_ERROR)
1698 goto skip_fsfstatus;
1699
1700 switch (header->fsf_status) {
1701 case FSF_PORT_HANDLE_NOT_VALID:
1702 zfcp_erp_adapter_reopen(port->adapter, 0, 108, req);
1703 req->status |= ZFCP_STATUS_FSFREQ_ERROR;
1704 break;
1705 case FSF_ACCESS_DENIED:
1706 zfcp_fsf_access_denied_port(req, port);
1707 break;
1708 case FSF_PORT_BOXED:
1709 zfcp_erp_port_boxed(port, 50, req);
1710 req->status |= ZFCP_STATUS_FSFREQ_ERROR |
1711 ZFCP_STATUS_FSFREQ_RETRY;
1712 /* can't use generic zfcp_erp_modify_port_status because
1713 * ZFCP_STATUS_COMMON_OPEN must not be reset for the port */
1714 atomic_clear_mask(ZFCP_STATUS_PORT_PHYS_OPEN, &port->status);
1715 list_for_each_entry(unit, &port->unit_list_head, list)
1716 atomic_clear_mask(ZFCP_STATUS_COMMON_OPEN,
1717 &unit->status);
1718 break;
1719 case FSF_ADAPTER_STATUS_AVAILABLE:
1720 switch (header->fsf_status_qual.word[0]) {
1721 case FSF_SQ_INVOKE_LINK_TEST_PROCEDURE:
1722 /* fall through */
1723 case FSF_SQ_ULP_DEPENDENT_ERP_REQUIRED:
1724 req->status |= ZFCP_STATUS_FSFREQ_ERROR;
1725 break;
1726 }
1727 break;
1728 case FSF_GOOD:
1729 /* can't use generic zfcp_erp_modify_port_status because
1730 * ZFCP_STATUS_COMMON_OPEN must not be reset for the port
1731 */
1732 atomic_clear_mask(ZFCP_STATUS_PORT_PHYS_OPEN, &port->status);
1733 list_for_each_entry(unit, &port->unit_list_head, list)
1734 atomic_clear_mask(ZFCP_STATUS_COMMON_OPEN,
1735 &unit->status);
1736 break;
1737 }
1738 skip_fsfstatus:
1739 atomic_clear_mask(ZFCP_STATUS_PORT_PHYS_CLOSING, &port->status);
1740 }
1741
1742 /**
1743 * zfcp_fsf_close_physical_port - close physical port
1744 * @erp_action: pointer to struct zfcp_erp_action
1745 * Returns: 0 on success
1746 */
1747 int zfcp_fsf_close_physical_port(struct zfcp_erp_action *erp_action)
1748 {
1749 struct qdio_buffer_element *sbale;
1750 struct zfcp_adapter *adapter = erp_action->adapter;
1751 struct zfcp_fsf_req *req;
1752 int retval = -EIO;
1753
1754 spin_lock_bh(&adapter->req_q_lock);
1755 if (zfcp_fsf_req_sbal_get(adapter))
1756 goto out;
1757
1758 req = zfcp_fsf_req_create(adapter, FSF_QTCB_CLOSE_PHYSICAL_PORT,
1759 ZFCP_REQ_AUTO_CLEANUP,
1760 adapter->pool.fsf_req_erp);
1761 if (IS_ERR(req)) {
1762 retval = PTR_ERR(req);
1763 goto out;
1764 }
1765
1766 sbale = zfcp_qdio_sbale_req(req);
1767 sbale[0].flags |= SBAL_FLAGS0_TYPE_READ;
1768 sbale[1].flags |= SBAL_FLAGS_LAST_ENTRY;
1769
1770 req->data = erp_action->port;
1771 req->qtcb->header.port_handle = erp_action->port->handle;
1772 req->erp_action = erp_action;
1773 req->handler = zfcp_fsf_close_physical_port_handler;
1774 erp_action->fsf_req = req;
1775 atomic_set_mask(ZFCP_STATUS_PORT_PHYS_CLOSING,
1776 &erp_action->port->status);
1777
1778 zfcp_fsf_start_erp_timer(req);
1779 retval = zfcp_fsf_req_send(req);
1780 if (retval) {
1781 zfcp_fsf_req_free(req);
1782 erp_action->fsf_req = NULL;
1783 }
1784 out:
1785 spin_unlock_bh(&adapter->req_q_lock);
1786 return retval;
1787 }
1788
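/* completion handler for open LUN: store the LUN handle and, outside NPIV mode, evaluate shared/read-only access information */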
1789 static void zfcp_fsf_open_unit_handler(struct zfcp_fsf_req *req)
1790 {
1791 struct zfcp_adapter *adapter = req->adapter;
1792 struct zfcp_unit *unit = req->data;
1793 struct fsf_qtcb_header *header = &req->qtcb->header;
1794 struct fsf_qtcb_bottom_support *bottom = &req->qtcb->bottom.support;
1795 struct fsf_queue_designator *queue_designator =
1796 &header->fsf_status_qual.fsf_queue_designator;
1797 int exclusive, readwrite;
1798
1799 if (req->status & ZFCP_STATUS_FSFREQ_ERROR)
1800 return;
1801
1802 atomic_clear_mask(ZFCP_STATUS_COMMON_ACCESS_DENIED |
1803 ZFCP_STATUS_COMMON_ACCESS_BOXED |
1804 ZFCP_STATUS_UNIT_SHARED |
1805 ZFCP_STATUS_UNIT_READONLY,
1806 &unit->status);
1807
1808 switch (header->fsf_status) {
1809
1810 case FSF_PORT_HANDLE_NOT_VALID:
1811 zfcp_erp_adapter_reopen(unit->port->adapter, 0, 109, req);
1812 /* fall through */
1813 case FSF_LUN_ALREADY_OPEN:
1814 break;
1815 case FSF_ACCESS_DENIED:
1816 zfcp_fsf_access_denied_unit(req, unit);
1817 atomic_clear_mask(ZFCP_STATUS_UNIT_SHARED, &unit->status);
1818 atomic_clear_mask(ZFCP_STATUS_UNIT_READONLY, &unit->status);
1819 break;
1820 case FSF_PORT_BOXED:
1821 zfcp_erp_port_boxed(unit->port, 51, req);
1822 req->status |= ZFCP_STATUS_FSFREQ_ERROR |
1823 ZFCP_STATUS_FSFREQ_RETRY;
1824 break;
1825 case FSF_LUN_SHARING_VIOLATION:
1826 if (header->fsf_status_qual.word[0])
1827 dev_warn(&adapter->ccw_device->dev,
1828 "LUN 0x%Lx on port 0x%Lx is already in "
1829 "use by CSS%d, MIF Image ID %x\n",
1830 (unsigned long long)unit->fcp_lun,
1831 (unsigned long long)unit->port->wwpn,
1832 queue_designator->cssid,
1833 queue_designator->hla);
1834 else
1835 zfcp_act_eval_err(adapter,
1836 header->fsf_status_qual.word[2]);
1837 zfcp_erp_unit_access_denied(unit, 60, req);
1838 atomic_clear_mask(ZFCP_STATUS_UNIT_SHARED, &unit->status);
1839 atomic_clear_mask(ZFCP_STATUS_UNIT_READONLY, &unit->status);
1840 req->status |= ZFCP_STATUS_FSFREQ_ERROR;
1841 break;
1842 case FSF_MAXIMUM_NUMBER_OF_LUNS_EXCEEDED:
1843 dev_warn(&adapter->ccw_device->dev,
1844 "No handle is available for LUN "
1845 "0x%016Lx on port 0x%016Lx\n",
1846 (unsigned long long)unit->fcp_lun,
1847 (unsigned long long)unit->port->wwpn);
1848 zfcp_erp_unit_failed(unit, 34, req);
1849 /* fall through */
1850 case FSF_INVALID_COMMAND_OPTION:
1851 req->status |= ZFCP_STATUS_FSFREQ_ERROR;
1852 break;
1853 case FSF_ADAPTER_STATUS_AVAILABLE:
1854 switch (header->fsf_status_qual.word[0]) {
1855 case FSF_SQ_INVOKE_LINK_TEST_PROCEDURE:
1856 zfcp_test_link(unit->port);
1857 /* fall through */
1858 case FSF_SQ_ULP_DEPENDENT_ERP_REQUIRED:
1859 req->status |= ZFCP_STATUS_FSFREQ_ERROR;
1860 break;
1861 }
1862 break;
1863
1864 case FSF_GOOD:
1865 unit->handle = header->lun_handle;
1866 atomic_set_mask(ZFCP_STATUS_COMMON_OPEN, &unit->status);
1867
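/*
 * For adapters not running in NPIV mode that report LUN sharing support
 * (and are not the privileged device model), evaluate the access-control
 * information returned by the FCP channel: a LUN that is not exclusively
 * attached is marked shared, and a LUN without outbound transfer access
 * is marked read-only.
 */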
1868 if (!(adapter->connection_features & FSF_FEATURE_NPIV_MODE) &&
1869 (adapter->adapter_features & FSF_FEATURE_LUN_SHARING) &&
1870 (adapter->ccw_device->id.dev_model != ZFCP_DEVICE_MODEL_PRIV)) {
1871 exclusive = (bottom->lun_access_info &
1872 FSF_UNIT_ACCESS_EXCLUSIVE);
1873 readwrite = (bottom->lun_access_info &
1874 FSF_UNIT_ACCESS_OUTBOUND_TRANSFER);
1875
1876 if (!exclusive)
1877 atomic_set_mask(ZFCP_STATUS_UNIT_SHARED,
1878 &unit->status);
1879
1880 if (!readwrite) {
1881 atomic_set_mask(ZFCP_STATUS_UNIT_READONLY,
1882 &unit->status);
1883 dev_info(&adapter->ccw_device->dev,
1884 "SCSI device at LUN 0x%016Lx on port "
1885 "0x%016Lx opened read-only\n",
1886 (unsigned long long)unit->fcp_lun,
1887 (unsigned long long)unit->port->wwpn);
1888 }
1889
1890 if (exclusive && !readwrite) {
1891 dev_err(&adapter->ccw_device->dev,
1892 "Exclusive read-only access not "
1893 "supported (unit 0x%016Lx, "
1894 "port 0x%016Lx)\n",
1895 (unsigned long long)unit->fcp_lun,
1896 (unsigned long long)unit->port->wwpn);
1897 zfcp_erp_unit_failed(unit, 35, req);
1898 req->status |= ZFCP_STATUS_FSFREQ_ERROR;
1899 zfcp_erp_unit_shutdown(unit, 0, 80, req);
1900 } else if (!exclusive && readwrite) {
1901 dev_err(&adapter->ccw_device->dev,
1902 "Shared read-write access not "
1903 "supported (unit 0x%016Lx, port "
1904 "0x%016Lx\n)",
1905 (unsigned long long)unit->fcp_lun,
1906 (unsigned long long)unit->port->wwpn);
1907 zfcp_erp_unit_failed(unit, 36, req);
1908 req->status |= ZFCP_STATUS_FSFREQ_ERROR;
1909 zfcp_erp_unit_shutdown(unit, 0, 81, req);
1910 }
1911 }
1912 break;
1913 }
1914 }
1915
1916 /**
1917 * zfcp_fsf_open_unit - open unit
1918 * @erp_action: pointer to struct zfcp_erp_action
1919 * Returns: 0 on success, error otherwise
1920 */
1921 int zfcp_fsf_open_unit(struct zfcp_erp_action *erp_action)
1922 {
1923 struct qdio_buffer_element *sbale;
1924 struct zfcp_adapter *adapter = erp_action->adapter;
1925 struct zfcp_fsf_req *req;
1926 int retval = -EIO;
1927
1928 spin_lock_bh(&adapter->req_q_lock);
1929 if (zfcp_fsf_req_sbal_get(adapter))
1930 goto out;
1931
1932 req = zfcp_fsf_req_create(adapter, FSF_QTCB_OPEN_LUN,
1933 ZFCP_REQ_AUTO_CLEANUP,
1934 adapter->pool.fsf_req_erp);
1935 if (IS_ERR(req)) {
1936 retval = PTR_ERR(req);
1937 goto out;
1938 }
1939
1940 sbale = zfcp_qdio_sbale_req(req);
1941 sbale[0].flags |= SBAL_FLAGS0_TYPE_READ;
1942 sbale[1].flags |= SBAL_FLAGS_LAST_ENTRY;
1943
1944 req->qtcb->header.port_handle = erp_action->port->handle;
1945 req->qtcb->bottom.support.fcp_lun = erp_action->unit->fcp_lun;
1946 req->handler = zfcp_fsf_open_unit_handler;
1947 req->data = erp_action->unit;
1948 req->erp_action = erp_action;
1949 erp_action->fsf_req = req;
1950
1951 if (!(adapter->connection_features & FSF_FEATURE_NPIV_MODE))
1952 req->qtcb->bottom.support.option = FSF_OPEN_LUN_SUPPRESS_BOXING;
1953
1954 zfcp_fsf_start_erp_timer(req);
1955 retval = zfcp_fsf_req_send(req);
1956 if (retval) {
1957 zfcp_fsf_req_free(req);
1958 erp_action->fsf_req = NULL;
1959 }
1960 out:
1961 spin_unlock_bh(&adapter->req_q_lock);
1962 return retval;
1963 }
1964
1965 static void zfcp_fsf_close_unit_handler(struct zfcp_fsf_req *req)
1966 {
1967 struct zfcp_unit *unit = req->data;
1968
1969 if (req->status & ZFCP_STATUS_FSFREQ_ERROR)
1970 return;
1971
1972 switch (req->qtcb->header.fsf_status) {
1973 case FSF_PORT_HANDLE_NOT_VALID:
1974 zfcp_erp_adapter_reopen(unit->port->adapter, 0, 110, req);
1975 req->status |= ZFCP_STATUS_FSFREQ_ERROR;
1976 break;
1977 case FSF_LUN_HANDLE_NOT_VALID:
1978 zfcp_erp_port_reopen(unit->port, 0, 111, req);
1979 req->status |= ZFCP_STATUS_FSFREQ_ERROR;
1980 break;
1981 case FSF_PORT_BOXED:
1982 zfcp_erp_port_boxed(unit->port, 52, req);
1983 req->status |= ZFCP_STATUS_FSFREQ_ERROR |
1984 ZFCP_STATUS_FSFREQ_RETRY;
1985 break;
1986 case FSF_ADAPTER_STATUS_AVAILABLE:
1987 switch (req->qtcb->header.fsf_status_qual.word[0]) {
1988 case FSF_SQ_INVOKE_LINK_TEST_PROCEDURE:
1989 zfcp_test_link(unit->port);
1990 /* fall through */
1991 case FSF_SQ_ULP_DEPENDENT_ERP_REQUIRED:
1992 req->status |= ZFCP_STATUS_FSFREQ_ERROR;
1993 break;
1994 }
1995 break;
1996 case FSF_GOOD:
1997 atomic_clear_mask(ZFCP_STATUS_COMMON_OPEN, &unit->status);
1998 break;
1999 }
2000 }
2001
2002 /**
2003 * zfcp_fsf_close_unit - close zfcp unit
2004 * @erp_action: pointer to struct zfcp_erp_action
2005 * Returns: 0 on success, error otherwise
2006 */
2007 int zfcp_fsf_close_unit(struct zfcp_erp_action *erp_action)
2008 {
2009 struct qdio_buffer_element *sbale;
2010 struct zfcp_adapter *adapter = erp_action->adapter;
2011 struct zfcp_fsf_req *req;
2012 int retval = -EIO;
2013
2014 spin_lock_bh(&adapter->req_q_lock);
2015 if (zfcp_fsf_req_sbal_get(adapter))
2016 goto out;
2017 req = zfcp_fsf_req_create(adapter, FSF_QTCB_CLOSE_LUN,
2018 ZFCP_REQ_AUTO_CLEANUP,
2019 adapter->pool.fsf_req_erp);
2020 if (IS_ERR(req)) {
2021 retval = PTR_ERR(req);
2022 goto out;
2023 }
2024
2025 sbale = zfcp_qdio_sbale_req(req);
2026 sbale[0].flags |= SBAL_FLAGS0_TYPE_READ;
2027 sbale[1].flags |= SBAL_FLAGS_LAST_ENTRY;
2028
2029 req->qtcb->header.port_handle = erp_action->port->handle;
2030 req->qtcb->header.lun_handle = erp_action->unit->handle;
2031 req->handler = zfcp_fsf_close_unit_handler;
2032 req->data = erp_action->unit;
2033 req->erp_action = erp_action;
2034 erp_action->fsf_req = req;
2035
2036 zfcp_fsf_start_erp_timer(req);
2037 retval = zfcp_fsf_req_send(req);
2038 if (retval) {
2039 zfcp_fsf_req_free(req);
2040 erp_action->fsf_req = NULL;
2041 }
2042 out:
2043 spin_unlock_bh(&adapter->req_q_lock);
2044 return retval;
2045 }
2046
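/*
 * Fold one latency sample into a latency record: accumulate the running
 * sum and track the minimum and maximum values seen so far.
 */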
2047 static void zfcp_fsf_update_lat(struct fsf_latency_record *lat_rec, u32 lat)
2048 {
2049 lat_rec->sum += lat;
2050 lat_rec->min = min(lat_rec->min, lat);
2051 lat_rec->max = max(lat_rec->max, lat);
2052 }
2053
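/*
 * Add the channel and fabric latencies reported in the QTCB prefix to the
 * per-unit statistics matching the data direction (read, write or command)
 * of the completed FCP request, under the unit's latency lock.
 */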
2054 static void zfcp_fsf_req_latency(struct zfcp_fsf_req *req)
2055 {
2056 struct fsf_qual_latency_info *lat_inf;
2057 struct latency_cont *lat;
2058 struct zfcp_unit *unit = req->unit;
2059 unsigned long flags;
2060
2061 lat_inf = &req->qtcb->prefix.prot_status_qual.latency_info;
2062
2063 switch (req->qtcb->bottom.io.data_direction) {
2064 case FSF_DATADIR_READ:
2065 lat = &unit->latencies.read;
2066 break;
2067 case FSF_DATADIR_WRITE:
2068 lat = &unit->latencies.write;
2069 break;
2070 case FSF_DATADIR_CMND:
2071 lat = &unit->latencies.cmd;
2072 break;
2073 default:
2074 return;
2075 }
2076
2077 spin_lock_irqsave(&unit->latencies.lock, flags);
2078 zfcp_fsf_update_lat(&lat->channel, lat_inf->channel_lat);
2079 zfcp_fsf_update_lat(&lat->fabric, lat_inf->fabric_lat);
2080 lat->counter++;
2081 spin_unlock_irqrestore(&unit->latencies.lock, flags);
2082 }
2083
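/*
 * When blktrace support is built in, attach the zfcp-specific latency and
 * queue-utilization data of a completed request to the block layer trace
 * of the corresponding SCSI command; otherwise this is a no-op stub.
 */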
2084 #ifdef CONFIG_BLK_DEV_IO_TRACE
2085 static void zfcp_fsf_trace_latency(struct zfcp_fsf_req *fsf_req)
2086 {
2087 struct fsf_qual_latency_info *lat_inf;
2088 struct scsi_cmnd *scsi_cmnd = (struct scsi_cmnd *)fsf_req->data;
2089 struct request *req = scsi_cmnd->request;
2090 struct zfcp_blk_drv_data trace;
2091 int ticks = fsf_req->adapter->timer_ticks;
2092
2093 trace.flags = 0;
2094 trace.magic = ZFCP_BLK_DRV_DATA_MAGIC;
2095 if (fsf_req->adapter->adapter_features & FSF_FEATURE_MEASUREMENT_DATA) {
2096 trace.flags |= ZFCP_BLK_LAT_VALID;
2097 lat_inf = &fsf_req->qtcb->prefix.prot_status_qual.latency_info;
2098 trace.channel_lat = lat_inf->channel_lat * ticks;
2099 trace.fabric_lat = lat_inf->fabric_lat * ticks;
2100 }
2101 if (fsf_req->status & ZFCP_STATUS_FSFREQ_ERROR)
2102 trace.flags |= ZFCP_BLK_REQ_ERROR;
2103 trace.inb_usage = fsf_req->qdio_inb_usage;
2104 trace.outb_usage = fsf_req->qdio_outb_usage;
2105
2106 blk_add_driver_data(req->q, req, &trace, sizeof(trace));
2107 }
2108 #else
2109 static inline void zfcp_fsf_trace_latency(struct zfcp_fsf_req *fsf_req)
2110 {
2111 }
2112 #endif
2113
2114 static void zfcp_fsf_send_fcp_command_task_handler(struct zfcp_fsf_req *req)
2115 {
2116 struct scsi_cmnd *scpnt = req->data;
2117 struct fcp_rsp_iu *fcp_rsp_iu = (struct fcp_rsp_iu *)
2118 &(req->qtcb->bottom.io.fcp_rsp);
2119 u32 sns_len;
2120 char *fcp_rsp_info = (char *) &fcp_rsp_iu[1];
2121 unsigned long flags;
2122
2123 if (unlikely(!scpnt))
2124 return;
2125
2126 read_lock_irqsave(&req->adapter->abort_lock, flags);
2127
2128 if (unlikely(req->status & ZFCP_STATUS_FSFREQ_ABORTED)) {
2129 set_host_byte(scpnt, DID_SOFT_ERROR);
2130 set_driver_byte(scpnt, SUGGEST_RETRY);
2131 goto skip_fsfstatus;
2132 }
2133
2134 if (unlikely(req->status & ZFCP_STATUS_FSFREQ_ERROR)) {
2135 set_host_byte(scpnt, DID_ERROR);
2136 goto skip_fsfstatus;
2137 }
2138
2139 set_msg_byte(scpnt, COMMAND_COMPLETE);
2140
2141 scpnt->result |= fcp_rsp_iu->scsi_status;
2142
2143 if (req->adapter->adapter_features & FSF_FEATURE_MEASUREMENT_DATA)
2144 zfcp_fsf_req_latency(req);
2145
2146 zfcp_fsf_trace_latency(req);
2147
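	/*
	 * If the FCP response carries response information, only the
	 * RSP_CODE_GOOD response code counts as success; everything else
	 * is reported to the SCSI midlayer as DID_ERROR.
	 */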
2148 if (unlikely(fcp_rsp_iu->validity.bits.fcp_rsp_len_valid)) {
2149 if (fcp_rsp_info[3] == RSP_CODE_GOOD)
2150 set_host_byte(scpnt, DID_OK);
2151 else {
2152 set_host_byte(scpnt, DID_ERROR);
2153 goto skip_fsfstatus;
2154 }
2155 }
2156
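	/*
	 * Copy sense data into the midlayer sense buffer, bounded by the
	 * reported sense length, the space left in the FCP response and
	 * SCSI_SENSE_BUFFERSIZE.
	 */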
2157 if (unlikely(fcp_rsp_iu->validity.bits.fcp_sns_len_valid)) {
2158 sns_len = FSF_FCP_RSP_SIZE - sizeof(struct fcp_rsp_iu) +
2159 fcp_rsp_iu->fcp_rsp_len;
2160 sns_len = min(sns_len, (u32) SCSI_SENSE_BUFFERSIZE);
2161 sns_len = min(sns_len, fcp_rsp_iu->fcp_sns_len);
2162
2163 memcpy(scpnt->sense_buffer,
2164 zfcp_get_fcp_sns_info_ptr(fcp_rsp_iu), sns_len);
2165 }
2166
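	/*
	 * Report the residual count for underruns; if fewer bytes than the
	 * command's underflow limit were transferred, fail the command.
	 */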
2167 if (unlikely(fcp_rsp_iu->validity.bits.fcp_resid_under)) {
2168 scsi_set_resid(scpnt, fcp_rsp_iu->fcp_resid);
2169 if (scsi_bufflen(scpnt) - scsi_get_resid(scpnt) <
2170 scpnt->underflow)
2171 set_host_byte(scpnt, DID_ERROR);
2172 }
2173 skip_fsfstatus:
2174 if (scpnt->result != 0)
2175 zfcp_scsi_dbf_event_result("erro", 3, req->adapter, scpnt, req);
2176 else if (scpnt->retries > 0)
2177 zfcp_scsi_dbf_event_result("retr", 4, req->adapter, scpnt, req);
2178 else
2179 zfcp_scsi_dbf_event_result("norm", 6, req->adapter, scpnt, req);
2180
2181 scpnt->host_scribble = NULL;
2182 (scpnt->scsi_done) (scpnt);
2183 /*
2184 * We must hold this lock until scsi_done has been called.
2185 * Otherwise we might call scsi_done after the abort handling
2186 * for this command has already completed.
2187 * Note: scsi_done must not block!
2188 */
2189 read_unlock_irqrestore(&req->adapter->abort_lock, flags);
2190 }
2191
2192 static void zfcp_fsf_send_fcp_ctm_handler(struct zfcp_fsf_req *req)
2193 {
2194 struct fcp_rsp_iu *fcp_rsp_iu = (struct fcp_rsp_iu *)
2195 &(req->qtcb->bottom.io.fcp_rsp);
2196 char *fcp_rsp_info = (char *) &fcp_rsp_iu[1];
2197
2198 if ((fcp_rsp_info[3] != RSP_CODE_GOOD) ||
2199 (req->status & ZFCP_STATUS_FSFREQ_ERROR))
2200 req->status |= ZFCP_STATUS_FSFREQ_TMFUNCFAILED;
2201 }
2202
2203
2204 static void zfcp_fsf_send_fcp_command_handler(struct zfcp_fsf_req *req)
2205 {
2206 struct zfcp_unit *unit;
2207 struct fsf_qtcb_header *header = &req->qtcb->header;
2208
2209 if (unlikely(req->status & ZFCP_STATUS_FSFREQ_TASK_MANAGEMENT))
2210 unit = req->data;
2211 else
2212 unit = req->unit;
2213
2214 if (unlikely(req->status & ZFCP_STATUS_FSFREQ_ERROR))
2215 goto skip_fsfstatus;
2216
2217 switch (header->fsf_status) {
2218 case FSF_HANDLE_MISMATCH:
2219 case FSF_PORT_HANDLE_NOT_VALID:
2220 zfcp_erp_adapter_reopen(unit->port->adapter, 0, 112, req);
2221 req->status |= ZFCP_STATUS_FSFREQ_ERROR;
2222 break;
2223 case FSF_FCPLUN_NOT_VALID:
2224 case FSF_LUN_HANDLE_NOT_VALID:
2225 zfcp_erp_port_reopen(unit->port, 0, 113, req);
2226 req->status |= ZFCP_STATUS_FSFREQ_ERROR;
2227 break;
2228 case FSF_SERVICE_CLASS_NOT_SUPPORTED:
2229 zfcp_fsf_class_not_supp(req);
2230 break;
2231 case FSF_ACCESS_DENIED:
2232 zfcp_fsf_access_denied_unit(req, unit);
2233 break;
2234 case FSF_DIRECTION_INDICATOR_NOT_VALID:
2235 dev_err(&req->adapter->ccw_device->dev,
2236 "Incorrect direction %d, unit 0x%016Lx on port "
2237 "0x%016Lx closed\n",
2238 req->qtcb->bottom.io.data_direction,
2239 (unsigned long long)unit->fcp_lun,
2240 (unsigned long long)unit->port->wwpn);
2241 zfcp_erp_adapter_shutdown(unit->port->adapter, 0, 133, req);
2242 req->status |= ZFCP_STATUS_FSFREQ_ERROR;
2243 break;
2244 case FSF_CMND_LENGTH_NOT_VALID:
2245 dev_err(&req->adapter->ccw_device->dev,
2246 "Incorrect CDB length %d, unit 0x%016Lx on "
2247 "port 0x%016Lx closed\n",
2248 req->qtcb->bottom.io.fcp_cmnd_length,
2249 (unsigned long long)unit->fcp_lun,
2250 (unsigned long long)unit->port->wwpn);
2251 zfcp_erp_adapter_shutdown(unit->port->adapter, 0, 134, req);
2252 req->status |= ZFCP_STATUS_FSFREQ_ERROR;
2253 break;
2254 case FSF_PORT_BOXED:
2255 zfcp_erp_port_boxed(unit->port, 53, req);
2256 req->status |= ZFCP_STATUS_FSFREQ_ERROR |
2257 ZFCP_STATUS_FSFREQ_RETRY;
2258 break;
2259 case FSF_LUN_BOXED:
2260 zfcp_erp_unit_boxed(unit, 54, req);
2261 req->status |= ZFCP_STATUS_FSFREQ_ERROR |
2262 ZFCP_STATUS_FSFREQ_RETRY;
2263 break;
2264 case FSF_ADAPTER_STATUS_AVAILABLE:
2265 if (header->fsf_status_qual.word[0] ==
2266 FSF_SQ_INVOKE_LINK_TEST_PROCEDURE)
2267 zfcp_test_link(unit->port);
2268 req->status |= ZFCP_STATUS_FSFREQ_ERROR;
2269 break;
2270 }
2271 skip_fsfstatus:
2272 if (req->status & ZFCP_STATUS_FSFREQ_TASK_MANAGEMENT)
2273 zfcp_fsf_send_fcp_ctm_handler(req);
2274 else {
2275 zfcp_fsf_send_fcp_command_task_handler(req);
2276 req->unit = NULL;
2277 zfcp_unit_put(unit);
2278 }
2279 }
2280
2281 static void zfcp_set_fcp_dl(struct fcp_cmnd_iu *fcp_cmd, u32 fcp_dl)
2282 {
2283 u32 *fcp_dl_ptr;
2284
2285 /*
2286 * fcp_dl_addr = start address of fcp_cmnd structure +
2287 * size of fixed part + size of dynamically sized add_fcp_cdb field,
2288 * see the FCP-2 specification
2289 */
2290 fcp_dl_ptr = (u32 *) ((unsigned char *) &fcp_cmd[1] +
2291 (fcp_cmd->add_fcp_cdb_length << 2));
2292 *fcp_dl_ptr = fcp_dl;
2293 }
2294
2295 /**
2296 * zfcp_fsf_send_fcp_command_task - initiate an FCP command (for a SCSI command)
2297 * @adapter: adapter where scsi command is issued
2298 * @unit: unit where command is sent to
2299 * @scsi_cmnd: scsi command to be sent
2300 * @use_timer: whether to start the FSF request timeout timer for this request
2301 * @req_flags: flags for fsf_request
2302 */
2303 int zfcp_fsf_send_fcp_command_task(struct zfcp_adapter *adapter,
2304 struct zfcp_unit *unit,
2305 struct scsi_cmnd *scsi_cmnd,
2306 int use_timer, int req_flags)
2307 {
2308 struct zfcp_fsf_req *req;
2309 struct fcp_cmnd_iu *fcp_cmnd_iu;
2310 unsigned int sbtype;
2311 int real_bytes, retval = -EIO;
2312
2313 if (unlikely(!(atomic_read(&unit->status) &
2314 ZFCP_STATUS_COMMON_UNBLOCKED)))
2315 return -EBUSY;
2316
2317 spin_lock(&adapter->req_q_lock);
2318 if (!zfcp_fsf_sbal_available(adapter))
2319 goto out;
2320 req = zfcp_fsf_req_create(adapter, FSF_QTCB_FCP_CMND, req_flags,
2321 adapter->pool.fsf_req_scsi);
2322 if (IS_ERR(req)) {
2323 retval = PTR_ERR(req);
2324 goto out;
2325 }
2326
2327 zfcp_unit_get(unit);
2328 req->unit = unit;
2329 req->data = scsi_cmnd;
2330 req->handler = zfcp_fsf_send_fcp_command_handler;
2331 req->qtcb->header.lun_handle = unit->handle;
2332 req->qtcb->header.port_handle = unit->port->handle;
2333 req->qtcb->bottom.io.service_class = FSF_CLASS_3;
2334
2335 scsi_cmnd->host_scribble = (unsigned char *) req->req_id;
2336
2337 fcp_cmnd_iu = (struct fcp_cmnd_iu *) &(req->qtcb->bottom.io.fcp_cmnd);
2338 fcp_cmnd_iu->fcp_lun = unit->fcp_lun;
2339 /*
2340 * set depending on data direction:
2341 * data direction bits in SBALE (SB Type)
2342 * data direction bits in QTCB
2343 * data direction bits in FCP_CMND IU
2344 */
2345 switch (scsi_cmnd->sc_data_direction) {
2346 case DMA_NONE:
2347 req->qtcb->bottom.io.data_direction = FSF_DATADIR_CMND;
2348 sbtype = SBAL_FLAGS0_TYPE_READ;
2349 break;
2350 case DMA_FROM_DEVICE:
2351 req->qtcb->bottom.io.data_direction = FSF_DATADIR_READ;
2352 sbtype = SBAL_FLAGS0_TYPE_READ;
2353 fcp_cmnd_iu->rddata = 1;
2354 break;
2355 case DMA_TO_DEVICE:
2356 req->qtcb->bottom.io.data_direction = FSF_DATADIR_WRITE;
2357 sbtype = SBAL_FLAGS0_TYPE_WRITE;
2358 fcp_cmnd_iu->wddata = 1;
2359 break;
2360 case DMA_BIDIRECTIONAL:
2361 default:
2362 retval = -EIO;
2363 goto failed_scsi_cmnd;
2364 }
2365
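	/*
	 * Use a simple queue tag when the SCSI device supports tagging, or
	 * when the unit is opened shared and read-only; otherwise fall back
	 * to untagged.
	 */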
2366 if (likely((scsi_cmnd->device->simple_tags) ||
2367 ((atomic_read(&unit->status) & ZFCP_STATUS_UNIT_READONLY) &&
2368 (atomic_read(&unit->status) & ZFCP_STATUS_UNIT_SHARED))))
2369 fcp_cmnd_iu->task_attribute = SIMPLE_Q;
2370 else
2371 fcp_cmnd_iu->task_attribute = UNTAGGED;
2372
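	/*
	 * CDBs longer than the 16-byte fixed field are carried in the
	 * additional FCP CDB area; its length is encoded in 4-byte words.
	 */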
2373 if (unlikely(scsi_cmnd->cmd_len > FCP_CDB_LENGTH))
2374 fcp_cmnd_iu->add_fcp_cdb_length =
2375 (scsi_cmnd->cmd_len - FCP_CDB_LENGTH) >> 2;
2376
2377 memcpy(fcp_cmnd_iu->fcp_cdb, scsi_cmnd->cmnd, scsi_cmnd->cmd_len);
2378
2379 req->qtcb->bottom.io.fcp_cmnd_length = sizeof(struct fcp_cmnd_iu) +
2380 fcp_cmnd_iu->add_fcp_cdb_length + sizeof(u32);
2381
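	/*
	 * Map the scatter-gather list of the SCSI command onto the SBALs of
	 * the request; a negative result means the mapping failed, either
	 * due to an internal error or because the data exceeds the request's
	 * SBAL capacity.
	 */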
2382 real_bytes = zfcp_qdio_sbals_from_sg(req, sbtype,
2383 scsi_sglist(scsi_cmnd),
2384 FSF_MAX_SBALS_PER_REQ);
2385 if (unlikely(real_bytes < 0)) {
2386 if (req->sbal_number < FSF_MAX_SBALS_PER_REQ)
2387 retval = -EIO;
2388 else {
2389 dev_err(&adapter->ccw_device->dev,
2390 "Oversize data package, unit 0x%016Lx "
2391 "on port 0x%016Lx closed\n",
2392 (unsigned long long)unit->fcp_lun,
2393 (unsigned long long)unit->port->wwpn);
2394 zfcp_erp_unit_shutdown(unit, 0, 131, req);
2395 retval = -EINVAL;
2396 }
2397 goto failed_scsi_cmnd;
2398 }
2399
2400 zfcp_set_fcp_dl(fcp_cmnd_iu, real_bytes);
2401
2402 if (use_timer)
2403 zfcp_fsf_start_timer(req, ZFCP_FSF_REQUEST_TIMEOUT);
2404
2405 retval = zfcp_fsf_req_send(req);
2406 if (unlikely(retval))
2407 goto failed_scsi_cmnd;
2408
2409 goto out;
2410
2411 failed_scsi_cmnd:
2412 zfcp_unit_put(unit);
2413 zfcp_fsf_req_free(req);
2414 scsi_cmnd->host_scribble = NULL;
2415 out:
2416 spin_unlock(&adapter->req_q_lock);
2417 return retval;
2418 }
2419
2420 /**
2421 * zfcp_fsf_send_fcp_ctm - send SCSI task management command
2422 * @adapter: pointer to struct zfcp_adapter
2423 * @unit: pointer to struct zfcp_unit
2424 * @tm_flags: unsigned byte for task management flags
2425 * @req_flags: int request flags
2426 * Returns: on success pointer to struct zfcp_fsf_req, NULL otherwise
2427 */
2428 struct zfcp_fsf_req *zfcp_fsf_send_fcp_ctm(struct zfcp_adapter *adapter,
2429 struct zfcp_unit *unit,
2430 u8 tm_flags, int req_flags)
2431 {
2432 struct qdio_buffer_element *sbale;
2433 struct zfcp_fsf_req *req = NULL;
2434 struct fcp_cmnd_iu *fcp_cmnd_iu;
2435
2436 if (unlikely(!(atomic_read(&unit->status) &
2437 ZFCP_STATUS_COMMON_UNBLOCKED)))
2438 return NULL;
2439
2440 spin_lock(&adapter->req_q_lock);
2441 if (!zfcp_fsf_sbal_available(adapter))
2442 goto out;
2443 req = zfcp_fsf_req_create(adapter, FSF_QTCB_FCP_CMND, req_flags,
2444 adapter->pool.fsf_req_scsi);
2445 if (IS_ERR(req))
2446 goto out;
2447
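	/*
	 * A task management function is sent as an FCP command without data
	 * transfer; the TM flags are carried in the FCP_CMND IU and the
	 * completion is routed through the common FCP command handler.
	 */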
2448 req->status |= ZFCP_STATUS_FSFREQ_TASK_MANAGEMENT;
2449 req->data = unit;
2450 req->handler = zfcp_fsf_send_fcp_command_handler;
2451 req->qtcb->header.lun_handle = unit->handle;
2452 req->qtcb->header.port_handle = unit->port->handle;
2453 req->qtcb->bottom.io.data_direction = FSF_DATADIR_CMND;
2454 req->qtcb->bottom.io.service_class = FSF_CLASS_3;
2455 req->qtcb->bottom.io.fcp_cmnd_length = sizeof(struct fcp_cmnd_iu) +
2456 sizeof(u32);
2457
2458 sbale = zfcp_qdio_sbale_req(req);
2459 sbale[0].flags |= SBAL_FLAGS0_TYPE_WRITE;
2460 sbale[1].flags |= SBAL_FLAGS_LAST_ENTRY;
2461
2462 fcp_cmnd_iu = (struct fcp_cmnd_iu *) &req->qtcb->bottom.io.fcp_cmnd;
2463 fcp_cmnd_iu->fcp_lun = unit->fcp_lun;
2464 fcp_cmnd_iu->task_management_flags = tm_flags;
2465
2466 zfcp_fsf_start_timer(req, ZFCP_SCSI_ER_TIMEOUT);
2467 if (!zfcp_fsf_req_send(req))
2468 goto out;
2469
2470 zfcp_fsf_req_free(req);
2471 req = NULL;
2472 out:
2473 spin_unlock(&adapter->req_q_lock);
2474 return req;
2475 }
2476
2477 static void zfcp_fsf_control_file_handler(struct zfcp_fsf_req *req)
2478 {
2479 if (req->qtcb->header.fsf_status != FSF_GOOD)
2480 req->status |= ZFCP_STATUS_FSFREQ_ERROR;
2481 }
2482
2483 /**
2484 * zfcp_fsf_control_file - control file upload/download
2485 * @adapter: pointer to struct zfcp_adapter
2486 * @fsf_cfdc: pointer to struct zfcp_fsf_cfdc
2487 * Returns: on success pointer to struct zfcp_fsf_req, ERR_PTR() otherwise
2488 */
2489 struct zfcp_fsf_req *zfcp_fsf_control_file(struct zfcp_adapter *adapter,
2490 struct zfcp_fsf_cfdc *fsf_cfdc)
2491 {
2492 struct qdio_buffer_element *sbale;
2493 struct zfcp_fsf_req *req = NULL;
2494 struct fsf_qtcb_bottom_support *bottom;
2495 int direction, retval = -EIO, bytes;
2496
2497 if (!(adapter->adapter_features & FSF_FEATURE_CFDC))
2498 return ERR_PTR(-EOPNOTSUPP);
2499
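	/*
	 * Map the CFDC command to the SBAL transfer direction: a download
	 * writes the control file to the adapter, an upload reads it back.
	 */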
2500 switch (fsf_cfdc->command) {
2501 case FSF_QTCB_DOWNLOAD_CONTROL_FILE:
2502 direction = SBAL_FLAGS0_TYPE_WRITE;
2503 break;
2504 case FSF_QTCB_UPLOAD_CONTROL_FILE:
2505 direction = SBAL_FLAGS0_TYPE_READ;
2506 break;
2507 default:
2508 return ERR_PTR(-EINVAL);
2509 }
2510
2511 spin_lock_bh(&adapter->req_q_lock);
2512 if (zfcp_fsf_req_sbal_get(adapter))
2513 goto out;
2514
2515 req = zfcp_fsf_req_create(adapter, fsf_cfdc->command, 0, NULL);
2516 if (IS_ERR(req)) {
2517 retval = -EPERM;
2518 goto out;
2519 }
2520
2521 req->handler = zfcp_fsf_control_file_handler;
2522
2523 sbale = zfcp_qdio_sbale_req(req);
2524 sbale[0].flags |= direction;
2525
2526 bottom = &req->qtcb->bottom.support;
2527 bottom->operation_subtype = FSF_CFDC_OPERATION_SUBTYPE;
2528 bottom->option = fsf_cfdc->option;
2529
2530 bytes = zfcp_qdio_sbals_from_sg(req, direction, fsf_cfdc->sg,
2531 FSF_MAX_SBALS_PER_REQ);
2532 if (bytes != ZFCP_CFDC_MAX_SIZE) {
2533 retval = -ENOMEM;
2534 zfcp_fsf_req_free(req);
2535 goto out;
2536 }
2537
2538 zfcp_fsf_start_timer(req, ZFCP_FSF_REQUEST_TIMEOUT);
2539 retval = zfcp_fsf_req_send(req);
2540 out:
2541 spin_unlock_bh(&adapter->req_q_lock);
2542
2543 if (!retval) {
2544 wait_event(req->completion_wq,
2545 req->status & ZFCP_STATUS_FSFREQ_COMPLETED);
2546 return req;
2547 }
2548 return ERR_PTR(retval);
2549 }