]> git.proxmox.com Git - mirror_ubuntu-bionic-kernel.git/blame - drivers/scsi/qla2xxx/qla_bsg.c
[SCSI] qla2xxx: General checkpatch corrections.
[mirror_ubuntu-bionic-kernel.git] / drivers / scsi / qla2xxx / qla_bsg.c
CommitLineData
6e98016c
GM
1/*
2 * QLogic Fibre Channel HBA Driver
3 * Copyright (c) 2003-2008 QLogic Corporation
4 *
5 * See LICENSE.qla2xxx for copyright and licensing details.
6 */
7#include "qla_def.h"
8
9#include <linux/kthread.h>
10#include <linux/vmalloc.h>
11#include <linux/delay.h>
12
13/* BSG support for ELS/CT pass through */
14inline srb_t *
15qla2x00_get_ctx_bsg_sp(scsi_qla_host_t *vha, fc_port_t *fcport, size_t size)
16{
17 srb_t *sp;
18 struct qla_hw_data *ha = vha->hw;
19 struct srb_bsg_ctx *ctx;
20
21 sp = mempool_alloc(ha->srb_mempool, GFP_KERNEL);
22 if (!sp)
23 goto done;
24 ctx = kzalloc(size, GFP_KERNEL);
25 if (!ctx) {
26 mempool_free(sp, ha->srb_mempool);
27 sp = NULL;
28 goto done;
29 }
30
31 memset(sp, 0, sizeof(*sp));
32 sp->fcport = fcport;
33 sp->ctx = ctx;
34done:
35 return sp;
36}
37
09ff701a
SR
38int
39qla24xx_fcp_prio_cfg_valid(struct qla_fcp_prio_cfg *pri_cfg, uint8_t flag)
40{
41 int i, ret, num_valid;
42 uint8_t *bcode;
43 struct qla_fcp_prio_entry *pri_entry;
44
45 ret = 1;
46 num_valid = 0;
47 bcode = (uint8_t *)pri_cfg;
48
49 if (bcode[0x0] != 'H' || bcode[0x1] != 'Q' || bcode[0x2] != 'O' ||
50 bcode[0x3] != 'S') {
51 return 0;
52 }
53 if (flag != 1)
54 return ret;
55
56 pri_entry = &pri_cfg->entry[0];
57 for (i = 0; i < pri_cfg->num_entries; i++) {
58 if (pri_entry->flags & FCP_PRIO_ENTRY_TAG_VALID)
59 num_valid++;
60 pri_entry++;
61 }
62
63 if (num_valid == 0)
64 ret = 0;
65
66 return ret;
67}
68
/*
 * qla24xx_proc_fcp_prio_cfg_cmd() - handle the QL_VND_FCP_PRIO_CFG_CMD
 * vendor BSG request: enable/disable FCP priority handling, or get/set
 * the FCP priority configuration block.
 *
 * Returns 0 or a negative errno; the BSG reply result is set on each
 * path and job_done() is always invoked before returning.
 */
static int
qla24xx_proc_fcp_prio_cfg_cmd(struct fc_bsg_job *bsg_job)
{
	struct Scsi_Host *host = bsg_job->shost;
	scsi_qla_host_t *vha = shost_priv(host);
	struct qla_hw_data *ha = vha->hw;
	int ret = 0;
	uint32_t len;
	uint32_t oper;

	bsg_job->reply->reply_payload_rcv_len = 0;

	/* Do not touch the adapter while an ISP abort is pending/active. */
	if (test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags) ||
	    test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags) ||
	    test_bit(ISP_ABORT_RETRY, &vha->dpc_flags)) {
		ret = -EBUSY;
		goto exit_fcp_prio_cfg;
	}

	/* Get the sub command */
	oper = bsg_job->request->rqst_data.h_vendor.vendor_cmd[1];

	/* Only set config is allowed if config memory is not allocated */
	if (!ha->fcp_prio_cfg && (oper != QLFC_FCP_PRIO_SET_CONFIG)) {
		ret = -EINVAL;
		goto exit_fcp_prio_cfg;
	}
	switch (oper) {
	case QLFC_FCP_PRIO_DISABLE:
		if (ha->flags.fcp_prio_enabled) {
			ha->flags.fcp_prio_enabled = 0;
			ha->fcp_prio_cfg->attributes &=
				~FCP_PRIO_ATTR_ENABLE;
			qla24xx_update_all_fcp_prio(vha);
			bsg_job->reply->result = DID_OK;
		} else {
			/* Already disabled. */
			ret = -EINVAL;
			bsg_job->reply->result = (DID_ERROR << 16);
			goto exit_fcp_prio_cfg;
		}
		break;

	case QLFC_FCP_PRIO_ENABLE:
		if (!ha->flags.fcp_prio_enabled) {
			/* Enabling requires a previously loaded config. */
			if (ha->fcp_prio_cfg) {
				ha->flags.fcp_prio_enabled = 1;
				ha->fcp_prio_cfg->attributes |=
					FCP_PRIO_ATTR_ENABLE;
				qla24xx_update_all_fcp_prio(vha);
				bsg_job->reply->result = DID_OK;
			} else {
				ret = -EINVAL;
				bsg_job->reply->result = (DID_ERROR << 16);
				goto exit_fcp_prio_cfg;
			}
		}
		break;

	case QLFC_FCP_PRIO_GET_CONFIG:
		/* Caller's reply buffer bounds how much config we return. */
		len = bsg_job->reply_payload.payload_len;
		if (!len || len > FCP_PRIO_CFG_SIZE) {
			ret = -EINVAL;
			bsg_job->reply->result = (DID_ERROR << 16);
			goto exit_fcp_prio_cfg;
		}

		bsg_job->reply->result = DID_OK;
		bsg_job->reply->reply_payload_rcv_len =
			sg_copy_from_buffer(
			bsg_job->reply_payload.sg_list,
			bsg_job->reply_payload.sg_cnt, ha->fcp_prio_cfg,
			len);

		break;

	case QLFC_FCP_PRIO_SET_CONFIG:
		len = bsg_job->request_payload.payload_len;
		if (!len || len > FCP_PRIO_CFG_SIZE) {
			bsg_job->reply->result = (DID_ERROR << 16);
			ret = -EINVAL;
			goto exit_fcp_prio_cfg;
		}

		/* Allocate the config buffer lazily on the first SET. */
		if (!ha->fcp_prio_cfg) {
			ha->fcp_prio_cfg = vmalloc(FCP_PRIO_CFG_SIZE);
			if (!ha->fcp_prio_cfg) {
				qla_printk(KERN_WARNING, ha,
					"Unable to allocate memory "
					"for fcp prio config data (%x).\n",
					FCP_PRIO_CFG_SIZE);
				bsg_job->reply->result = (DID_ERROR << 16);
				ret = -ENOMEM;
				goto exit_fcp_prio_cfg;
			}
		}

		memset(ha->fcp_prio_cfg, 0, FCP_PRIO_CFG_SIZE);
		sg_copy_to_buffer(bsg_job->request_payload.sg_list,
			bsg_job->request_payload.sg_cnt, ha->fcp_prio_cfg,
			FCP_PRIO_CFG_SIZE);

		/* validate fcp priority data */
		if (!qla24xx_fcp_prio_cfg_valid(
			(struct qla_fcp_prio_cfg *)
			ha->fcp_prio_cfg, 1)) {
			bsg_job->reply->result = (DID_ERROR << 16);
			ret = -EINVAL;
			/* If the buffer is invalid, the cached
			 * fcp_prio_cfg is of no use -- drop it.
			 */
			vfree(ha->fcp_prio_cfg);
			ha->fcp_prio_cfg = NULL;
			goto exit_fcp_prio_cfg;
		}

		/* Mirror the enable bit from the new config. */
		ha->flags.fcp_prio_enabled = 0;
		if (ha->fcp_prio_cfg->attributes & FCP_PRIO_ATTR_ENABLE)
			ha->flags.fcp_prio_enabled = 1;
		qla24xx_update_all_fcp_prio(vha);
		bsg_job->reply->result = DID_OK;
		break;
	default:
		ret = -EINVAL;
		break;
	}
exit_fcp_prio_cfg:
	bsg_job->job_done(bsg_job);
	return ret;
}
6e98016c
GM
198static int
199qla2x00_process_els(struct fc_bsg_job *bsg_job)
200{
201 struct fc_rport *rport;
202 fc_port_t *fcport;
203 struct Scsi_Host *host;
204 scsi_qla_host_t *vha;
205 struct qla_hw_data *ha;
206 srb_t *sp;
207 const char *type;
208 int req_sg_cnt, rsp_sg_cnt;
209 int rval = (DRIVER_ERROR << 16);
210 uint16_t nextlid = 0;
211 struct srb_bsg *els;
212
213 /* Multiple SG's are not supported for ELS requests */
214 if (bsg_job->request_payload.sg_cnt > 1 ||
215 bsg_job->reply_payload.sg_cnt > 1) {
216 DEBUG2(printk(KERN_INFO
217 "multiple SG's are not supported for ELS requests"
218 " [request_sg_cnt: %x reply_sg_cnt: %x]\n",
219 bsg_job->request_payload.sg_cnt,
220 bsg_job->reply_payload.sg_cnt));
221 rval = -EPERM;
222 goto done;
223 }
224
225 /* ELS request for rport */
226 if (bsg_job->request->msgcode == FC_BSG_RPT_ELS) {
227 rport = bsg_job->rport;
228 fcport = *(fc_port_t **) rport->dd_data;
229 host = rport_to_shost(rport);
230 vha = shost_priv(host);
231 ha = vha->hw;
232 type = "FC_BSG_RPT_ELS";
233
234 /* make sure the rport is logged in,
235 * if not perform fabric login
236 */
237 if (qla2x00_fabric_login(vha, fcport, &nextlid)) {
238 DEBUG2(qla_printk(KERN_WARNING, ha,
239 "failed to login port %06X for ELS passthru\n",
240 fcport->d_id.b24));
241 rval = -EIO;
242 goto done;
243 }
244 } else {
245 host = bsg_job->shost;
246 vha = shost_priv(host);
247 ha = vha->hw;
248 type = "FC_BSG_HST_ELS_NOLOGIN";
249
250 /* Allocate a dummy fcport structure, since functions
251 * preparing the IOCB and mailbox command retrieves port
252 * specific information from fcport structure. For Host based
253 * ELS commands there will be no fcport structure allocated
254 */
255 fcport = qla2x00_alloc_fcport(vha, GFP_KERNEL);
256 if (!fcport) {
257 rval = -ENOMEM;
258 goto done;
259 }
260
261 /* Initialize all required fields of fcport */
262 fcport->vha = vha;
263 fcport->vp_idx = vha->vp_idx;
264 fcport->d_id.b.al_pa =
265 bsg_job->request->rqst_data.h_els.port_id[0];
266 fcport->d_id.b.area =
267 bsg_job->request->rqst_data.h_els.port_id[1];
268 fcport->d_id.b.domain =
269 bsg_job->request->rqst_data.h_els.port_id[2];
270 fcport->loop_id =
271 (fcport->d_id.b.al_pa == 0xFD) ?
272 NPH_FABRIC_CONTROLLER : NPH_F_PORT;
273 }
274
275 if (!vha->flags.online) {
276 DEBUG2(qla_printk(KERN_WARNING, ha,
6c452a45 277 "host not online\n"));
6e98016c
GM
278 rval = -EIO;
279 goto done;
280 }
281
282 req_sg_cnt =
283 dma_map_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list,
284 bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
285 if (!req_sg_cnt) {
286 rval = -ENOMEM;
287 goto done_free_fcport;
288 }
6c452a45
AV
289
290 rsp_sg_cnt = dma_map_sg(&ha->pdev->dev, bsg_job->reply_payload.sg_list,
291 bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
6e98016c
GM
292 if (!rsp_sg_cnt) {
293 rval = -ENOMEM;
294 goto done_free_fcport;
295 }
296
297 if ((req_sg_cnt != bsg_job->request_payload.sg_cnt) ||
6c452a45 298 (rsp_sg_cnt != bsg_job->reply_payload.sg_cnt)) {
6e98016c
GM
299 DEBUG2(printk(KERN_INFO
300 "dma mapping resulted in different sg counts \
301 [request_sg_cnt: %x dma_request_sg_cnt: %x\
302 reply_sg_cnt: %x dma_reply_sg_cnt: %x]\n",
303 bsg_job->request_payload.sg_cnt, req_sg_cnt,
304 bsg_job->reply_payload.sg_cnt, rsp_sg_cnt));
305 rval = -EAGAIN;
306 goto done_unmap_sg;
307 }
308
309 /* Alloc SRB structure */
310 sp = qla2x00_get_ctx_bsg_sp(vha, fcport, sizeof(struct srb_bsg));
311 if (!sp) {
312 rval = -ENOMEM;
6c452a45 313 goto done_unmap_sg;
6e98016c
GM
314 }
315
316 els = sp->ctx;
317 els->ctx.type =
318 (bsg_job->request->msgcode == FC_BSG_RPT_ELS ?
319 SRB_ELS_CMD_RPT : SRB_ELS_CMD_HST);
320 els->bsg_job = bsg_job;
321
322 DEBUG2(qla_printk(KERN_INFO, ha,
323 "scsi(%ld:%x): bsg rqst type: %s els type: %x - loop-id=%x "
324 "portid=%02x%02x%02x.\n", vha->host_no, sp->handle, type,
325 bsg_job->request->rqst_data.h_els.command_code,
326 fcport->loop_id, fcport->d_id.b.domain, fcport->d_id.b.area,
327 fcport->d_id.b.al_pa));
328
329 rval = qla2x00_start_sp(sp);
330 if (rval != QLA_SUCCESS) {
331 kfree(sp->ctx);
332 mempool_free(sp, ha->srb_mempool);
333 rval = -EIO;
334 goto done_unmap_sg;
335 }
336 return rval;
337
338done_unmap_sg:
339 dma_unmap_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list,
340 bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
341 dma_unmap_sg(&ha->pdev->dev, bsg_job->reply_payload.sg_list,
342 bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
343 goto done_free_fcport;
344
345done_free_fcport:
346 if (bsg_job->request->msgcode == FC_BSG_HST_ELS_NOLOGIN)
347 kfree(fcport);
348done:
349 return rval;
350}
351
352static int
353qla2x00_process_ct(struct fc_bsg_job *bsg_job)
354{
355 srb_t *sp;
356 struct Scsi_Host *host = bsg_job->shost;
357 scsi_qla_host_t *vha = shost_priv(host);
358 struct qla_hw_data *ha = vha->hw;
359 int rval = (DRIVER_ERROR << 16);
360 int req_sg_cnt, rsp_sg_cnt;
361 uint16_t loop_id;
362 struct fc_port *fcport;
363 char *type = "FC_BSG_HST_CT";
364 struct srb_bsg *ct;
365
366 /* pass through is supported only for ISP 4Gb or higher */
6c452a45 367 if (!IS_FWI2_CAPABLE(ha)) {
6e98016c 368 DEBUG2(qla_printk(KERN_INFO, ha,
6c452a45
AV
369 "scsi(%ld):Firmware is not capable to support FC "
370 "CT pass thru\n", vha->host_no));
6e98016c
GM
371 rval = -EPERM;
372 goto done;
373 }
374
375 req_sg_cnt =
376 dma_map_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list,
377 bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
6c452a45 378 if (!req_sg_cnt) {
6e98016c
GM
379 rval = -ENOMEM;
380 goto done;
381 }
382
383 rsp_sg_cnt = dma_map_sg(&ha->pdev->dev, bsg_job->reply_payload.sg_list,
384 bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
385 if (!rsp_sg_cnt) {
386 rval = -ENOMEM;
387 goto done;
388 }
389
390 if ((req_sg_cnt != bsg_job->request_payload.sg_cnt) ||
6c452a45 391 (rsp_sg_cnt != bsg_job->reply_payload.sg_cnt)) {
6e98016c 392 DEBUG2(qla_printk(KERN_WARNING, ha,
6c452a45
AV
393 "[request_sg_cnt: %x dma_request_sg_cnt: %x\
394 reply_sg_cnt: %x dma_reply_sg_cnt: %x]\n",
395 bsg_job->request_payload.sg_cnt, req_sg_cnt,
396 bsg_job->reply_payload.sg_cnt, rsp_sg_cnt));
6e98016c 397 rval = -EAGAIN;
6c452a45 398 goto done_unmap_sg;
6e98016c
GM
399 }
400
401 if (!vha->flags.online) {
402 DEBUG2(qla_printk(KERN_WARNING, ha,
403 "host not online\n"));
404 rval = -EIO;
405 goto done_unmap_sg;
406 }
407
408 loop_id =
409 (bsg_job->request->rqst_data.h_ct.preamble_word1 & 0xFF000000)
410 >> 24;
411 switch (loop_id) {
6c452a45
AV
412 case 0xFC:
413 loop_id = cpu_to_le16(NPH_SNS);
414 break;
415 case 0xFA:
416 loop_id = vha->mgmt_svr_loop_id;
417 break;
418 default:
419 DEBUG2(qla_printk(KERN_INFO, ha,
420 "Unknown loop id: %x\n", loop_id));
421 rval = -EINVAL;
422 goto done_unmap_sg;
6e98016c
GM
423 }
424
425 /* Allocate a dummy fcport structure, since functions preparing the
426 * IOCB and mailbox command retrieves port specific information
427 * from fcport structure. For Host based ELS commands there will be
428 * no fcport structure allocated
429 */
430 fcport = qla2x00_alloc_fcport(vha, GFP_KERNEL);
6c452a45 431 if (!fcport) {
6e98016c 432 rval = -ENOMEM;
6c452a45 433 goto done_unmap_sg;
6e98016c
GM
434 }
435
436 /* Initialize all required fields of fcport */
437 fcport->vha = vha;
438 fcport->vp_idx = vha->vp_idx;
439 fcport->d_id.b.al_pa = bsg_job->request->rqst_data.h_ct.port_id[0];
440 fcport->d_id.b.area = bsg_job->request->rqst_data.h_ct.port_id[1];
441 fcport->d_id.b.domain = bsg_job->request->rqst_data.h_ct.port_id[2];
442 fcport->loop_id = loop_id;
443
444 /* Alloc SRB structure */
445 sp = qla2x00_get_ctx_bsg_sp(vha, fcport, sizeof(struct srb_bsg));
446 if (!sp) {
447 rval = -ENOMEM;
448 goto done_free_fcport;
449 }
450
451 ct = sp->ctx;
452 ct->ctx.type = SRB_CT_CMD;
453 ct->bsg_job = bsg_job;
454
455 DEBUG2(qla_printk(KERN_INFO, ha,
456 "scsi(%ld:%x): bsg rqst type: %s els type: %x - loop-id=%x "
457 "portid=%02x%02x%02x.\n", vha->host_no, sp->handle, type,
458 (bsg_job->request->rqst_data.h_ct.preamble_word2 >> 16),
459 fcport->loop_id, fcport->d_id.b.domain, fcport->d_id.b.area,
460 fcport->d_id.b.al_pa));
461
462 rval = qla2x00_start_sp(sp);
463 if (rval != QLA_SUCCESS) {
464 kfree(sp->ctx);
465 mempool_free(sp, ha->srb_mempool);
466 rval = -EIO;
467 goto done_free_fcport;
468 }
469 return rval;
470
471done_free_fcport:
472 kfree(fcport);
473done_unmap_sg:
474 dma_unmap_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list,
475 bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
476 dma_unmap_sg(&ha->pdev->dev, bsg_job->reply_payload.sg_list,
477 bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
478done:
479 return rval;
480}
481
/*
 * qla2x00_process_loopback() - run a diagnostic loopback or ECHO test.
 *
 * Copies the request payload into a DMA-coherent buffer, issues either
 * an internal loopback test (non-F_PORT topologies) or an ELS ECHO
 * (F_PORT), and returns the received data.  The firmware mailbox status
 * plus the command byte are appended after the fc_bsg_reply in the
 * request's sense buffer for the caller to inspect.
 */
static int
qla2x00_process_loopback(struct fc_bsg_job *bsg_job)
{
	struct Scsi_Host *host = bsg_job->shost;
	scsi_qla_host_t *vha = shost_priv(host);
	struct qla_hw_data *ha = vha->hw;
	int rval;
	uint8_t command_sent;
	char *type;
	struct msg_echo_lb elreq;
	uint16_t response[MAILBOX_REGISTER_COUNT];
	uint8_t *fw_sts_ptr;
	uint8_t *req_data = NULL;
	dma_addr_t req_data_dma;
	uint32_t req_data_len;
	uint8_t *rsp_data = NULL;
	dma_addr_t rsp_data_dma;
	uint32_t rsp_data_len;

	/* Refuse while an ISP abort/recovery is pending or active. */
	if (test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags) ||
	    test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags) ||
	    test_bit(ISP_ABORT_RETRY, &vha->dpc_flags))
		return -EBUSY;

	if (!vha->flags.online) {
		DEBUG2(qla_printk(KERN_WARNING, ha, "host not online\n"));
		return -EIO;
	}

	elreq.req_sg_cnt = dma_map_sg(&ha->pdev->dev,
	    bsg_job->request_payload.sg_list, bsg_job->request_payload.sg_cnt,
	    DMA_TO_DEVICE);

	if (!elreq.req_sg_cnt)
		return -ENOMEM;

	elreq.rsp_sg_cnt = dma_map_sg(&ha->pdev->dev,
	    bsg_job->reply_payload.sg_list, bsg_job->reply_payload.sg_cnt,
	    DMA_FROM_DEVICE);

	if (!elreq.rsp_sg_cnt) {
		rval = -ENOMEM;
		goto done_unmap_req_sg;
	}

	if ((elreq.req_sg_cnt != bsg_job->request_payload.sg_cnt) ||
	    (elreq.rsp_sg_cnt != bsg_job->reply_payload.sg_cnt)) {
		DEBUG2(printk(KERN_INFO
		    "dma mapping resulted in different sg counts "
		    "[request_sg_cnt: %x dma_request_sg_cnt: %x "
		    "reply_sg_cnt: %x dma_reply_sg_cnt: %x]\n",
		    bsg_job->request_payload.sg_cnt, elreq.req_sg_cnt,
		    bsg_job->reply_payload.sg_cnt, elreq.rsp_sg_cnt));
		rval = -EAGAIN;
		goto done_unmap_sg;
	}
	/* NOTE(review): the response buffer is sized from the *request*
	 * payload length -- presumably loopback returns exactly what was
	 * sent; confirm against the firmware interface.
	 */
	req_data_len = rsp_data_len = bsg_job->request_payload.payload_len;
	req_data = dma_alloc_coherent(&ha->pdev->dev, req_data_len,
	    &req_data_dma, GFP_KERNEL);
	if (!req_data) {
		DEBUG2(printk(KERN_ERR "%s: dma alloc for req_data "
		    "failed for host=%lu\n", __func__, vha->host_no));
		rval = -ENOMEM;
		goto done_unmap_sg;
	}

	rsp_data = dma_alloc_coherent(&ha->pdev->dev, rsp_data_len,
	    &rsp_data_dma, GFP_KERNEL);
	if (!rsp_data) {
		DEBUG2(printk(KERN_ERR "%s: dma alloc for rsp_data "
		    "failed for host=%lu\n", __func__, vha->host_no));
		rval = -ENOMEM;
		goto done_free_dma_req;
	}

	/* Copy the request buffer in req_data now */
	sg_copy_to_buffer(bsg_job->request_payload.sg_list,
	    bsg_job->request_payload.sg_cnt, req_data, req_data_len);

	elreq.send_dma = req_data_dma;
	elreq.rcv_dma = rsp_data_dma;
	elreq.transfer_size = req_data_len;

	/* Test options (e.g. loopback type) ride in vendor_cmd[1]. */
	elreq.options = bsg_job->request->rqst_data.h_vendor.vendor_cmd[1];

	/* Internal loopback unless attached point-to-fabric (F_PORT),
	 * where an ELS ECHO is used instead.
	 */
	if (ha->current_topology != ISP_CFG_F) {
		type = "FC_BSG_HST_VENDOR_LOOPBACK";
		DEBUG2(qla_printk(KERN_INFO, ha,
		    "scsi(%ld) bsg rqst type: %s\n",
		    vha->host_no, type));

		command_sent = INT_DEF_LB_LOOPBACK_CMD;
		rval = qla2x00_loopback_test(vha, &elreq, response);
		if (IS_QLA81XX(ha)) {
			/* On 81xx a failed loopback can leave the link
			 * in a bad state; schedule an ISP abort to
			 * recover.
			 */
			if (response[0] == MBS_COMMAND_ERROR &&
			    response[1] == MBS_LB_RESET) {
				DEBUG2(printk(KERN_ERR "%s(%ld): ABORTing "
				    "ISP\n", __func__, vha->host_no));
				set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
				qla2xxx_wake_dpc(vha);
			}
		}
	} else {
		type = "FC_BSG_HST_VENDOR_ECHO_DIAG";
		DEBUG2(qla_printk(KERN_INFO, ha,
		    "scsi(%ld) bsg rqst type: %s\n", vha->host_no, type));
		command_sent = INT_DEF_LB_ECHO_CMD;
		rval = qla2x00_echo_test(vha, &elreq, response);
	}

	if (rval) {
		DEBUG2(qla_printk(KERN_WARNING, ha, "scsi(%ld) Vendor "
		    "request %s failed\n", vha->host_no, type));

		/* Mailbox status + command byte go after the
		 * fc_bsg_reply in the sense buffer.
		 */
		fw_sts_ptr = ((uint8_t *)bsg_job->req->sense) +
		    sizeof(struct fc_bsg_reply);

		memcpy(fw_sts_ptr, response, sizeof(response));
		fw_sts_ptr += sizeof(response);
		*fw_sts_ptr = command_sent;
		/* Firmware failure is reported via the BSG result, not
		 * the function's return value.
		 */
		rval = 0;
		bsg_job->reply->reply_payload_rcv_len = 0;
		bsg_job->reply->result = (DID_ERROR << 16);
	} else {
		DEBUG2(qla_printk(KERN_WARNING, ha, "scsi(%ld) Vendor "
		    "request %s completed\n", vha->host_no, type));

		bsg_job->reply_len = sizeof(struct fc_bsg_reply) +
		    sizeof(response) + sizeof(uint8_t);
		bsg_job->reply->reply_payload_rcv_len =
		    bsg_job->reply_payload.payload_len;
		fw_sts_ptr = ((uint8_t *)bsg_job->req->sense) +
		    sizeof(struct fc_bsg_reply);
		memcpy(fw_sts_ptr, response, sizeof(response));
		fw_sts_ptr += sizeof(response);
		*fw_sts_ptr = command_sent;
		bsg_job->reply->result = DID_OK;
		sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
		    bsg_job->reply_payload.sg_cnt, rsp_data,
		    rsp_data_len);
	}
	bsg_job->job_done(bsg_job);

	/* Success path falls through every cleanup label below, since
	 * all resources were acquired by this point.
	 */
	dma_free_coherent(&ha->pdev->dev, rsp_data_len,
	    rsp_data, rsp_data_dma);
done_free_dma_req:
	dma_free_coherent(&ha->pdev->dev, req_data_len,
	    req_data, req_data_dma);
done_unmap_sg:
	dma_unmap_sg(&ha->pdev->dev,
	    bsg_job->reply_payload.sg_list,
	    bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
done_unmap_req_sg:
	dma_unmap_sg(&ha->pdev->dev,
	    bsg_job->request_payload.sg_list,
	    bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
	return rval;
}
640
641static int
642qla84xx_reset(struct fc_bsg_job *bsg_job)
643{
644 struct Scsi_Host *host = bsg_job->shost;
645 scsi_qla_host_t *vha = shost_priv(host);
646 struct qla_hw_data *ha = vha->hw;
647 int rval = 0;
648 uint32_t flag;
649
650 if (test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags) ||
651 test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags) ||
652 test_bit(ISP_ABORT_RETRY, &vha->dpc_flags))
653 return -EBUSY;
654
655 if (!IS_QLA84XX(ha)) {
656 DEBUG2(qla_printk(KERN_WARNING, ha, "scsi(%ld): Not 84xx, "
657 "exiting.\n", vha->host_no));
658 return -EINVAL;
659 }
660
661 flag = bsg_job->request->rqst_data.h_vendor.vendor_cmd[1];
662
663 rval = qla84xx_reset_chip(vha, flag == A84_ISSUE_RESET_DIAG_FW);
664
665 if (rval) {
666 DEBUG2(qla_printk(KERN_WARNING, ha, "scsi(%ld) Vendor "
667 "request 84xx reset failed\n", vha->host_no));
668 rval = bsg_job->reply->reply_payload_rcv_len = 0;
669 bsg_job->reply->result = (DID_ERROR << 16);
670
671 } else {
672 DEBUG2(qla_printk(KERN_WARNING, ha, "scsi(%ld) Vendor "
673 "request 84xx reset completed\n", vha->host_no));
674 bsg_job->reply->result = DID_OK;
675 }
676
677 bsg_job->job_done(bsg_job);
678 return rval;
679}
680
/*
 * qla84xx_updatefw() - download new firmware to an ISP84xx adapter.
 *
 * The firmware image arrives in the BSG request payload; it is staged
 * in a DMA-coherent buffer and handed to the chip via a VERIFY_CHIP
 * IOCB.  Returns 0 on completion (firmware status is reported through
 * the BSG reply) or a negative errno on setup failure.
 */
static int
qla84xx_updatefw(struct fc_bsg_job *bsg_job)
{
	struct Scsi_Host *host = bsg_job->shost;
	scsi_qla_host_t *vha = shost_priv(host);
	struct qla_hw_data *ha = vha->hw;
	struct verify_chip_entry_84xx *mn = NULL;
	dma_addr_t mn_dma, fw_dma;
	void *fw_buf = NULL;
	int rval = 0;
	uint32_t sg_cnt;
	uint32_t data_len;
	uint16_t options;
	uint32_t flag;
	uint32_t fw_ver;

	/* Refuse while an ISP abort/recovery is pending or active. */
	if (test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags) ||
	    test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags) ||
	    test_bit(ISP_ABORT_RETRY, &vha->dpc_flags))
		return -EBUSY;

	if (!IS_QLA84XX(ha)) {
		DEBUG2(qla_printk(KERN_WARNING, ha, "scsi(%ld): Not 84xx, "
		    "exiting.\n", vha->host_no));
		return -EINVAL;
	}

	sg_cnt = dma_map_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list,
	    bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
	if (!sg_cnt)
		return -ENOMEM;

	if (sg_cnt != bsg_job->request_payload.sg_cnt) {
		DEBUG2(printk(KERN_INFO
		    "dma mapping resulted in different sg counts "
		    "request_sg_cnt: %x dma_request_sg_cnt: %x ",
		    bsg_job->request_payload.sg_cnt, sg_cnt));
		rval = -EAGAIN;
		goto done_unmap_sg;
	}

	data_len = bsg_job->request_payload.payload_len;
	fw_buf = dma_alloc_coherent(&ha->pdev->dev, data_len,
	    &fw_dma, GFP_KERNEL);
	if (!fw_buf) {
		DEBUG2(printk(KERN_ERR "%s: dma alloc for fw_buf "
		    "failed for host=%lu\n", __func__, vha->host_no));
		rval = -ENOMEM;
		goto done_unmap_sg;
	}

	/* Stage the complete firmware image in the coherent buffer. */
	sg_copy_to_buffer(bsg_job->request_payload.sg_list,
	    bsg_job->request_payload.sg_cnt, fw_buf, data_len);

	mn = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &mn_dma);
	if (!mn) {
		DEBUG2(printk(KERN_ERR "%s: dma alloc for fw buffer "
		    "failed for host=%lu\n", __func__, vha->host_no));
		rval = -ENOMEM;
		goto done_free_fw_buf;
	}

	flag = bsg_job->request->rqst_data.h_vendor.vendor_cmd[1];
	/* NOTE(review): firmware version appears to live at dword offset
	 * 2 of the image -- confirm against the 84xx image layout.
	 */
	fw_ver = le32_to_cpu(*((uint32_t *)((uint32_t *)fw_buf + 2)));

	memset(mn, 0, sizeof(struct access_chip_84xx));
	mn->entry_type = VERIFY_CHIP_IOCB_TYPE;
	mn->entry_count = 1;

	options = VCO_FORCE_UPDATE | VCO_END_OF_DATA;
	if (flag == A84_ISSUE_UPDATE_DIAGFW_CMD)
		options |= VCO_DIAG_FW;

	mn->options = cpu_to_le16(options);
	mn->fw_ver = cpu_to_le32(fw_ver);
	mn->fw_size = cpu_to_le32(data_len);
	mn->fw_seq_size = cpu_to_le32(data_len);
	mn->dseg_address[0] = cpu_to_le32(LSD(fw_dma));
	mn->dseg_address[1] = cpu_to_le32(MSD(fw_dma));
	mn->dseg_length = cpu_to_le32(data_len);
	mn->data_seg_cnt = cpu_to_le16(1);

	/* Flash/verify can take a while: allow up to 120 seconds. */
	rval = qla2x00_issue_iocb_timeout(vha, mn, mn_dma, 0, 120);

	if (rval) {
		DEBUG2(qla_printk(KERN_WARNING, ha, "scsi(%ld) Vendor "
		    "request 84xx updatefw failed\n", vha->host_no));

		/* Firmware failure is reported via the reply result. */
		rval = bsg_job->reply->reply_payload_rcv_len = 0;
		bsg_job->reply->result = (DID_ERROR << 16);

	} else {
		DEBUG2(qla_printk(KERN_WARNING, ha, "scsi(%ld) Vendor "
		    "request 84xx updatefw completed\n", vha->host_no));

		bsg_job->reply_len = sizeof(struct fc_bsg_reply);
		bsg_job->reply->result = DID_OK;
	}

	bsg_job->job_done(bsg_job);
	dma_pool_free(ha->s_dma_pool, mn, mn_dma);

done_free_fw_buf:
	dma_free_coherent(&ha->pdev->dev, data_len, fw_buf, fw_dma);

done_unmap_sg:
	dma_unmap_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list,
	    bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);

	return rval;
}
792
/*
 * qla84xx_mgmt_cmd() - ISP84xx management pass-through: read or write
 * chip memory, query chip info, or change a configuration parameter,
 * all via an ACCESS_CHIP IOCB.
 *
 * Returns 0 on completion (chip status is reported through the BSG
 * reply) or a negative errno on setup failure.
 */
static int
qla84xx_mgmt_cmd(struct fc_bsg_job *bsg_job)
{
	struct Scsi_Host *host = bsg_job->shost;
	scsi_qla_host_t *vha = shost_priv(host);
	struct qla_hw_data *ha = vha->hw;
	struct access_chip_84xx *mn = NULL;
	dma_addr_t mn_dma, mgmt_dma;
	void *mgmt_b = NULL;
	int rval = 0;
	struct qla_bsg_a84_mgmt *ql84_mgmt;
	uint32_t sg_cnt;
	uint32_t data_len = 0;
	/* Tracks which payload (if any) was mapped, for cleanup. */
	uint32_t dma_direction = DMA_NONE;

	/* Refuse while an ISP abort/recovery is pending or active. */
	if (test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags) ||
	    test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags) ||
	    test_bit(ISP_ABORT_RETRY, &vha->dpc_flags))
		return -EBUSY;

	if (!IS_QLA84XX(ha)) {
		DEBUG2(qla_printk(KERN_WARNING, ha, "scsi(%ld): Not 84xx, "
		    "exiting.\n", vha->host_no));
		return -EINVAL;
	}

	/* The vendor management header follows the fc_bsg_request. */
	ql84_mgmt = (struct qla_bsg_a84_mgmt *)((char *)bsg_job->request +
	    sizeof(struct fc_bsg_request));
	if (!ql84_mgmt) {
		DEBUG2(printk("%s(%ld): mgmt header not provided, exiting.\n",
		    __func__, vha->host_no));
		return -EINVAL;
	}

	mn = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &mn_dma);
	if (!mn) {
		DEBUG2(printk(KERN_ERR "%s: dma alloc for fw buffer "
		    "failed for host=%lu\n", __func__, vha->host_no));
		return -ENOMEM;
	}

	memset(mn, 0, sizeof(struct access_chip_84xx));
	mn->entry_type = ACCESS_CHIP_IOCB_TYPE;
	mn->entry_count = 1;

	switch (ql84_mgmt->mgmt.cmd) {
	case QLA84_MGMT_READ_MEM:
	case QLA84_MGMT_GET_INFO:
		/* Chip-to-host transfer: map the reply payload. */
		sg_cnt = dma_map_sg(&ha->pdev->dev,
		    bsg_job->reply_payload.sg_list,
		    bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
		if (!sg_cnt) {
			rval = -ENOMEM;
			goto exit_mgmt;
		}

		dma_direction = DMA_FROM_DEVICE;

		if (sg_cnt != bsg_job->reply_payload.sg_cnt) {
			DEBUG2(printk(KERN_INFO
			    "dma mapping resulted in different sg counts "
			    "reply_sg_cnt: %x dma_reply_sg_cnt: %x\n",
			    bsg_job->reply_payload.sg_cnt, sg_cnt));
			rval = -EAGAIN;
			goto done_unmap_sg;
		}

		data_len = bsg_job->reply_payload.payload_len;

		/* Bounce buffer the chip DMAs into. */
		mgmt_b = dma_alloc_coherent(&ha->pdev->dev, data_len,
		    &mgmt_dma, GFP_KERNEL);
		if (!mgmt_b) {
			DEBUG2(printk(KERN_ERR "%s: dma alloc for mgmt_b "
			    "failed for host=%lu\n",
			    __func__, vha->host_no));
			rval = -ENOMEM;
			goto done_unmap_sg;
		}

		if (ql84_mgmt->mgmt.cmd == QLA84_MGMT_READ_MEM) {
			mn->options = cpu_to_le16(ACO_DUMP_MEMORY);
			mn->parameter1 =
				cpu_to_le32(
				ql84_mgmt->mgmt.mgmtp.u.mem.start_addr);

		} else if (ql84_mgmt->mgmt.cmd == QLA84_MGMT_GET_INFO) {
			mn->options = cpu_to_le16(ACO_REQUEST_INFO);
			mn->parameter1 =
				cpu_to_le32(ql84_mgmt->mgmt.mgmtp.u.info.type);

			mn->parameter2 =
				cpu_to_le32(
				ql84_mgmt->mgmt.mgmtp.u.info.context);
		}
		break;

	case QLA84_MGMT_WRITE_MEM:
		/* Host-to-chip transfer: map the request payload. */
		sg_cnt = dma_map_sg(&ha->pdev->dev,
		    bsg_job->request_payload.sg_list,
		    bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);

		if (!sg_cnt) {
			rval = -ENOMEM;
			goto exit_mgmt;
		}

		dma_direction = DMA_TO_DEVICE;

		if (sg_cnt != bsg_job->request_payload.sg_cnt) {
			DEBUG2(printk(KERN_INFO
			    "dma mapping resulted in different sg counts "
			    "request_sg_cnt: %x dma_request_sg_cnt: %x ",
			    bsg_job->request_payload.sg_cnt, sg_cnt));
			rval = -EAGAIN;
			goto done_unmap_sg;
		}

		data_len = bsg_job->request_payload.payload_len;
		mgmt_b = dma_alloc_coherent(&ha->pdev->dev, data_len,
		    &mgmt_dma, GFP_KERNEL);
		if (!mgmt_b) {
			DEBUG2(printk(KERN_ERR "%s: dma alloc for mgmt_b "
			    "failed for host=%lu\n",
			    __func__, vha->host_no));
			rval = -ENOMEM;
			goto done_unmap_sg;
		}

		sg_copy_to_buffer(bsg_job->request_payload.sg_list,
		    bsg_job->request_payload.sg_cnt, mgmt_b, data_len);

		mn->options = cpu_to_le16(ACO_LOAD_MEMORY);
		mn->parameter1 =
			cpu_to_le32(ql84_mgmt->mgmt.mgmtp.u.mem.start_addr);
		break;

	case QLA84_MGMT_CHNG_CONFIG:
		/* No data phase; parameters ride inside the IOCB. */
		mn->options = cpu_to_le16(ACO_CHANGE_CONFIG_PARAM);
		mn->parameter1 =
			cpu_to_le32(ql84_mgmt->mgmt.mgmtp.u.config.id);

		mn->parameter2 =
			cpu_to_le32(ql84_mgmt->mgmt.mgmtp.u.config.param0);

		mn->parameter3 =
			cpu_to_le32(ql84_mgmt->mgmt.mgmtp.u.config.param1);
		break;

	default:
		rval = -EIO;
		goto exit_mgmt;
	}

	/* All commands except CHNG_CONFIG carry one data segment. */
	if (ql84_mgmt->mgmt.cmd != QLA84_MGMT_CHNG_CONFIG) {
		mn->total_byte_cnt = cpu_to_le32(ql84_mgmt->mgmt.len);
		mn->dseg_count = cpu_to_le16(1);
		mn->dseg_address[0] = cpu_to_le32(LSD(mgmt_dma));
		mn->dseg_address[1] = cpu_to_le32(MSD(mgmt_dma));
		mn->dseg_length = cpu_to_le32(ql84_mgmt->mgmt.len);
	}

	rval = qla2x00_issue_iocb(vha, mn, mn_dma, 0);

	if (rval) {
		DEBUG2(qla_printk(KERN_WARNING, ha, "scsi(%ld) Vendor "
		    "request 84xx mgmt failed\n", vha->host_no));

		/* Chip failure is reported via the BSG reply result. */
		rval = bsg_job->reply->reply_payload_rcv_len = 0;
		bsg_job->reply->result = (DID_ERROR << 16);

	} else {
		DEBUG2(qla_printk(KERN_WARNING, ha, "scsi(%ld) Vendor "
		    "request 84xx mgmt completed\n", vha->host_no));

		bsg_job->reply_len = sizeof(struct fc_bsg_reply);
		bsg_job->reply->result = DID_OK;

		if ((ql84_mgmt->mgmt.cmd == QLA84_MGMT_READ_MEM) ||
		    (ql84_mgmt->mgmt.cmd == QLA84_MGMT_GET_INFO)) {
			bsg_job->reply->reply_payload_rcv_len =
				bsg_job->reply_payload.payload_len;

			sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
			    bsg_job->reply_payload.sg_cnt, mgmt_b,
			    data_len);
		}
	}

	bsg_job->job_done(bsg_job);

done_unmap_sg:
	if (mgmt_b)
		dma_free_coherent(&ha->pdev->dev, data_len, mgmt_b, mgmt_dma);

	if (dma_direction == DMA_TO_DEVICE)
		dma_unmap_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list,
			bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
	else if (dma_direction == DMA_FROM_DEVICE)
		dma_unmap_sg(&ha->pdev->dev, bsg_job->reply_payload.sg_list,
			bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);

exit_mgmt:
	dma_pool_free(ha->s_dma_pool, mn, mn_dma);

	return rval;
}
999
1000static int
1001qla24xx_iidma(struct fc_bsg_job *bsg_job)
1002{
1003 struct Scsi_Host *host = bsg_job->shost;
1004 scsi_qla_host_t *vha = shost_priv(host);
1005 struct qla_hw_data *ha = vha->hw;
1006 int rval = 0;
1007 struct qla_port_param *port_param = NULL;
1008 fc_port_t *fcport = NULL;
1009 uint16_t mb[MAILBOX_REGISTER_COUNT];
1010 uint8_t *rsp_ptr = NULL;
1011
1012 bsg_job->reply->reply_payload_rcv_len = 0;
1013
1014 if (test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags) ||
1015 test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags) ||
1016 test_bit(ISP_ABORT_RETRY, &vha->dpc_flags))
1017 return -EBUSY;
1018
1019 if (!IS_IIDMA_CAPABLE(vha->hw)) {
1020 DEBUG2(qla_printk(KERN_WARNING, ha, "%s(%lu): iiDMA not "
1021 "supported\n", __func__, vha->host_no));
1022 return -EINVAL;
1023 }
1024
1025 port_param = (struct qla_port_param *)((char *)bsg_job->request +
1026 sizeof(struct fc_bsg_request));
1027 if (!port_param) {
1028 DEBUG2(printk("%s(%ld): port_param header not provided, "
1029 "exiting.\n", __func__, vha->host_no));
1030 return -EINVAL;
1031 }
1032
1033 if (port_param->fc_scsi_addr.dest_type != EXT_DEF_TYPE_WWPN) {
1034 DEBUG2(printk(KERN_ERR "%s(%ld): Invalid destination type\n",
1035 __func__, vha->host_no));
1036 return -EINVAL;
1037 }
1038
1039 list_for_each_entry(fcport, &vha->vp_fcports, list) {
1040 if (fcport->port_type != FCT_TARGET)
1041 continue;
1042
1043 if (memcmp(port_param->fc_scsi_addr.dest_addr.wwpn,
1044 fcport->port_name, sizeof(fcport->port_name)))
1045 continue;
1046 break;
1047 }
1048
1049 if (!fcport) {
1050 DEBUG2(printk(KERN_ERR "%s(%ld): Failed to find port\n",
1051 __func__, vha->host_no));
1052 return -EINVAL;
1053 }
1054
1055 if (port_param->mode)
1056 rval = qla2x00_set_idma_speed(vha, fcport->loop_id,
1057 port_param->speed, mb);
1058 else
1059 rval = qla2x00_get_idma_speed(vha, fcport->loop_id,
1060 &port_param->speed, mb);
1061
1062 if (rval) {
1063 DEBUG16(printk(KERN_ERR "scsi(%ld): iIDMA cmd failed for "
6c452a45
AV
1064 "%02x%02x%02x%02x%02x%02x%02x%02x -- "
1065 "%04x %x %04x %04x.\n",
6e98016c
GM
1066 vha->host_no, fcport->port_name[0],
1067 fcport->port_name[1],
1068 fcport->port_name[2], fcport->port_name[3],
1069 fcport->port_name[4], fcport->port_name[5],
1070 fcport->port_name[6], fcport->port_name[7], rval,
1071 fcport->fp_speed, mb[0], mb[1]));
1072 rval = 0;
1073 bsg_job->reply->result = (DID_ERROR << 16);
1074
1075 } else {
1076 if (!port_param->mode) {
1077 bsg_job->reply_len = sizeof(struct fc_bsg_reply) +
1078 sizeof(struct qla_port_param);
1079
1080 rsp_ptr = ((uint8_t *)bsg_job->reply) +
1081 sizeof(struct fc_bsg_reply);
1082
1083 memcpy(rsp_ptr, port_param,
1084 sizeof(struct qla_port_param));
1085 }
1086
1087 bsg_job->reply->result = DID_OK;
1088 }
1089
1090 bsg_job->job_done(bsg_job);
1091 return rval;
1092}
1093
1094static int
1095qla2x00_process_vendor_specific(struct fc_bsg_job *bsg_job)
1096{
1097 switch (bsg_job->request->rqst_data.h_vendor.vendor_cmd[0]) {
1098 case QL_VND_LOOPBACK:
1099 return qla2x00_process_loopback(bsg_job);
1100
1101 case QL_VND_A84_RESET:
1102 return qla84xx_reset(bsg_job);
1103
1104 case QL_VND_A84_UPDATE_FW:
1105 return qla84xx_updatefw(bsg_job);
1106
1107 case QL_VND_A84_MGMT_CMD:
1108 return qla84xx_mgmt_cmd(bsg_job);
1109
1110 case QL_VND_IIDMA:
1111 return qla24xx_iidma(bsg_job);
1112
09ff701a
SR
1113 case QL_VND_FCP_PRIO_CFG_CMD:
1114 return qla24xx_proc_fcp_prio_cfg_cmd(bsg_job);
1115
6e98016c
GM
1116 default:
1117 bsg_job->reply->result = (DID_ERROR << 16);
1118 bsg_job->job_done(bsg_job);
1119 return -ENOSYS;
1120 }
1121}
1122
1123int
1124qla24xx_bsg_request(struct fc_bsg_job *bsg_job)
1125{
1126 int ret = -EINVAL;
1127
1128 switch (bsg_job->request->msgcode) {
1129 case FC_BSG_RPT_ELS:
1130 case FC_BSG_HST_ELS_NOLOGIN:
1131 ret = qla2x00_process_els(bsg_job);
1132 break;
1133 case FC_BSG_HST_CT:
1134 ret = qla2x00_process_ct(bsg_job);
1135 break;
1136 case FC_BSG_HST_VENDOR:
1137 ret = qla2x00_process_vendor_specific(bsg_job);
1138 break;
1139 case FC_BSG_HST_ADD_RPORT:
1140 case FC_BSG_HST_DEL_RPORT:
1141 case FC_BSG_RPT_CT:
1142 default:
1143 DEBUG2(printk("qla2xxx: unsupported BSG request\n"));
1144 break;
6c452a45 1145 }
6e98016c
GM
1146 return ret;
1147}
1148
1149int
1150qla24xx_bsg_timeout(struct fc_bsg_job *bsg_job)
1151{
1152 scsi_qla_host_t *vha = shost_priv(bsg_job->shost);
1153 struct qla_hw_data *ha = vha->hw;
1154 srb_t *sp;
1155 int cnt, que;
1156 unsigned long flags;
1157 struct req_que *req;
1158 struct srb_bsg *sp_bsg;
1159
1160 /* find the bsg job from the active list of commands */
1161 spin_lock_irqsave(&ha->hardware_lock, flags);
1162 for (que = 0; que < ha->max_req_queues; que++) {
1163 req = ha->req_q_map[que];
1164 if (!req)
1165 continue;
1166
6c452a45 1167 for (cnt = 1; cnt < MAX_OUTSTANDING_COMMANDS; cnt++) {
6e98016c 1168 sp = req->outstanding_cmds[cnt];
6e98016c 1169 if (sp) {
6c452a45 1170 sp_bsg = (struct srb_bsg *)sp->ctx;
6e98016c
GM
1171
1172 if (((sp_bsg->ctx.type == SRB_CT_CMD) ||
1173 (sp_bsg->ctx.type == SRB_ELS_CMD_HST))
1174 && (sp_bsg->bsg_job == bsg_job)) {
1175 if (ha->isp_ops->abort_command(sp)) {
1176 DEBUG2(qla_printk(KERN_INFO, ha,
6c452a45
AV
1177 "scsi(%ld): mbx "
1178 "abort_command failed\n",
1179 vha->host_no));
6e98016c
GM
1180 bsg_job->req->errors =
1181 bsg_job->reply->result = -EIO;
1182 } else {
1183 DEBUG2(qla_printk(KERN_INFO, ha,
6c452a45
AV
1184 "scsi(%ld): mbx "
1185 "abort_command success\n",
1186 vha->host_no));
6e98016c
GM
1187 bsg_job->req->errors =
1188 bsg_job->reply->result = 0;
1189 }
1190 goto done;
1191 }
1192 }
1193 }
1194 }
1195 spin_unlock_irqrestore(&ha->hardware_lock, flags);
1196 DEBUG2(qla_printk(KERN_INFO, ha,
1197 "scsi(%ld) SRB not found to abort\n", vha->host_no));
1198 bsg_job->req->errors = bsg_job->reply->result = -ENXIO;
1199 return 0;
1200
1201done:
1202 spin_unlock_irqrestore(&ha->hardware_lock, flags);
1203 if (bsg_job->request->msgcode == FC_BSG_HST_CT)
1204 kfree(sp->fcport);
1205 kfree(sp->ctx);
1206 mempool_free(sp, ha->srb_mempool);
1207 return 0;
1208}