drivers/scsi/qla2xxx/qla_bsg.c
1 /*
2 * QLogic Fibre Channel HBA Driver
3 * Copyright (c) 2003-2014 QLogic Corporation
4 *
5 * See LICENSE.qla2xxx for copyright and licensing details.
6 */
7 #include "qla_def.h"
8
9 #include <linux/kthread.h>
10 #include <linux/vmalloc.h>
11 #include <linux/delay.h>
12 #include <linux/bsg-lib.h>
13
14 /* BSG support for ELS/CT pass through */
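/*
 * Each pass-through request is wrapped in an srb: sp->done
 * (qla2x00_bsg_job_done) posts the result back through bsg_job_done()
 * and then releases the srb via sp->free (qla2x00_bsg_sp_free), which
 * tears down the DMA mappings and frees any dummy fcport that was
 * allocated for a host-based command.
 */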
15 void
16 qla2x00_bsg_job_done(void *data, void *ptr, int res)
17 {
18 srb_t *sp = (srb_t *)ptr;
19 struct scsi_qla_host *vha = (scsi_qla_host_t *)data;
20 struct bsg_job *bsg_job = sp->u.bsg_job;
21 struct fc_bsg_reply *bsg_reply = bsg_job->reply;
22
23 bsg_reply->result = res;
24 bsg_job_done(bsg_job, bsg_reply->result,
25 bsg_reply->reply_payload_rcv_len);
26 sp->free(vha, sp);
27 }
28
29 void
30 qla2x00_bsg_sp_free(void *data, void *ptr)
31 {
32 srb_t *sp = (srb_t *)ptr;
33 struct scsi_qla_host *vha = sp->fcport->vha;
34 struct bsg_job *bsg_job = sp->u.bsg_job;
35 struct fc_bsg_request *bsg_request = bsg_job->request;
36
37 struct qla_hw_data *ha = vha->hw;
38 struct qla_mt_iocb_rqst_fx00 *piocb_rqst;
39
40 if (sp->type == SRB_FXIOCB_BCMD) {
41 piocb_rqst = (struct qla_mt_iocb_rqst_fx00 *)
42 &bsg_request->rqst_data.h_vendor.vendor_cmd[1];
43
44 if (piocb_rqst->flags & SRB_FXDISC_REQ_DMA_VALID)
45 dma_unmap_sg(&ha->pdev->dev,
46 bsg_job->request_payload.sg_list,
47 bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
48
49 if (piocb_rqst->flags & SRB_FXDISC_RESP_DMA_VALID)
50 dma_unmap_sg(&ha->pdev->dev,
51 bsg_job->reply_payload.sg_list,
52 bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
53 } else {
54 dma_unmap_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list,
55 bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
56
57 dma_unmap_sg(&ha->pdev->dev, bsg_job->reply_payload.sg_list,
58 bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
59 }
60
61 if (sp->type == SRB_CT_CMD ||
62 sp->type == SRB_FXIOCB_BCMD ||
63 sp->type == SRB_ELS_CMD_HST)
64 kfree(sp->fcport);
65 qla2x00_rel_sp(vha, sp);
66 }
67
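/*
 * Validate FCP priority configuration data (read from flash or
 * supplied by the caller). Returns 1 when the data is usable and 0
 * otherwise; with flag != 1 only the "HQOS" signature is checked,
 * while flag == 1 additionally requires at least one entry tagged
 * FCP_PRIO_ENTRY_TAG_VALID.
 */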
68 int
69 qla24xx_fcp_prio_cfg_valid(scsi_qla_host_t *vha,
70 struct qla_fcp_prio_cfg *pri_cfg, uint8_t flag)
71 {
72 int i, ret, num_valid;
73 uint8_t *bcode;
74 struct qla_fcp_prio_entry *pri_entry;
75 uint32_t *bcode_val_ptr, bcode_val;
76
77 ret = 1;
78 num_valid = 0;
79 bcode = (uint8_t *)pri_cfg;
80 bcode_val_ptr = (uint32_t *)pri_cfg;
81 bcode_val = (uint32_t)(*bcode_val_ptr);
82
83 if (bcode_val == 0xFFFFFFFF) {
84 /* No FCP Priority config data in flash */
85 ql_dbg(ql_dbg_user, vha, 0x7051,
86 "No FCP Priority config data.\n");
87 return 0;
88 }
89
90 if (bcode[0] != 'H' || bcode[1] != 'Q' || bcode[2] != 'O' ||
91 bcode[3] != 'S') {
92 /* Invalid FCP priority data header*/
93 ql_dbg(ql_dbg_user, vha, 0x7052,
94 "Invalid FCP Priority data header. bcode=0x%x.\n",
95 bcode_val);
96 return 0;
97 }
98 if (flag != 1)
99 return ret;
100
101 pri_entry = &pri_cfg->entry[0];
102 for (i = 0; i < pri_cfg->num_entries; i++) {
103 if (pri_entry->flags & FCP_PRIO_ENTRY_TAG_VALID)
104 num_valid++;
105 pri_entry++;
106 }
107
108 if (num_valid == 0) {
109 /* No valid FCP priority data entries */
110 ql_dbg(ql_dbg_user, vha, 0x7053,
111 "No valid FCP Priority data entries.\n");
112 ret = 0;
113 } else {
114 /* FCP priority data is valid */
115 ql_dbg(ql_dbg_user, vha, 0x7054,
116 "Valid FCP priority data. num entries = %d.\n",
117 num_valid);
118 }
119
120 return ret;
121 }
122
123 static int
124 qla24xx_proc_fcp_prio_cfg_cmd(struct bsg_job *bsg_job)
125 {
126 struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
127 struct fc_bsg_request *bsg_request = bsg_job->request;
128 struct fc_bsg_reply *bsg_reply = bsg_job->reply;
129 scsi_qla_host_t *vha = shost_priv(host);
130 struct qla_hw_data *ha = vha->hw;
131 int ret = 0;
132 uint32_t len;
133 uint32_t oper;
134
135 if (!(IS_QLA24XX_TYPE(ha) || IS_QLA25XX(ha) || IS_P3P_TYPE(ha))) {
136 ret = -EINVAL;
137 goto exit_fcp_prio_cfg;
138 }
139
140 /* Get the sub command */
141 oper = bsg_request->rqst_data.h_vendor.vendor_cmd[1];
142
143 /* Only set config is allowed if config memory is not allocated */
144 if (!ha->fcp_prio_cfg && (oper != QLFC_FCP_PRIO_SET_CONFIG)) {
145 ret = -EINVAL;
146 goto exit_fcp_prio_cfg;
147 }
148 switch (oper) {
149 case QLFC_FCP_PRIO_DISABLE:
150 if (ha->flags.fcp_prio_enabled) {
151 ha->flags.fcp_prio_enabled = 0;
152 ha->fcp_prio_cfg->attributes &=
153 ~FCP_PRIO_ATTR_ENABLE;
154 qla24xx_update_all_fcp_prio(vha);
155 bsg_reply->result = DID_OK;
156 } else {
157 ret = -EINVAL;
158 bsg_reply->result = (DID_ERROR << 16);
159 goto exit_fcp_prio_cfg;
160 }
161 break;
162
163 case QLFC_FCP_PRIO_ENABLE:
164 if (!ha->flags.fcp_prio_enabled) {
165 if (ha->fcp_prio_cfg) {
166 ha->flags.fcp_prio_enabled = 1;
167 ha->fcp_prio_cfg->attributes |=
168 FCP_PRIO_ATTR_ENABLE;
169 qla24xx_update_all_fcp_prio(vha);
170 bsg_reply->result = DID_OK;
171 } else {
172 ret = -EINVAL;
173 bsg_reply->result = (DID_ERROR << 16);
174 goto exit_fcp_prio_cfg;
175 }
176 }
177 break;
178
179 case QLFC_FCP_PRIO_GET_CONFIG:
180 len = bsg_job->reply_payload.payload_len;
181 if (!len || len > FCP_PRIO_CFG_SIZE) {
182 ret = -EINVAL;
183 bsg_reply->result = (DID_ERROR << 16);
184 goto exit_fcp_prio_cfg;
185 }
186
187 bsg_reply->result = DID_OK;
188 bsg_reply->reply_payload_rcv_len =
189 sg_copy_from_buffer(
190 bsg_job->reply_payload.sg_list,
191 bsg_job->reply_payload.sg_cnt, ha->fcp_prio_cfg,
192 len);
193
194 break;
195
196 case QLFC_FCP_PRIO_SET_CONFIG:
197 len = bsg_job->request_payload.payload_len;
198 if (!len || len > FCP_PRIO_CFG_SIZE) {
199 bsg_reply->result = (DID_ERROR << 16);
200 ret = -EINVAL;
201 goto exit_fcp_prio_cfg;
202 }
203
204 if (!ha->fcp_prio_cfg) {
205 ha->fcp_prio_cfg = vmalloc(FCP_PRIO_CFG_SIZE);
206 if (!ha->fcp_prio_cfg) {
207 ql_log(ql_log_warn, vha, 0x7050,
208 "Unable to allocate memory for fcp prio "
209 "config data (%x).\n", FCP_PRIO_CFG_SIZE);
210 bsg_reply->result = (DID_ERROR << 16);
211 ret = -ENOMEM;
212 goto exit_fcp_prio_cfg;
213 }
214 }
215
216 memset(ha->fcp_prio_cfg, 0, FCP_PRIO_CFG_SIZE);
217 sg_copy_to_buffer(bsg_job->request_payload.sg_list,
218 bsg_job->request_payload.sg_cnt, ha->fcp_prio_cfg,
219 FCP_PRIO_CFG_SIZE);
220
221 /* validate fcp priority data */
222
223 if (!qla24xx_fcp_prio_cfg_valid(vha,
224 (struct qla_fcp_prio_cfg *) ha->fcp_prio_cfg, 1)) {
225 bsg_reply->result = (DID_ERROR << 16);
226 ret = -EINVAL;
227 /* If the buffer was invalid,
228 * fcp_prio_cfg is of no use
229 */
230 vfree(ha->fcp_prio_cfg);
231 ha->fcp_prio_cfg = NULL;
232 goto exit_fcp_prio_cfg;
233 }
234
235 ha->flags.fcp_prio_enabled = 0;
236 if (ha->fcp_prio_cfg->attributes & FCP_PRIO_ATTR_ENABLE)
237 ha->flags.fcp_prio_enabled = 1;
238 qla24xx_update_all_fcp_prio(vha);
239 bsg_reply->result = DID_OK;
240 break;
241 default:
242 ret = -EINVAL;
243 break;
244 }
245 exit_fcp_prio_cfg:
246 if (!ret)
247 bsg_job_done(bsg_job, bsg_reply->result,
248 bsg_reply->reply_payload_rcv_len);
249 return ret;
250 }
251
252 static int
253 qla2x00_process_els(struct bsg_job *bsg_job)
254 {
255 struct fc_bsg_request *bsg_request = bsg_job->request;
256 struct fc_rport *rport;
257 fc_port_t *fcport = NULL;
258 struct Scsi_Host *host;
259 scsi_qla_host_t *vha;
260 struct qla_hw_data *ha;
261 srb_t *sp;
262 const char *type;
263 int req_sg_cnt, rsp_sg_cnt;
264 int rval = (DRIVER_ERROR << 16);
265 uint16_t nextlid = 0;
266
267 if (bsg_request->msgcode == FC_BSG_RPT_ELS) {
268 rport = fc_bsg_to_rport(bsg_job);
269 fcport = *(fc_port_t **) rport->dd_data;
270 host = rport_to_shost(rport);
271 vha = shost_priv(host);
272 ha = vha->hw;
273 type = "FC_BSG_RPT_ELS";
274 } else {
275 host = fc_bsg_to_shost(bsg_job);
276 vha = shost_priv(host);
277 ha = vha->hw;
278 type = "FC_BSG_HST_ELS_NOLOGIN";
279 }
280
281 if (!vha->flags.online) {
282 ql_log(ql_log_warn, vha, 0x7005, "Host not online.\n");
283 rval = -EIO;
284 goto done;
285 }
286
287 /* pass through is supported only for ISP 4Gb or higher */
288 if (!IS_FWI2_CAPABLE(ha)) {
289 ql_dbg(ql_dbg_user, vha, 0x7001,
290 "ELS passthru not supported for ISP23xx based adapters.\n");
291 rval = -EPERM;
292 goto done;
293 }
294
295 /* Multiple SG's are not supported for ELS requests */
296 if (bsg_job->request_payload.sg_cnt > 1 ||
297 bsg_job->reply_payload.sg_cnt > 1) {
298 ql_dbg(ql_dbg_user, vha, 0x7002,
299 "Multiple SG's are not supported for ELS requests, "
300 "request_sg_cnt=%x reply_sg_cnt=%x.\n",
301 bsg_job->request_payload.sg_cnt,
302 bsg_job->reply_payload.sg_cnt);
303 rval = -EPERM;
304 goto done;
305 }
306
307 /* ELS request for rport */
308 if (bsg_request->msgcode == FC_BSG_RPT_ELS) {
309 /* make sure the rport is logged in,
310 * if not perform fabric login
311 */
312 if (qla2x00_fabric_login(vha, fcport, &nextlid)) {
313 ql_dbg(ql_dbg_user, vha, 0x7003,
314 "Failed to login port %06X for ELS passthru.\n",
315 fcport->d_id.b24);
316 rval = -EIO;
317 goto done;
318 }
319 } else {
320 /* Allocate a dummy fcport structure, since the functions
321 * preparing the IOCB and mailbox command retrieve port
322 * specific information from the fcport structure. For host
323 * based ELS commands no fcport structure is allocated.
324 */
325 fcport = qla2x00_alloc_fcport(vha, GFP_KERNEL);
326 if (!fcport) {
327 rval = -ENOMEM;
328 goto done;
329 }
330
331 /* Initialize all required fields of fcport */
332 fcport->vha = vha;
333 fcport->d_id.b.al_pa =
334 bsg_request->rqst_data.h_els.port_id[0];
335 fcport->d_id.b.area =
336 bsg_request->rqst_data.h_els.port_id[1];
337 fcport->d_id.b.domain =
338 bsg_request->rqst_data.h_els.port_id[2];
339 fcport->loop_id =
340 (fcport->d_id.b.al_pa == 0xFD) ?
341 NPH_FABRIC_CONTROLLER : NPH_F_PORT;
342 }
343
344 req_sg_cnt =
345 dma_map_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list,
346 bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
347 if (!req_sg_cnt) {
348 rval = -ENOMEM;
349 goto done_free_fcport;
350 }
351
352 rsp_sg_cnt = dma_map_sg(&ha->pdev->dev, bsg_job->reply_payload.sg_list,
353 bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
354 if (!rsp_sg_cnt) {
/* Reply mapping failed: undo the request mapping before bailing out. */
dma_unmap_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list,
bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
355 rval = -ENOMEM;
356 goto done_free_fcport;
357 }
358
359 if ((req_sg_cnt != bsg_job->request_payload.sg_cnt) ||
360 (rsp_sg_cnt != bsg_job->reply_payload.sg_cnt)) {
361 ql_log(ql_log_warn, vha, 0x7008,
362 "dma mapping resulted in different sg counts, "
363 "request_sg_cnt: %x dma_request_sg_cnt:%x reply_sg_cnt:%x "
364 "dma_reply_sg_cnt:%x.\n", bsg_job->request_payload.sg_cnt,
365 req_sg_cnt, bsg_job->reply_payload.sg_cnt, rsp_sg_cnt);
366 rval = -EAGAIN;
367 goto done_unmap_sg;
368 }
369
370 /* Alloc SRB structure */
371 sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
372 if (!sp) {
373 rval = -ENOMEM;
374 goto done_unmap_sg;
375 }
376
377 sp->type =
378 (bsg_request->msgcode == FC_BSG_RPT_ELS ?
379 SRB_ELS_CMD_RPT : SRB_ELS_CMD_HST);
380 sp->name =
381 (bsg_request->msgcode == FC_BSG_RPT_ELS ?
382 "bsg_els_rpt" : "bsg_els_hst");
383 sp->u.bsg_job = bsg_job;
384 sp->free = qla2x00_bsg_sp_free;
385 sp->done = qla2x00_bsg_job_done;
386
387 ql_dbg(ql_dbg_user, vha, 0x700a,
388 "bsg rqst type: %s els type: %x - loop-id=%x "
389 "portid=%-2x%02x%02x.\n", type,
390 bsg_request->rqst_data.h_els.command_code, fcport->loop_id,
391 fcport->d_id.b.domain, fcport->d_id.b.area, fcport->d_id.b.al_pa);
392
393 rval = qla2x00_start_sp(sp);
394 if (rval != QLA_SUCCESS) {
395 ql_log(ql_log_warn, vha, 0x700e,
396 "qla2x00_start_sp failed = %d\n", rval);
397 qla2x00_rel_sp(vha, sp);
398 rval = -EIO;
399 goto done_unmap_sg;
400 }
401 return rval;
402
403 done_unmap_sg:
404 dma_unmap_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list,
405 bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
406 dma_unmap_sg(&ha->pdev->dev, bsg_job->reply_payload.sg_list,
407 bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
408 goto done_free_fcport;
409
410 done_free_fcport:
411 if (bsg_request->msgcode == FC_BSG_RPT_ELS)
412 kfree(fcport);
413 done:
414 return rval;
415 }
416
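/*
 * A CT command IOCB carries two data segment descriptors and each
 * continuation IOCB carries five more, so e.g. 12 dsds need
 * 1 + (10 / 5) = 3 IOCBs, while 13 dsds need one extra continuation
 * for the remainder, i.e. 4 IOCBs.
 */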
417 static inline uint16_t
418 qla24xx_calc_ct_iocbs(uint16_t dsds)
419 {
420 uint16_t iocbs;
421
422 iocbs = 1;
423 if (dsds > 2) {
424 iocbs += (dsds - 2) / 5;
425 if ((dsds - 2) % 5)
426 iocbs++;
427 }
428 return iocbs;
429 }
430
431 static int
432 qla2x00_process_ct(struct bsg_job *bsg_job)
433 {
434 srb_t *sp;
435 struct fc_bsg_request *bsg_request = bsg_job->request;
436 struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
437 scsi_qla_host_t *vha = shost_priv(host);
438 struct qla_hw_data *ha = vha->hw;
439 int rval = (DRIVER_ERROR << 16);
440 int req_sg_cnt, rsp_sg_cnt;
441 uint16_t loop_id;
442 struct fc_port *fcport;
443 char *type = "FC_BSG_HST_CT";
444
445 req_sg_cnt =
446 dma_map_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list,
447 bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
448 if (!req_sg_cnt) {
449 ql_log(ql_log_warn, vha, 0x700f,
450 "dma_map_sg returned %d for request\n", req_sg_cnt);
451 rval = -ENOMEM;
452 goto done;
453 }
454
455 rsp_sg_cnt = dma_map_sg(&ha->pdev->dev, bsg_job->reply_payload.sg_list,
456 bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
457 if (!rsp_sg_cnt) {
458 ql_log(ql_log_warn, vha, 0x7010,
459 "dma_map_sg returned %d for reply\n", rsp_sg_cnt);
460 rval = -ENOMEM;
461 goto done;
462 }
463
464 if ((req_sg_cnt != bsg_job->request_payload.sg_cnt) ||
465 (rsp_sg_cnt != bsg_job->reply_payload.sg_cnt)) {
466 ql_log(ql_log_warn, vha, 0x7011,
467 "request_sg_cnt: %x dma_request_sg_cnt: %x reply_sg_cnt:%x "
468 "dma_reply_sg_cnt: %x\n", bsg_job->request_payload.sg_cnt,
469 req_sg_cnt, bsg_job->reply_payload.sg_cnt, rsp_sg_cnt);
470 rval = -EAGAIN;
471 goto done_unmap_sg;
472 }
473
474 if (!vha->flags.online) {
475 ql_log(ql_log_warn, vha, 0x7012,
476 "Host is not online.\n");
477 rval = -EIO;
478 goto done_unmap_sg;
479 }
480
481 loop_id =
482 (bsg_request->rqst_data.h_ct.preamble_word1 & 0xFF000000)
483 >> 24;
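/*
 * The routing byte from the CT preamble selects the destination
 * server: 0xFC is the directory (name) server, mapped to NPH_SNS,
 * and 0xFA is the FC-4 management server.
 */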
484 switch (loop_id) {
485 case 0xFC:
486 loop_id = cpu_to_le16(NPH_SNS);
487 break;
488 case 0xFA:
489 loop_id = vha->mgmt_svr_loop_id;
490 break;
491 default:
492 ql_dbg(ql_dbg_user, vha, 0x7013,
493 "Unknown loop id: %x.\n", loop_id);
494 rval = -EINVAL;
495 goto done_unmap_sg;
496 }
497
498 /* Allocate a dummy fcport structure, since the functions preparing
499 * the IOCB and mailbox command retrieve port specific information
500 * from the fcport structure. For host based CT commands no fcport
501 * structure is allocated.
502 */
503 fcport = qla2x00_alloc_fcport(vha, GFP_KERNEL);
504 if (!fcport) {
505 ql_log(ql_log_warn, vha, 0x7014,
506 "Failed to allocate fcport.\n");
507 rval = -ENOMEM;
508 goto done_unmap_sg;
509 }
510
511 /* Initialize all required fields of fcport */
512 fcport->vha = vha;
513 fcport->d_id.b.al_pa = bsg_request->rqst_data.h_ct.port_id[0];
514 fcport->d_id.b.area = bsg_request->rqst_data.h_ct.port_id[1];
515 fcport->d_id.b.domain = bsg_request->rqst_data.h_ct.port_id[2];
516 fcport->loop_id = loop_id;
517
518 /* Alloc SRB structure */
519 sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
520 if (!sp) {
521 ql_log(ql_log_warn, vha, 0x7015,
522 "qla2x00_get_sp failed.\n");
523 rval = -ENOMEM;
524 goto done_free_fcport;
525 }
526
527 sp->type = SRB_CT_CMD;
528 sp->name = "bsg_ct";
529 sp->iocbs = qla24xx_calc_ct_iocbs(req_sg_cnt + rsp_sg_cnt);
530 sp->u.bsg_job = bsg_job;
531 sp->free = qla2x00_bsg_sp_free;
532 sp->done = qla2x00_bsg_job_done;
533
534 ql_dbg(ql_dbg_user, vha, 0x7016,
535 "bsg rqst type: %s ct type: %x - "
536 "loop-id=%x portid=%02x%02x%02x.\n", type,
537 (bsg_request->rqst_data.h_ct.preamble_word2 >> 16),
538 fcport->loop_id, fcport->d_id.b.domain, fcport->d_id.b.area,
539 fcport->d_id.b.al_pa);
540
541 rval = qla2x00_start_sp(sp);
542 if (rval != QLA_SUCCESS) {
543 ql_log(ql_log_warn, vha, 0x7017,
544 "qla2x00_start_sp failed=%d.\n", rval);
545 qla2x00_rel_sp(vha, sp);
546 rval = -EIO;
547 goto done_free_fcport;
548 }
549 return rval;
550
551 done_free_fcport:
552 kfree(fcport);
553 done_unmap_sg:
554 dma_unmap_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list,
555 bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
556 dma_unmap_sg(&ha->pdev->dev, bsg_job->reply_payload.sg_list,
557 bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
558 done:
559 return rval;
560 }
561
562 /* Disable loopback mode */
563 static inline int
564 qla81xx_reset_loopback_mode(scsi_qla_host_t *vha, uint16_t *config,
565 int wait, int wait2)
566 {
567 int ret = 0;
568 int rval = 0;
569 uint16_t new_config[4];
570 struct qla_hw_data *ha = vha->hw;
571
572 if (!IS_QLA81XX(ha) && !IS_QLA8031(ha) && !IS_QLA8044(ha))
573 goto done_reset_internal;
574
575 memset(new_config, 0, sizeof(new_config));
576 if ((config[0] & INTERNAL_LOOPBACK_MASK) >> 1 ==
577 ENABLE_INTERNAL_LOOPBACK ||
578 (config[0] & INTERNAL_LOOPBACK_MASK) >> 1 ==
579 ENABLE_EXTERNAL_LOOPBACK) {
580 new_config[0] = config[0] & ~INTERNAL_LOOPBACK_MASK;
581 ql_dbg(ql_dbg_user, vha, 0x70bf, "new_config[0]=%02x\n",
582 (new_config[0] & INTERNAL_LOOPBACK_MASK));
583 memcpy(&new_config[1], &config[1], sizeof(uint16_t) * 3);
584
585 ha->notify_dcbx_comp = wait;
586 ha->notify_lb_portup_comp = wait2;
587
588 ret = qla81xx_set_port_config(vha, new_config);
589 if (ret != QLA_SUCCESS) {
590 ql_log(ql_log_warn, vha, 0x7025,
591 "Set port config failed.\n");
592 ha->notify_dcbx_comp = 0;
593 ha->notify_lb_portup_comp = 0;
594 rval = -EINVAL;
595 goto done_reset_internal;
596 }
597
598 /* Wait for DCBX complete event */
599 if (wait && !wait_for_completion_timeout(&ha->dcbx_comp,
600 (DCBX_COMP_TIMEOUT * HZ))) {
601 ql_dbg(ql_dbg_user, vha, 0x7026,
602 "DCBX completion not received.\n");
603 ha->notify_dcbx_comp = 0;
604 ha->notify_lb_portup_comp = 0;
605 rval = -EINVAL;
606 goto done_reset_internal;
607 } else
608 ql_dbg(ql_dbg_user, vha, 0x7027,
609 "DCBX completion received.\n");
610
611 if (wait2 &&
612 !wait_for_completion_timeout(&ha->lb_portup_comp,
613 (LB_PORTUP_COMP_TIMEOUT * HZ))) {
614 ql_dbg(ql_dbg_user, vha, 0x70c5,
615 "Port up completion not received.\n");
616 ha->notify_lb_portup_comp = 0;
617 rval = -EINVAL;
618 goto done_reset_internal;
619 } else
620 ql_dbg(ql_dbg_user, vha, 0x70c6,
621 "Port up completion received.\n");
622
623 ha->notify_dcbx_comp = 0;
624 ha->notify_lb_portup_comp = 0;
625 }
626 done_reset_internal:
627 return rval;
628 }
629
630 /*
631 * Set the port configuration to enable the internal or external loopback
632 * depending on the loopback mode.
633 */
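/*
 * The loopback mode lives in the low bits of port-config word 0
 * (INTERNAL_LOOPBACK_MASK): ENABLE_INTERNAL_LOOPBACK or
 * ENABLE_EXTERNAL_LOOPBACK is shifted into place while words 1-3 of
 * the existing configuration are carried over unchanged.
 */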
634 static inline int
635 qla81xx_set_loopback_mode(scsi_qla_host_t *vha, uint16_t *config,
636 uint16_t *new_config, uint16_t mode)
637 {
638 int ret = 0;
639 int rval = 0;
640 unsigned long rem_tmo = 0, current_tmo = 0;
641 struct qla_hw_data *ha = vha->hw;
642
643 if (!IS_QLA81XX(ha) && !IS_QLA8031(ha) && !IS_QLA8044(ha))
644 goto done_set_internal;
645
646 if (mode == INTERNAL_LOOPBACK)
647 new_config[0] = config[0] | (ENABLE_INTERNAL_LOOPBACK << 1);
648 else if (mode == EXTERNAL_LOOPBACK)
649 new_config[0] = config[0] | (ENABLE_EXTERNAL_LOOPBACK << 1);
650 ql_dbg(ql_dbg_user, vha, 0x70be,
651 "new_config[0]=%02x\n", (new_config[0] & INTERNAL_LOOPBACK_MASK));
652
653 memcpy(&new_config[1], &config[1], sizeof(uint16_t) * 3);
654
655 ha->notify_dcbx_comp = 1;
656 ret = qla81xx_set_port_config(vha, new_config);
657 if (ret != QLA_SUCCESS) {
658 ql_log(ql_log_warn, vha, 0x7021,
659 "set port config failed.\n");
660 ha->notify_dcbx_comp = 0;
661 rval = -EINVAL;
662 goto done_set_internal;
663 }
664
665 /* Wait for DCBX complete event */
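/*
 * The firmware may ask for more time via an IDC time-extension AEN;
 * when ha->idc_extend_tmo is set as the wait expires, re-arm the
 * wait with the extended timeout instead of failing right away.
 */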
666 current_tmo = DCBX_COMP_TIMEOUT * HZ;
667 while (1) {
668 rem_tmo = wait_for_completion_timeout(&ha->dcbx_comp,
669 current_tmo);
670 if (!ha->idc_extend_tmo || rem_tmo) {
671 ha->idc_extend_tmo = 0;
672 break;
673 }
674 current_tmo = ha->idc_extend_tmo * HZ;
675 ha->idc_extend_tmo = 0;
676 }
677
678 if (!rem_tmo) {
679 ql_dbg(ql_dbg_user, vha, 0x7022,
680 "DCBX completion not received.\n");
681 ret = qla81xx_reset_loopback_mode(vha, new_config, 0, 0);
682 /*
683 * If the reset of the loopback mode doesn't work take a FCoE
684 * dump and reset the chip.
685 */
686 if (ret) {
687 ha->isp_ops->fw_dump(vha, 0);
688 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
689 }
690 rval = -EINVAL;
691 } else {
692 if (ha->flags.idc_compl_status) {
693 ql_dbg(ql_dbg_user, vha, 0x70c3,
694 "Bad status in IDC Completion AEN\n");
695 rval = -EINVAL;
696 ha->flags.idc_compl_status = 0;
697 } else
698 ql_dbg(ql_dbg_user, vha, 0x7023,
699 "DCBX completion received.\n");
700 }
701
702 ha->notify_dcbx_comp = 0;
703 ha->idc_extend_tmo = 0;
704
705 done_set_internal:
706 return rval;
707 }
708
709 static int
710 qla2x00_process_loopback(struct bsg_job *bsg_job)
711 {
712 struct fc_bsg_request *bsg_request = bsg_job->request;
713 struct fc_bsg_reply *bsg_reply = bsg_job->reply;
714 struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
715 scsi_qla_host_t *vha = shost_priv(host);
716 struct qla_hw_data *ha = vha->hw;
717 int rval;
718 uint8_t command_sent;
719 char *type;
720 struct msg_echo_lb elreq;
721 uint16_t response[MAILBOX_REGISTER_COUNT];
722 uint16_t config[4], new_config[4];
723 uint8_t *fw_sts_ptr;
724 uint8_t *req_data = NULL;
725 dma_addr_t req_data_dma;
726 uint32_t req_data_len;
727 uint8_t *rsp_data = NULL;
728 dma_addr_t rsp_data_dma;
729 uint32_t rsp_data_len;
730
731 if (!vha->flags.online) {
732 ql_log(ql_log_warn, vha, 0x7019, "Host is not online.\n");
733 return -EIO;
734 }
735
736 elreq.req_sg_cnt = dma_map_sg(&ha->pdev->dev,
737 bsg_job->request_payload.sg_list, bsg_job->request_payload.sg_cnt,
738 DMA_TO_DEVICE);
739
740 if (!elreq.req_sg_cnt) {
741 ql_log(ql_log_warn, vha, 0x701a,
742 "dma_map_sg returned %d for request.\n", elreq.req_sg_cnt);
743 return -ENOMEM;
744 }
745
746 elreq.rsp_sg_cnt = dma_map_sg(&ha->pdev->dev,
747 bsg_job->reply_payload.sg_list, bsg_job->reply_payload.sg_cnt,
748 DMA_FROM_DEVICE);
749
750 if (!elreq.rsp_sg_cnt) {
751 ql_log(ql_log_warn, vha, 0x701b,
752 "dma_map_sg returned %d for reply.\n", elreq.rsp_sg_cnt);
753 rval = -ENOMEM;
754 goto done_unmap_req_sg;
755 }
756
757 if ((elreq.req_sg_cnt != bsg_job->request_payload.sg_cnt) ||
758 (elreq.rsp_sg_cnt != bsg_job->reply_payload.sg_cnt)) {
759 ql_log(ql_log_warn, vha, 0x701c,
760 "dma mapping resulted in different sg counts, "
761 "request_sg_cnt: %x dma_request_sg_cnt: %x "
762 "reply_sg_cnt: %x dma_reply_sg_cnt: %x.\n",
763 bsg_job->request_payload.sg_cnt, elreq.req_sg_cnt,
764 bsg_job->reply_payload.sg_cnt, elreq.rsp_sg_cnt);
765 rval = -EAGAIN;
766 goto done_unmap_sg;
767 }
768 req_data_len = rsp_data_len = bsg_job->request_payload.payload_len;
769 req_data = dma_alloc_coherent(&ha->pdev->dev, req_data_len,
770 &req_data_dma, GFP_KERNEL);
771 if (!req_data) {
772 ql_log(ql_log_warn, vha, 0x701d,
773 "dma alloc failed for req_data.\n");
774 rval = -ENOMEM;
775 goto done_unmap_sg;
776 }
777
778 rsp_data = dma_alloc_coherent(&ha->pdev->dev, rsp_data_len,
779 &rsp_data_dma, GFP_KERNEL);
780 if (!rsp_data) {
781 ql_log(ql_log_warn, vha, 0x7004,
782 "dma alloc failed for rsp_data.\n");
783 rval = -ENOMEM;
784 goto done_free_dma_req;
785 }
786
787 /* Copy the request buffer in req_data now */
788 sg_copy_to_buffer(bsg_job->request_payload.sg_list,
789 bsg_job->request_payload.sg_cnt, req_data, req_data_len);
790
791 elreq.send_dma = req_data_dma;
792 elreq.rcv_dma = rsp_data_dma;
793 elreq.transfer_size = req_data_len;
794
795 elreq.options = bsg_request->rqst_data.h_vendor.vendor_cmd[1];
796 elreq.iteration_count =
797 bsg_request->rqst_data.h_vendor.vendor_cmd[2];
798
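/*
 * When external loopback is requested and the link is ready, an ECHO
 * test is used if the topology is ISP_CFG_F or, on 81xx/8031/8044
 * CNAs, if the payload is a MAX_ELS_FRAME_PAYLOAD-sized ELS echo
 * frame; every other case goes through the loopback-mode
 * port-config path below.
 */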
799 if (atomic_read(&vha->loop_state) == LOOP_READY &&
800 (ha->current_topology == ISP_CFG_F ||
801 ((IS_QLA81XX(ha) || IS_QLA8031(ha) || IS_QLA8044(ha)) &&
802 le32_to_cpu(*(uint32_t *)req_data) == ELS_OPCODE_BYTE
803 && req_data_len == MAX_ELS_FRAME_PAYLOAD)) &&
804 elreq.options == EXTERNAL_LOOPBACK) {
805 type = "FC_BSG_HST_VENDOR_ECHO_DIAG";
806 ql_dbg(ql_dbg_user, vha, 0x701e,
807 "BSG request type: %s.\n", type);
808 command_sent = INT_DEF_LB_ECHO_CMD;
809 rval = qla2x00_echo_test(vha, &elreq, response);
810 } else {
811 if (IS_QLA81XX(ha) || IS_QLA8031(ha) || IS_QLA8044(ha)) {
812 memset(config, 0, sizeof(config));
813 memset(new_config, 0, sizeof(new_config));
814
815 if (qla81xx_get_port_config(vha, config)) {
816 ql_log(ql_log_warn, vha, 0x701f,
817 "Get port config failed.\n");
818 rval = -EPERM;
819 goto done_free_dma_rsp;
820 }
821
822 if ((config[0] & INTERNAL_LOOPBACK_MASK) != 0) {
823 ql_dbg(ql_dbg_user, vha, 0x70c4,
824 "Loopback operation already in "
825 "progress.\n");
826 rval = -EAGAIN;
827 goto done_free_dma_rsp;
828 }
829
830 ql_dbg(ql_dbg_user, vha, 0x70c0,
831 "elreq.options=%04x\n", elreq.options);
832
833 if (elreq.options == EXTERNAL_LOOPBACK)
834 if (IS_QLA8031(ha) || IS_QLA8044(ha))
835 rval = qla81xx_set_loopback_mode(vha,
836 config, new_config, elreq.options);
837 else
838 rval = qla81xx_reset_loopback_mode(vha,
839 config, 1, 0);
840 else
841 rval = qla81xx_set_loopback_mode(vha, config,
842 new_config, elreq.options);
843
844 if (rval) {
845 rval = -EPERM;
846 goto done_free_dma_rsp;
847 }
848
849 type = "FC_BSG_HST_VENDOR_LOOPBACK";
850 ql_dbg(ql_dbg_user, vha, 0x7028,
851 "BSG request type: %s.\n", type);
852
853 command_sent = INT_DEF_LB_LOOPBACK_CMD;
854 rval = qla2x00_loopback_test(vha, &elreq, response);
855
856 if (response[0] == MBS_COMMAND_ERROR &&
857 response[1] == MBS_LB_RESET) {
858 ql_log(ql_log_warn, vha, 0x7029,
859 "MBX command error, Aborting ISP.\n");
860 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
861 qla2xxx_wake_dpc(vha);
862 qla2x00_wait_for_chip_reset(vha);
863 /* Also reset the MPI */
864 if (IS_QLA81XX(ha)) {
865 if (qla81xx_restart_mpi_firmware(vha) !=
866 QLA_SUCCESS) {
867 ql_log(ql_log_warn, vha, 0x702a,
868 "MPI reset failed.\n");
869 }
870 }
871
872 rval = -EIO;
873 goto done_free_dma_rsp;
874 }
875
876 if (new_config[0]) {
877 int ret;
878
879 /* Revert back to original port config
880 * Also clear internal loopback
881 */
882 ret = qla81xx_reset_loopback_mode(vha,
883 new_config, 0, 1);
884 if (ret) {
885 /*
886 * If the reset of the loopback mode
887 * doesn't work take FCoE dump and then
888 * reset the chip.
889 */
890 ha->isp_ops->fw_dump(vha, 0);
891 set_bit(ISP_ABORT_NEEDED,
892 &vha->dpc_flags);
893 }
894
895 }
896
897 } else {
898 type = "FC_BSG_HST_VENDOR_LOOPBACK";
899 ql_dbg(ql_dbg_user, vha, 0x702b,
900 "BSG request type: %s.\n", type);
901 command_sent = INT_DEF_LB_LOOPBACK_CMD;
902 rval = qla2x00_loopback_test(vha, &elreq, response);
903 }
904 }
905
906 if (rval) {
907 ql_log(ql_log_warn, vha, 0x702c,
908 "Vendor request %s failed.\n", type);
909
910 rval = 0;
911 bsg_reply->result = (DID_ERROR << 16);
912 bsg_reply->reply_payload_rcv_len = 0;
913 } else {
914 ql_dbg(ql_dbg_user, vha, 0x702d,
915 "Vendor request %s completed.\n", type);
916 bsg_reply->result = (DID_OK << 16);
917 sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
918 bsg_job->reply_payload.sg_cnt, rsp_data,
919 rsp_data_len);
920 }
921
922 bsg_job->reply_len = sizeof(struct fc_bsg_reply) +
923 sizeof(response) + sizeof(uint8_t);
924 fw_sts_ptr = ((uint8_t *)bsg_job->req->sense) +
925 sizeof(struct fc_bsg_reply);
926 memcpy(fw_sts_ptr, response, sizeof(response));
927 fw_sts_ptr += sizeof(response);
928 *fw_sts_ptr = command_sent;
929
930 done_free_dma_rsp:
931 dma_free_coherent(&ha->pdev->dev, rsp_data_len,
932 rsp_data, rsp_data_dma);
933 done_free_dma_req:
934 dma_free_coherent(&ha->pdev->dev, req_data_len,
935 req_data, req_data_dma);
936 done_unmap_sg:
937 dma_unmap_sg(&ha->pdev->dev,
938 bsg_job->reply_payload.sg_list,
939 bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
940 done_unmap_req_sg:
941 dma_unmap_sg(&ha->pdev->dev,
942 bsg_job->request_payload.sg_list,
943 bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
944 if (!rval)
945 bsg_job_done(bsg_job, bsg_reply->result,
946 bsg_reply->reply_payload_rcv_len);
947 return rval;
948 }
949
950 static int
951 qla84xx_reset(struct bsg_job *bsg_job)
952 {
953 struct fc_bsg_request *bsg_request = bsg_job->request;
954 struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
955 struct fc_bsg_reply *bsg_reply = bsg_job->reply;
956 scsi_qla_host_t *vha = shost_priv(host);
957 struct qla_hw_data *ha = vha->hw;
958 int rval = 0;
959 uint32_t flag;
960
961 if (!IS_QLA84XX(ha)) {
962 ql_dbg(ql_dbg_user, vha, 0x702f, "Not 84xx, exiting.\n");
963 return -EINVAL;
964 }
965
966 flag = bsg_request->rqst_data.h_vendor.vendor_cmd[1];
967
968 rval = qla84xx_reset_chip(vha, flag == A84_ISSUE_RESET_DIAG_FW);
969
970 if (rval) {
971 ql_log(ql_log_warn, vha, 0x7030,
972 "Vendor request 84xx reset failed.\n");
973 rval = (DID_ERROR << 16);
974
975 } else {
976 ql_dbg(ql_dbg_user, vha, 0x7031,
977 "Vendor request 84xx reset completed.\n");
978 bsg_reply->result = DID_OK;
979 bsg_job_done(bsg_job, bsg_reply->result,
980 bsg_reply->reply_payload_rcv_len);
981 }
982
983 return rval;
984 }
985
986 static int
987 qla84xx_updatefw(struct bsg_job *bsg_job)
988 {
989 struct fc_bsg_request *bsg_request = bsg_job->request;
990 struct fc_bsg_reply *bsg_reply = bsg_job->reply;
991 struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
992 scsi_qla_host_t *vha = shost_priv(host);
993 struct qla_hw_data *ha = vha->hw;
994 struct verify_chip_entry_84xx *mn = NULL;
995 dma_addr_t mn_dma, fw_dma;
996 void *fw_buf = NULL;
997 int rval = 0;
998 uint32_t sg_cnt;
999 uint32_t data_len;
1000 uint16_t options;
1001 uint32_t flag;
1002 uint32_t fw_ver;
1003
1004 if (!IS_QLA84XX(ha)) {
1005 ql_dbg(ql_dbg_user, vha, 0x7032,
1006 "Not 84xx, exiting.\n");
1007 return -EINVAL;
1008 }
1009
1010 sg_cnt = dma_map_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list,
1011 bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
1012 if (!sg_cnt) {
1013 ql_log(ql_log_warn, vha, 0x7033,
1014 "dma_map_sg returned %d for request.\n", sg_cnt);
1015 return -ENOMEM;
1016 }
1017
1018 if (sg_cnt != bsg_job->request_payload.sg_cnt) {
1019 ql_log(ql_log_warn, vha, 0x7034,
1020 "DMA mapping resulted in different sg counts, "
1021 "request_sg_cnt: %x dma_request_sg_cnt: %x.\n",
1022 bsg_job->request_payload.sg_cnt, sg_cnt);
1023 rval = -EAGAIN;
1024 goto done_unmap_sg;
1025 }
1026
1027 data_len = bsg_job->request_payload.payload_len;
1028 fw_buf = dma_alloc_coherent(&ha->pdev->dev, data_len,
1029 &fw_dma, GFP_KERNEL);
1030 if (!fw_buf) {
1031 ql_log(ql_log_warn, vha, 0x7035,
1032 "DMA alloc failed for fw_buf.\n");
1033 rval = -ENOMEM;
1034 goto done_unmap_sg;
1035 }
1036
1037 sg_copy_to_buffer(bsg_job->request_payload.sg_list,
1038 bsg_job->request_payload.sg_cnt, fw_buf, data_len);
1039
1040 mn = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &mn_dma);
1041 if (!mn) {
1042 ql_log(ql_log_warn, vha, 0x7036,
1043 "DMA alloc failed for fw buffer.\n");
1044 rval = -ENOMEM;
1045 goto done_free_fw_buf;
1046 }
1047
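/* The firmware version is read from the third dword of the image. */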
1048 flag = bsg_request->rqst_data.h_vendor.vendor_cmd[1];
1049 fw_ver = le32_to_cpu(*((uint32_t *)((uint32_t *)fw_buf + 2)));
1050
1051 memset(mn, 0, sizeof(*mn));
1052 mn->entry_type = VERIFY_CHIP_IOCB_TYPE;
1053 mn->entry_count = 1;
1054
1055 options = VCO_FORCE_UPDATE | VCO_END_OF_DATA;
1056 if (flag == A84_ISSUE_UPDATE_DIAGFW_CMD)
1057 options |= VCO_DIAG_FW;
1058
1059 mn->options = cpu_to_le16(options);
1060 mn->fw_ver = cpu_to_le32(fw_ver);
1061 mn->fw_size = cpu_to_le32(data_len);
1062 mn->fw_seq_size = cpu_to_le32(data_len);
1063 mn->dseg_address[0] = cpu_to_le32(LSD(fw_dma));
1064 mn->dseg_address[1] = cpu_to_le32(MSD(fw_dma));
1065 mn->dseg_length = cpu_to_le32(data_len);
1066 mn->data_seg_cnt = cpu_to_le16(1);
1067
1068 rval = qla2x00_issue_iocb_timeout(vha, mn, mn_dma, 0, 120);
1069
1070 if (rval) {
1071 ql_log(ql_log_warn, vha, 0x7037,
1072 "Vendor request 84xx updatefw failed.\n");
1073
1074 rval = (DID_ERROR << 16);
1075 } else {
1076 ql_dbg(ql_dbg_user, vha, 0x7038,
1077 "Vendor request 84xx updatefw completed.\n");
1078
1079 bsg_job->reply_len = sizeof(struct fc_bsg_reply);
1080 bsg_reply->result = DID_OK;
1081 }
1082
1083 dma_pool_free(ha->s_dma_pool, mn, mn_dma);
1084
1085 done_free_fw_buf:
1086 dma_free_coherent(&ha->pdev->dev, data_len, fw_buf, fw_dma);
1087
1088 done_unmap_sg:
1089 dma_unmap_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list,
1090 bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
1091
1092 if (!rval)
1093 bsg_job_done(bsg_job, bsg_reply->result,
1094 bsg_reply->reply_payload_rcv_len);
1095 return rval;
1096 }
1097
1098 static int
1099 qla84xx_mgmt_cmd(struct bsg_job *bsg_job)
1100 {
1101 struct fc_bsg_request *bsg_request = bsg_job->request;
1102 struct fc_bsg_reply *bsg_reply = bsg_job->reply;
1103 struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
1104 scsi_qla_host_t *vha = shost_priv(host);
1105 struct qla_hw_data *ha = vha->hw;
1106 struct access_chip_84xx *mn = NULL;
1107 dma_addr_t mn_dma, mgmt_dma;
1108 void *mgmt_b = NULL;
1109 int rval = 0;
1110 struct qla_bsg_a84_mgmt *ql84_mgmt;
1111 uint32_t sg_cnt;
1112 uint32_t data_len = 0;
1113 uint32_t dma_direction = DMA_NONE;
1114
1115 if (!IS_QLA84XX(ha)) {
1116 ql_log(ql_log_warn, vha, 0x703a,
1117 "Not 84xx, exiting.\n");
1118 return -EINVAL;
1119 }
1120
1121 mn = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &mn_dma);
1122 if (!mn) {
1123 ql_log(ql_log_warn, vha, 0x703c,
1124 "DMA alloc failed for fw buffer.\n");
1125 return -ENOMEM;
1126 }
1127
1128 memset(mn, 0, sizeof(struct access_chip_84xx));
1129 mn->entry_type = ACCESS_CHIP_IOCB_TYPE;
1130 mn->entry_count = 1;
1131 ql84_mgmt = (void *)bsg_request + sizeof(struct fc_bsg_request);
1132 switch (ql84_mgmt->mgmt.cmd) {
1133 case QLA84_MGMT_READ_MEM:
1134 case QLA84_MGMT_GET_INFO:
1135 sg_cnt = dma_map_sg(&ha->pdev->dev,
1136 bsg_job->reply_payload.sg_list,
1137 bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
1138 if (!sg_cnt) {
1139 ql_log(ql_log_warn, vha, 0x703d,
1140 "dma_map_sg returned %d for reply.\n", sg_cnt);
1141 rval = -ENOMEM;
1142 goto exit_mgmt;
1143 }
1144
1145 dma_direction = DMA_FROM_DEVICE;
1146
1147 if (sg_cnt != bsg_job->reply_payload.sg_cnt) {
1148 ql_log(ql_log_warn, vha, 0x703e,
1149 "DMA mapping resulted in different sg counts, "
1150 "reply_sg_cnt: %x dma_reply_sg_cnt: %x.\n",
1151 bsg_job->reply_payload.sg_cnt, sg_cnt);
1152 rval = -EAGAIN;
1153 goto done_unmap_sg;
1154 }
1155
1156 data_len = bsg_job->reply_payload.payload_len;
1157
1158 mgmt_b = dma_alloc_coherent(&ha->pdev->dev, data_len,
1159 &mgmt_dma, GFP_KERNEL);
1160 if (!mgmt_b) {
1161 ql_log(ql_log_warn, vha, 0x703f,
1162 "DMA alloc failed for mgmt_b.\n");
1163 rval = -ENOMEM;
1164 goto done_unmap_sg;
1165 }
1166
1167 if (ql84_mgmt->mgmt.cmd == QLA84_MGMT_READ_MEM) {
1168 mn->options = cpu_to_le16(ACO_DUMP_MEMORY);
1169 mn->parameter1 =
1170 cpu_to_le32(
1171 ql84_mgmt->mgmt.mgmtp.u.mem.start_addr);
1172
1173 } else if (ql84_mgmt->mgmt.cmd == QLA84_MGMT_GET_INFO) {
1174 mn->options = cpu_to_le16(ACO_REQUEST_INFO);
1175 mn->parameter1 =
1176 cpu_to_le32(ql84_mgmt->mgmt.mgmtp.u.info.type);
1177
1178 mn->parameter2 =
1179 cpu_to_le32(
1180 ql84_mgmt->mgmt.mgmtp.u.info.context);
1181 }
1182 break;
1183
1184 case QLA84_MGMT_WRITE_MEM:
1185 sg_cnt = dma_map_sg(&ha->pdev->dev,
1186 bsg_job->request_payload.sg_list,
1187 bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
1188
1189 if (!sg_cnt) {
1190 ql_log(ql_log_warn, vha, 0x7040,
1191 "dma_map_sg returned %d.\n", sg_cnt);
1192 rval = -ENOMEM;
1193 goto exit_mgmt;
1194 }
1195
1196 dma_direction = DMA_TO_DEVICE;
1197
1198 if (sg_cnt != bsg_job->request_payload.sg_cnt) {
1199 ql_log(ql_log_warn, vha, 0x7041,
1200 "DMA mapping resulted in different sg counts, "
1201 "request_sg_cnt: %x dma_request_sg_cnt: %x.\n",
1202 bsg_job->request_payload.sg_cnt, sg_cnt);
1203 rval = -EAGAIN;
1204 goto done_unmap_sg;
1205 }
1206
1207 data_len = bsg_job->request_payload.payload_len;
1208 mgmt_b = dma_alloc_coherent(&ha->pdev->dev, data_len,
1209 &mgmt_dma, GFP_KERNEL);
1210 if (!mgmt_b) {
1211 ql_log(ql_log_warn, vha, 0x7042,
1212 "DMA alloc failed for mgmt_b.\n");
1213 rval = -ENOMEM;
1214 goto done_unmap_sg;
1215 }
1216
1217 sg_copy_to_buffer(bsg_job->request_payload.sg_list,
1218 bsg_job->request_payload.sg_cnt, mgmt_b, data_len);
1219
1220 mn->options = cpu_to_le16(ACO_LOAD_MEMORY);
1221 mn->parameter1 =
1222 cpu_to_le32(ql84_mgmt->mgmt.mgmtp.u.mem.start_addr);
1223 break;
1224
1225 case QLA84_MGMT_CHNG_CONFIG:
1226 mn->options = cpu_to_le16(ACO_CHANGE_CONFIG_PARAM);
1227 mn->parameter1 =
1228 cpu_to_le32(ql84_mgmt->mgmt.mgmtp.u.config.id);
1229
1230 mn->parameter2 =
1231 cpu_to_le32(ql84_mgmt->mgmt.mgmtp.u.config.param0);
1232
1233 mn->parameter3 =
1234 cpu_to_le32(ql84_mgmt->mgmt.mgmtp.u.config.param1);
1235 break;
1236
1237 default:
1238 rval = -EIO;
1239 goto exit_mgmt;
1240 }
1241
1242 if (ql84_mgmt->mgmt.cmd != QLA84_MGMT_CHNG_CONFIG) {
1243 mn->total_byte_cnt = cpu_to_le32(ql84_mgmt->mgmt.len);
1244 mn->dseg_count = cpu_to_le16(1);
1245 mn->dseg_address[0] = cpu_to_le32(LSD(mgmt_dma));
1246 mn->dseg_address[1] = cpu_to_le32(MSD(mgmt_dma));
1247 mn->dseg_length = cpu_to_le32(ql84_mgmt->mgmt.len);
1248 }
1249
1250 rval = qla2x00_issue_iocb(vha, mn, mn_dma, 0);
1251
1252 if (rval) {
1253 ql_log(ql_log_warn, vha, 0x7043,
1254 "Vendor request 84xx mgmt failed.\n");
1255
1256 rval = (DID_ERROR << 16);
1257
1258 } else {
1259 ql_dbg(ql_dbg_user, vha, 0x7044,
1260 "Vendor request 84xx mgmt completed.\n");
1261
1262 bsg_job->reply_len = sizeof(struct fc_bsg_reply);
1263 bsg_reply->result = DID_OK;
1264
1265 if ((ql84_mgmt->mgmt.cmd == QLA84_MGMT_READ_MEM) ||
1266 (ql84_mgmt->mgmt.cmd == QLA84_MGMT_GET_INFO)) {
1267 bsg_reply->reply_payload_rcv_len =
1268 bsg_job->reply_payload.payload_len;
1269
1270 sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
1271 bsg_job->reply_payload.sg_cnt, mgmt_b,
1272 data_len);
1273 }
1274 }
1275
1276 done_unmap_sg:
1277 if (mgmt_b)
1278 dma_free_coherent(&ha->pdev->dev, data_len, mgmt_b, mgmt_dma);
1279
1280 if (dma_direction == DMA_TO_DEVICE)
1281 dma_unmap_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list,
1282 bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
1283 else if (dma_direction == DMA_FROM_DEVICE)
1284 dma_unmap_sg(&ha->pdev->dev, bsg_job->reply_payload.sg_list,
1285 bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
1286
1287 exit_mgmt:
1288 dma_pool_free(ha->s_dma_pool, mn, mn_dma);
1289
1290 if (!rval)
1291 bsg_job_done(bsg_job, bsg_reply->result,
1292 bsg_reply->reply_payload_rcv_len);
1293 return rval;
1294 }
1295
1296 static int
1297 qla24xx_iidma(struct bsg_job *bsg_job)
1298 {
1299 struct fc_bsg_request *bsg_request = bsg_job->request;
1300 struct fc_bsg_reply *bsg_reply = bsg_job->reply;
1301 struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
1302 scsi_qla_host_t *vha = shost_priv(host);
1303 int rval = 0;
1304 struct qla_port_param *port_param = NULL;
1305 fc_port_t *fcport = NULL;
1306 int found = 0;
1307 uint16_t mb[MAILBOX_REGISTER_COUNT];
1308 uint8_t *rsp_ptr = NULL;
1309
1310 if (!IS_IIDMA_CAPABLE(vha->hw)) {
1311 ql_log(ql_log_info, vha, 0x7046, "iiDMA not supported.\n");
1312 return -EINVAL;
1313 }
1314
1315 port_param = (void *)bsg_request + sizeof(struct fc_bsg_request);
1316 if (port_param->fc_scsi_addr.dest_type != EXT_DEF_TYPE_WWPN) {
1317 ql_log(ql_log_warn, vha, 0x7048,
1318 "Invalid destination type.\n");
1319 return -EINVAL;
1320 }
1321
1322 list_for_each_entry(fcport, &vha->vp_fcports, list) {
1323 if (fcport->port_type != FCT_TARGET)
1324 continue;
1325
1326 if (memcmp(port_param->fc_scsi_addr.dest_addr.wwpn,
1327 fcport->port_name, sizeof(fcport->port_name)))
1328 continue;
1329
1330 found = 1;
1331 break;
1332 }
1333
1334 if (!found) {
1335 ql_log(ql_log_warn, vha, 0x7049,
1336 "Failed to find port.\n");
1337 return -EINVAL;
1338 }
1339
1340 if (atomic_read(&fcport->state) != FCS_ONLINE) {
1341 ql_log(ql_log_warn, vha, 0x704a,
1342 "Port is not online.\n");
1343 return -EINVAL;
1344 }
1345
1346 if (fcport->flags & FCF_LOGIN_NEEDED) {
1347 ql_log(ql_log_warn, vha, 0x704b,
1348 "Remote port not logged in flags = 0x%x.\n", fcport->flags);
1349 return -EINVAL;
1350 }
1351
1352 if (port_param->mode)
1353 rval = qla2x00_set_idma_speed(vha, fcport->loop_id,
1354 port_param->speed, mb);
1355 else
1356 rval = qla2x00_get_idma_speed(vha, fcport->loop_id,
1357 &port_param->speed, mb);
1358
1359 if (rval) {
1360 ql_log(ql_log_warn, vha, 0x704c,
1361 "iIDMA cmd failed for %8phN -- "
1362 "%04x %x %04x %04x.\n", fcport->port_name,
1363 rval, fcport->fp_speed, mb[0], mb[1]);
1364 rval = (DID_ERROR << 16);
1365 } else {
1366 if (!port_param->mode) {
1367 bsg_job->reply_len = sizeof(struct fc_bsg_reply) +
1368 sizeof(struct qla_port_param);
1369
1370 rsp_ptr = ((uint8_t *)bsg_reply) +
1371 sizeof(struct fc_bsg_reply);
1372
1373 memcpy(rsp_ptr, port_param,
1374 sizeof(struct qla_port_param));
1375 }
1376
1377 bsg_reply->result = DID_OK;
1378 bsg_job_done(bsg_job, bsg_reply->result,
1379 bsg_reply->reply_payload_rcv_len);
1380 }
1381
1382 return rval;
1383 }
1384
1385 static int
1386 qla2x00_optrom_setup(struct bsg_job *bsg_job, scsi_qla_host_t *vha,
1387 uint8_t is_update)
1388 {
1389 struct fc_bsg_request *bsg_request = bsg_job->request;
1390 uint32_t start = 0;
1391 int valid = 0;
1392 struct qla_hw_data *ha = vha->hw;
1393
1394 if (unlikely(pci_channel_offline(ha->pdev)))
1395 return -EINVAL;
1396
1397 start = bsg_request->rqst_data.h_vendor.vendor_cmd[1];
1398 if (start > ha->optrom_size) {
1399 ql_log(ql_log_warn, vha, 0x7055,
1400 "start %d > optrom_size %d.\n", start, ha->optrom_size);
1401 return -EINVAL;
1402 }
1403
1404 if (ha->optrom_state != QLA_SWAITING) {
1405 ql_log(ql_log_info, vha, 0x7056,
1406 "optrom_state %d.\n", ha->optrom_state);
1407 return -EBUSY;
1408 }
1409
1410 ha->optrom_region_start = start;
1411 ql_dbg(ql_dbg_user, vha, 0x7057, "is_update=%d.\n", is_update);
1412 if (is_update) {
1413 if (ha->optrom_size == OPTROM_SIZE_2300 && start == 0)
1414 valid = 1;
1415 else if (start == (ha->flt_region_boot * 4) ||
1416 start == (ha->flt_region_fw * 4))
1417 valid = 1;
1418 else if (IS_QLA24XX_TYPE(ha) || IS_QLA25XX(ha) ||
1419 IS_CNA_CAPABLE(ha) || IS_QLA2031(ha) || IS_QLA27XX(ha))
1420 valid = 1;
1421 if (!valid) {
1422 ql_log(ql_log_warn, vha, 0x7058,
1423 "Invalid start region 0x%x/0x%x.\n", start,
1424 bsg_job->request_payload.payload_len);
1425 return -EINVAL;
1426 }
1427
1428 ha->optrom_region_size = start +
1429 bsg_job->request_payload.payload_len > ha->optrom_size ?
1430 ha->optrom_size - start :
1431 bsg_job->request_payload.payload_len;
1432 ha->optrom_state = QLA_SWRITING;
1433 } else {
1434 ha->optrom_region_size = start +
1435 bsg_job->reply_payload.payload_len > ha->optrom_size ?
1436 ha->optrom_size - start :
1437 bsg_job->reply_payload.payload_len;
1438 ha->optrom_state = QLA_SREADING;
1439 }
1440
1441 ha->optrom_buffer = vmalloc(ha->optrom_region_size);
1442 if (!ha->optrom_buffer) {
1443 ql_log(ql_log_warn, vha, 0x7059,
1444 "Read: Unable to allocate memory for optrom retrieval "
1445 "(%x)\n", ha->optrom_region_size);
1446
1447 ha->optrom_state = QLA_SWAITING;
1448 return -ENOMEM;
1449 }
1450
1451 memset(ha->optrom_buffer, 0, ha->optrom_region_size);
1452 return 0;
1453 }
1454
1455 static int
1456 qla2x00_read_optrom(struct bsg_job *bsg_job)
1457 {
1458 struct fc_bsg_reply *bsg_reply = bsg_job->reply;
1459 struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
1460 scsi_qla_host_t *vha = shost_priv(host);
1461 struct qla_hw_data *ha = vha->hw;
1462 int rval = 0;
1463
1464 if (ha->flags.nic_core_reset_hdlr_active)
1465 return -EBUSY;
1466
1467 mutex_lock(&ha->optrom_mutex);
1468 rval = qla2x00_optrom_setup(bsg_job, vha, 0);
1469 if (rval) {
1470 mutex_unlock(&ha->optrom_mutex);
1471 return rval;
1472 }
1473
1474 ha->isp_ops->read_optrom(vha, ha->optrom_buffer,
1475 ha->optrom_region_start, ha->optrom_region_size);
1476
1477 sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
1478 bsg_job->reply_payload.sg_cnt, ha->optrom_buffer,
1479 ha->optrom_region_size);
1480
1481 bsg_reply->reply_payload_rcv_len = ha->optrom_region_size;
1482 bsg_reply->result = DID_OK;
1483 vfree(ha->optrom_buffer);
1484 ha->optrom_buffer = NULL;
1485 ha->optrom_state = QLA_SWAITING;
1486 mutex_unlock(&ha->optrom_mutex);
1487 bsg_job_done(bsg_job, bsg_reply->result,
1488 bsg_reply->reply_payload_rcv_len);
1489 return rval;
1490 }
1491
1492 static int
1493 qla2x00_update_optrom(struct bsg_job *bsg_job)
1494 {
1495 struct fc_bsg_reply *bsg_reply = bsg_job->reply;
1496 struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
1497 scsi_qla_host_t *vha = shost_priv(host);
1498 struct qla_hw_data *ha = vha->hw;
1499 int rval = 0;
1500
1501 mutex_lock(&ha->optrom_mutex);
1502 rval = qla2x00_optrom_setup(bsg_job, vha, 1);
1503 if (rval) {
1504 mutex_unlock(&ha->optrom_mutex);
1505 return rval;
1506 }
1507
1508 /* Set the isp82xx_no_md_cap not to capture minidump */
1509 ha->flags.isp82xx_no_md_cap = 1;
1510
1511 sg_copy_to_buffer(bsg_job->request_payload.sg_list,
1512 bsg_job->request_payload.sg_cnt, ha->optrom_buffer,
1513 ha->optrom_region_size);
1514
1515 ha->isp_ops->write_optrom(vha, ha->optrom_buffer,
1516 ha->optrom_region_start, ha->optrom_region_size);
1517
1518 bsg_reply->result = DID_OK;
1519 vfree(ha->optrom_buffer);
1520 ha->optrom_buffer = NULL;
1521 ha->optrom_state = QLA_SWAITING;
1522 mutex_unlock(&ha->optrom_mutex);
1523 bsg_job_done(bsg_job, bsg_reply->result,
1524 bsg_reply->reply_payload_rcv_len);
1525 return rval;
1526 }
1527
1528 static int
1529 qla2x00_update_fru_versions(struct bsg_job *bsg_job)
1530 {
1531 struct fc_bsg_reply *bsg_reply = bsg_job->reply;
1532 struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
1533 scsi_qla_host_t *vha = shost_priv(host);
1534 struct qla_hw_data *ha = vha->hw;
1535 int rval = 0;
1536 uint8_t bsg[DMA_POOL_SIZE];
1537 struct qla_image_version_list *list = (void *)bsg;
1538 struct qla_image_version *image;
1539 uint32_t count;
1540 dma_addr_t sfp_dma;
1541 void *sfp = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &sfp_dma);
1542 if (!sfp) {
1543 bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
1544 EXT_STATUS_NO_MEMORY;
1545 goto done;
1546 }
1547
1548 sg_copy_to_buffer(bsg_job->request_payload.sg_list,
1549 bsg_job->request_payload.sg_cnt, list, sizeof(bsg));
1550
1551 image = list->version;
1552 count = list->count;
1553 while (count--) {
1554 memcpy(sfp, &image->field_info, sizeof(image->field_info));
1555 rval = qla2x00_write_sfp(vha, sfp_dma, sfp,
1556 image->field_address.device, image->field_address.offset,
1557 sizeof(image->field_info), image->field_address.option);
1558 if (rval) {
1559 bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
1560 EXT_STATUS_MAILBOX;
1561 goto dealloc;
1562 }
1563 image++;
1564 }
1565
1566 bsg_reply->reply_data.vendor_reply.vendor_rsp[0] = 0;
1567
1568 dealloc:
1569 dma_pool_free(ha->s_dma_pool, sfp, sfp_dma);
1570
1571 done:
1572 bsg_job->reply_len = sizeof(struct fc_bsg_reply);
1573 bsg_reply->result = DID_OK << 16;
1574 bsg_job_done(bsg_job, bsg_reply->result,
1575 bsg_reply->reply_payload_rcv_len);
1576
1577 return 0;
1578 }
1579
1580 static int
1581 qla2x00_read_fru_status(struct bsg_job *bsg_job)
1582 {
1583 struct fc_bsg_reply *bsg_reply = bsg_job->reply;
1584 struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
1585 scsi_qla_host_t *vha = shost_priv(host);
1586 struct qla_hw_data *ha = vha->hw;
1587 int rval = 0;
1588 uint8_t bsg[DMA_POOL_SIZE];
1589 struct qla_status_reg *sr = (void *)bsg;
1590 dma_addr_t sfp_dma;
1591 uint8_t *sfp = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &sfp_dma);
1592 if (!sfp) {
1593 bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
1594 EXT_STATUS_NO_MEMORY;
1595 goto done;
1596 }
1597
1598 sg_copy_to_buffer(bsg_job->request_payload.sg_list,
1599 bsg_job->request_payload.sg_cnt, sr, sizeof(*sr));
1600
1601 rval = qla2x00_read_sfp(vha, sfp_dma, sfp,
1602 sr->field_address.device, sr->field_address.offset,
1603 sizeof(sr->status_reg), sr->field_address.option);
1604 sr->status_reg = *sfp;
1605
1606 if (rval) {
1607 bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
1608 EXT_STATUS_MAILBOX;
1609 goto dealloc;
1610 }
1611
1612 sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
1613 bsg_job->reply_payload.sg_cnt, sr, sizeof(*sr));
1614
1615 bsg_reply->reply_data.vendor_reply.vendor_rsp[0] = 0;
1616
1617 dealloc:
1618 dma_pool_free(ha->s_dma_pool, sfp, sfp_dma);
1619
1620 done:
1621 bsg_job->reply_len = sizeof(struct fc_bsg_reply);
1622 bsg_reply->reply_payload_rcv_len = sizeof(*sr);
1623 bsg_reply->result = DID_OK << 16;
1624 bsg_job_done(bsg_job, bsg_reply->result,
1625 bsg_reply->reply_payload_rcv_len);
1626
1627 return 0;
1628 }
1629
1630 static int
1631 qla2x00_write_fru_status(struct bsg_job *bsg_job)
1632 {
1633 struct fc_bsg_reply *bsg_reply = bsg_job->reply;
1634 struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
1635 scsi_qla_host_t *vha = shost_priv(host);
1636 struct qla_hw_data *ha = vha->hw;
1637 int rval = 0;
1638 uint8_t bsg[DMA_POOL_SIZE];
1639 struct qla_status_reg *sr = (void *)bsg;
1640 dma_addr_t sfp_dma;
1641 uint8_t *sfp = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &sfp_dma);
1642 if (!sfp) {
1643 bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
1644 EXT_STATUS_NO_MEMORY;
1645 goto done;
1646 }
1647
1648 sg_copy_to_buffer(bsg_job->request_payload.sg_list,
1649 bsg_job->request_payload.sg_cnt, sr, sizeof(*sr));
1650
1651 *sfp = sr->status_reg;
1652 rval = qla2x00_write_sfp(vha, sfp_dma, sfp,
1653 sr->field_address.device, sr->field_address.offset,
1654 sizeof(sr->status_reg), sr->field_address.option);
1655
1656 if (rval) {
1657 bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
1658 EXT_STATUS_MAILBOX;
1659 goto dealloc;
1660 }
1661
1662 bsg_reply->reply_data.vendor_reply.vendor_rsp[0] = 0;
1663
1664 dealloc:
1665 dma_pool_free(ha->s_dma_pool, sfp, sfp_dma);
1666
1667 done:
1668 bsg_job->reply_len = sizeof(struct fc_bsg_reply);
1669 bsg_reply->result = DID_OK << 16;
1670 bsg_job_done(bsg_job, bsg_reply->result,
1671 bsg_reply->reply_payload_rcv_len);
1672
1673 return 0;
1674 }
1675
1676 static int
1677 qla2x00_write_i2c(struct bsg_job *bsg_job)
1678 {
1679 struct fc_bsg_reply *bsg_reply = bsg_job->reply;
1680 struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
1681 scsi_qla_host_t *vha = shost_priv(host);
1682 struct qla_hw_data *ha = vha->hw;
1683 int rval = 0;
1684 uint8_t bsg[DMA_POOL_SIZE];
1685 struct qla_i2c_access *i2c = (void *)bsg;
1686 dma_addr_t sfp_dma;
1687 uint8_t *sfp = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &sfp_dma);
1688 if (!sfp) {
1689 bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
1690 EXT_STATUS_NO_MEMORY;
1691 goto done;
1692 }
1693
1694 sg_copy_to_buffer(bsg_job->request_payload.sg_list,
1695 bsg_job->request_payload.sg_cnt, i2c, sizeof(*i2c));
1696
1697 memcpy(sfp, i2c->buffer, i2c->length);
1698 rval = qla2x00_write_sfp(vha, sfp_dma, sfp,
1699 i2c->device, i2c->offset, i2c->length, i2c->option);
1700
1701 if (rval) {
1702 bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
1703 EXT_STATUS_MAILBOX;
1704 goto dealloc;
1705 }
1706
1707 bsg_reply->reply_data.vendor_reply.vendor_rsp[0] = 0;
1708
1709 dealloc:
1710 dma_pool_free(ha->s_dma_pool, sfp, sfp_dma);
1711
1712 done:
1713 bsg_job->reply_len = sizeof(struct fc_bsg_reply);
1714 bsg_reply->result = DID_OK << 16;
1715 bsg_job_done(bsg_job, bsg_reply->result,
1716 bsg_reply->reply_payload_rcv_len);
1717
1718 return 0;
1719 }
1720
1721 static int
1722 qla2x00_read_i2c(struct bsg_job *bsg_job)
1723 {
1724 struct fc_bsg_reply *bsg_reply = bsg_job->reply;
1725 struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
1726 scsi_qla_host_t *vha = shost_priv(host);
1727 struct qla_hw_data *ha = vha->hw;
1728 int rval = 0;
1729 uint8_t bsg[DMA_POOL_SIZE];
1730 struct qla_i2c_access *i2c = (void *)bsg;
1731 dma_addr_t sfp_dma;
1732 uint8_t *sfp = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &sfp_dma);
1733 if (!sfp) {
1734 bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
1735 EXT_STATUS_NO_MEMORY;
1736 goto done;
1737 }
1738
1739 sg_copy_to_buffer(bsg_job->request_payload.sg_list,
1740 bsg_job->request_payload.sg_cnt, i2c, sizeof(*i2c));
1741
1742 rval = qla2x00_read_sfp(vha, sfp_dma, sfp,
1743 i2c->device, i2c->offset, i2c->length, i2c->option);
1744
1745 if (rval) {
1746 bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
1747 EXT_STATUS_MAILBOX;
1748 goto dealloc;
1749 }
1750
1751 memcpy(i2c->buffer, sfp, i2c->length);
1752 sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
1753 bsg_job->reply_payload.sg_cnt, i2c, sizeof(*i2c));
1754
1755 bsg_reply->reply_data.vendor_reply.vendor_rsp[0] = 0;
1756
1757 dealloc:
1758 dma_pool_free(ha->s_dma_pool, sfp, sfp_dma);
1759
1760 done:
1761 bsg_job->reply_len = sizeof(struct fc_bsg_reply);
1762 bsg_reply->reply_payload_rcv_len = sizeof(*i2c);
1763 bsg_reply->result = DID_OK << 16;
1764 bsg_job_done(bsg_job, bsg_reply->result,
1765 bsg_reply->reply_payload_rcv_len);
1766
1767 return 0;
1768 }
1769
1770 static int
1771 qla24xx_process_bidir_cmd(struct bsg_job *bsg_job)
1772 {
1773 struct fc_bsg_reply *bsg_reply = bsg_job->reply;
1774 struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
1775 scsi_qla_host_t *vha = shost_priv(host);
1776 struct qla_hw_data *ha = vha->hw;
1777 uint32_t rval = EXT_STATUS_OK;
1778 uint16_t req_sg_cnt = 0;
1779 uint16_t rsp_sg_cnt = 0;
1780 uint16_t nextlid = 0;
1781 uint32_t tot_dsds;
1782 srb_t *sp = NULL;
1783 uint32_t req_data_len = 0;
1784 uint32_t rsp_data_len = 0;
1785
1786 /* Check the type of the adapter */
1787 if (!IS_BIDI_CAPABLE(ha)) {
1788 ql_log(ql_log_warn, vha, 0x70a0,
1789 "This adapter is not supported\n");
1790 rval = EXT_STATUS_NOT_SUPPORTED;
1791 goto done;
1792 }
1793
1794 if (test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags) ||
1795 test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags) ||
1796 test_bit(ISP_ABORT_RETRY, &vha->dpc_flags)) {
1797 rval = EXT_STATUS_BUSY;
1798 goto done;
1799 }
1800
1801 /* Check if host is online */
1802 if (!vha->flags.online) {
1803 ql_log(ql_log_warn, vha, 0x70a1,
1804 "Host is not online\n");
1805 rval = EXT_STATUS_DEVICE_OFFLINE;
1806 goto done;
1807 }
1808
1809 /* Check if cable is plugged in or not */
1810 if (vha->device_flags & DFLG_NO_CABLE) {
1811 ql_log(ql_log_warn, vha, 0x70a2,
1812 "Cable is unplugged...\n");
1813 rval = EXT_STATUS_INVALID_CFG;
1814 goto done;
1815 }
1816
1817 /* Check if the switch is connected or not */
1818 if (ha->current_topology != ISP_CFG_F) {
1819 ql_log(ql_log_warn, vha, 0x70a3,
1820 "Host is not connected to the switch\n");
1821 rval = EXT_STATUS_INVALID_CFG;
1822 goto done;
1823 }
1824
1825 /* Check if operating mode is P2P */
1826 if (ha->operating_mode != P2P) {
1827 ql_log(ql_log_warn, vha, 0x70a4,
1828 "Host operating mode is not P2P\n");
1829 rval = EXT_STATUS_INVALID_CFG;
1830 goto done;
1831 }
1832
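/*
 * Bidirectional IOCBs target the port itself, so perform a one-time
 * fabric self-login and cache the resulting loop id in
 * self_login_loop_id for reuse by subsequent requests.
 */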
1833 mutex_lock(&ha->selflogin_lock);
1834 if (vha->self_login_loop_id == 0) {
1835 /* Initialize all required fields of fcport */
1836 vha->bidir_fcport.vha = vha;
1837 vha->bidir_fcport.d_id.b.al_pa = vha->d_id.b.al_pa;
1838 vha->bidir_fcport.d_id.b.area = vha->d_id.b.area;
1839 vha->bidir_fcport.d_id.b.domain = vha->d_id.b.domain;
1840 vha->bidir_fcport.loop_id = vha->loop_id;
1841
1842 if (qla2x00_fabric_login(vha, &(vha->bidir_fcport), &nextlid)) {
1843 ql_log(ql_log_warn, vha, 0x70a7,
1844 "Failed to login port %06X for bidirectional IOCB\n",
1845 vha->bidir_fcport.d_id.b24);
1846 mutex_unlock(&ha->selflogin_lock);
1847 rval = EXT_STATUS_MAILBOX;
1848 goto done;
1849 }
1850 vha->self_login_loop_id = nextlid - 1;
1851
1852 }
1853 /* Assign the self login loop id to fcport */
1854 mutex_unlock(&ha->selflogin_lock);
1855
1856 vha->bidir_fcport.loop_id = vha->self_login_loop_id;
1857
1858 req_sg_cnt = dma_map_sg(&ha->pdev->dev,
1859 bsg_job->request_payload.sg_list,
1860 bsg_job->request_payload.sg_cnt,
1861 DMA_TO_DEVICE);
1862
1863 if (!req_sg_cnt) {
1864 rval = EXT_STATUS_NO_MEMORY;
1865 goto done;
1866 }
1867
1868 rsp_sg_cnt = dma_map_sg(&ha->pdev->dev,
1869 bsg_job->reply_payload.sg_list, bsg_job->reply_payload.sg_cnt,
1870 DMA_FROM_DEVICE);
1871
1872 if (!rsp_sg_cnt) {
1873 rval = EXT_STATUS_NO_MEMORY;
1874 goto done_unmap_req_sg;
1875 }
1876
1877 if ((req_sg_cnt != bsg_job->request_payload.sg_cnt) ||
1878 (rsp_sg_cnt != bsg_job->reply_payload.sg_cnt)) {
1879 ql_dbg(ql_dbg_user, vha, 0x70a9,
1880 "Dma mapping resulted in different sg counts "
1881 "[request_sg_cnt: %x dma_request_sg_cnt: %x reply_sg_cnt: "
1882 "%x dma_reply_sg_cnt: %x]\n",
1883 bsg_job->request_payload.sg_cnt, req_sg_cnt,
1884 bsg_job->reply_payload.sg_cnt, rsp_sg_cnt);
1885 rval = EXT_STATUS_NO_MEMORY;
1886 goto done_unmap_sg;
1887 }
1888
1889 req_data_len = bsg_job->request_payload.payload_len;
1890 rsp_data_len = bsg_job->reply_payload.payload_len;
1891
1892 if (req_data_len != rsp_data_len) {
1893 rval = EXT_STATUS_BUSY;
1894 ql_log(ql_log_warn, vha, 0x70aa,
1895 "req_data_len != rsp_data_len\n");
1896 goto done_unmap_sg;
1897 }
1898
1899
1900 /* Alloc SRB structure */
1901 sp = qla2x00_get_sp(vha, &(vha->bidir_fcport), GFP_KERNEL);
1902 if (!sp) {
1903 ql_dbg(ql_dbg_user, vha, 0x70ac,
1904 "Alloc SRB structure failed\n");
1905 rval = EXT_STATUS_NO_MEMORY;
1906 goto done_unmap_sg;
1907 }
1908
1909 	/* Populate srb->ctx with bidir ctx */
1910 sp->u.bsg_job = bsg_job;
1911 sp->free = qla2x00_bsg_sp_free;
1912 sp->type = SRB_BIDI_CMD;
1913 sp->done = qla2x00_bsg_job_done;
1914
1915 /* Add the read and write sg count */
1916 tot_dsds = rsp_sg_cnt + req_sg_cnt;
1917
1918 rval = qla2x00_start_bidir(sp, vha, tot_dsds);
1919 if (rval != EXT_STATUS_OK)
1920 goto done_free_srb;
1921 /* the bsg request will be completed in the interrupt handler */
1922 return rval;
1923
1924 done_free_srb:
1925 mempool_free(sp, ha->srb_mempool);
1926 done_unmap_sg:
1927 dma_unmap_sg(&ha->pdev->dev,
1928 bsg_job->reply_payload.sg_list,
1929 bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
1930 done_unmap_req_sg:
1931 dma_unmap_sg(&ha->pdev->dev,
1932 bsg_job->request_payload.sg_list,
1933 bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
1934 done:
1935
1936 	/* Return the error in a vendor-specific response
1937 	 * and complete the BSG request.
1938 	 */
1939 bsg_reply->reply_data.vendor_reply.vendor_rsp[0] = rval;
1940 bsg_job->reply_len = sizeof(struct fc_bsg_reply);
1941 bsg_reply->reply_payload_rcv_len = 0;
1942 	bsg_reply->result = DID_OK << 16;
1943 bsg_job_done(bsg_job, bsg_reply->result,
1944 bsg_reply->reply_payload_rcv_len);
1945 /* Always return success, vendor rsp carries correct status */
1946 return 0;
1947 }
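/*
 * Illustrative only: the bidirectional command above arrives as an FC
 * BSG host vendor request.  A minimal user-space sketch (assuming
 * submission through the fc_host bsg node; buffer sizing and the SG_IO
 * plumbing are omitted) might build the request like this:
 *
 *	uint32_t buf[32] = { 0 };
 *	struct fc_bsg_request *req = (struct fc_bsg_request *)buf;
 *
 *	req->msgcode = FC_BSG_HST_VENDOR;
 *	req->rqst_data.h_vendor.vendor_cmd[0] = QL_VND_DIAG_IO_CMD;
 *
 * with equal-length request and reply payloads attached, as enforced
 * above.
 */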
1948
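/*
 * Pass a management IOCB from the BSG layer through to an ISPFx00
 * adapter.  The vendor command embeds a struct qla_mt_iocb_rqst_fx00
 * at vendor_cmd[1]; the request and reply payloads are DMA-mapped only
 * when the matching SRB_FXDISC_*_DMA_VALID flag is set.
 */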
1949 static int
1950 qlafx00_mgmt_cmd(struct bsg_job *bsg_job)
1951 {
1952 struct fc_bsg_request *bsg_request = bsg_job->request;
1953 struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
1954 scsi_qla_host_t *vha = shost_priv(host);
1955 struct qla_hw_data *ha = vha->hw;
1956 int rval = (DRIVER_ERROR << 16);
1957 struct qla_mt_iocb_rqst_fx00 *piocb_rqst;
1958 srb_t *sp;
1959 int req_sg_cnt = 0, rsp_sg_cnt = 0;
1960 struct fc_port *fcport;
1961 char *type = "FC_BSG_HST_FX_MGMT";
1962
1963 /* Copy the IOCB specific information */
1964 piocb_rqst = (struct qla_mt_iocb_rqst_fx00 *)
1965 &bsg_request->rqst_data.h_vendor.vendor_cmd[1];
1966
1967 /* Dump the vendor information */
1968 	ql_dump_buffer(ql_dbg_user + ql_dbg_verbose, vha, 0x70cf,
1969 (uint8_t *)piocb_rqst, sizeof(struct qla_mt_iocb_rqst_fx00));
1970
1971 if (!vha->flags.online) {
1972 ql_log(ql_log_warn, vha, 0x70d0,
1973 "Host is not online.\n");
1974 rval = -EIO;
1975 goto done;
1976 }
1977
1978 if (piocb_rqst->flags & SRB_FXDISC_REQ_DMA_VALID) {
1979 req_sg_cnt = dma_map_sg(&ha->pdev->dev,
1980 bsg_job->request_payload.sg_list,
1981 bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
1982 if (!req_sg_cnt) {
1983 ql_log(ql_log_warn, vha, 0x70c7,
1984 "dma_map_sg return %d for request\n", req_sg_cnt);
1985 rval = -ENOMEM;
1986 goto done;
1987 }
1988 }
1989
1990 if (piocb_rqst->flags & SRB_FXDISC_RESP_DMA_VALID) {
1991 rsp_sg_cnt = dma_map_sg(&ha->pdev->dev,
1992 bsg_job->reply_payload.sg_list,
1993 bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
1994 if (!rsp_sg_cnt) {
1995 ql_log(ql_log_warn, vha, 0x70c8,
1996 "dma_map_sg return %d for reply\n", rsp_sg_cnt);
1997 rval = -ENOMEM;
1998 goto done_unmap_req_sg;
1999 }
2000 }
2001
2002 ql_dbg(ql_dbg_user, vha, 0x70c9,
2003 "request_sg_cnt: %x dma_request_sg_cnt: %x reply_sg_cnt:%x "
2004 "dma_reply_sg_cnt: %x\n", bsg_job->request_payload.sg_cnt,
2005 req_sg_cnt, bsg_job->reply_payload.sg_cnt, rsp_sg_cnt);
2006
2007 	/* Allocate a dummy fcport structure; the functions preparing the
2008 	 * IOCB and mailbox command retrieve port-specific information
2009 	 * from the fcport structure. For host-based ELS commands no
2010 	 * fcport structure is allocated.
2011 	 */
2012 fcport = qla2x00_alloc_fcport(vha, GFP_KERNEL);
2013 if (!fcport) {
2014 ql_log(ql_log_warn, vha, 0x70ca,
2015 "Failed to allocate fcport.\n");
2016 rval = -ENOMEM;
2017 goto done_unmap_rsp_sg;
2018 }
2019
2020 /* Alloc SRB structure */
2021 sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
2022 if (!sp) {
2023 ql_log(ql_log_warn, vha, 0x70cb,
2024 "qla2x00_get_sp failed.\n");
2025 rval = -ENOMEM;
2026 goto done_free_fcport;
2027 }
2028
2029 /* Initialize all required fields of fcport */
2030 fcport->vha = vha;
2031 fcport->loop_id = piocb_rqst->dataword;
2032
2033 sp->type = SRB_FXIOCB_BCMD;
2034 sp->name = "bsg_fx_mgmt";
2035 sp->iocbs = qla24xx_calc_ct_iocbs(req_sg_cnt + rsp_sg_cnt);
2036 sp->u.bsg_job = bsg_job;
2037 sp->free = qla2x00_bsg_sp_free;
2038 sp->done = qla2x00_bsg_job_done;
2039
2040 ql_dbg(ql_dbg_user, vha, 0x70cc,
2041 "bsg rqst type: %s fx_mgmt_type: %x id=%x\n",
2042 type, piocb_rqst->func_type, fcport->loop_id);
2043
2044 rval = qla2x00_start_sp(sp);
2045 if (rval != QLA_SUCCESS) {
2046 ql_log(ql_log_warn, vha, 0x70cd,
2047 "qla2x00_start_sp failed=%d.\n", rval);
2048 mempool_free(sp, ha->srb_mempool);
2049 rval = -EIO;
2050 goto done_free_fcport;
2051 }
2052 return rval;
2053
2054 done_free_fcport:
2055 kfree(fcport);
2056
2057 done_unmap_rsp_sg:
2058 if (piocb_rqst->flags & SRB_FXDISC_RESP_DMA_VALID)
2059 dma_unmap_sg(&ha->pdev->dev,
2060 bsg_job->reply_payload.sg_list,
2061 bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
2062 done_unmap_req_sg:
2063 if (piocb_rqst->flags & SRB_FXDISC_REQ_DMA_VALID)
2064 dma_unmap_sg(&ha->pdev->dev,
2065 bsg_job->request_payload.sg_list,
2066 bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
2067
2068 done:
2069 return rval;
2070 }
2071
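/*
 * Single SerDes register access (QL_VND_SERDES_OP).  The request
 * payload carries a struct qla_serdes_reg; a hypothetical read request
 * (field names as used below, address made up) would look like:
 *
 *	struct qla_serdes_reg sr = {
 *		.cmd  = INT_SC_SERDES_READ_REG,
 *		.addr = 0x1234,
 *	};
 *
 * On a read, the structure is copied back through the reply payload
 * with .val filled in.
 */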
2072 static int
2073 qla26xx_serdes_op(struct bsg_job *bsg_job)
2074 {
2075 struct fc_bsg_reply *bsg_reply = bsg_job->reply;
2076 struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
2077 scsi_qla_host_t *vha = shost_priv(host);
2078 int rval = 0;
2079 struct qla_serdes_reg sr;
2080
2081 memset(&sr, 0, sizeof(sr));
2082
2083 sg_copy_to_buffer(bsg_job->request_payload.sg_list,
2084 bsg_job->request_payload.sg_cnt, &sr, sizeof(sr));
2085
2086 switch (sr.cmd) {
2087 case INT_SC_SERDES_WRITE_REG:
2088 rval = qla2x00_write_serdes_word(vha, sr.addr, sr.val);
2089 bsg_reply->reply_payload_rcv_len = 0;
2090 break;
2091 case INT_SC_SERDES_READ_REG:
2092 rval = qla2x00_read_serdes_word(vha, sr.addr, &sr.val);
2093 sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
2094 bsg_job->reply_payload.sg_cnt, &sr, sizeof(sr));
2095 bsg_reply->reply_payload_rcv_len = sizeof(sr);
2096 break;
2097 default:
2098 ql_dbg(ql_dbg_user, vha, 0x708c,
2099 "Unknown serdes cmd %x.\n", sr.cmd);
2100 rval = -EINVAL;
2101 break;
2102 }
2103
2104 bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
2105 rval ? EXT_STATUS_MAILBOX : 0;
2106
2107 bsg_job->reply_len = sizeof(struct fc_bsg_reply);
2108 bsg_reply->result = DID_OK << 16;
2109 bsg_job_done(bsg_job, bsg_reply->result,
2110 bsg_reply->reply_payload_rcv_len);
2111 return 0;
2112 }
2113
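/*
 * Extended SerDes register access (QL_VND_SERDES_OP_EX) for ISP8044:
 * same flow as qla26xx_serdes_op(), but using the wider struct
 * qla_serdes_reg_ex and the qla8044_{read,write}_serdes_word()
 * helpers.
 */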
2114 static int
2115 qla8044_serdes_op(struct bsg_job *bsg_job)
2116 {
2117 struct fc_bsg_reply *bsg_reply = bsg_job->reply;
2118 struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
2119 scsi_qla_host_t *vha = shost_priv(host);
2120 int rval = 0;
2121 struct qla_serdes_reg_ex sr;
2122
2123 memset(&sr, 0, sizeof(sr));
2124
2125 sg_copy_to_buffer(bsg_job->request_payload.sg_list,
2126 bsg_job->request_payload.sg_cnt, &sr, sizeof(sr));
2127
2128 switch (sr.cmd) {
2129 case INT_SC_SERDES_WRITE_REG:
2130 rval = qla8044_write_serdes_word(vha, sr.addr, sr.val);
2131 bsg_reply->reply_payload_rcv_len = 0;
2132 break;
2133 case INT_SC_SERDES_READ_REG:
2134 rval = qla8044_read_serdes_word(vha, sr.addr, &sr.val);
2135 sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
2136 bsg_job->reply_payload.sg_cnt, &sr, sizeof(sr));
2137 bsg_reply->reply_payload_rcv_len = sizeof(sr);
2138 break;
2139 default:
2140 ql_dbg(ql_dbg_user, vha, 0x70cf,
2141 "Unknown serdes cmd %x.\n", sr.cmd);
2142 rval = -EINVAL;
2143 break;
2144 }
2145
2146 bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
2147 rval ? EXT_STATUS_MAILBOX : 0;
2148
2149 bsg_job->reply_len = sizeof(struct fc_bsg_reply);
2150 bsg_reply->result = DID_OK << 16;
2151 bsg_job_done(bsg_job, bsg_reply->result,
2152 bsg_reply->reply_payload_rcv_len);
2153 return 0;
2154 }
2155
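/*
 * Report the running firmware's flash-update capability word.  The
 * 64-bit value returned to the application is assembled from the four
 * 16-bit firmware attribute words:
 *
 *	bits 63:48	fw_attributes_ext[1]
 *	bits 47:32	fw_attributes_ext[0]
 *	bits 31:16	fw_attributes_h
 *	bits 15:0	fw_attributes
 */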
2156 static int
2157 qla27xx_get_flash_upd_cap(struct bsg_job *bsg_job)
2158 {
2159 struct fc_bsg_reply *bsg_reply = bsg_job->reply;
2160 struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
2161 scsi_qla_host_t *vha = shost_priv(host);
2162 struct qla_hw_data *ha = vha->hw;
2163 struct qla_flash_update_caps cap;
2164
2165 	if (!IS_QLA27XX(ha))
2166 return -EPERM;
2167
2168 memset(&cap, 0, sizeof(cap));
2169 cap.capabilities = (uint64_t)ha->fw_attributes_ext[1] << 48 |
2170 (uint64_t)ha->fw_attributes_ext[0] << 32 |
2171 (uint64_t)ha->fw_attributes_h << 16 |
2172 (uint64_t)ha->fw_attributes;
2173
2174 sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
2175 bsg_job->reply_payload.sg_cnt, &cap, sizeof(cap));
2176 bsg_reply->reply_payload_rcv_len = sizeof(cap);
2177
2178 bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
2179 EXT_STATUS_OK;
2180
2181 bsg_job->reply_len = sizeof(struct fc_bsg_reply);
2182 bsg_reply->result = DID_OK << 16;
2183 bsg_job_done(bsg_job, bsg_reply->result,
2184 bsg_reply->reply_payload_rcv_len);
2185 return 0;
2186 }
2187
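/*
 * Accept new flash-update capabilities from the application.  The
 * request must echo the capability word of the running firmware and
 * specify an outage duration of at least MAX_LOOP_TIMEOUT; anything
 * else is rejected with EXT_STATUS_INVALID_PARAM in the vendor
 * response.
 */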
2188 static int
2189 qla27xx_set_flash_upd_cap(struct bsg_job *bsg_job)
2190 {
2191 struct fc_bsg_reply *bsg_reply = bsg_job->reply;
2192 struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
2193 scsi_qla_host_t *vha = shost_priv(host);
2194 struct qla_hw_data *ha = vha->hw;
2195 uint64_t online_fw_attr = 0;
2196 struct qla_flash_update_caps cap;
2197
2198 	if (!IS_QLA27XX(ha))
2199 return -EPERM;
2200
2201 memset(&cap, 0, sizeof(cap));
2202 sg_copy_to_buffer(bsg_job->request_payload.sg_list,
2203 bsg_job->request_payload.sg_cnt, &cap, sizeof(cap));
2204
2205 online_fw_attr = (uint64_t)ha->fw_attributes_ext[1] << 48 |
2206 (uint64_t)ha->fw_attributes_ext[0] << 32 |
2207 (uint64_t)ha->fw_attributes_h << 16 |
2208 (uint64_t)ha->fw_attributes;
2209
2210 if (online_fw_attr != cap.capabilities) {
2211 bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
2212 EXT_STATUS_INVALID_PARAM;
2213 return -EINVAL;
2214 }
2215
2216 if (cap.outage_duration < MAX_LOOP_TIMEOUT) {
2217 bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
2218 EXT_STATUS_INVALID_PARAM;
2219 return -EINVAL;
2220 }
2221
2222 bsg_reply->reply_payload_rcv_len = 0;
2223
2224 bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
2225 EXT_STATUS_OK;
2226
2227 bsg_job->reply_len = sizeof(struct fc_bsg_reply);
2228 bsg_reply->result = DID_OK << 16;
2229 bsg_job_done(bsg_job, bsg_reply->result,
2230 bsg_reply->reply_payload_rcv_len);
2231 return 0;
2232 }
2233
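/*
 * Report buffer-to-buffer credit recovery (BBCR) state.  vha->bbcr
 * caches the firmware-reported word: bit 12 is the offline flag,
 * bits 11:8 hold the negotiated BBSCN and bits 3:0 the configured
 * BBSCN.
 */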
2234 static int
2235 qla27xx_get_bbcr_data(struct bsg_job *bsg_job)
2236 {
2237 struct fc_bsg_reply *bsg_reply = bsg_job->reply;
2238 struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
2239 scsi_qla_host_t *vha = shost_priv(host);
2240 struct qla_hw_data *ha = vha->hw;
2241 struct qla_bbcr_data bbcr;
2242 uint16_t loop_id, topo, sw_cap;
2243 uint8_t domain, area, al_pa, state;
2244 int rval;
2245
2246 	if (!IS_QLA27XX(ha))
2247 return -EPERM;
2248
2249 memset(&bbcr, 0, sizeof(bbcr));
2250
2251 if (vha->flags.bbcr_enable)
2252 bbcr.status = QLA_BBCR_STATUS_ENABLED;
2253 else
2254 bbcr.status = QLA_BBCR_STATUS_DISABLED;
2255
2256 if (bbcr.status == QLA_BBCR_STATUS_ENABLED) {
2257 rval = qla2x00_get_adapter_id(vha, &loop_id, &al_pa,
2258 &area, &domain, &topo, &sw_cap);
2259 if (rval != QLA_SUCCESS) {
2260 bbcr.status = QLA_BBCR_STATUS_UNKNOWN;
2261 bbcr.state = QLA_BBCR_STATE_OFFLINE;
2262 bbcr.mbx1 = loop_id;
2263 goto done;
2264 }
2265
2266 state = (vha->bbcr >> 12) & 0x1;
2267
2268 if (state) {
2269 bbcr.state = QLA_BBCR_STATE_OFFLINE;
2270 bbcr.offline_reason_code = QLA_BBCR_REASON_LOGIN_REJECT;
2271 } else {
2272 bbcr.state = QLA_BBCR_STATE_ONLINE;
2273 bbcr.negotiated_bbscn = (vha->bbcr >> 8) & 0xf;
2274 }
2275
2276 bbcr.configured_bbscn = vha->bbcr & 0xf;
2277 }
2278
2279 done:
2280 sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
2281 bsg_job->reply_payload.sg_cnt, &bbcr, sizeof(bbcr));
2282 bsg_reply->reply_payload_rcv_len = sizeof(bbcr);
2283
2284 bsg_reply->reply_data.vendor_reply.vendor_rsp[0] = EXT_STATUS_OK;
2285
2286 bsg_job->reply_len = sizeof(struct fc_bsg_reply);
2287 bsg_reply->result = DID_OK << 16;
2288 bsg_job_done(bsg_job, bsg_reply->result,
2289 bsg_reply->reply_payload_rcv_len);
2290 return 0;
2291 }
2292
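/*
 * Return the adapter's private link statistics.  The stats block is
 * DMA'ed in by the firmware via qla24xx_get_isp_stats() on the base
 * port, so a coherent buffer is allocated for the duration of the
 * mailbox command.  QL_VND_GET_PRIV_STATS_EX additionally passes an
 * options word in vendor_cmd[1].
 */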
2293 static int
2294 qla2x00_get_priv_stats(struct bsg_job *bsg_job)
2295 {
2296 struct fc_bsg_request *bsg_request = bsg_job->request;
2297 struct fc_bsg_reply *bsg_reply = bsg_job->reply;
2298 struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
2299 scsi_qla_host_t *vha = shost_priv(host);
2300 struct qla_hw_data *ha = vha->hw;
2301 struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
2302 struct link_statistics *stats = NULL;
2303 dma_addr_t stats_dma;
2304 int rval;
2305 uint32_t *cmd = bsg_request->rqst_data.h_vendor.vendor_cmd;
2306 uint options = cmd[0] == QL_VND_GET_PRIV_STATS_EX ? cmd[1] : 0;
2307
2308 if (test_bit(UNLOADING, &vha->dpc_flags))
2309 return -ENODEV;
2310
2311 if (unlikely(pci_channel_offline(ha->pdev)))
2312 return -ENODEV;
2313
2314 if (qla2x00_reset_active(vha))
2315 return -EBUSY;
2316
2317 if (!IS_FWI2_CAPABLE(ha))
2318 return -EPERM;
2319
2320 stats = dma_alloc_coherent(&ha->pdev->dev,
2321 sizeof(*stats), &stats_dma, GFP_KERNEL);
2322 if (!stats) {
2323 ql_log(ql_log_warn, vha, 0x70e2,
2324 "Failed to allocate memory for stats.\n");
2325 return -ENOMEM;
2326 }
2327
2328 memset(stats, 0, sizeof(*stats));
2329
2330 rval = qla24xx_get_isp_stats(base_vha, stats, stats_dma, options);
2331
2332 if (rval == QLA_SUCCESS) {
2333 ql_dump_buffer(ql_dbg_user + ql_dbg_verbose, vha, 0x70e3,
2334 (uint8_t *)stats, sizeof(*stats));
2335 sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
2336 bsg_job->reply_payload.sg_cnt, stats, sizeof(*stats));
2337 }
2338
2339 bsg_reply->reply_payload_rcv_len = sizeof(*stats);
2340 bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
2341 rval ? EXT_STATUS_MAILBOX : EXT_STATUS_OK;
2342
2343 bsg_job->reply_len = sizeof(*bsg_reply);
2344 bsg_reply->result = DID_OK << 16;
2345 bsg_job_done(bsg_job, bsg_reply->result,
2346 bsg_reply->reply_payload_rcv_len);
2347
2348 dma_free_coherent(&ha->pdev->dev, sizeof(*stats),
2349 stats, stats_dma);
2350
2351 return 0;
2352 }
2353
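/*
 * Run D-Port diagnostics (ISP83xx/ISP27xx only).  The user buffer
 * supplies the options word and receives the raw diagnostic results.
 */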
2354 static int
2355 qla2x00_do_dport_diagnostics(struct bsg_job *bsg_job)
2356 {
2357 struct fc_bsg_reply *bsg_reply = bsg_job->reply;
2358 struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
2359 scsi_qla_host_t *vha = shost_priv(host);
2360 int rval;
2361 struct qla_dport_diag *dd;
2362
2363 if (!IS_QLA83XX(vha->hw) && !IS_QLA27XX(vha->hw))
2364 return -EPERM;
2365
2366 dd = kmalloc(sizeof(*dd), GFP_KERNEL);
2367 if (!dd) {
2368 ql_log(ql_log_warn, vha, 0x70db,
2369 "Failed to allocate memory for dport.\n");
2370 return -ENOMEM;
2371 }
2372
2373 sg_copy_to_buffer(bsg_job->request_payload.sg_list,
2374 bsg_job->request_payload.sg_cnt, dd, sizeof(*dd));
2375
2376 rval = qla26xx_dport_diagnostics(
2377 vha, dd->buf, sizeof(dd->buf), dd->options);
2378 if (rval == QLA_SUCCESS) {
2379 sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
2380 bsg_job->reply_payload.sg_cnt, dd, sizeof(*dd));
2381 }
2382
2383 bsg_reply->reply_payload_rcv_len = sizeof(*dd);
2384 bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
2385 rval ? EXT_STATUS_MAILBOX : EXT_STATUS_OK;
2386
2387 bsg_job->reply_len = sizeof(*bsg_reply);
2388 bsg_reply->result = DID_OK << 16;
2389 bsg_job_done(bsg_job, bsg_reply->result,
2390 bsg_reply->reply_payload_rcv_len);
2391
2392 kfree(dd);
2393
2394 return 0;
2395 }
2396
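/* Dispatch a host vendor command based on vendor_cmd[0]. */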
2397 static int
2398 qla2x00_process_vendor_specific(struct bsg_job *bsg_job)
2399 {
2400 struct fc_bsg_request *bsg_request = bsg_job->request;
2401
2402 switch (bsg_request->rqst_data.h_vendor.vendor_cmd[0]) {
2403 case QL_VND_LOOPBACK:
2404 return qla2x00_process_loopback(bsg_job);
2405
2406 case QL_VND_A84_RESET:
2407 return qla84xx_reset(bsg_job);
2408
2409 case QL_VND_A84_UPDATE_FW:
2410 return qla84xx_updatefw(bsg_job);
2411
2412 case QL_VND_A84_MGMT_CMD:
2413 return qla84xx_mgmt_cmd(bsg_job);
2414
2415 case QL_VND_IIDMA:
2416 return qla24xx_iidma(bsg_job);
2417
2418 case QL_VND_FCP_PRIO_CFG_CMD:
2419 return qla24xx_proc_fcp_prio_cfg_cmd(bsg_job);
2420
2421 case QL_VND_READ_FLASH:
2422 return qla2x00_read_optrom(bsg_job);
2423
2424 case QL_VND_UPDATE_FLASH:
2425 return qla2x00_update_optrom(bsg_job);
2426
2427 case QL_VND_SET_FRU_VERSION:
2428 return qla2x00_update_fru_versions(bsg_job);
2429
2430 case QL_VND_READ_FRU_STATUS:
2431 return qla2x00_read_fru_status(bsg_job);
2432
2433 case QL_VND_WRITE_FRU_STATUS:
2434 return qla2x00_write_fru_status(bsg_job);
2435
2436 case QL_VND_WRITE_I2C:
2437 return qla2x00_write_i2c(bsg_job);
2438
2439 case QL_VND_READ_I2C:
2440 return qla2x00_read_i2c(bsg_job);
2441
2442 case QL_VND_DIAG_IO_CMD:
2443 return qla24xx_process_bidir_cmd(bsg_job);
2444
2445 case QL_VND_FX00_MGMT_CMD:
2446 return qlafx00_mgmt_cmd(bsg_job);
2447
2448 case QL_VND_SERDES_OP:
2449 return qla26xx_serdes_op(bsg_job);
2450
2451 case QL_VND_SERDES_OP_EX:
2452 return qla8044_serdes_op(bsg_job);
2453
2454 case QL_VND_GET_FLASH_UPDATE_CAPS:
2455 return qla27xx_get_flash_upd_cap(bsg_job);
2456
2457 case QL_VND_SET_FLASH_UPDATE_CAPS:
2458 return qla27xx_set_flash_upd_cap(bsg_job);
2459
2460 case QL_VND_GET_BBCR_DATA:
2461 return qla27xx_get_bbcr_data(bsg_job);
2462
2463 case QL_VND_GET_PRIV_STATS:
2464 case QL_VND_GET_PRIV_STATS_EX:
2465 return qla2x00_get_priv_stats(bsg_job);
2466
2467 case QL_VND_DPORT_DIAGNOSTICS:
2468 return qla2x00_do_dport_diagnostics(bsg_job);
2469
2470 default:
2471 return -ENOSYS;
2472 }
2473 }
2474
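/*
 * Entry point for all FC BSG requests routed to this driver.  For
 * rport-directed ELS requests the scsi_qla_host is resolved through
 * the rport's Scsi_Host, otherwise through the bsg_job's host, and the
 * request is then dispatched on msgcode.
 */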
2475 int
2476 qla24xx_bsg_request(struct bsg_job *bsg_job)
2477 {
2478 struct fc_bsg_request *bsg_request = bsg_job->request;
2479 struct fc_bsg_reply *bsg_reply = bsg_job->reply;
2480 int ret = -EINVAL;
2481 struct fc_rport *rport;
2482 struct Scsi_Host *host;
2483 scsi_qla_host_t *vha;
2484
2485 	/* In case no data is transferred. */
2486 bsg_reply->reply_payload_rcv_len = 0;
2487
2488 if (bsg_request->msgcode == FC_BSG_RPT_ELS) {
2489 rport = fc_bsg_to_rport(bsg_job);
2490 host = rport_to_shost(rport);
2491 vha = shost_priv(host);
2492 } else {
2493 host = fc_bsg_to_shost(bsg_job);
2494 vha = shost_priv(host);
2495 }
2496
2497 if (qla2x00_reset_active(vha)) {
2498 ql_dbg(ql_dbg_user, vha, 0x709f,
2499 "BSG: ISP abort active/needed -- cmd=%d.\n",
2500 bsg_request->msgcode);
2501 return -EBUSY;
2502 }
2503
2504 ql_dbg(ql_dbg_user, vha, 0x7000,
2505 "Entered %s msgcode=0x%x.\n", __func__, bsg_request->msgcode);
2506
2507 switch (bsg_request->msgcode) {
2508 case FC_BSG_RPT_ELS:
2509 case FC_BSG_HST_ELS_NOLOGIN:
2510 ret = qla2x00_process_els(bsg_job);
2511 break;
2512 case FC_BSG_HST_CT:
2513 ret = qla2x00_process_ct(bsg_job);
2514 break;
2515 case FC_BSG_HST_VENDOR:
2516 ret = qla2x00_process_vendor_specific(bsg_job);
2517 break;
2518 case FC_BSG_HST_ADD_RPORT:
2519 case FC_BSG_HST_DEL_RPORT:
2520 case FC_BSG_RPT_CT:
2521 default:
2522 ql_log(ql_log_warn, vha, 0x705a, "Unsupported BSG request.\n");
2523 break;
2524 }
2525 return ret;
2526 }
2527
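/*
 * Called when a BSG request times out.  Walk the outstanding-command
 * slots of every request queue under the hardware lock; if the srb
 * owning this bsg_job is found, pull it from the table and attempt a
 * mailbox abort of the command.
 */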
2528 int
2529 qla24xx_bsg_timeout(struct bsg_job *bsg_job)
2530 {
2531 struct fc_bsg_reply *bsg_reply = bsg_job->reply;
2532 scsi_qla_host_t *vha = shost_priv(fc_bsg_to_shost(bsg_job));
2533 struct qla_hw_data *ha = vha->hw;
2534 srb_t *sp;
2535 int cnt, que;
2536 unsigned long flags;
2537 struct req_que *req;
2538
2539 /* find the bsg job from the active list of commands */
2540 spin_lock_irqsave(&ha->hardware_lock, flags);
2541 for (que = 0; que < ha->max_req_queues; que++) {
2542 req = ha->req_q_map[que];
2543 if (!req)
2544 continue;
2545
2546 for (cnt = 1; cnt < req->num_outstanding_cmds; cnt++) {
2547 sp = req->outstanding_cmds[cnt];
2548 if (sp) {
2549 if (((sp->type == SRB_CT_CMD) ||
2550 (sp->type == SRB_ELS_CMD_HST) ||
2551 (sp->type == SRB_FXIOCB_BCMD))
2552 && (sp->u.bsg_job == bsg_job)) {
2553 req->outstanding_cmds[cnt] = NULL;
2554 spin_unlock_irqrestore(&ha->hardware_lock, flags);
2555 if (ha->isp_ops->abort_command(sp)) {
2556 				if (ha->isp_ops->abort_command(sp)) {
2557 					ql_log(ql_log_warn, vha, 0x7089,
2558 					    "mbx abort_command failed.\n");
2559 					bsg_job->req->errors =
2560 					    bsg_reply->result = -EIO;
2561 				} else {
2562 					ql_dbg(ql_dbg_user, vha, 0x708a,
2563 					    "mbx abort_command success.\n");
2564 					bsg_job->req->errors =
2565 					    bsg_reply->result = 0;
2566 				}
2568 spin_lock_irqsave(&ha->hardware_lock, flags);
2569 goto done;
2570 }
2571 }
2572 }
2573 }
2574 spin_unlock_irqrestore(&ha->hardware_lock, flags);
2575 ql_log(ql_log_info, vha, 0x708b, "SRB not found to abort.\n");
2576 bsg_job->req->errors = bsg_reply->result = -ENXIO;
2577 return 0;
2578
2579 done:
2580 spin_unlock_irqrestore(&ha->hardware_lock, flags);
2581 sp->free(vha, sp);
2582 return 0;
2583 }