drivers/scsi/qla2xxx/qla_attr.c (git blame)
8482e118 1/*
fa90c54f 2 * QLogic Fibre Channel HBA Driver
01e58d8e 3 * Copyright (c) 2003-2008 QLogic Corporation
8482e118 4 *
fa90c54f 5 * See LICENSE.qla2xxx for copyright and licensing details.
8482e118 6 */
7#include "qla_def.h"
8
2c3dfe3f 9#include <linux/kthread.h>
7aaef27b 10#include <linux/vmalloc.h>
00eabe7c 11#include <linux/delay.h>
8482e118 12
a824ebb3 13static int qla24xx_vport_disable(struct fc_vport *, bool);
9a069e19 14static int qla84xx_reset(scsi_qla_host_t *, struct msg_echo_lb *, struct fc_bsg_job *);
15int qla84xx_reset_chip(scsi_qla_host_t *, uint16_t, uint16_t *);
16static int qla84xx_mgmt_cmd(scsi_qla_host_t *, struct msg_echo_lb *, struct fc_bsg_job *);
8482e118 17/* SYSFS attributes --------------------------------------------------------- */
18
19static ssize_t
91a69029 20qla2x00_sysfs_read_fw_dump(struct kobject *kobj,
21 struct bin_attribute *bin_attr,
22 char *buf, loff_t off, size_t count)
8482e118 23{
7b867cf7 24 struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj,
8482e118 25 struct device, kobj)));
7b867cf7 26 struct qla_hw_data *ha = vha->hw;
8482e118 27
28 if (ha->fw_dump_reading == 0)
29 return 0;
8482e118 30
b3dc9088 31 return memory_read_from_buffer(buf, count, &off, ha->fw_dump,
32 ha->fw_dump_len);
8482e118 33}
34
35static ssize_t
91a69029 36qla2x00_sysfs_write_fw_dump(struct kobject *kobj,
37 struct bin_attribute *bin_attr,
38 char *buf, loff_t off, size_t count)
8482e118 39{
7b867cf7 40 struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj,
8482e118 41 struct device, kobj)));
7b867cf7 42 struct qla_hw_data *ha = vha->hw;
8482e118 43 int reading;
8482e118 44
45 if (off != 0)
46 return (0);
47
48 reading = simple_strtol(buf, NULL, 10);
49 switch (reading) {
50 case 0:
a7a167bf 51 if (!ha->fw_dump_reading)
52 break;
8482e118 53
a7a167bf 54 qla_printk(KERN_INFO, ha,
7b867cf7 55 "Firmware dump cleared on (%ld).\n", vha->host_no);
a7a167bf 56
57 ha->fw_dump_reading = 0;
58 ha->fw_dumped = 0;
8482e118 59 break;
60 case 1:
d4e3e04d 61 if (ha->fw_dumped && !ha->fw_dump_reading) {
8482e118 62 ha->fw_dump_reading = 1;
63
8482e118 64 qla_printk(KERN_INFO, ha,
a7a167bf 65 "Raw firmware dump ready for read on (%ld).\n",
7b867cf7 66 vha->host_no);
8482e118 67 }
68 break;
a7a167bf 69 case 2:
7b867cf7 70 qla2x00_alloc_fw_dump(vha);
a7a167bf 71 break;
68af0811 72 case 3:
7b867cf7 73 qla2x00_system_error(vha);
68af0811 74 break;
8482e118 75 }
76 return (count);
77}
78
79static struct bin_attribute sysfs_fw_dump_attr = {
80 .attr = {
81 .name = "fw_dump",
82 .mode = S_IRUSR | S_IWUSR,
8482e118 83 },
84 .size = 0,
85 .read = qla2x00_sysfs_read_fw_dump,
86 .write = qla2x00_sysfs_write_fw_dump,
87};
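/*
 * Illustrative usage sketch, not part of the driver: the fw_dump node is
 * normally driven from user space along these lines (the sysfs path is an
 * assumption and depends on the host number):
 *
 *	echo 1 > /sys/class/scsi_host/hostN/device/fw_dump
 *	cat /sys/class/scsi_host/hostN/device/fw_dump > fw_dump.bin
 *	echo 0 > /sys/class/scsi_host/hostN/device/fw_dump
 *
 * Writing 1 exposes a previously captured dump for reading, 0 releases it,
 * 2 (re)allocates the dump buffer and 3 forces a firmware system error, as
 * handled by the switch in qla2x00_sysfs_write_fw_dump() above.
 */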
88
89static ssize_t
91a69029 90qla2x00_sysfs_read_nvram(struct kobject *kobj,
91 struct bin_attribute *bin_attr,
92 char *buf, loff_t off, size_t count)
8482e118 93{
7b867cf7 94 struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj,
8482e118 95 struct device, kobj)));
7b867cf7 96 struct qla_hw_data *ha = vha->hw;
8482e118 97
b3dc9088 98 if (!capable(CAP_SYS_ADMIN))
8482e118 99 return 0;
100
6749ce36 101 if (IS_NOCACHE_VPD_TYPE(ha))
8f979751 102 ha->isp_ops->read_optrom(vha, ha->nvram, ha->flt_region_nvram << 2,
6749ce36 103 ha->nvram_size);
b3dc9088 104 return memory_read_from_buffer(buf, count, &off, ha->nvram,
105 ha->nvram_size);
8482e118 106}
107
108static ssize_t
91a69029 109qla2x00_sysfs_write_nvram(struct kobject *kobj,
110 struct bin_attribute *bin_attr,
111 char *buf, loff_t off, size_t count)
8482e118 112{
7b867cf7 113 struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj,
8482e118 114 struct device, kobj)));
7b867cf7 115 struct qla_hw_data *ha = vha->hw;
8482e118 116 uint16_t cnt;
8482e118 117
3d79038f 118 if (!capable(CAP_SYS_ADMIN) || off != 0 || count != ha->nvram_size ||
119 !ha->isp_ops->write_nvram)
8482e118 120 return 0;
121
122 /* Checksum NVRAM. */
e428924c 123 if (IS_FWI2_CAPABLE(ha)) {
459c5378 124 uint32_t *iter;
125 uint32_t chksum;
126
127 iter = (uint32_t *)buf;
128 chksum = 0;
129 for (cnt = 0; cnt < ((count >> 2) - 1); cnt++)
130 chksum += le32_to_cpu(*iter++);
131 chksum = ~chksum + 1;
132 *iter = cpu_to_le32(chksum);
133 } else {
134 uint8_t *iter;
135 uint8_t chksum;
136
137 iter = (uint8_t *)buf;
138 chksum = 0;
139 for (cnt = 0; cnt < count - 1; cnt++)
140 chksum += *iter++;
141 chksum = ~chksum + 1;
142 *iter = chksum;
143 }
8482e118 144
2533cf67 145 if (qla2x00_wait_for_hba_online(vha) != QLA_SUCCESS) {
146 qla_printk(KERN_WARNING, ha,
147 "HBA not online, failing NVRAM update.\n");
148 return -EAGAIN;
149 }
150
8482e118 151 /* Write NVRAM. */
7b867cf7 152 ha->isp_ops->write_nvram(vha, (uint8_t *)buf, ha->nvram_base, count);
153 ha->isp_ops->read_nvram(vha, (uint8_t *)ha->nvram, ha->nvram_base,
281afe19 154 count);
8482e118 155
2533cf67 156 /* NVRAM settings take effect immediately. */
7b867cf7 157 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
2533cf67 158 qla2xxx_wake_dpc(vha);
159 qla2x00_wait_for_chip_reset(vha);
26b8d348 160
8482e118 161 return (count);
162}
163
164static struct bin_attribute sysfs_nvram_attr = {
165 .attr = {
166 .name = "nvram",
167 .mode = S_IRUSR | S_IWUSR,
8482e118 168 },
1b3f6365 169 .size = 512,
8482e118 170 .read = qla2x00_sysfs_read_nvram,
171 .write = qla2x00_sysfs_write_nvram,
172};
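/*
 * Usage sketch (illustrative only): a CAP_SYS_ADMIN task must rewrite the
 * NVRAM image in a single write of exactly ha->nvram_size bytes at offset
 * 0; the handler above recomputes the checksum and schedules an ISP abort
 * so the new settings take effect immediately, e.g.:
 *
 *	cat /sys/class/scsi_host/hostN/device/nvram > nvram.bak
 *	dd if=nvram.new of=/sys/class/scsi_host/hostN/device/nvram bs=512 count=1
 *
 * (512 matches the attribute size declared above; the write handler itself
 * insists on count == ha->nvram_size.)
 */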
173
854165f4 174static ssize_t
91a69029
ZR
175qla2x00_sysfs_read_optrom(struct kobject *kobj,
176 struct bin_attribute *bin_attr,
177 char *buf, loff_t off, size_t count)
854165f4 178{
7b867cf7 179 struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj,
854165f4 180 struct device, kobj)));
7b867cf7 181 struct qla_hw_data *ha = vha->hw;
854165f4
AV
182
183 if (ha->optrom_state != QLA_SREADING)
184 return 0;
854165f4 185
b3dc9088
AM
186 return memory_read_from_buffer(buf, count, &off, ha->optrom_buffer,
187 ha->optrom_region_size);
854165f4
AV
188}
189
190static ssize_t
91a69029
ZR
191qla2x00_sysfs_write_optrom(struct kobject *kobj,
192 struct bin_attribute *bin_attr,
193 char *buf, loff_t off, size_t count)
854165f4 194{
7b867cf7 195 struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj,
854165f4 196 struct device, kobj)));
7b867cf7 197 struct qla_hw_data *ha = vha->hw;
854165f4
AV
198
199 if (ha->optrom_state != QLA_SWRITING)
200 return -EINVAL;
b7cc176c 201 if (off > ha->optrom_region_size)
854165f4 202 return -ERANGE;
b7cc176c
JC
203 if (off + count > ha->optrom_region_size)
204 count = ha->optrom_region_size - off;
854165f4
AV
205
206 memcpy(&ha->optrom_buffer[off], buf, count);
207
208 return count;
209}
210
211static struct bin_attribute sysfs_optrom_attr = {
212 .attr = {
213 .name = "optrom",
214 .mode = S_IRUSR | S_IWUSR,
854165f4 215 },
c3a2f0df 216 .size = 0,
854165f4
AV
217 .read = qla2x00_sysfs_read_optrom,
218 .write = qla2x00_sysfs_write_optrom,
219};
220
221static ssize_t
91a69029
ZR
222qla2x00_sysfs_write_optrom_ctl(struct kobject *kobj,
223 struct bin_attribute *bin_attr,
224 char *buf, loff_t off, size_t count)
854165f4 225{
7b867cf7 226 struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj,
854165f4 227 struct device, kobj)));
7b867cf7
AC
228 struct qla_hw_data *ha = vha->hw;
229
b7cc176c
JC
230 uint32_t start = 0;
231 uint32_t size = ha->optrom_size;
232 int val, valid;
854165f4
AV
233
234 if (off)
235 return 0;
236
85880801
AV
237 if (unlikely(pci_channel_offline(ha->pdev)))
238 return 0;
239
b7cc176c
JC
240 if (sscanf(buf, "%d:%x:%x", &val, &start, &size) < 1)
241 return -EINVAL;
242 if (start > ha->optrom_size)
854165f4
AV
243 return -EINVAL;
244
245 switch (val) {
246 case 0:
247 if (ha->optrom_state != QLA_SREADING &&
248 ha->optrom_state != QLA_SWRITING)
249 break;
250
251 ha->optrom_state = QLA_SWAITING;
b7cc176c
JC
252
253 DEBUG2(qla_printk(KERN_INFO, ha,
254 "Freeing flash region allocation -- 0x%x bytes.\n",
255 ha->optrom_region_size));
256
854165f4
AV
257 vfree(ha->optrom_buffer);
258 ha->optrom_buffer = NULL;
259 break;
260 case 1:
261 if (ha->optrom_state != QLA_SWAITING)
262 break;
263
b7cc176c
JC
264 ha->optrom_region_start = start;
265 ha->optrom_region_size = start + size > ha->optrom_size ?
266 ha->optrom_size - start : size;
267
854165f4 268 ha->optrom_state = QLA_SREADING;
b7cc176c 269 ha->optrom_buffer = vmalloc(ha->optrom_region_size);
854165f4
AV
270 if (ha->optrom_buffer == NULL) {
271 qla_printk(KERN_WARNING, ha,
272 "Unable to allocate memory for optrom retrieval "
b7cc176c 273 "(%x).\n", ha->optrom_region_size);
854165f4
AV
274
275 ha->optrom_state = QLA_SWAITING;
276 return count;
277 }
278
b7cc176c
JC
279 DEBUG2(qla_printk(KERN_INFO, ha,
280 "Reading flash region -- 0x%x/0x%x.\n",
281 ha->optrom_region_start, ha->optrom_region_size));
282
283 memset(ha->optrom_buffer, 0, ha->optrom_region_size);
7b867cf7 284 ha->isp_ops->read_optrom(vha, ha->optrom_buffer,
b7cc176c 285 ha->optrom_region_start, ha->optrom_region_size);
854165f4
AV
286 break;
287 case 2:
288 if (ha->optrom_state != QLA_SWAITING)
289 break;
290
b7cc176c
JC
291 /*
292 * We need to be more restrictive on which FLASH regions are
293 * allowed to be updated via user-space. Regions accessible
294 * via this method include:
295 *
296 * ISP21xx/ISP22xx/ISP23xx type boards:
297 *
298 * 0x000000 -> 0x020000 -- Boot code.
299 *
300 * ISP2322/ISP24xx type boards:
301 *
302 * 0x000000 -> 0x07ffff -- Boot code.
303 * 0x080000 -> 0x0fffff -- Firmware.
304 *
305 * ISP25xx type boards:
306 *
307 * 0x000000 -> 0x07ffff -- Boot code.
308 * 0x080000 -> 0x0fffff -- Firmware.
309 * 0x120000 -> 0x12ffff -- VPD and HBA parameters.
310 */
311 valid = 0;
312 if (ha->optrom_size == OPTROM_SIZE_2300 && start == 0)
313 valid = 1;
c00d8994
AV
314 else if (start == (ha->flt_region_boot * 4) ||
315 start == (ha->flt_region_fw * 4))
b7cc176c 316 valid = 1;
6431c5dc 317 else if (IS_QLA25XX(ha) || IS_QLA81XX(ha))
b7cc176c
JC
318 valid = 1;
319 if (!valid) {
320 qla_printk(KERN_WARNING, ha,
321 "Invalid start region 0x%x/0x%x.\n", start, size);
322 return -EINVAL;
323 }
324
325 ha->optrom_region_start = start;
326 ha->optrom_region_size = start + size > ha->optrom_size ?
327 ha->optrom_size - start : size;
328
854165f4 329 ha->optrom_state = QLA_SWRITING;
b7cc176c 330 ha->optrom_buffer = vmalloc(ha->optrom_region_size);
854165f4
AV
331 if (ha->optrom_buffer == NULL) {
332 qla_printk(KERN_WARNING, ha,
333 "Unable to allocate memory for optrom update "
b7cc176c 334 "(%x).\n", ha->optrom_region_size);
854165f4
AV
335
336 ha->optrom_state = QLA_SWAITING;
337 return count;
338 }
b7cc176c
JC
339
340 DEBUG2(qla_printk(KERN_INFO, ha,
341 "Staging flash region write -- 0x%x/0x%x.\n",
342 ha->optrom_region_start, ha->optrom_region_size));
343
344 memset(ha->optrom_buffer, 0, ha->optrom_region_size);
854165f4
AV
345 break;
346 case 3:
347 if (ha->optrom_state != QLA_SWRITING)
348 break;
349
2533cf67
LC
350 if (qla2x00_wait_for_hba_online(vha) != QLA_SUCCESS) {
351 qla_printk(KERN_WARNING, ha,
352 "HBA not online, failing flash update.\n");
353 return -EAGAIN;
354 }
355
b7cc176c
JC
356 DEBUG2(qla_printk(KERN_INFO, ha,
357 "Writing flash region -- 0x%x/0x%x.\n",
358 ha->optrom_region_start, ha->optrom_region_size));
359
7b867cf7 360 ha->isp_ops->write_optrom(vha, ha->optrom_buffer,
b7cc176c 361 ha->optrom_region_start, ha->optrom_region_size);
854165f4 362 break;
b7cc176c
JC
363 default:
364 count = -EINVAL;
854165f4
AV
365 }
366 return count;
367}
368
369static struct bin_attribute sysfs_optrom_ctl_attr = {
370 .attr = {
371 .name = "optrom_ctl",
372 .mode = S_IWUSR,
854165f4
AV
373 },
374 .size = 0,
375 .write = qla2x00_sysfs_write_optrom_ctl,
376};
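/*
 * Staged flash access, sketched usage (offsets are examples only; they must
 * satisfy the validity checks in case 2 above, and the sysfs path is
 * indicative). The control string is parsed as "%d:%x:%x", i.e.
 * <op>:<start>:<size>:
 *
 *	echo "1:0x120000:0x10000" > optrom_ctl	# stage a read of a region
 *	cat optrom > region.bin			# copy out the staged data
 *	echo 0 > optrom_ctl			# free the staging buffer
 *
 *	echo "2:0x80000:0x80000" > optrom_ctl	# stage a write
 *	cat fw_blob.bin > optrom		# fill the staging buffer
 *	echo 3 > optrom_ctl			# burn the staged region
 *	echo 0 > optrom_ctl
 */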
377
6f641790 378static ssize_t
91a69029
ZR
379qla2x00_sysfs_read_vpd(struct kobject *kobj,
380 struct bin_attribute *bin_attr,
381 char *buf, loff_t off, size_t count)
6f641790 382{
7b867cf7 383 struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj,
6f641790 384 struct device, kobj)));
7b867cf7 385 struct qla_hw_data *ha = vha->hw;
6f641790 386
85880801
AV
387 if (unlikely(pci_channel_offline(ha->pdev)))
388 return 0;
389
b3dc9088 390 if (!capable(CAP_SYS_ADMIN))
6f641790
AV
391 return 0;
392
6749ce36
AV
393 if (IS_NOCACHE_VPD_TYPE(ha))
394 ha->isp_ops->read_optrom(vha, ha->vpd, ha->flt_region_vpd << 2,
395 ha->vpd_size);
b3dc9088 396 return memory_read_from_buffer(buf, count, &off, ha->vpd, ha->vpd_size);
6f641790
AV
397}
398
399static ssize_t
91a69029
ZR
400qla2x00_sysfs_write_vpd(struct kobject *kobj,
401 struct bin_attribute *bin_attr,
402 char *buf, loff_t off, size_t count)
6f641790 403{
7b867cf7 404 struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj,
6f641790 405 struct device, kobj)));
7b867cf7 406 struct qla_hw_data *ha = vha->hw;
d0c3eefa 407 uint8_t *tmp_data;
6f641790 408
85880801
AV
409 if (unlikely(pci_channel_offline(ha->pdev)))
410 return 0;
411
3d79038f
AV
412 if (!capable(CAP_SYS_ADMIN) || off != 0 || count != ha->vpd_size ||
413 !ha->isp_ops->write_nvram)
6f641790
AV
414 return 0;
415
2533cf67
LC
416 if (qla2x00_wait_for_hba_online(vha) != QLA_SUCCESS) {
417 qla_printk(KERN_WARNING, ha,
418 "HBA not online, failing VPD update.\n");
419 return -EAGAIN;
420 }
421
6f641790 422 /* Write NVRAM. */
7b867cf7
AC
423 ha->isp_ops->write_nvram(vha, (uint8_t *)buf, ha->vpd_base, count);
424 ha->isp_ops->read_nvram(vha, (uint8_t *)ha->vpd, ha->vpd_base, count);
6f641790 425
d0c3eefa
LC
426 /* Update flash version information for 4Gb & above. */
427 if (!IS_FWI2_CAPABLE(ha))
428 goto done;
429
430 tmp_data = vmalloc(256);
431 if (!tmp_data) {
432 qla_printk(KERN_WARNING, ha,
433 "Unable to allocate memory for VPD information update.\n");
434 goto done;
435 }
436 ha->isp_ops->get_flash_version(vha, tmp_data);
437 vfree(tmp_data);
438done:
6f641790
AV
439 return count;
440}
441
442static struct bin_attribute sysfs_vpd_attr = {
443 .attr = {
444 .name = "vpd",
445 .mode = S_IRUSR | S_IWUSR,
6f641790
AV
446 },
447 .size = 0,
448 .read = qla2x00_sysfs_read_vpd,
449 .write = qla2x00_sysfs_write_vpd,
450};
451
88729e53 452static ssize_t
91a69029
ZR
453qla2x00_sysfs_read_sfp(struct kobject *kobj,
454 struct bin_attribute *bin_attr,
455 char *buf, loff_t off, size_t count)
88729e53 456{
7b867cf7 457 struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj,
88729e53 458 struct device, kobj)));
7b867cf7 459 struct qla_hw_data *ha = vha->hw;
88729e53
AV
460 uint16_t iter, addr, offset;
461 int rval;
462
463 if (!capable(CAP_SYS_ADMIN) || off != 0 || count != SFP_DEV_SIZE * 2)
464 return 0;
465
e8711085
AV
466 if (ha->sfp_data)
467 goto do_read;
468
469 ha->sfp_data = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL,
470 &ha->sfp_data_dma);
471 if (!ha->sfp_data) {
472 qla_printk(KERN_WARNING, ha,
473 "Unable to allocate memory for SFP read-data.\n");
474 return 0;
475 }
476
477do_read:
478 memset(ha->sfp_data, 0, SFP_BLOCK_SIZE);
88729e53
AV
479 addr = 0xa0;
480 for (iter = 0, offset = 0; iter < (SFP_DEV_SIZE * 2) / SFP_BLOCK_SIZE;
481 iter++, offset += SFP_BLOCK_SIZE) {
482 if (iter == 4) {
483 /* Skip to next device address. */
484 addr = 0xa2;
485 offset = 0;
486 }
487
7b867cf7 488 rval = qla2x00_read_sfp(vha, ha->sfp_data_dma, addr, offset,
88729e53
AV
489 SFP_BLOCK_SIZE);
490 if (rval != QLA_SUCCESS) {
491 qla_printk(KERN_WARNING, ha,
492 "Unable to read SFP data (%x/%x/%x).\n", rval,
493 addr, offset);
494 count = 0;
495 break;
496 }
497 memcpy(buf, ha->sfp_data, SFP_BLOCK_SIZE);
498 buf += SFP_BLOCK_SIZE;
499 }
500
501 return count;
502}
503
504static struct bin_attribute sysfs_sfp_attr = {
505 .attr = {
506 .name = "sfp",
507 .mode = S_IRUSR | S_IWUSR,
88729e53
AV
508 },
509 .size = SFP_DEV_SIZE * 2,
510 .read = qla2x00_sysfs_read_sfp,
511};
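/*
 * Note: the read above walks both SFP device addresses -- 0xa0 for the
 * first four SFP_BLOCK_SIZE chunks, then 0xa2 with the offset rewound --
 * so one full read returns SFP_DEV_SIZE * 2 bytes and must be requested
 * with exactly that count.
 */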
512
6e181be5
LC
513static ssize_t
514qla2x00_sysfs_write_reset(struct kobject *kobj,
515 struct bin_attribute *bin_attr,
516 char *buf, loff_t off, size_t count)
517{
518 struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj,
519 struct device, kobj)));
520 struct qla_hw_data *ha = vha->hw;
521 int type;
522
523 if (off != 0)
524 return 0;
525
526 type = simple_strtol(buf, NULL, 10);
527 switch (type) {
528 case 0x2025c:
529 qla_printk(KERN_INFO, ha,
530 "Issuing ISP reset on (%ld).\n", vha->host_no);
531
532 scsi_block_requests(vha->host);
533 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
534 qla2xxx_wake_dpc(vha);
535 qla2x00_wait_for_chip_reset(vha);
536 scsi_unblock_requests(vha->host);
537 break;
538 case 0x2025d:
539 if (!IS_QLA81XX(ha))
540 break;
541
542 qla_printk(KERN_INFO, ha,
543 "Issuing MPI reset on (%ld).\n", vha->host_no);
544
545 /* Make sure FC side is not in reset */
546 qla2x00_wait_for_hba_online(vha);
547
548 /* Issue MPI reset */
549 scsi_block_requests(vha->host);
550 if (qla81xx_restart_mpi_firmware(vha) != QLA_SUCCESS)
551 qla_printk(KERN_WARNING, ha,
552 "MPI reset failed on (%ld).\n", vha->host_no);
553 scsi_unblock_requests(vha->host);
554 break;
555 }
556 return count;
557}
558
559static struct bin_attribute sysfs_reset_attr = {
560 .attr = {
561 .name = "reset",
562 .mode = S_IWUSR,
563 },
564 .size = 0,
565 .write = qla2x00_sysfs_write_reset,
566};
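/*
 * Example (illustrative; path depends on the host number): the reset code
 * is parsed in base 10, so the magic values have to be written in decimal --
 * 0x2025c (131676) requests an ISP reset, 0x2025d (131677) an MPI reset on
 * ISP81xx parts:
 *
 *	echo 131676 > /sys/class/scsi_host/hostN/device/reset
 */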
567
ad0ecd61
JC
568static ssize_t
569qla2x00_sysfs_write_edc(struct kobject *kobj,
570 struct bin_attribute *bin_attr,
571 char *buf, loff_t off, size_t count)
572{
573 struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj,
574 struct device, kobj)));
575 struct qla_hw_data *ha = vha->hw;
576 uint16_t dev, adr, opt, len;
577 int rval;
578
579 ha->edc_data_len = 0;
580
581 if (!capable(CAP_SYS_ADMIN) || off != 0 || count < 8)
582 return 0;
583
584 if (!ha->edc_data) {
585 ha->edc_data = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL,
586 &ha->edc_data_dma);
587 if (!ha->edc_data) {
588 DEBUG2(qla_printk(KERN_INFO, ha,
589 "Unable to allocate memory for EDC write.\n"));
590 return 0;
591 }
592 }
593
594 dev = le16_to_cpup((void *)&buf[0]);
595 adr = le16_to_cpup((void *)&buf[2]);
596 opt = le16_to_cpup((void *)&buf[4]);
597 len = le16_to_cpup((void *)&buf[6]);
598
599 if (!(opt & BIT_0))
600 if (len == 0 || len > DMA_POOL_SIZE || len > count - 8)
601 return -EINVAL;
602
603 memcpy(ha->edc_data, &buf[8], len);
604
605 rval = qla2x00_write_edc(vha, dev, adr, ha->edc_data_dma,
606 ha->edc_data, len, opt);
607 if (rval != QLA_SUCCESS) {
608 DEBUG2(qla_printk(KERN_INFO, ha,
609 "Unable to write EDC (%x) %02x:%02x:%04x:%02x:%02x.\n",
610 rval, dev, adr, opt, len, *buf));
611 return 0;
612 }
613
614 return count;
615}
616
617static struct bin_attribute sysfs_edc_attr = {
618 .attr = {
619 .name = "edc",
620 .mode = S_IWUSR,
621 },
622 .size = 0,
623 .write = qla2x00_sysfs_write_edc,
624};
625
626static ssize_t
627qla2x00_sysfs_write_edc_status(struct kobject *kobj,
628 struct bin_attribute *bin_attr,
629 char *buf, loff_t off, size_t count)
630{
631 struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj,
632 struct device, kobj)));
633 struct qla_hw_data *ha = vha->hw;
634 uint16_t dev, adr, opt, len;
635 int rval;
636
637 ha->edc_data_len = 0;
638
639 if (!capable(CAP_SYS_ADMIN) || off != 0 || count < 8)
640 return 0;
641
642 if (!ha->edc_data) {
643 ha->edc_data = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL,
644 &ha->edc_data_dma);
645 if (!ha->edc_data) {
646 DEBUG2(qla_printk(KERN_INFO, ha,
647 "Unable to allocate memory for EDC status.\n"));
648 return 0;
649 }
650 }
651
652 dev = le16_to_cpup((void *)&buf[0]);
653 adr = le16_to_cpup((void *)&buf[2]);
654 opt = le16_to_cpup((void *)&buf[4]);
655 len = le16_to_cpup((void *)&buf[6]);
656
657 if (!(opt & BIT_0))
658 if (len == 0 || len > DMA_POOL_SIZE)
659 return -EINVAL;
660
661 memset(ha->edc_data, 0, len);
662 rval = qla2x00_read_edc(vha, dev, adr, ha->edc_data_dma,
663 ha->edc_data, len, opt);
664 if (rval != QLA_SUCCESS) {
665 DEBUG2(qla_printk(KERN_INFO, ha,
666 "Unable to write EDC status (%x) %02x:%02x:%04x:%02x.\n",
667 rval, dev, adr, opt, len));
668 return 0;
669 }
670
671 ha->edc_data_len = len;
672
673 return count;
674}
675
676static ssize_t
677qla2x00_sysfs_read_edc_status(struct kobject *kobj,
678 struct bin_attribute *bin_attr,
679 char *buf, loff_t off, size_t count)
680{
681 struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj,
682 struct device, kobj)));
683 struct qla_hw_data *ha = vha->hw;
684
685 if (!capable(CAP_SYS_ADMIN) || off != 0 || count == 0)
686 return 0;
687
688 if (!ha->edc_data || ha->edc_data_len == 0 || ha->edc_data_len > count)
689 return -EINVAL;
690
691 memcpy(buf, ha->edc_data, ha->edc_data_len);
692
693 return ha->edc_data_len;
694}
695
696static struct bin_attribute sysfs_edc_status_attr = {
697 .attr = {
698 .name = "edc_status",
699 .mode = S_IRUSR | S_IWUSR,
700 },
701 .size = 0,
702 .write = qla2x00_sysfs_write_edc_status,
703 .read = qla2x00_sysfs_read_edc_status,
704};
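/*
 * Layout of the request written to "edc" and "edc_status", shown as a
 * hypothetical user-space view (the driver parses the fields by hand and
 * does not define such a structure):
 *
 *	struct edc_request {
 *		__le16 dev;	- target device address
 *		__le16 adr;	- register offset
 *		__le16 opt;	- option bits; BIT_0 skips the length check
 *		__le16 len;	- number of payload bytes
 *		uint8_t data[];	- 'len' bytes follow ("edc" write only)
 *	};
 */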
705
ce0423f4
AV
706static ssize_t
707qla2x00_sysfs_read_xgmac_stats(struct kobject *kobj,
708 struct bin_attribute *bin_attr,
709 char *buf, loff_t off, size_t count)
710{
711 struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj,
712 struct device, kobj)));
713 struct qla_hw_data *ha = vha->hw;
714 int rval;
715 uint16_t actual_size;
716
717 if (!capable(CAP_SYS_ADMIN) || off != 0 || count > XGMAC_DATA_SIZE)
718 return 0;
719
720 if (ha->xgmac_data)
721 goto do_read;
722
723 ha->xgmac_data = dma_alloc_coherent(&ha->pdev->dev, XGMAC_DATA_SIZE,
724 &ha->xgmac_data_dma, GFP_KERNEL);
725 if (!ha->xgmac_data) {
726 qla_printk(KERN_WARNING, ha,
727 "Unable to allocate memory for XGMAC read-data.\n");
728 return 0;
729 }
730
731do_read:
732 actual_size = 0;
733 memset(ha->xgmac_data, 0, XGMAC_DATA_SIZE);
734
735 rval = qla2x00_get_xgmac_stats(vha, ha->xgmac_data_dma,
736 XGMAC_DATA_SIZE, &actual_size);
737 if (rval != QLA_SUCCESS) {
738 qla_printk(KERN_WARNING, ha,
739 "Unable to read XGMAC data (%x).\n", rval);
740 count = 0;
741 }
742
743 count = actual_size > count ? count: actual_size;
744 memcpy(buf, ha->xgmac_data, count);
745
746 return count;
747}
748
749static struct bin_attribute sysfs_xgmac_stats_attr = {
750 .attr = {
751 .name = "xgmac_stats",
752 .mode = S_IRUSR,
753 },
754 .size = 0,
755 .read = qla2x00_sysfs_read_xgmac_stats,
756};
757
11bbc1d8
AV
758static ssize_t
759qla2x00_sysfs_read_dcbx_tlv(struct kobject *kobj,
760 struct bin_attribute *bin_attr,
761 char *buf, loff_t off, size_t count)
762{
763 struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj,
764 struct device, kobj)));
765 struct qla_hw_data *ha = vha->hw;
766 int rval;
767 uint16_t actual_size;
768
769 if (!capable(CAP_SYS_ADMIN) || off != 0 || count > DCBX_TLV_DATA_SIZE)
770 return 0;
771
772 if (ha->dcbx_tlv)
773 goto do_read;
774
775 ha->dcbx_tlv = dma_alloc_coherent(&ha->pdev->dev, DCBX_TLV_DATA_SIZE,
776 &ha->dcbx_tlv_dma, GFP_KERNEL);
777 if (!ha->dcbx_tlv) {
778 qla_printk(KERN_WARNING, ha,
779 "Unable to allocate memory for DCBX TLV read-data.\n");
780 return 0;
781 }
782
783do_read:
784 actual_size = 0;
785 memset(ha->dcbx_tlv, 0, DCBX_TLV_DATA_SIZE);
786
787 rval = qla2x00_get_dcbx_params(vha, ha->dcbx_tlv_dma,
788 DCBX_TLV_DATA_SIZE);
789 if (rval != QLA_SUCCESS) {
790 qla_printk(KERN_WARNING, ha,
791 "Unable to read DCBX TLV data (%x).\n", rval);
792 count = 0;
793 }
794
795 memcpy(buf, ha->dcbx_tlv, count);
796
797 return count;
798}
799
800static struct bin_attribute sysfs_dcbx_tlv_attr = {
801 .attr = {
802 .name = "dcbx_tlv",
803 .mode = S_IRUSR,
804 },
805 .size = 0,
806 .read = qla2x00_sysfs_read_dcbx_tlv,
807};
808
f1663ad5
AV
809static struct sysfs_entry {
810 char *name;
811 struct bin_attribute *attr;
812 int is4GBp_only;
813} bin_file_entries[] = {
814 { "fw_dump", &sysfs_fw_dump_attr, },
815 { "nvram", &sysfs_nvram_attr, },
816 { "optrom", &sysfs_optrom_attr, },
817 { "optrom_ctl", &sysfs_optrom_ctl_attr, },
818 { "vpd", &sysfs_vpd_attr, 1 },
819 { "sfp", &sysfs_sfp_attr, 1 },
6e181be5 820 { "reset", &sysfs_reset_attr, },
ad0ecd61
JC
821 { "edc", &sysfs_edc_attr, 2 },
822 { "edc_status", &sysfs_edc_status_attr, 2 },
ce0423f4 823 { "xgmac_stats", &sysfs_xgmac_stats_attr, 3 },
11bbc1d8 824 { "dcbx_tlv", &sysfs_dcbx_tlv_attr, 3 },
46ddab7b 825 { NULL },
f1663ad5
AV
826};
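/*
 * The is4GBp_only field doubles as a hardware gate, matching the checks in
 * qla2x00_alloc_sysfs_attr()/qla2x00_free_sysfs_attr() below: 0 = always
 * created, 1 = FWI2-capable (4Gb and newer) ISPs only, 2 = ISP25xx only,
 * 3 = ISP81xx only.
 */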
827
8482e118 828void
7b867cf7 829qla2x00_alloc_sysfs_attr(scsi_qla_host_t *vha)
8482e118 830{
7b867cf7 831 struct Scsi_Host *host = vha->host;
f1663ad5
AV
832 struct sysfs_entry *iter;
833 int ret;
8482e118 834
f1663ad5 835 for (iter = bin_file_entries; iter->name; iter++) {
7b867cf7 836 if (iter->is4GBp_only && !IS_FWI2_CAPABLE(vha->hw))
f1663ad5 837 continue;
ad0ecd61
JC
838 if (iter->is4GBp_only == 2 && !IS_QLA25XX(vha->hw))
839 continue;
ce0423f4
AV
840 if (iter->is4GBp_only == 3 && !IS_QLA81XX(vha->hw))
841 continue;
f1663ad5
AV
842
843 ret = sysfs_create_bin_file(&host->shost_gendev.kobj,
844 iter->attr);
845 if (ret)
7b867cf7 846 qla_printk(KERN_INFO, vha->hw,
f1663ad5
AV
847 "Unable to create sysfs %s binary attribute "
848 "(%d).\n", iter->name, ret);
7914d004 849 }
8482e118
AV
850}
851
852void
7b867cf7 853qla2x00_free_sysfs_attr(scsi_qla_host_t *vha)
8482e118 854{
7b867cf7 855 struct Scsi_Host *host = vha->host;
f1663ad5 856 struct sysfs_entry *iter;
7b867cf7 857 struct qla_hw_data *ha = vha->hw;
f1663ad5
AV
858
859 for (iter = bin_file_entries; iter->name; iter++) {
e428924c 860 if (iter->is4GBp_only && !IS_FWI2_CAPABLE(ha))
f1663ad5 861 continue;
ad0ecd61
JC
862 if (iter->is4GBp_only == 2 && !IS_QLA25XX(ha))
863 continue;
ce0423f4
AV
864 if (iter->is4GBp_only == 3 && !IS_QLA81XX(ha))
865 continue;
8482e118 866
88729e53 867 sysfs_remove_bin_file(&host->shost_gendev.kobj,
f1663ad5 868 iter->attr);
7914d004 869 }
f6df144c
AV
870
871 if (ha->beacon_blink_led == 1)
7b867cf7 872 ha->isp_ops->beacon_off(vha);
8482e118
AV
873}
874
afb046e2
AV
875/* Scsi_Host attributes. */
876
877static ssize_t
ee959b00
TJ
878qla2x00_drvr_version_show(struct device *dev,
879 struct device_attribute *attr, char *buf)
afb046e2
AV
880{
881 return snprintf(buf, PAGE_SIZE, "%s\n", qla2x00_version_str);
882}
883
884static ssize_t
ee959b00
TJ
885qla2x00_fw_version_show(struct device *dev,
886 struct device_attribute *attr, char *buf)
afb046e2 887{
7b867cf7
AC
888 scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
889 struct qla_hw_data *ha = vha->hw;
890 char fw_str[128];
afb046e2
AV
891
892 return snprintf(buf, PAGE_SIZE, "%s\n",
7b867cf7 893 ha->isp_ops->fw_version_str(vha, fw_str));
afb046e2
AV
894}
895
896static ssize_t
ee959b00
TJ
897qla2x00_serial_num_show(struct device *dev, struct device_attribute *attr,
898 char *buf)
afb046e2 899{
7b867cf7
AC
900 scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
901 struct qla_hw_data *ha = vha->hw;
afb046e2
AV
902 uint32_t sn;
903
1ee27146 904 if (IS_FWI2_CAPABLE(ha)) {
7b867cf7 905 qla2xxx_get_vpd_field(vha, "SN", buf, PAGE_SIZE);
1ee27146
JC
906 return snprintf(buf, PAGE_SIZE, "%s\n", buf);
907 }
8b7afc2a 908
afb046e2
AV
909 sn = ((ha->serial0 & 0x1f) << 16) | (ha->serial2 << 8) | ha->serial1;
910 return snprintf(buf, PAGE_SIZE, "%c%05d\n", 'A' + sn / 100000,
911 sn % 100000);
912}
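/*
 * Worked example of the legacy encoding above: sn = 123456 gives
 * 'A' + 123456 / 100000 = 'B' and 123456 % 100000 = 23456, i.e. "B23456".
 */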
913
914static ssize_t
ee959b00
TJ
915qla2x00_isp_name_show(struct device *dev, struct device_attribute *attr,
916 char *buf)
afb046e2 917{
7b867cf7
AC
918 scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
919 return snprintf(buf, PAGE_SIZE, "ISP%04X\n", vha->hw->pdev->device);
afb046e2
AV
920}
921
922static ssize_t
ee959b00
TJ
923qla2x00_isp_id_show(struct device *dev, struct device_attribute *attr,
924 char *buf)
afb046e2 925{
7b867cf7
AC
926 scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
927 struct qla_hw_data *ha = vha->hw;
afb046e2
AV
928 return snprintf(buf, PAGE_SIZE, "%04x %04x %04x %04x\n",
929 ha->product_id[0], ha->product_id[1], ha->product_id[2],
930 ha->product_id[3]);
931}
932
933static ssize_t
ee959b00
TJ
934qla2x00_model_name_show(struct device *dev, struct device_attribute *attr,
935 char *buf)
afb046e2 936{
7b867cf7
AC
937 scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
938 return snprintf(buf, PAGE_SIZE, "%s\n", vha->hw->model_number);
afb046e2
AV
939}
940
941static ssize_t
ee959b00
TJ
942qla2x00_model_desc_show(struct device *dev, struct device_attribute *attr,
943 char *buf)
afb046e2 944{
7b867cf7 945 scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
afb046e2 946 return snprintf(buf, PAGE_SIZE, "%s\n",
7b867cf7 947 vha->hw->model_desc ? vha->hw->model_desc : "");
afb046e2
AV
948}
949
950static ssize_t
ee959b00
TJ
951qla2x00_pci_info_show(struct device *dev, struct device_attribute *attr,
952 char *buf)
afb046e2 953{
7b867cf7 954 scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
afb046e2
AV
955 char pci_info[30];
956
957 return snprintf(buf, PAGE_SIZE, "%s\n",
7b867cf7 958 vha->hw->isp_ops->pci_info_str(vha, pci_info));
afb046e2
AV
959}
960
961static ssize_t
bbd1ae41
HR
962qla2x00_link_state_show(struct device *dev, struct device_attribute *attr,
963 char *buf)
afb046e2 964{
7b867cf7
AC
965 scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
966 struct qla_hw_data *ha = vha->hw;
afb046e2
AV
967 int len = 0;
968
7b867cf7
AC
969 if (atomic_read(&vha->loop_state) == LOOP_DOWN ||
970 atomic_read(&vha->loop_state) == LOOP_DEAD)
afb046e2 971 len = snprintf(buf, PAGE_SIZE, "Link Down\n");
7b867cf7
AC
972 else if (atomic_read(&vha->loop_state) != LOOP_READY ||
973 test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags) ||
974 test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags))
afb046e2
AV
975 len = snprintf(buf, PAGE_SIZE, "Unknown Link State\n");
976 else {
977 len = snprintf(buf, PAGE_SIZE, "Link Up - ");
978
979 switch (ha->current_topology) {
980 case ISP_CFG_NL:
981 len += snprintf(buf + len, PAGE_SIZE-len, "Loop\n");
982 break;
983 case ISP_CFG_FL:
984 len += snprintf(buf + len, PAGE_SIZE-len, "FL_Port\n");
985 break;
986 case ISP_CFG_N:
987 len += snprintf(buf + len, PAGE_SIZE-len,
988 "N_Port to N_Port\n");
989 break;
990 case ISP_CFG_F:
991 len += snprintf(buf + len, PAGE_SIZE-len, "F_Port\n");
992 break;
993 default:
994 len += snprintf(buf + len, PAGE_SIZE-len, "Loop\n");
995 break;
996 }
997 }
998 return len;
999}
1000
4fdfefe5 1001static ssize_t
ee959b00
TJ
1002qla2x00_zio_show(struct device *dev, struct device_attribute *attr,
1003 char *buf)
4fdfefe5 1004{
7b867cf7 1005 scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
4fdfefe5
AV
1006 int len = 0;
1007
7b867cf7 1008 switch (vha->hw->zio_mode) {
4fdfefe5
AV
1009 case QLA_ZIO_MODE_6:
1010 len += snprintf(buf + len, PAGE_SIZE-len, "Mode 6\n");
1011 break;
1012 case QLA_ZIO_DISABLED:
1013 len += snprintf(buf + len, PAGE_SIZE-len, "Disabled\n");
1014 break;
1015 }
1016 return len;
1017}
1018
1019static ssize_t
ee959b00
TJ
1020qla2x00_zio_store(struct device *dev, struct device_attribute *attr,
1021 const char *buf, size_t count)
4fdfefe5 1022{
7b867cf7
AC
1023 scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
1024 struct qla_hw_data *ha = vha->hw;
4fdfefe5
AV
1025 int val = 0;
1026 uint16_t zio_mode;
1027
4a59f71d
AV
1028 if (!IS_ZIO_SUPPORTED(ha))
1029 return -ENOTSUPP;
1030
4fdfefe5
AV
1031 if (sscanf(buf, "%d", &val) != 1)
1032 return -EINVAL;
1033
4a59f71d 1034 if (val)
4fdfefe5 1035 zio_mode = QLA_ZIO_MODE_6;
4a59f71d 1036 else
4fdfefe5 1037 zio_mode = QLA_ZIO_DISABLED;
4fdfefe5
AV
1038
1039 /* Update per-hba values and queue a reset. */
1040 if (zio_mode != QLA_ZIO_DISABLED || ha->zio_mode != QLA_ZIO_DISABLED) {
1041 ha->zio_mode = zio_mode;
7b867cf7 1042 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
4fdfefe5
AV
1043 }
1044 return strlen(buf);
1045}
1046
1047static ssize_t
ee959b00
TJ
1048qla2x00_zio_timer_show(struct device *dev, struct device_attribute *attr,
1049 char *buf)
4fdfefe5 1050{
7b867cf7 1051 scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
4fdfefe5 1052
7b867cf7 1053 return snprintf(buf, PAGE_SIZE, "%d us\n", vha->hw->zio_timer * 100);
4fdfefe5
AV
1054}
1055
1056static ssize_t
ee959b00
TJ
1057qla2x00_zio_timer_store(struct device *dev, struct device_attribute *attr,
1058 const char *buf, size_t count)
4fdfefe5 1059{
7b867cf7 1060 scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
4fdfefe5
AV
1061 int val = 0;
1062 uint16_t zio_timer;
1063
1064 if (sscanf(buf, "%d", &val) != 1)
1065 return -EINVAL;
1066 if (val > 25500 || val < 100)
1067 return -ERANGE;
1068
1069 zio_timer = (uint16_t)(val / 100);
7b867cf7 1070 vha->hw->zio_timer = zio_timer;
4fdfefe5
AV
1071
1072 return strlen(buf);
1073}
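/*
 * The timer is stored in 100-microsecond units: writing "200" stores 2 and
 * reads back as "200 us"; values outside 100..25500 are rejected with
 * -ERANGE.
 */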
1074
f6df144c 1075static ssize_t
ee959b00
TJ
1076qla2x00_beacon_show(struct device *dev, struct device_attribute *attr,
1077 char *buf)
f6df144c 1078{
7b867cf7 1079 scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
f6df144c
AV
1080 int len = 0;
1081
7b867cf7 1082 if (vha->hw->beacon_blink_led)
f6df144c
AV
1083 len += snprintf(buf + len, PAGE_SIZE-len, "Enabled\n");
1084 else
1085 len += snprintf(buf + len, PAGE_SIZE-len, "Disabled\n");
1086 return len;
1087}
1088
1089static ssize_t
ee959b00
TJ
1090qla2x00_beacon_store(struct device *dev, struct device_attribute *attr,
1091 const char *buf, size_t count)
f6df144c 1092{
7b867cf7
AC
1093 scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
1094 struct qla_hw_data *ha = vha->hw;
f6df144c
AV
1095 int val = 0;
1096 int rval;
1097
1098 if (IS_QLA2100(ha) || IS_QLA2200(ha))
1099 return -EPERM;
1100
7b867cf7 1101 if (test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags)) {
f6df144c
AV
1102 qla_printk(KERN_WARNING, ha,
1103 "Abort ISP active -- ignoring beacon request.\n");
1104 return -EBUSY;
1105 }
1106
1107 if (sscanf(buf, "%d", &val) != 1)
1108 return -EINVAL;
1109
1110 if (val)
7b867cf7 1111 rval = ha->isp_ops->beacon_on(vha);
f6df144c 1112 else
7b867cf7 1113 rval = ha->isp_ops->beacon_off(vha);
f6df144c
AV
1114
1115 if (rval != QLA_SUCCESS)
1116 count = 0;
1117
1118 return count;
1119}
1120
30c47662 1121static ssize_t
ee959b00
TJ
1122qla2x00_optrom_bios_version_show(struct device *dev,
1123 struct device_attribute *attr, char *buf)
30c47662 1124{
7b867cf7
AC
1125 scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
1126 struct qla_hw_data *ha = vha->hw;
30c47662
AV
1127 return snprintf(buf, PAGE_SIZE, "%d.%02d\n", ha->bios_revision[1],
1128 ha->bios_revision[0]);
1129}
1130
1131static ssize_t
ee959b00
TJ
1132qla2x00_optrom_efi_version_show(struct device *dev,
1133 struct device_attribute *attr, char *buf)
30c47662 1134{
7b867cf7
AC
1135 scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
1136 struct qla_hw_data *ha = vha->hw;
30c47662
AV
1137 return snprintf(buf, PAGE_SIZE, "%d.%02d\n", ha->efi_revision[1],
1138 ha->efi_revision[0]);
1139}
1140
1141static ssize_t
ee959b00
TJ
1142qla2x00_optrom_fcode_version_show(struct device *dev,
1143 struct device_attribute *attr, char *buf)
30c47662 1144{
7b867cf7
AC
1145 scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
1146 struct qla_hw_data *ha = vha->hw;
30c47662
AV
1147 return snprintf(buf, PAGE_SIZE, "%d.%02d\n", ha->fcode_revision[1],
1148 ha->fcode_revision[0]);
1149}
1150
1151static ssize_t
ee959b00
TJ
1152qla2x00_optrom_fw_version_show(struct device *dev,
1153 struct device_attribute *attr, char *buf)
30c47662 1154{
7b867cf7
AC
1155 scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
1156 struct qla_hw_data *ha = vha->hw;
30c47662
AV
1157 return snprintf(buf, PAGE_SIZE, "%d.%02d.%02d %d\n",
1158 ha->fw_revision[0], ha->fw_revision[1], ha->fw_revision[2],
1159 ha->fw_revision[3]);
1160}
1161
e5f5f6f7
HZ
1162static ssize_t
1163qla2x00_total_isp_aborts_show(struct device *dev,
1164 struct device_attribute *attr, char *buf)
1165{
7b867cf7
AC
1166 scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
1167 struct qla_hw_data *ha = vha->hw;
e5f5f6f7
HZ
1168 return snprintf(buf, PAGE_SIZE, "%d\n",
1169 ha->qla_stats.total_isp_aborts);
1170}
1171
9a069e19
GM
1172static ssize_t
1173qla24xx_84xx_fw_version_show(struct device *dev,
1174 struct device_attribute *attr, char *buf)
1175{
1176 int rval = QLA_SUCCESS;
1177 uint16_t status[2] = {0, 0};
1178 scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
1179 struct qla_hw_data *ha = vha->hw;
1180
1181 if (IS_QLA84XX(ha) && ha->cs84xx) {
1182 if (ha->cs84xx->op_fw_version == 0) {
1183 rval = qla84xx_verify_chip(vha, status);
1184 }
1185
1186 if ((rval == QLA_SUCCESS) && (status[0] == 0))
1187 return snprintf(buf, PAGE_SIZE, "%u\n",
1188 (uint32_t)ha->cs84xx->op_fw_version);
1189 }
1190
1191 return snprintf(buf, PAGE_SIZE, "\n");
1192}
1193
3a03eb79
AV
1194static ssize_t
1195qla2x00_mpi_version_show(struct device *dev, struct device_attribute *attr,
1196 char *buf)
1197{
1198 scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
1199 struct qla_hw_data *ha = vha->hw;
1200
1201 if (!IS_QLA81XX(ha))
1202 return snprintf(buf, PAGE_SIZE, "\n");
1203
55a96158 1204 return snprintf(buf, PAGE_SIZE, "%d.%02d.%02d (%x)\n",
3a03eb79 1205 ha->mpi_version[0], ha->mpi_version[1], ha->mpi_version[2],
55a96158
AV
1206 ha->mpi_capabilities);
1207}
1208
1209static ssize_t
1210qla2x00_phy_version_show(struct device *dev, struct device_attribute *attr,
1211 char *buf)
1212{
1213 scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
1214 struct qla_hw_data *ha = vha->hw;
1215
1216 if (!IS_QLA81XX(ha))
1217 return snprintf(buf, PAGE_SIZE, "\n");
1218
1219 return snprintf(buf, PAGE_SIZE, "%d.%02d.%02d\n",
1220 ha->phy_version[0], ha->phy_version[1], ha->phy_version[2]);
3a03eb79
AV
1221}
1222
fbcbb5d0
LC
1223static ssize_t
1224qla2x00_flash_block_size_show(struct device *dev,
1225 struct device_attribute *attr, char *buf)
1226{
1227 scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
1228 struct qla_hw_data *ha = vha->hw;
1229
1230 return snprintf(buf, PAGE_SIZE, "0x%x\n", ha->fdt_block_size);
1231}
1232
bad7001c
AV
1233static ssize_t
1234qla2x00_vlan_id_show(struct device *dev, struct device_attribute *attr,
1235 char *buf)
1236{
1237 scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
1238
1239 if (!IS_QLA81XX(vha->hw))
1240 return snprintf(buf, PAGE_SIZE, "\n");
1241
1242 return snprintf(buf, PAGE_SIZE, "%d\n", vha->fcoe_vlan_id);
1243}
1244
1245static ssize_t
1246qla2x00_vn_port_mac_address_show(struct device *dev,
1247 struct device_attribute *attr, char *buf)
1248{
1249 scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
1250
1251 if (!IS_QLA81XX(vha->hw))
1252 return snprintf(buf, PAGE_SIZE, "\n");
1253
1254 return snprintf(buf, PAGE_SIZE, "%02x:%02x:%02x:%02x:%02x:%02x\n",
1255 vha->fcoe_vn_port_mac[5], vha->fcoe_vn_port_mac[4],
1256 vha->fcoe_vn_port_mac[3], vha->fcoe_vn_port_mac[2],
1257 vha->fcoe_vn_port_mac[1], vha->fcoe_vn_port_mac[0]);
1258}
1259
7f774025
AV
1260static ssize_t
1261qla2x00_fabric_param_show(struct device *dev, struct device_attribute *attr,
1262 char *buf)
1263{
1264 scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
1265
1266 return snprintf(buf, PAGE_SIZE, "%d\n", vha->hw->switch_cap);
1267}
1268
656e8912
AV
1269static ssize_t
1270qla2x00_fw_state_show(struct device *dev, struct device_attribute *attr,
1271 char *buf)
1272{
1273 scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
85880801 1274 int rval = QLA_FUNCTION_FAILED;
656e8912
AV
1275 uint16_t state[5];
1276
85880801
AV
1277 if (!vha->hw->flags.eeh_busy)
1278 rval = qla2x00_get_firmware_state(vha, state);
656e8912
AV
1279 if (rval != QLA_SUCCESS)
1280 memset(state, -1, sizeof(state));
1281
1282 return snprintf(buf, PAGE_SIZE, "0x%x 0x%x 0x%x 0x%x 0x%x\n", state[0],
1283 state[1], state[2], state[3], state[4]);
1284}
1285
ee959b00
TJ
1286static DEVICE_ATTR(driver_version, S_IRUGO, qla2x00_drvr_version_show, NULL);
1287static DEVICE_ATTR(fw_version, S_IRUGO, qla2x00_fw_version_show, NULL);
1288static DEVICE_ATTR(serial_num, S_IRUGO, qla2x00_serial_num_show, NULL);
1289static DEVICE_ATTR(isp_name, S_IRUGO, qla2x00_isp_name_show, NULL);
1290static DEVICE_ATTR(isp_id, S_IRUGO, qla2x00_isp_id_show, NULL);
1291static DEVICE_ATTR(model_name, S_IRUGO, qla2x00_model_name_show, NULL);
1292static DEVICE_ATTR(model_desc, S_IRUGO, qla2x00_model_desc_show, NULL);
1293static DEVICE_ATTR(pci_info, S_IRUGO, qla2x00_pci_info_show, NULL);
bbd1ae41 1294static DEVICE_ATTR(link_state, S_IRUGO, qla2x00_link_state_show, NULL);
ee959b00
TJ
1295static DEVICE_ATTR(zio, S_IRUGO | S_IWUSR, qla2x00_zio_show, qla2x00_zio_store);
1296static DEVICE_ATTR(zio_timer, S_IRUGO | S_IWUSR, qla2x00_zio_timer_show,
1297 qla2x00_zio_timer_store);
1298static DEVICE_ATTR(beacon, S_IRUGO | S_IWUSR, qla2x00_beacon_show,
1299 qla2x00_beacon_store);
1300static DEVICE_ATTR(optrom_bios_version, S_IRUGO,
1301 qla2x00_optrom_bios_version_show, NULL);
1302static DEVICE_ATTR(optrom_efi_version, S_IRUGO,
1303 qla2x00_optrom_efi_version_show, NULL);
1304static DEVICE_ATTR(optrom_fcode_version, S_IRUGO,
1305 qla2x00_optrom_fcode_version_show, NULL);
1306static DEVICE_ATTR(optrom_fw_version, S_IRUGO, qla2x00_optrom_fw_version_show,
1307 NULL);
9a069e19
GM
1308static DEVICE_ATTR(84xx_fw_version, S_IRUGO, qla24xx_84xx_fw_version_show,
1309 NULL);
e5f5f6f7
HZ
1310static DEVICE_ATTR(total_isp_aborts, S_IRUGO, qla2x00_total_isp_aborts_show,
1311 NULL);
3a03eb79 1312static DEVICE_ATTR(mpi_version, S_IRUGO, qla2x00_mpi_version_show, NULL);
55a96158 1313static DEVICE_ATTR(phy_version, S_IRUGO, qla2x00_phy_version_show, NULL);
fbcbb5d0
LC
1314static DEVICE_ATTR(flash_block_size, S_IRUGO, qla2x00_flash_block_size_show,
1315 NULL);
bad7001c
AV
1316static DEVICE_ATTR(vlan_id, S_IRUGO, qla2x00_vlan_id_show, NULL);
1317static DEVICE_ATTR(vn_port_mac_address, S_IRUGO,
1318 qla2x00_vn_port_mac_address_show, NULL);
7f774025 1319static DEVICE_ATTR(fabric_param, S_IRUGO, qla2x00_fabric_param_show, NULL);
656e8912 1320static DEVICE_ATTR(fw_state, S_IRUGO, qla2x00_fw_state_show, NULL);
ee959b00
TJ
1321
1322struct device_attribute *qla2x00_host_attrs[] = {
1323 &dev_attr_driver_version,
1324 &dev_attr_fw_version,
1325 &dev_attr_serial_num,
1326 &dev_attr_isp_name,
1327 &dev_attr_isp_id,
1328 &dev_attr_model_name,
1329 &dev_attr_model_desc,
1330 &dev_attr_pci_info,
bbd1ae41 1331 &dev_attr_link_state,
ee959b00
TJ
1332 &dev_attr_zio,
1333 &dev_attr_zio_timer,
1334 &dev_attr_beacon,
1335 &dev_attr_optrom_bios_version,
1336 &dev_attr_optrom_efi_version,
1337 &dev_attr_optrom_fcode_version,
1338 &dev_attr_optrom_fw_version,
9a069e19 1339 &dev_attr_84xx_fw_version,
e5f5f6f7 1340 &dev_attr_total_isp_aborts,
3a03eb79 1341 &dev_attr_mpi_version,
55a96158 1342 &dev_attr_phy_version,
fbcbb5d0 1343 &dev_attr_flash_block_size,
bad7001c
AV
1344 &dev_attr_vlan_id,
1345 &dev_attr_vn_port_mac_address,
7f774025 1346 &dev_attr_fabric_param,
656e8912 1347 &dev_attr_fw_state,
afb046e2
AV
1348 NULL,
1349};
1350
8482e118
AV
1351/* Host attributes. */
1352
1353static void
1354qla2x00_get_host_port_id(struct Scsi_Host *shost)
1355{
7b867cf7 1356 scsi_qla_host_t *vha = shost_priv(shost);
8482e118 1357
7b867cf7
AC
1358 fc_host_port_id(shost) = vha->d_id.b.domain << 16 |
1359 vha->d_id.b.area << 8 | vha->d_id.b.al_pa;
8482e118
AV
1360}
1361
04414013
AV
1362static void
1363qla2x00_get_host_speed(struct Scsi_Host *shost)
1364{
7b867cf7
AC
1365 struct qla_hw_data *ha = ((struct scsi_qla_host *)
1366 (shost_priv(shost)))->hw;
2ae2b370 1367 u32 speed = FC_PORTSPEED_UNKNOWN;
04414013
AV
1368
1369 switch (ha->link_data_rate) {
d8b45213 1370 case PORT_SPEED_1GB:
2ae2b370 1371 speed = FC_PORTSPEED_1GBIT;
04414013 1372 break;
d8b45213 1373 case PORT_SPEED_2GB:
2ae2b370 1374 speed = FC_PORTSPEED_2GBIT;
04414013 1375 break;
d8b45213 1376 case PORT_SPEED_4GB:
2ae2b370 1377 speed = FC_PORTSPEED_4GBIT;
04414013 1378 break;
da4541b6 1379 case PORT_SPEED_8GB:
2ae2b370 1380 speed = FC_PORTSPEED_8GBIT;
da4541b6 1381 break;
3a03eb79
AV
1382 case PORT_SPEED_10GB:
1383 speed = FC_PORTSPEED_10GBIT;
1384 break;
04414013
AV
1385 }
1386 fc_host_speed(shost) = speed;
1387}
1388
8d067623
AV
1389static void
1390qla2x00_get_host_port_type(struct Scsi_Host *shost)
1391{
7b867cf7 1392 scsi_qla_host_t *vha = shost_priv(shost);
8d067623
AV
1393 uint32_t port_type = FC_PORTTYPE_UNKNOWN;
1394
7b867cf7 1395 if (vha->vp_idx) {
2f2fa13d
SS
1396 fc_host_port_type(shost) = FC_PORTTYPE_NPIV;
1397 return;
1398 }
7b867cf7 1399 switch (vha->hw->current_topology) {
8d067623
AV
1400 case ISP_CFG_NL:
1401 port_type = FC_PORTTYPE_LPORT;
1402 break;
1403 case ISP_CFG_FL:
1404 port_type = FC_PORTTYPE_NLPORT;
1405 break;
1406 case ISP_CFG_N:
1407 port_type = FC_PORTTYPE_PTP;
1408 break;
1409 case ISP_CFG_F:
1410 port_type = FC_PORTTYPE_NPORT;
1411 break;
1412 }
1413 fc_host_port_type(shost) = port_type;
1414}
1415
8482e118
AV
1416static void
1417qla2x00_get_starget_node_name(struct scsi_target *starget)
1418{
1419 struct Scsi_Host *host = dev_to_shost(starget->dev.parent);
7b867cf7 1420 scsi_qla_host_t *vha = shost_priv(host);
bdf79621 1421 fc_port_t *fcport;
f8b02a85 1422 u64 node_name = 0;
8482e118 1423
7b867cf7 1424 list_for_each_entry(fcport, &vha->vp_fcports, list) {
5ab5a4dd
AV
1425 if (fcport->rport &&
1426 starget->id == fcport->rport->scsi_target_id) {
f8b02a85 1427 node_name = wwn_to_u64(fcport->node_name);
bdf79621
AV
1428 break;
1429 }
1430 }
1431
f8b02a85 1432 fc_starget_node_name(starget) = node_name;
8482e118
AV
1433}
1434
1435static void
1436qla2x00_get_starget_port_name(struct scsi_target *starget)
1437{
1438 struct Scsi_Host *host = dev_to_shost(starget->dev.parent);
7b867cf7 1439 scsi_qla_host_t *vha = shost_priv(host);
bdf79621 1440 fc_port_t *fcport;
f8b02a85 1441 u64 port_name = 0;
8482e118 1442
7b867cf7 1443 list_for_each_entry(fcport, &vha->vp_fcports, list) {
5ab5a4dd
AV
1444 if (fcport->rport &&
1445 starget->id == fcport->rport->scsi_target_id) {
f8b02a85 1446 port_name = wwn_to_u64(fcport->port_name);
bdf79621
AV
1447 break;
1448 }
1449 }
1450
f8b02a85 1451 fc_starget_port_name(starget) = port_name;
8482e118
AV
1452}
1453
1454static void
1455qla2x00_get_starget_port_id(struct scsi_target *starget)
1456{
1457 struct Scsi_Host *host = dev_to_shost(starget->dev.parent);
7b867cf7 1458 scsi_qla_host_t *vha = shost_priv(host);
bdf79621
AV
1459 fc_port_t *fcport;
1460 uint32_t port_id = ~0U;
1461
7b867cf7 1462 list_for_each_entry(fcport, &vha->vp_fcports, list) {
5ab5a4dd
AV
1463 if (fcport->rport &&
1464 starget->id == fcport->rport->scsi_target_id) {
bdf79621
AV
1465 port_id = fcport->d_id.b.domain << 16 |
1466 fcport->d_id.b.area << 8 | fcport->d_id.b.al_pa;
1467 break;
1468 }
1469 }
8482e118 1470
8482e118
AV
1471 fc_starget_port_id(starget) = port_id;
1472}
1473
8482e118
AV
1474static void
1475qla2x00_set_rport_loss_tmo(struct fc_rport *rport, uint32_t timeout)
1476{
8482e118 1477 if (timeout)
85821c90 1478 rport->dev_loss_tmo = timeout;
8482e118 1479 else
85821c90 1480 rport->dev_loss_tmo = 1;
8482e118
AV
1481}
1482
5f3a9a20
SJ
1483static void
1484qla2x00_dev_loss_tmo_callbk(struct fc_rport *rport)
1485{
1486 struct Scsi_Host *host = rport_to_shost(rport);
1487 fc_port_t *fcport = *(fc_port_t **)rport->dd_data;
1488
3c01b4f9
SJ
1489 if (!fcport)
1490 return;
1491
85880801
AV
1492 if (test_bit(ABORT_ISP_ACTIVE, &fcport->vha->dpc_flags))
1493 return;
1494
1495 if (unlikely(pci_channel_offline(fcport->vha->hw->pdev))) {
b9b12f73 1496 qla2x00_abort_all_cmds(fcport->vha, DID_NO_CONNECT << 16);
85880801
AV
1497 return;
1498 }
5f3a9a20
SJ
1499
1500 /*
1501 * Transport has effectively 'deleted' the rport, clear
1502 * all local references.
1503 */
1504 spin_lock_irq(host->host_lock);
1505 fcport->rport = NULL;
1506 *((fc_port_t **)rport->dd_data) = NULL;
1507 spin_unlock_irq(host->host_lock);
1508}
1509
1510static void
1511qla2x00_terminate_rport_io(struct fc_rport *rport)
1512{
1513 fc_port_t *fcport = *(fc_port_t **)rport->dd_data;
1514
3c01b4f9
SJ
1515 if (!fcport)
1516 return;
1517
85880801
AV
1518 if (test_bit(ABORT_ISP_ACTIVE, &fcport->vha->dpc_flags))
1519 return;
1520
b9b12f73
SJ
1521 if (unlikely(pci_channel_offline(fcport->vha->hw->pdev))) {
1522 qla2x00_abort_all_cmds(fcport->vha, DID_NO_CONNECT << 16);
1523 return;
1524 }
6390d1f3
AV
1525 /*
1526 * At this point all fcport's software-states are cleared. Perform any
1527 * final cleanup of firmware resources (PCBs and XCBs).
1528 */
6805c150
AV
1529 if (fcport->loop_id != FC_NO_LOOP_ID &&
1530 !test_bit(UNLOADING, &fcport->vha->dpc_flags))
7b867cf7
AC
1531 fcport->vha->hw->isp_ops->fabric_logout(fcport->vha,
1532 fcport->loop_id, fcport->d_id.b.domain,
1533 fcport->d_id.b.area, fcport->d_id.b.al_pa);
5f3a9a20
SJ
1534}
1535
91ca7b01
AV
1536static int
1537qla2x00_issue_lip(struct Scsi_Host *shost)
1538{
7b867cf7 1539 scsi_qla_host_t *vha = shost_priv(shost);
91ca7b01 1540
7b867cf7 1541 qla2x00_loop_reset(vha);
91ca7b01
AV
1542 return 0;
1543}
1544
392e2f65
AV
1545static struct fc_host_statistics *
1546qla2x00_get_fc_host_stats(struct Scsi_Host *shost)
1547{
7b867cf7
AC
1548 scsi_qla_host_t *vha = shost_priv(shost);
1549 struct qla_hw_data *ha = vha->hw;
1550 struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
392e2f65 1551 int rval;
43ef0580
AV
1552 struct link_statistics *stats;
1553 dma_addr_t stats_dma;
392e2f65
AV
1554 struct fc_host_statistics *pfc_host_stat;
1555
1556 pfc_host_stat = &ha->fc_host_stat;
1557 memset(pfc_host_stat, -1, sizeof(struct fc_host_statistics));
1558
85880801
AV
1559 if (test_bit(UNLOADING, &vha->dpc_flags))
1560 goto done;
1561
1562 if (unlikely(pci_channel_offline(ha->pdev)))
1563 goto done;
1564
43ef0580
AV
1565 stats = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &stats_dma);
1566 if (stats == NULL) {
1567 DEBUG2_3_11(printk("%s(%ld): Failed to allocate memory.\n",
7b867cf7 1568 __func__, base_vha->host_no));
43ef0580
AV
1569 goto done;
1570 }
1571 memset(stats, 0, DMA_POOL_SIZE);
1572
1573 rval = QLA_FUNCTION_FAILED;
e428924c 1574 if (IS_FWI2_CAPABLE(ha)) {
7b867cf7
AC
1575 rval = qla24xx_get_isp_stats(base_vha, stats, stats_dma);
1576 } else if (atomic_read(&base_vha->loop_state) == LOOP_READY &&
1577 !test_bit(ABORT_ISP_ACTIVE, &base_vha->dpc_flags) &&
1578 !test_bit(ISP_ABORT_NEEDED, &base_vha->dpc_flags) &&
178779a6
AV
1579 !ha->dpc_active) {
1580 /* Must be in a 'READY' state for statistics retrieval. */
7b867cf7
AC
1581 rval = qla2x00_get_link_status(base_vha, base_vha->loop_id,
1582 stats, stats_dma);
392e2f65 1583 }
178779a6
AV
1584
1585 if (rval != QLA_SUCCESS)
43ef0580
AV
1586 goto done_free;
1587
1588 pfc_host_stat->link_failure_count = stats->link_fail_cnt;
1589 pfc_host_stat->loss_of_sync_count = stats->loss_sync_cnt;
1590 pfc_host_stat->loss_of_signal_count = stats->loss_sig_cnt;
1591 pfc_host_stat->prim_seq_protocol_err_count = stats->prim_seq_err_cnt;
1592 pfc_host_stat->invalid_tx_word_count = stats->inval_xmit_word_cnt;
1593 pfc_host_stat->invalid_crc_count = stats->inval_crc_cnt;
1594 if (IS_FWI2_CAPABLE(ha)) {
032d8dd7 1595 pfc_host_stat->lip_count = stats->lip_cnt;
43ef0580
AV
1596 pfc_host_stat->tx_frames = stats->tx_frames;
1597 pfc_host_stat->rx_frames = stats->rx_frames;
1598 pfc_host_stat->dumped_frames = stats->dumped_frames;
1599 pfc_host_stat->nos_count = stats->nos_rcvd;
1600 }
49fd462a
HZ
1601 pfc_host_stat->fcp_input_megabytes = ha->qla_stats.input_bytes >> 20;
1602 pfc_host_stat->fcp_output_megabytes = ha->qla_stats.output_bytes >> 20;
392e2f65 1603
43ef0580
AV
1604done_free:
1605 dma_pool_free(ha->s_dma_pool, stats, stats_dma);
178779a6 1606done:
392e2f65
AV
1607 return pfc_host_stat;
1608}
1609
1620f7c2
AV
1610static void
1611qla2x00_get_host_symbolic_name(struct Scsi_Host *shost)
1612{
7b867cf7 1613 scsi_qla_host_t *vha = shost_priv(shost);
1620f7c2 1614
7b867cf7 1615 qla2x00_get_sym_node_name(vha, fc_host_symbolic_name(shost));
1620f7c2
AV
1616}
1617
a740a3f0
AV
1618static void
1619qla2x00_set_host_system_hostname(struct Scsi_Host *shost)
1620{
7b867cf7 1621 scsi_qla_host_t *vha = shost_priv(shost);
a740a3f0 1622
7b867cf7 1623 set_bit(REGISTER_FDMI_NEEDED, &vha->dpc_flags);
a740a3f0
AV
1624}
1625
90991c85
AV
1626static void
1627qla2x00_get_host_fabric_name(struct Scsi_Host *shost)
1628{
7b867cf7 1629 scsi_qla_host_t *vha = shost_priv(shost);
90991c85
AV
1630 u64 node_name;
1631
7b867cf7
AC
1632 if (vha->device_flags & SWITCH_FOUND)
1633 node_name = wwn_to_u64(vha->fabric_node_name);
90991c85 1634 else
7b867cf7 1635 node_name = wwn_to_u64(vha->node_name);
90991c85
AV
1636
1637 fc_host_fabric_name(shost) = node_name;
1638}
1639
7047fcdd
AV
1640static void
1641qla2x00_get_host_port_state(struct Scsi_Host *shost)
1642{
7b867cf7
AC
1643 scsi_qla_host_t *vha = shost_priv(shost);
1644 struct scsi_qla_host *base_vha = pci_get_drvdata(vha->hw->pdev);
7047fcdd 1645
7b867cf7 1646 if (!base_vha->flags.online)
7047fcdd 1647 fc_host_port_state(shost) = FC_PORTSTATE_OFFLINE;
7b867cf7 1648 else if (atomic_read(&base_vha->loop_state) == LOOP_TIMEOUT)
7047fcdd
AV
1649 fc_host_port_state(shost) = FC_PORTSTATE_UNKNOWN;
1650 else
1651 fc_host_port_state(shost) = FC_PORTSTATE_ONLINE;
1652}
1653
2c3dfe3f
SJ
1654static int
1655qla24xx_vport_create(struct fc_vport *fc_vport, bool disable)
1656{
1657 int ret = 0;
2afa19a9 1658 uint8_t qos = 0;
7b867cf7
AC
1659 scsi_qla_host_t *base_vha = shost_priv(fc_vport->shost);
1660 scsi_qla_host_t *vha = NULL;
73208dfd 1661 struct qla_hw_data *ha = base_vha->hw;
2afa19a9
AC
1662 uint16_t options = 0;
1663 int cnt;
59e0b8b0 1664 struct req_que *req = ha->req_q_map[0];
2c3dfe3f
SJ
1665
1666 ret = qla24xx_vport_create_req_sanity_check(fc_vport);
1667 if (ret) {
1668 DEBUG15(printk("qla24xx_vport_create_req_sanity_check failed, "
1669 "status %x\n", ret));
1670 return (ret);
1671 }
1672
1673 vha = qla24xx_create_vhost(fc_vport);
1674 if (vha == NULL) {
1675 DEBUG15(printk ("qla24xx_create_vhost failed, vha = %p\n",
1676 vha));
1677 return FC_VPORT_FAILED;
1678 }
1679 if (disable) {
1680 atomic_set(&vha->vp_state, VP_OFFLINE);
1681 fc_vport_set_state(fc_vport, FC_VPORT_DISABLED);
1682 } else
1683 atomic_set(&vha->vp_state, VP_FAILED);
1684
1685 /* ready to create vport */
7b867cf7
AC
1686 qla_printk(KERN_INFO, vha->hw, "VP entry id %d assigned.\n",
1687 vha->vp_idx);
2c3dfe3f
SJ
1688
1689 /* initialized vport states */
1690 atomic_set(&vha->loop_state, LOOP_DOWN);
1691 vha->vp_err_state= VP_ERR_PORTDWN;
1692 vha->vp_prev_err_state= VP_ERR_UNKWN;
1693 /* Check if physical ha port is Up */
7b867cf7
AC
1694 if (atomic_read(&base_vha->loop_state) == LOOP_DOWN ||
1695 atomic_read(&base_vha->loop_state) == LOOP_DEAD) {
2c3dfe3f
SJ
1696 /* Don't retry or attempt login of this virtual port */
1697 DEBUG15(printk ("scsi(%ld): pport loop_state is not UP.\n",
7b867cf7 1698 base_vha->host_no));
2c3dfe3f
SJ
1699 atomic_set(&vha->loop_state, LOOP_DEAD);
1700 if (!disable)
1701 fc_vport_set_state(fc_vport, FC_VPORT_LINKDOWN);
1702 }
1703
d139b9bd
JB
1704 if (scsi_add_host_with_dma(vha->host, &fc_vport->dev,
1705 &ha->pdev->dev)) {
2c3dfe3f
SJ
1706 DEBUG15(printk("scsi(%ld): scsi_add_host failure for VP[%d].\n",
1707 vha->host_no, vha->vp_idx));
1708 goto vport_create_failed_2;
1709 }
1710
1711 /* initialize attributes */
1712 fc_host_node_name(vha->host) = wwn_to_u64(vha->node_name);
1713 fc_host_port_name(vha->host) = wwn_to_u64(vha->port_name);
1714 fc_host_supported_classes(vha->host) =
7b867cf7 1715 fc_host_supported_classes(base_vha->host);
2c3dfe3f 1716 fc_host_supported_speeds(vha->host) =
7b867cf7 1717 fc_host_supported_speeds(base_vha->host);
2c3dfe3f
SJ
1718
1719 qla24xx_vport_disable(fc_vport, disable);
1720
7163ea81 1721 if (ha->flags.cpu_affinity_enabled) {
59e0b8b0
AC
1722 req = ha->req_q_map[1];
1723 goto vport_queue;
1724 } else if (ql2xmaxqueues == 1 || !ha->npiv_info)
2afa19a9
AC
1725 goto vport_queue;
1726 /* Create a request queue in QoS mode for the vport */
40859ae5
AC
1727 for (cnt = 0; cnt < ha->nvram_npiv_size; cnt++) {
1728 if (memcmp(ha->npiv_info[cnt].port_name, vha->port_name, 8) == 0
1729 && memcmp(ha->npiv_info[cnt].node_name, vha->node_name,
59e0b8b0 1730 8) == 0) {
2afa19a9
AC
1731 qos = ha->npiv_info[cnt].q_qos;
1732 break;
73208dfd 1733 }
2afa19a9
AC
1734 }
1735 if (qos) {
1736 ret = qla25xx_create_req_que(ha, options, vha->vp_idx, 0, 0,
1737 qos);
1738 if (!ret)
1739 qla_printk(KERN_WARNING, ha,
1740 "Can't create request queue for vp_idx:%d\n",
1741 vha->vp_idx);
59e0b8b0 1742 else {
2afa19a9 1743 DEBUG2(qla_printk(KERN_INFO, ha,
40859ae5
AC
1744 "Request Que:%d (QoS: %d) created for vp_idx:%d\n",
1745 ret, qos, vha->vp_idx));
59e0b8b0
AC
1746 req = ha->req_q_map[ret];
1747 }
73208dfd
AC
1748 }
1749
2afa19a9 1750vport_queue:
59e0b8b0 1751 vha->req = req;
2c3dfe3f 1752 return 0;
2afa19a9 1753
2c3dfe3f
SJ
1754vport_create_failed_2:
1755 qla24xx_disable_vp(vha);
1756 qla24xx_deallocate_vp_id(vha);
2c3dfe3f
SJ
1757 scsi_host_put(vha->host);
1758 return FC_VPORT_FAILED;
1759}
1760
a824ebb3 1761static int
2c3dfe3f
SJ
1762qla24xx_vport_delete(struct fc_vport *fc_vport)
1763{
2c3dfe3f 1764 scsi_qla_host_t *vha = fc_vport->dd_data;
7b867cf7 1765 fc_port_t *fcport, *tfcport;
73208dfd
AC
1766 struct qla_hw_data *ha = vha->hw;
1767 uint16_t id = vha->vp_idx;
c9c5ced9
AV
1768
1769 while (test_bit(LOOP_RESYNC_ACTIVE, &vha->dpc_flags) ||
7b867cf7 1770 test_bit(FCPORT_UPDATE_NEEDED, &vha->dpc_flags))
c9c5ced9 1771 msleep(1000);
2c3dfe3f
SJ
1772
1773 qla24xx_disable_vp(vha);
2c3dfe3f 1774
7b867cf7
AC
1775 fc_remove_host(vha->host);
1776
1777 scsi_remove_host(vha->host);
1778
1779 list_for_each_entry_safe(fcport, tfcport, &vha->vp_fcports, list) {
1780 list_del(&fcport->list);
1781 kfree(fcport);
1782 fcport = NULL;
1783 }
1784
1785 qla24xx_deallocate_vp_id(vha);
2c3dfe3f 1786
0d6e61bc
AV
1787 mutex_lock(&ha->vport_lock);
1788 ha->cur_vport_count--;
1789 clear_bit(vha->vp_idx, ha->vp_idx_map);
1790 mutex_unlock(&ha->vport_lock);
1791
2c3dfe3f
SJ
1792 if (vha->timer_active) {
1793 qla2x00_vp_stop_timer(vha);
1794 DEBUG15(printk("scsi(%ld): timer for the vport[%d] = %p "
1795 "has stopped\n",
1796 vha->host_no, vha->vp_idx, vha));
1797 }
1798
7163ea81 1799 if (vha->req->id && !ha->flags.cpu_affinity_enabled) {
2afa19a9 1800 if (qla25xx_delete_req_que(vha, vha->req) != QLA_SUCCESS)
cf5a1631
AC
1801 qla_printk(KERN_WARNING, ha,
1802 "Queue delete failed.\n");
1803 }
1804
2c3dfe3f 1805 scsi_host_put(vha->host);
73208dfd 1806 qla_printk(KERN_INFO, ha, "vport %d deleted\n", id);
2c3dfe3f
SJ
1807 return 0;
1808}
1809
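/*
 * qla24xx_vport_disable() - fc_vport "vport_disable" callback.
 * The FC transport passes disable=true to take the virtual port offline
 * and disable=false to bring it back up; this simply maps onto
 * qla24xx_disable_vp()/qla24xx_enable_vp().
 */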
a824ebb3 1810static int
2c3dfe3f
SJ
1811qla24xx_vport_disable(struct fc_vport *fc_vport, bool disable)
1812{
1813 scsi_qla_host_t *vha = fc_vport->dd_data;
1814
1815 if (disable)
1816 qla24xx_disable_vp(vha);
1817 else
1818 qla24xx_enable_vp(vha);
1819
1820 return 0;
1821}
1822
9a069e19
GM
1823/* BSG support for ELS/CT pass through */
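/*
 * qla2x00_get_ctx_bsg_sp() - allocate an srb plus a BSG context.
 * The srb comes from the per-HBA srb mempool and the context (of the
 * caller-supplied size) from kzalloc(); on failure anything already
 * allocated is released and NULL is returned.  Callers (the ELS/CT
 * handlers below) must undo the allocation with kfree(sp->ctx) and
 * mempool_free(sp, ha->srb_mempool) if qla2x00_start_sp() fails.
 */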
1824inline srb_t *
1825qla2x00_get_ctx_bsg_sp(scsi_qla_host_t *vha, fc_port_t *fcport, size_t size)
1826{
1827 srb_t *sp;
1828 struct qla_hw_data *ha = vha->hw;
1829 struct srb_bsg_ctx *ctx;
1830
1831 sp = mempool_alloc(ha->srb_mempool, GFP_KERNEL);
1832 if (!sp)
1833 goto done;
1834 ctx = kzalloc(size, GFP_KERNEL);
1835 if (!ctx) {
1836 mempool_free(sp, ha->srb_mempool);
 sp = NULL;
1837 goto done;
1838 }
1839
1840 memset(sp, 0, sizeof(*sp));
1841 sp->fcport = fcport;
1842 sp->ctx = ctx;
1843done:
1844 return sp;
1845}
1846
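/*
 * qla2x00_process_els() - handle an ELS pass-through BSG request.
 *
 * Flow: reject multi-element scatterlists, resolve the destination
 * fcport (the rport's fcport for FC_BSG_RPT_ELS after a fabric login
 * check, or a temporary fcport built from the destination port id for
 * FC_BSG_HST_ELS_NOLOGIN), DMA-map the request and reply payloads,
 * allocate an srb + srb_bsg context and hand it to qla2x00_start_sp().
 * Completion is reported later when the IOCB status comes back; the
 * error labels below unwind the mappings and the temporary fcport.
 */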
1847static int
1848qla2x00_process_els(struct fc_bsg_job *bsg_job)
1849{
1850 struct fc_rport *rport;
1851 fc_port_t *fcport;
1852 struct Scsi_Host *host;
1853 scsi_qla_host_t *vha;
1854 struct qla_hw_data *ha;
1855 srb_t *sp;
1856 const char *type;
1857 int req_sg_cnt, rsp_sg_cnt;
1858 int rval = (DRIVER_ERROR << 16);
1859 uint16_t nextlid = 0;
1860 struct srb_bsg *els;
1861
1862 /* Multiple SG's are not supported for ELS requests */
1863 if (bsg_job->request_payload.sg_cnt > 1 ||
1864 bsg_job->reply_payload.sg_cnt > 1) {
1865 DEBUG2(printk(KERN_INFO
1866 "multiple SG's are not supported for ELS requests"
1867 " [request_sg_cnt: %x reply_sg_cnt: %x]\n",
1868 bsg_job->request_payload.sg_cnt,
1869 bsg_job->reply_payload.sg_cnt));
1870 rval = -EPERM;
1871 goto done;
1872 }
1873
1874 /* ELS request for rport */
1875 if (bsg_job->request->msgcode == FC_BSG_RPT_ELS) {
1876 rport = bsg_job->rport;
1877 fcport = *(fc_port_t **) rport->dd_data;
1878 host = rport_to_shost(rport);
1879 vha = shost_priv(host);
1880 ha = vha->hw;
1881 type = "FC_BSG_RPT_ELS";
1882
9a069e19
GM
1883 /* make sure the rport is logged in,
1884 * if not perform fabric login
1885 */
1886 if (qla2x00_fabric_login(vha, fcport, &nextlid)) {
1887 DEBUG2(qla_printk(KERN_WARNING, ha,
1888 "failed to login port %06X for ELS passthru\n",
1889 fcport->d_id.b24));
1890 rval = -EIO;
1891 goto done;
1892 }
1893 } else {
1894 host = bsg_job->shost;
1895 vha = shost_priv(host);
1896 ha = vha->hw;
1897 type = "FC_BSG_HST_ELS_NOLOGIN";
1898
9a069e19
GM
1899 /* Allocate a dummy fcport structure, since functions
1900 * preparing the IOCB and mailbox command retrieves port
1901 * specific information from fcport structure. For Host based
1902 * ELS commands there will be no fcport structure allocated
1903 */
1904 fcport = qla2x00_alloc_fcport(vha, GFP_KERNEL);
1905 if (!fcport) {
1906 rval = -ENOMEM;
1907 goto done;
1908 }
1909
1910 /* Initialize all required fields of fcport */
1911 fcport->vha = vha;
1912 fcport->vp_idx = vha->vp_idx;
1913 fcport->d_id.b.al_pa =
1914 bsg_job->request->rqst_data.h_els.port_id[0];
1915 fcport->d_id.b.area =
1916 bsg_job->request->rqst_data.h_els.port_id[1];
1917 fcport->d_id.b.domain =
1918 bsg_job->request->rqst_data.h_els.port_id[2];
1919 fcport->loop_id =
1920 (fcport->d_id.b.al_pa == 0xFD) ?
1921 NPH_FABRIC_CONTROLLER : NPH_F_PORT;
1922 }
1923
db3ad7f8
GM
1924 if (!vha->flags.online) {
1925 DEBUG2(qla_printk(KERN_WARNING, ha,
1926 "host not online\n"));
1927 rval = -EIO;
1928 goto done;
1929 }
9a069e19
GM
1930
1931 req_sg_cnt =
1932 dma_map_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list,
1933 bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
1934 if (!req_sg_cnt) {
1935 rval = -ENOMEM;
1936 goto done_free_fcport;
1937 }
1938 rsp_sg_cnt = dma_map_sg(&ha->pdev->dev, bsg_job->reply_payload.sg_list,
1939 bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
1940 if (!rsp_sg_cnt) {
 dma_unmap_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list,
 bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
1941 rval = -ENOMEM;
1942 goto done_free_fcport;
1943 }
1944
1945 if ((req_sg_cnt != bsg_job->request_payload.sg_cnt) ||
1946 (rsp_sg_cnt != bsg_job->reply_payload.sg_cnt))
1947 {
1948 DEBUG2(printk(KERN_INFO
1949 "dma mapping resulted in different sg counts "
1950 "[request_sg_cnt: %x dma_request_sg_cnt: %x "
1951 "reply_sg_cnt: %x dma_reply_sg_cnt: %x]\n",
1952 bsg_job->request_payload.sg_cnt, req_sg_cnt,
1953 bsg_job->reply_payload.sg_cnt, rsp_sg_cnt));
1954 rval = -EAGAIN;
1955 goto done_unmap_sg;
1956 }
1957
1958 /* Alloc SRB structure */
1959 sp = qla2x00_get_ctx_bsg_sp(vha, fcport, sizeof(struct srb_bsg));
1960 if (!sp) {
1961 rval = -ENOMEM;
1962 goto done_unmap_sg;
1963 }
1964
1965 els = sp->ctx;
1966 els->ctx.type =
1967 (bsg_job->request->msgcode == FC_BSG_RPT_ELS ?
1968 SRB_ELS_CMD_RPT : SRB_ELS_CMD_HST);
1969 els->bsg_job = bsg_job;
1970
1971 DEBUG2(qla_printk(KERN_INFO, ha,
1972 "scsi(%ld:%x): bsg rqst type: %s els type: %x - loop-id=%x "
1973 "portid=%02x%02x%02x.\n", vha->host_no, sp->handle, type,
1974 bsg_job->request->rqst_data.h_els.command_code,
1975 fcport->loop_id, fcport->d_id.b.domain, fcport->d_id.b.area,
1976 fcport->d_id.b.al_pa));
1977
1978 rval = qla2x00_start_sp(sp);
1979 if (rval != QLA_SUCCESS) {
1980 kfree(sp->ctx);
1981 mempool_free(sp, ha->srb_mempool);
1982 rval = -EIO;
1983 goto done_unmap_sg;
1984 }
1985 return rval;
1986
1987done_unmap_sg:
1988 dma_unmap_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list,
1989 bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
1990 dma_unmap_sg(&ha->pdev->dev, bsg_job->reply_payload.sg_list,
1991 bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
1992 goto done_free_fcport;
1993
1994done_free_fcport:
1995 if (bsg_job->request->msgcode == FC_BSG_HST_ELS_NOLOGIN)
1996 kfree(fcport);
1997done:
1998 return rval;
1999}
2000
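/*
 * qla2x00_process_ct() - handle a CT (name/management server)
 * pass-through BSG request.  Requires an FWI2-capable ISP.  The loop id
 * is derived from preamble_word1 (0xFC selects the SNS, 0xFA the
 * management server login), a temporary fcport is built for the
 * destination, the payloads are DMA-mapped and an SRB_CT_CMD srb is
 * queued with qla2x00_start_sp().
 */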
2001static int
2002qla2x00_process_ct(struct fc_bsg_job *bsg_job)
2003{
2004 srb_t *sp;
2005 struct Scsi_Host *host = bsg_job->shost;
2006 scsi_qla_host_t *vha = shost_priv(host);
2007 struct qla_hw_data *ha = vha->hw;
2008 int rval = (DRIVER_ERROR << 16);
2009 int req_sg_cnt, rsp_sg_cnt;
2010 uint16_t loop_id;
2011 struct fc_port *fcport;
2012 char *type = "FC_BSG_HST_CT";
2013 struct srb_bsg *ct;
2014
2015 /* pass through is supported only for ISP 4Gb or higher */
2016 if (!IS_FWI2_CAPABLE(ha)) {
2017 DEBUG2(qla_printk(KERN_INFO, ha,
2018 "scsi(%ld): firmware is not capable of supporting FC "
2019 "CT pass-through\n", vha->host_no));
2020 rval = -EPERM;
2021 goto done;
2022 }
2023
2024 req_sg_cnt =
2025 dma_map_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list,
2026 bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
2027 if (!req_sg_cnt) {
2028 rval = -ENOMEM;
2029 goto done;
2030 }
2031
2032 rsp_sg_cnt = dma_map_sg(&ha->pdev->dev, bsg_job->reply_payload.sg_list,
2033 bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
2034 if (!rsp_sg_cnt) {
 dma_unmap_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list,
 bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
2035 rval = -ENOMEM;
2036 goto done;
2037 }
2038
2039 if ((req_sg_cnt != bsg_job->request_payload.sg_cnt) ||
2040 (rsp_sg_cnt != bsg_job->reply_payload.sg_cnt))
2041 {
2042 DEBUG2(qla_printk(KERN_WARNING, ha,
2043 "dma mapping resulted in different sg counts "
2044 "[request_sg_cnt: %x dma_request_sg_cnt: %x "
2045 "reply_sg_cnt: %x dma_reply_sg_cnt: %x]\n",
2046 bsg_job->request_payload.sg_cnt, req_sg_cnt,
2047 bsg_job->reply_payload.sg_cnt, rsp_sg_cnt));
2048 rval = -EAGAIN;
2049 goto done_unmap_sg;
2050 }
2051
db3ad7f8
GM
2052 if (!vha->flags.online) {
2053 DEBUG2(qla_printk(KERN_WARNING, ha,
2054 "host not online\n"));
2055 rval = -EIO;
2056 goto done_unmap_sg;
2057 }
2058
9a069e19
GM
2059 loop_id =
2060 (bsg_job->request->rqst_data.h_ct.preamble_word1 & 0xFF000000)
2061 >> 24;
2062 switch (loop_id) {
2063 case 0xFC:
2064 loop_id = cpu_to_le16(NPH_SNS);
2065 break;
2066 case 0xFA:
2067 loop_id = vha->mgmt_svr_loop_id;
2068 break;
2069 default:
2070 DEBUG2(qla_printk(KERN_INFO, ha,
2071 "Unknown loop id: %x\n", loop_id));
2072 rval = -EINVAL;
2073 goto done_unmap_sg;
2074 }
2075
2076 /* Allocate a dummy fcport structure, since functions preparing the
2077 * IOCB and mailbox command retrieves port specific information
2078 * from fcport structure. For Host based ELS commands there will be
2079 * no fcport structure allocated
2080 */
2081 fcport = qla2x00_alloc_fcport(vha, GFP_KERNEL);
2082 if (!fcport)
2083 {
2084 rval = -ENOMEM;
2085 goto done_unmap_sg;
2086 }
2087
2088 /* Initialize all required fields of fcport */
2089 fcport->vha = vha;
2090 fcport->vp_idx = vha->vp_idx;
2091 fcport->d_id.b.al_pa = bsg_job->request->rqst_data.h_ct.port_id[0];
2092 fcport->d_id.b.area = bsg_job->request->rqst_data.h_ct.port_id[1];
2093 fcport->d_id.b.domain = bsg_job->request->rqst_data.h_ct.port_id[2];
2094 fcport->loop_id = loop_id;
2095
2096 /* Alloc SRB structure */
2097 sp = qla2x00_get_ctx_bsg_sp(vha, fcport, sizeof(struct srb_bsg));
2098 if (!sp) {
2099 rval = -ENOMEM;
2100 goto done_free_fcport;
2101 }
2102
2103 ct = sp->ctx;
2104 ct->ctx.type = SRB_CT_CMD;
2105 ct->bsg_job = bsg_job;
2106
2107 DEBUG2(qla_printk(KERN_INFO, ha,
2108 "scsi(%ld:%x): bsg rqst type: %s els type: %x - loop-id=%x "
2109 "portid=%02x%02x%02x.\n", vha->host_no, sp->handle, type,
2110 (bsg_job->request->rqst_data.h_ct.preamble_word2 >> 16),
2111 fcport->loop_id, fcport->d_id.b.domain, fcport->d_id.b.area,
2112 fcport->d_id.b.al_pa));
2113
2114 rval = qla2x00_start_sp(sp);
2115 if (rval != QLA_SUCCESS) {
2116 kfree(sp->ctx);
2117 mempool_free(sp, ha->srb_mempool);
2118 rval = -EIO;
2119 goto done_free_fcport;
2120 }
2121 return rval;
2122
2123done_free_fcport:
2124 kfree(fcport);
2125done_unmap_sg:
2126 dma_unmap_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list,
2127 bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
2128 dma_unmap_sg(&ha->pdev->dev, bsg_job->reply_payload.sg_list,
2129 bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
2130done:
2131 return rval;
2132}
2133
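/*
 * qla2x00_process_vendor_specific() - handle FC_BSG_HST_VENDOR
 * requests.  vendor_cmd[0] selects the operation (loopback/echo
 * diagnostic, QLA84_RESET or QLA84_MGMT_CMD) and vendor_cmd[1] carries
 * its options.  The request payload is copied into a DMA buffer, the
 * command is issued synchronously through the mailbox/IOCB helpers, and
 * the mailbox status words plus the command code are appended after the
 * fc_bsg_reply in bsg_job->req->sense before completing the job with
 * job_done().
 */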
2134static int
2135qla2x00_process_vendor_specific(struct fc_bsg_job *bsg_job)
2136{
2137 struct Scsi_Host *host = bsg_job->shost;
2138 scsi_qla_host_t *vha = shost_priv(host);
2139 struct qla_hw_data *ha = vha->hw;
2140 int rval;
2141 uint8_t command_sent;
2142 uint32_t vendor_cmd;
2143 char *type;
2144 struct msg_echo_lb elreq;
2145 uint16_t response[MAILBOX_REGISTER_COUNT];
2146 uint8_t *fw_sts_ptr;
2147 uint8_t *req_data = NULL;
2148 dma_addr_t req_data_dma;
2149 uint32_t req_data_len;
2150 uint8_t *rsp_data = NULL;
2151 dma_addr_t rsp_data_dma;
2152 uint32_t rsp_data_len;
2153
2154 if (test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags) ||
2155 test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags) ||
2156 test_bit(ISP_ABORT_RETRY, &vha->dpc_flags)) {
2157 rval = -EBUSY;
2158 goto done;
2159 }
2160
db3ad7f8
GM
2161 if (!vha->flags.online) {
2162 DEBUG2(qla_printk(KERN_WARNING, ha,
2163 "host not online\n"));
2164 rval = -EIO;
2165 goto done;
2166 }
2167
9a069e19
GM
2168 elreq.req_sg_cnt =
2169 dma_map_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list,
2170 bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
2171 if (!elreq.req_sg_cnt) {
2172 rval = -ENOMEM;
2173 goto done;
2174 }
2175 elreq.rsp_sg_cnt =
2176 dma_map_sg(&ha->pdev->dev, bsg_job->reply_payload.sg_list,
2177 bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
2178 if (!elreq.rsp_sg_cnt) {
2179 rval = -ENOMEM;
2180 goto done;
2181 }
2182
2183 if ((elreq.req_sg_cnt != bsg_job->request_payload.sg_cnt) ||
2184 (elreq.rsp_sg_cnt != bsg_job->reply_payload.sg_cnt))
2185 {
2186 DEBUG2(printk(KERN_INFO
2187 "dma mapping resulted in different sg counts "
2188 "[request_sg_cnt: %x dma_request_sg_cnt: %x "
2189 "reply_sg_cnt: %x dma_reply_sg_cnt: %x]\n",
2190 bsg_job->request_payload.sg_cnt, elreq.req_sg_cnt,
2191 bsg_job->reply_payload.sg_cnt, elreq.rsp_sg_cnt));
2192 rval = -EAGAIN;
2193 goto done_unmap_sg;
2194 }
2195 req_data_len = rsp_data_len = bsg_job->request_payload.payload_len;
2196 req_data = dma_alloc_coherent(&ha->pdev->dev, req_data_len,
2197 &req_data_dma, GFP_KERNEL);
 if (!req_data) {
 rval = -ENOMEM;
 goto done_unmap_sg;
 }
2198
2199 rsp_data = dma_alloc_coherent(&ha->pdev->dev, rsp_data_len,
2200 &rsp_data_dma, GFP_KERNEL);
 if (!rsp_data) {
 rval = -ENOMEM;
 goto done_unmap_sg;
 }
2201
2202 /* Copy the request buffer in req_data now */
2203 sg_copy_to_buffer(bsg_job->request_payload.sg_list,
2204 bsg_job->request_payload.sg_cnt, req_data,
2205 req_data_len);
2206
2207 elreq.send_dma = req_data_dma;
2208 elreq.rcv_dma = rsp_data_dma;
2209 elreq.transfer_size = req_data_len;
2210
2211 /* Vendor cmd : loopback or ECHO diagnostic
2212 * Options:
2213 * Loopback : Either internal or external loopback
2214 * ECHO: ECHO ELS or Vendor specific FC4 link data
2215 */
2216 vendor_cmd = bsg_job->request->rqst_data.h_vendor.vendor_cmd[0];
2217 elreq.options =
2218 *(((uint32_t *)bsg_job->request->rqst_data.h_vendor.vendor_cmd)
2219 + 1);
2220
2221 switch (bsg_job->request->rqst_data.h_vendor.vendor_cmd[0]) {
2222 case QL_VND_LOOPBACK:
2223 if (ha->current_topology != ISP_CFG_F) {
2224 type = "FC_BSG_HST_VENDOR_LOOPBACK";
2225
9a069e19
GM
2226 DEBUG2(qla_printk(KERN_INFO, ha,
2227 "scsi(%ld) bsg rqst type: %s vendor rqst type: %x options: %x.\n",
2228 vha->host_no, type, vendor_cmd, elreq.options));
db3ad7f8 2229
9a069e19
GM
2230 command_sent = INT_DEF_LB_LOOPBACK_CMD;
2231 rval = qla2x00_loopback_test(vha, &elreq, response);
2232 if (IS_QLA81XX(ha)) {
2233 if (response[0] == MBS_COMMAND_ERROR && response[1] == MBS_LB_RESET) {
2234 DEBUG2(printk(KERN_ERR "%s(%ld): ABORTing "
2235 "ISP\n", __func__, vha->host_no));
2236 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
2237 qla2xxx_wake_dpc(vha);
2238 }
2239 }
2240 } else {
2241 type = "FC_BSG_HST_VENDOR_ECHO_DIAG";
2242 DEBUG2(qla_printk(KERN_INFO, ha,
2243 "scsi(%ld) bsg rqst type: %s vendor rqst type: %x options: %x.\n",
2244 vha->host_no, type, vendor_cmd, elreq.options));
db3ad7f8 2245
9a069e19
GM
2246 command_sent = INT_DEF_LB_ECHO_CMD;
2247 rval = qla2x00_echo_test(vha, &elreq, response);
2248 }
2249 break;
2250 case QLA84_RESET:
2251 if (!IS_QLA84XX(vha->hw)) {
2252 rval = -EINVAL;
2253 DEBUG16(printk(
2254 "%s(%ld): 8xxx exiting.\n",
2255 __func__, vha->host_no));
2256 return rval;
2257 }
2258 rval = qla84xx_reset(vha, &elreq, bsg_job);
2259 break;
2260 case QLA84_MGMT_CMD:
2261 if (!IS_QLA84XX(vha->hw)) {
2262 rval = -EINVAL;
2263 DEBUG16(printk(
2264 "%s(%ld): 8xxx exiting.\n",
2265 __func__, vha->host_no));
2266 return rval;
2267 }
2268 rval = qla84xx_mgmt_cmd(vha, &elreq, bsg_job);
2269 break;
2270 default:
2271 rval = -ENOSYS;
2272 }
2273
2274 if (rval != QLA_SUCCESS) {
2275 DEBUG2(qla_printk(KERN_WARNING, ha,
2276 "scsi(%ld) Vendor request %s failed\n", vha->host_no, type));
2277 rval = 0;
2278 bsg_job->reply->result = (DID_ERROR << 16);
236b0249 2279 bsg_job->reply->reply_payload_rcv_len = 0;
9a069e19
GM
2280 fw_sts_ptr = ((uint8_t*)bsg_job->req->sense) + sizeof(struct fc_bsg_reply);
2281 memcpy(fw_sts_ptr, response, sizeof(response));
2282 fw_sts_ptr += sizeof(response);
2283 *fw_sts_ptr = command_sent;
2284 } else {
2285 DEBUG2(qla_printk(KERN_WARNING, ha,
2286 "scsi(%ld) Vendor request %s completed\n", vha->host_no, type));
2287 rval = bsg_job->reply->result = 0;
2288 bsg_job->reply_len = sizeof(struct fc_bsg_reply) + sizeof(response) + sizeof(uint8_t);
2289 bsg_job->reply->reply_payload_rcv_len = bsg_job->reply_payload.payload_len;
2290 fw_sts_ptr = ((uint8_t*)bsg_job->req->sense) + sizeof(struct fc_bsg_reply);
2291 memcpy(fw_sts_ptr, response, sizeof(response));
2292 fw_sts_ptr += sizeof(response);
2293 *fw_sts_ptr = command_sent;
2294 sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
2295 bsg_job->reply_payload.sg_cnt, rsp_data,
2296 rsp_data_len);
2297 }
2298 bsg_job->job_done(bsg_job);
2299
2300done_unmap_sg:
2301
2302 if (req_data)
2303 dma_free_coherent(&ha->pdev->dev, req_data_len,
2304 req_data, req_data_dma);
 if (rsp_data)
 dma_free_coherent(&ha->pdev->dev, rsp_data_len,
 rsp_data, rsp_data_dma);
2305 dma_unmap_sg(&ha->pdev->dev,
2306 bsg_job->request_payload.sg_list,
2307 bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
2308 dma_unmap_sg(&ha->pdev->dev,
2309 bsg_job->reply_payload.sg_list,
2310 bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
2311
2312done:
2313 return rval;
2314}
2315
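/*
 * qla24xx_bsg_request() - fc transport .bsg_request entry point.
 * Dispatches on the BSG message code: ELS requests (with or without a
 * prior login) go to qla2x00_process_els(), CT frames to
 * qla2x00_process_ct() and vendor-specific commands to
 * qla2x00_process_vendor_specific(); everything else is rejected with
 * -EINVAL.
 */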
2316static int
2317qla24xx_bsg_request(struct fc_bsg_job *bsg_job)
2318{
2319 int ret = -EINVAL;
2320
2321 switch (bsg_job->request->msgcode) {
2322 case FC_BSG_RPT_ELS:
2323 case FC_BSG_HST_ELS_NOLOGIN:
2324 ret = qla2x00_process_els(bsg_job);
2325 break;
2326 case FC_BSG_HST_CT:
2327 ret = qla2x00_process_ct(bsg_job);
2328 break;
2329 case FC_BSG_HST_VENDOR:
2330 ret = qla2x00_process_vendor_specific(bsg_job);
2331 break;
2332 case FC_BSG_HST_ADD_RPORT:
2333 case FC_BSG_HST_DEL_RPORT:
2334 case FC_BSG_RPT_CT:
2335 default:
2336 DEBUG2(printk("qla2xxx: unsupported BSG request\n"));
2337 break;
2338 }
2339 return ret;
2340}
2341
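/*
 * qla24xx_bsg_timeout() - fc transport .bsg_timeout entry point.
 * Walks the outstanding commands of every request queue under the
 * hardware lock looking for the srb that owns this bsg_job; if found,
 * the firmware is asked to abort it and the srb resources are freed,
 * otherwise the job's result is set to -ENXIO.
 */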
2342static int
2343qla24xx_bsg_timeout(struct fc_bsg_job *bsg_job)
2344{
2345 scsi_qla_host_t *vha = shost_priv(bsg_job->shost);
2346 struct qla_hw_data *ha = vha->hw;
2347 srb_t *sp;
db3ad7f8 2348 int cnt, que;
9a069e19 2349 unsigned long flags;
9a069e19 2350 struct req_que *req;
9a069e19
GM
2351 struct srb_bsg *sp_bsg;
2352
2353 /* find the bsg job from the active list of commands */
2354 spin_lock_irqsave(&ha->hardware_lock, flags);
db3ad7f8
GM
2355 for (que = 0; que < ha->max_req_queues; que++) {
2356 req = ha->req_q_map[que];
2357 if (!req)
2358 continue;
9a069e19 2359
db3ad7f8
GM
2360 for (cnt = 1; cnt < MAX_OUTSTANDING_COMMANDS; cnt++) {
2361 sp = req->outstanding_cmds[cnt];
2362
2363 if (sp) {
2364 sp_bsg = (struct srb_bsg*)sp->ctx;
2365
2366 if (((sp_bsg->ctx.type == SRB_CT_CMD) ||
2367 (sp_bsg->ctx.type == SRB_ELS_CMD_RPT) ||
2368 (sp_bsg->ctx.type == SRB_ELS_CMD_HST)) &&
2369 (sp_bsg->bsg_job == bsg_job)) {
 /*
 * Release the hardware lock before the mailbox-based
 * abort: abort_command() sleeps and takes the lock
 * itself, and the done: path below does not unlock.
 */
 spin_unlock_irqrestore(&ha->hardware_lock, flags);
2370 if (ha->isp_ops->abort_command(sp)) {
2371 DEBUG2(qla_printk(KERN_INFO, ha,
2372 "scsi(%ld): mbx abort_command failed\n", vha->host_no));
2373 bsg_job->req->errors = bsg_job->reply->result = -EIO;
2374 } else {
2375 DEBUG2(qla_printk(KERN_INFO, ha,
2376 "scsi(%ld): mbx abort_command success\n", vha->host_no));
2377 bsg_job->req->errors = bsg_job->reply->result = 0;
2378 }
2379 goto done;
2380 }
2381 }
9a069e19
GM
2382 }
2383 }
2384 spin_unlock_irqrestore(&ha->hardware_lock, flags);
db3ad7f8
GM
2385 DEBUG2(qla_printk(KERN_INFO, ha,
2386 "scsi(%ld) SRB not found to abort\n", vha->host_no));
2387 bsg_job->req->errors = bsg_job->reply->result = -ENXIO;
2388 return 0;
9a069e19 2389
db3ad7f8 2390done:
9a069e19
GM
2391 if (bsg_job->request->msgcode == FC_BSG_HST_CT)
2392 kfree(sp->fcport);
2393 kfree(sp->ctx);
2394 mempool_free(sp, ha->srb_mempool);
2395 return 0;
2396}
2397
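/*
 * FC transport template for the physical (PCI function) host: exports
 * the host/rport/starget attribute accessors above, the NPIV
 * vport_create/disable/delete hooks and the BSG request/timeout
 * handlers.  It is registered with the FC transport core via
 * fc_attach_transport() during module init; the vport variant below is
 * used for the scsi_hosts that back individual NPIV ports.
 */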
1c97a12a 2398struct fc_function_template qla2xxx_transport_functions = {
8482e118
AV
2399
2400 .show_host_node_name = 1,
2401 .show_host_port_name = 1,
ad3e0eda 2402 .show_host_supported_classes = 1,
2ae2b370 2403 .show_host_supported_speeds = 1,
ad3e0eda 2404
8482e118
AV
2405 .get_host_port_id = qla2x00_get_host_port_id,
2406 .show_host_port_id = 1,
04414013
AV
2407 .get_host_speed = qla2x00_get_host_speed,
2408 .show_host_speed = 1,
8d067623
AV
2409 .get_host_port_type = qla2x00_get_host_port_type,
2410 .show_host_port_type = 1,
1620f7c2
AV
2411 .get_host_symbolic_name = qla2x00_get_host_symbolic_name,
2412 .show_host_symbolic_name = 1,
a740a3f0
AV
2413 .set_host_system_hostname = qla2x00_set_host_system_hostname,
2414 .show_host_system_hostname = 1,
90991c85
AV
2415 .get_host_fabric_name = qla2x00_get_host_fabric_name,
2416 .show_host_fabric_name = 1,
7047fcdd
AV
2417 .get_host_port_state = qla2x00_get_host_port_state,
2418 .show_host_port_state = 1,
8482e118 2419
bdf79621 2420 .dd_fcrport_size = sizeof(struct fc_port *),
ad3e0eda 2421 .show_rport_supported_classes = 1,
8482e118
AV
2422
2423 .get_starget_node_name = qla2x00_get_starget_node_name,
2424 .show_starget_node_name = 1,
2425 .get_starget_port_name = qla2x00_get_starget_port_name,
2426 .show_starget_port_name = 1,
2427 .get_starget_port_id = qla2x00_get_starget_port_id,
2428 .show_starget_port_id = 1,
2429
8482e118
AV
2430 .set_rport_dev_loss_tmo = qla2x00_set_rport_loss_tmo,
2431 .show_rport_dev_loss_tmo = 1,
2432
91ca7b01 2433 .issue_fc_host_lip = qla2x00_issue_lip,
5f3a9a20
SJ
2434 .dev_loss_tmo_callbk = qla2x00_dev_loss_tmo_callbk,
2435 .terminate_rport_io = qla2x00_terminate_rport_io,
392e2f65 2436 .get_fc_host_stats = qla2x00_get_fc_host_stats,
2c3dfe3f
SJ
2437
2438 .vport_create = qla24xx_vport_create,
2439 .vport_disable = qla24xx_vport_disable,
2440 .vport_delete = qla24xx_vport_delete,
9a069e19
GM
2441 .bsg_request = qla24xx_bsg_request,
2442 .bsg_timeout = qla24xx_bsg_timeout,
2c3dfe3f
SJ
2443};
2444
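/*
 * FC transport template used for NPIV vport scsi_hosts.  It mirrors
 * qla2xxx_transport_functions above but omits the NPIV vport management
 * hooks and the supported_speeds host attribute, which only apply to
 * the physical port.
 */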
2445struct fc_function_template qla2xxx_transport_vport_functions = {
2446
2447 .show_host_node_name = 1,
2448 .show_host_port_name = 1,
2449 .show_host_supported_classes = 1,
2450
2451 .get_host_port_id = qla2x00_get_host_port_id,
2452 .show_host_port_id = 1,
2453 .get_host_speed = qla2x00_get_host_speed,
2454 .show_host_speed = 1,
2455 .get_host_port_type = qla2x00_get_host_port_type,
2456 .show_host_port_type = 1,
2457 .get_host_symbolic_name = qla2x00_get_host_symbolic_name,
2458 .show_host_symbolic_name = 1,
2459 .set_host_system_hostname = qla2x00_set_host_system_hostname,
2460 .show_host_system_hostname = 1,
2461 .get_host_fabric_name = qla2x00_get_host_fabric_name,
2462 .show_host_fabric_name = 1,
2463 .get_host_port_state = qla2x00_get_host_port_state,
2464 .show_host_port_state = 1,
2465
2466 .dd_fcrport_size = sizeof(struct fc_port *),
2467 .show_rport_supported_classes = 1,
2468
2469 .get_starget_node_name = qla2x00_get_starget_node_name,
2470 .show_starget_node_name = 1,
2471 .get_starget_port_name = qla2x00_get_starget_port_name,
2472 .show_starget_port_name = 1,
2473 .get_starget_port_id = qla2x00_get_starget_port_id,
2474 .show_starget_port_id = 1,
2475
2c3dfe3f
SJ
2476 .set_rport_dev_loss_tmo = qla2x00_set_rport_loss_tmo,
2477 .show_rport_dev_loss_tmo = 1,
2478
2479 .issue_fc_host_lip = qla2x00_issue_lip,
5f3a9a20
SJ
2480 .dev_loss_tmo_callbk = qla2x00_dev_loss_tmo_callbk,
2481 .terminate_rport_io = qla2x00_terminate_rport_io,
2c3dfe3f 2482 .get_fc_host_stats = qla2x00_get_fc_host_stats,
9a069e19
GM
2483 .bsg_request = qla24xx_bsg_request,
2484 .bsg_timeout = qla24xx_bsg_timeout,
8482e118
AV
2485};
2486
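/*
 * qla2x00_init_host_attr() - populate the fc_host attributes for this
 * host: node/port WWNs, Class 3 service, NPIV vport limits and usage,
 * and a supported-speed mask chosen by ISP family (10Gb for 81xx, up to
 * 8Gb for 25xx, 4Gb for 24xx-type, 2Gb for 23xx, otherwise 1Gb).
 */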
8482e118 2487void
7b867cf7 2488qla2x00_init_host_attr(scsi_qla_host_t *vha)
8482e118 2489{
7b867cf7 2490 struct qla_hw_data *ha = vha->hw;
2ae2b370
AV
2491 u32 speed = FC_PORTSPEED_UNKNOWN;
2492
7b867cf7
AC
2493 fc_host_node_name(vha->host) = wwn_to_u64(vha->node_name);
2494 fc_host_port_name(vha->host) = wwn_to_u64(vha->port_name);
2495 fc_host_supported_classes(vha->host) = FC_COS_CLASS3;
2496 fc_host_max_npiv_vports(vha->host) = ha->max_npiv_vports;
2497 fc_host_npiv_vports_inuse(vha->host) = ha->cur_vport_count;
2ae2b370 2498
3a03eb79
AV
2499 if (IS_QLA81XX(ha))
2500 speed = FC_PORTSPEED_10GBIT;
2501 else if (IS_QLA25XX(ha))
2ae2b370
AV
2502 speed = FC_PORTSPEED_8GBIT | FC_PORTSPEED_4GBIT |
2503 FC_PORTSPEED_2GBIT | FC_PORTSPEED_1GBIT;
4d4df193 2504 else if (IS_QLA24XX_TYPE(ha))
2ae2b370
AV
2505 speed = FC_PORTSPEED_4GBIT | FC_PORTSPEED_2GBIT |
2506 FC_PORTSPEED_1GBIT;
2507 else if (IS_QLA23XX(ha))
2508 speed = FC_PORTSPEED_2GBIT | FC_PORTSPEED_1GBIT;
2509 else
2510 speed = FC_PORTSPEED_1GBIT;
7b867cf7 2511 fc_host_supported_speeds(vha->host) = speed;
8482e118 2512}
9a069e19
GM
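/*
 * qla84xx_reset() - vendor command helper for ISP84xx.  vendor_cmd[2]
 * selects whether the diagnostic or the operational firmware is reset;
 * the actual work is done by qla84xx_reset_chip().
 */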
2513static int
2514qla84xx_reset(scsi_qla_host_t *ha, struct msg_echo_lb *mreq, struct fc_bsg_job *bsg_job)
2515{
2516 int ret = 0;
2517 int cmd;
2518 uint16_t cmd_status;
2519
2520 DEBUG16(printk("%s(%ld): entered.\n", __func__, ha->host_no));
2521
2522 cmd = (*((bsg_job->request->rqst_data.h_vendor.vendor_cmd) + 2))
2523 == A84_RESET_FLAG_ENABLE_DIAG_FW ?
2524 A84_ISSUE_RESET_DIAG_FW : A84_ISSUE_RESET_OP_FW;
2525 ret = qla84xx_reset_chip(ha, cmd == A84_ISSUE_RESET_DIAG_FW,
2526 &cmd_status);
2527 return ret;
2528}
2529
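/*
 * qla84xx_mgmt_cmd() - vendor command helper for ISP84xx management
 * operations.  The vendor request words are unpacked into a
 * qla84_msg_mgmt, an ACCESS_CHIP_IOCB is built for the requested
 * operation (memory read/write, config change or info query) and issued
 * with qla2x00_issue_iocb(); for commands other than a config change a
 * DMA data buffer may be attached as a single descriptor segment.
 */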
2530static int
2531qla84xx_mgmt_cmd(scsi_qla_host_t *ha, struct msg_echo_lb *mreq, struct fc_bsg_job *bsg_job)
2532{
2533 struct access_chip_84xx *mn;
2534 dma_addr_t mn_dma, mgmt_dma;
2535 void *mgmt_b = NULL;
2536 int ret = 0;
2537 int rsp_hdr_len, len = 0;
2538 struct qla84_msg_mgmt *ql84_mgmt;
2539
2540 ql84_mgmt = vmalloc(sizeof(struct qla84_msg_mgmt));
 if (!ql84_mgmt)
 return -ENOMEM;
2541 ql84_mgmt->cmd =
2542 *((uint16_t *)(bsg_job->request->rqst_data.h_vendor.vendor_cmd + 2));
2543 ql84_mgmt->mgmtp.u.mem.start_addr =
2544 *((uint32_t *)(bsg_job->request->rqst_data.h_vendor.vendor_cmd + 3));
2545 ql84_mgmt->len =
2546 *((uint32_t *)(bsg_job->request->rqst_data.h_vendor.vendor_cmd + 4));
2547 ql84_mgmt->mgmtp.u.config.id =
2548 *((uint32_t *)(bsg_job->request->rqst_data.h_vendor.vendor_cmd + 5));
2549 ql84_mgmt->mgmtp.u.config.param0 =
2550 *((uint32_t *)(bsg_job->request->rqst_data.h_vendor.vendor_cmd + 6));
2551 ql84_mgmt->mgmtp.u.config.param1 =
2552 *((uint32_t *)(bsg_job->request->rqst_data.h_vendor.vendor_cmd + 7));
2553 ql84_mgmt->mgmtp.u.info.type =
2554 *((uint32_t *)(bsg_job->request->rqst_data.h_vendor.vendor_cmd + 8));
2555 ql84_mgmt->mgmtp.u.info.context =
2556 *((uint32_t *)(bsg_job->request->rqst_data.h_vendor.vendor_cmd + 9));
2557
2558 rsp_hdr_len = bsg_job->request_payload.payload_len;
2559
2560 mn = dma_pool_alloc(ha->hw->s_dma_pool, GFP_KERNEL, &mn_dma);
2561 if (mn == NULL) {
2562 DEBUG2(printk(KERN_ERR "%s: dma alloc for fw buffer "
2563 "failed %lu\n", __func__, ha->host_no));
 vfree(ql84_mgmt);
2564 return -ENOMEM;
2565 }
2566
2567 memset(mn, 0, sizeof (struct access_chip_84xx));
2568
2569 mn->entry_type = ACCESS_CHIP_IOCB_TYPE;
2570 mn->entry_count = 1;
2571
2572 switch (ql84_mgmt->cmd) {
2573 case QLA84_MGMT_READ_MEM:
2574 mn->options = cpu_to_le16(ACO_DUMP_MEMORY);
2575 mn->parameter1 = cpu_to_le32(ql84_mgmt->mgmtp.u.mem.start_addr);
2576 break;
2577 case QLA84_MGMT_WRITE_MEM:
2578 mn->options = cpu_to_le16(ACO_LOAD_MEMORY);
2579 mn->parameter1 = cpu_to_le32(ql84_mgmt->mgmtp.u.mem.start_addr);
2580 break;
2581 case QLA84_MGMT_CHNG_CONFIG:
2582 mn->options = cpu_to_le16(ACO_CHANGE_CONFIG_PARAM);
2583 mn->parameter1 = cpu_to_le32(ql84_mgmt->mgmtp.u.config.id);
2584 mn->parameter2 = cpu_to_le32(ql84_mgmt->mgmtp.u.config.param0);
2585 mn->parameter3 = cpu_to_le32(ql84_mgmt->mgmtp.u.config.param1);
2586 break;
2587 case QLA84_MGMT_GET_INFO:
2588 mn->options = cpu_to_le16(ACO_REQUEST_INFO);
2589 mn->parameter1 = cpu_to_le32(ql84_mgmt->mgmtp.u.info.type);
2590 mn->parameter2 = cpu_to_le32(ql84_mgmt->mgmtp.u.info.context);
2591 break;
2592 default:
2593 ret = -EIO;
2594 goto exit_mgmt0;
2595 }
2596
2597 if ((len == ql84_mgmt->len) &&
2598 ql84_mgmt->cmd != QLA84_MGMT_CHNG_CONFIG) {
2599 mgmt_b = dma_alloc_coherent(&ha->hw->pdev->dev, len,
2600 &mgmt_dma, GFP_KERNEL);
2601 if (mgmt_b == NULL) {
2602 DEBUG2(printk(KERN_ERR "%s: dma alloc mgmt_b "
2603 "failed%lu\n", __func__, ha->host_no));
2604 ret = -ENOMEM;
2605 goto exit_mgmt0;
2606 }
2607 mn->total_byte_cnt = cpu_to_le32(ql84_mgmt->len);
2608 mn->dseg_count = cpu_to_le16(1);
2609 mn->dseg_address[0] = cpu_to_le32(LSD(mgmt_dma));
2610 mn->dseg_address[1] = cpu_to_le32(MSD(mgmt_dma));
2611 mn->dseg_length = cpu_to_le32(len);
2612
2613 if (ql84_mgmt->cmd == QLA84_MGMT_WRITE_MEM) {
2614 memcpy(mgmt_b, ql84_mgmt->payload, len);
2615 }
2616 }
2617
2618 ret = qla2x00_issue_iocb(ha, mn, mn_dma, 0);
2619 if ((ret != QLA_SUCCESS) || (ql84_mgmt->cmd == QLA84_MGMT_WRITE_MEM)
2620 || (ql84_mgmt->cmd == QLA84_MGMT_CHNG_CONFIG)) {
2621 if (ret != QLA_SUCCESS)
2622 DEBUG2(printk(KERN_ERR "%s(%lu): failed\n",
2623 __func__, ha->host_no));
2624 } else if ((ql84_mgmt->cmd == QLA84_MGMT_READ_MEM) ||
2625 (ql84_mgmt->cmd == QLA84_MGMT_GET_INFO)) {
2626 }
2627
2628 if (mgmt_b)
2629 dma_free_coherent(&ha->hw->pdev->dev, len, mgmt_b, mgmt_dma);
2630
2631exit_mgmt0:
2632 dma_pool_free(ha->hw->s_dma_pool, mn, mn_dma);
 vfree(ql84_mgmt);
2633 return ret;
2634}