drivers/scsi/qla2xxx/qla_attr.c
1 /*
2 * QLogic Fibre Channel HBA Driver
3 * Copyright (c) 2003-2014 QLogic Corporation
4 *
5 * See LICENSE.qla2xxx for copyright and licensing details.
6 */
7 #include "qla_def.h"
8 #include "qla_target.h"
9
10 #include <linux/kthread.h>
11 #include <linux/vmalloc.h>
12 #include <linux/slab.h>
13 #include <linux/delay.h>
14
15 static int qla24xx_vport_disable(struct fc_vport *, bool);
16
17 /* SYSFS attributes --------------------------------------------------------- */
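/*
 * The binary attributes defined below are registered on the SCSI host's
 * kobject by qla2x00_alloc_sysfs_attr() (driven by bin_file_entries[]
 * further down), so they normally show up in the host's sysfs device
 * directory -- e.g. /sys/class/scsi_host/hostN/device/ -- though the exact
 * path is an assumption that depends on the platform's sysfs layout.
 */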
18
19 static ssize_t
20 qla2x00_sysfs_read_fw_dump(struct file *filp, struct kobject *kobj,
21 struct bin_attribute *bin_attr,
22 char *buf, loff_t off, size_t count)
23 {
24 struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj,
25 struct device, kobj)));
26 struct qla_hw_data *ha = vha->hw;
27 int rval = 0;
28
29 if (!(ha->fw_dump_reading || ha->mctp_dump_reading))
30 return 0;
31
32 mutex_lock(&ha->optrom_mutex);
33 if (IS_P3P_TYPE(ha)) {
34 if (off < ha->md_template_size) {
35 rval = memory_read_from_buffer(buf, count,
36 &off, ha->md_tmplt_hdr, ha->md_template_size);
37 } else {
38 off -= ha->md_template_size;
39 rval = memory_read_from_buffer(buf, count,
40 &off, ha->md_dump, ha->md_dump_size);
41 }
42 } else if (ha->mctp_dumped && ha->mctp_dump_reading) {
43 rval = memory_read_from_buffer(buf, count, &off, ha->mctp_dump,
44 MCTP_DUMP_SIZE);
45 } else if (ha->fw_dump_reading) {
46 rval = memory_read_from_buffer(buf, count, &off, ha->fw_dump,
47 ha->fw_dump_len);
48 } else {
49 rval = 0;
50 }
51 mutex_unlock(&ha->optrom_mutex);
52 return rval;
53 }
54
55 static ssize_t
56 qla2x00_sysfs_write_fw_dump(struct file *filp, struct kobject *kobj,
57 struct bin_attribute *bin_attr,
58 char *buf, loff_t off, size_t count)
59 {
60 struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj,
61 struct device, kobj)));
62 struct qla_hw_data *ha = vha->hw;
63 int reading;
64
65 if (off != 0)
66 return 0;
67
68 reading = simple_strtol(buf, NULL, 10);
69 switch (reading) {
70 case 0:
71 if (!ha->fw_dump_reading)
72 break;
73
74 ql_log(ql_log_info, vha, 0x705d,
75 "Firmware dump cleared on (%ld).\n", vha->host_no);
76
77 if (IS_P3P_TYPE(ha)) {
78 qla82xx_md_free(vha);
79 qla82xx_md_prep(vha);
80 }
81 ha->fw_dump_reading = 0;
82 ha->fw_dumped = 0;
83 break;
84 case 1:
85 if (ha->fw_dumped && !ha->fw_dump_reading) {
86 ha->fw_dump_reading = 1;
87
88 ql_log(ql_log_info, vha, 0x705e,
89 "Raw firmware dump ready for read on (%ld).\n",
90 vha->host_no);
91 }
92 break;
93 case 2:
94 qla2x00_alloc_fw_dump(vha);
95 break;
96 case 3:
97 if (IS_QLA82XX(ha)) {
98 qla82xx_idc_lock(ha);
99 qla82xx_set_reset_owner(vha);
100 qla82xx_idc_unlock(ha);
101 } else if (IS_QLA8044(ha)) {
102 qla8044_idc_lock(ha);
103 qla82xx_set_reset_owner(vha);
104 qla8044_idc_unlock(ha);
105 } else
106 qla2x00_system_error(vha);
107 break;
108 case 4:
109 if (IS_P3P_TYPE(ha)) {
110 if (ha->md_tmplt_hdr)
111 ql_dbg(ql_dbg_user, vha, 0x705b,
112 "MiniDump supported with this firmware.\n");
113 else
114 ql_dbg(ql_dbg_user, vha, 0x709d,
115 "MiniDump not supported with this firmware.\n");
116 }
117 break;
118 case 5:
119 if (IS_P3P_TYPE(ha))
120 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
121 break;
122 case 6:
123 if (!ha->mctp_dump_reading)
124 break;
125 ql_log(ql_log_info, vha, 0x70c1,
126 "MCTP dump cleared on (%ld).\n", vha->host_no);
127 ha->mctp_dump_reading = 0;
128 ha->mctp_dumped = 0;
129 break;
130 case 7:
131 if (ha->mctp_dumped && !ha->mctp_dump_reading) {
132 ha->mctp_dump_reading = 1;
133 ql_log(ql_log_info, vha, 0x70c2,
134 "Raw mctp dump ready for read on (%ld).\n",
135 vha->host_no);
136 }
137 break;
138 }
139 return count;
140 }
141
142 static struct bin_attribute sysfs_fw_dump_attr = {
143 .attr = {
144 .name = "fw_dump",
145 .mode = S_IRUSR | S_IWUSR,
146 },
147 .size = 0,
148 .read = qla2x00_sysfs_read_fw_dump,
149 .write = qla2x00_sysfs_write_fw_dump,
150 };
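/*
 * Typical fw_dump control flow, as implemented by the write handler above
 * (values are parsed in base 10): write 1 to expose a captured dump for
 * reading, read the file to fetch it, then write 0 to clear it; 2 forces a
 * dump-buffer reallocation, 3 injects a system error/reset, 4 and 5 are
 * P3P MiniDump helpers, and 6/7 are the MCTP-dump equivalents of 0/1.
 * Assuming the usual sysfs location of these attributes:
 *
 *   echo 1 > /sys/class/scsi_host/hostN/device/fw_dump
 *   cat /sys/class/scsi_host/hostN/device/fw_dump > fw.dump
 *   echo 0 > /sys/class/scsi_host/hostN/device/fw_dump
 */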
151
152 static ssize_t
153 qla2x00_sysfs_read_nvram(struct file *filp, struct kobject *kobj,
154 struct bin_attribute *bin_attr,
155 char *buf, loff_t off, size_t count)
156 {
157 struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj,
158 struct device, kobj)));
159 struct qla_hw_data *ha = vha->hw;
160 uint32_t faddr;
161 struct active_regions active_regions = { };
162
163 if (!capable(CAP_SYS_ADMIN))
164 return 0;
165
166 mutex_lock(&ha->optrom_mutex);
167 if (qla2x00_chip_is_down(vha)) {
168 mutex_unlock(&ha->optrom_mutex);
169 return -EAGAIN;
170 }
171
172 if (!IS_NOCACHE_VPD_TYPE(ha)) {
173 mutex_unlock(&ha->optrom_mutex);
174 goto skip;
175 }
176
177 faddr = ha->flt_region_nvram;
178 if (IS_QLA28XX(ha)) {
qla28xx_get_aux_images(vha, &active_regions);
179 if (active_regions.aux.vpd_nvram == QLA27XX_SECONDARY_IMAGE)
180 faddr = ha->flt_region_nvram_sec;
181 }
182 ha->isp_ops->read_optrom(vha, ha->nvram, faddr << 2, ha->nvram_size);
183
184 mutex_unlock(&ha->optrom_mutex);
185
186 skip:
187 return memory_read_from_buffer(buf, count, &off, ha->nvram,
188 ha->nvram_size);
189 }
190
191 static ssize_t
192 qla2x00_sysfs_write_nvram(struct file *filp, struct kobject *kobj,
193 struct bin_attribute *bin_attr,
194 char *buf, loff_t off, size_t count)
195 {
196 struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj,
197 struct device, kobj)));
198 struct qla_hw_data *ha = vha->hw;
199 uint16_t cnt;
200
201 if (!capable(CAP_SYS_ADMIN) || off != 0 || count != ha->nvram_size ||
202 !ha->isp_ops->write_nvram)
203 return -EINVAL;
204
205 /* Checksum NVRAM. */
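/*
 * The final word (or byte, for non-FWI2 parts) is replaced below with the
 * two's complement of the sum of the preceding data, so the complete image
 * sums to zero.
 */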
206 if (IS_FWI2_CAPABLE(ha)) {
207 uint32_t *iter;
208 uint32_t chksum;
209
210 iter = (uint32_t *)buf;
211 chksum = 0;
212 for (cnt = 0; cnt < ((count >> 2) - 1); cnt++, iter++)
213 chksum += le32_to_cpu(*iter);
214 chksum = ~chksum + 1;
215 *iter = cpu_to_le32(chksum);
216 } else {
217 uint8_t *iter;
218 uint8_t chksum;
219
220 iter = (uint8_t *)buf;
221 chksum = 0;
222 for (cnt = 0; cnt < count - 1; cnt++)
223 chksum += *iter++;
224 chksum = ~chksum + 1;
225 *iter = chksum;
226 }
227
228 if (qla2x00_wait_for_hba_online(vha) != QLA_SUCCESS) {
229 ql_log(ql_log_warn, vha, 0x705f,
230 "HBA not online, failing NVRAM update.\n");
231 return -EAGAIN;
232 }
233
234 mutex_lock(&ha->optrom_mutex);
235 if (qla2x00_chip_is_down(vha)) {
236 mutex_unlock(&ha->optrom_mutex);
237 return -EAGAIN;
238 }
239
240 /* Write NVRAM. */
241 ha->isp_ops->write_nvram(vha, buf, ha->nvram_base, count);
242 ha->isp_ops->read_nvram(vha, ha->nvram, ha->nvram_base,
243 count);
244 mutex_unlock(&ha->optrom_mutex);
245
246 ql_dbg(ql_dbg_user, vha, 0x7060,
247 "Setting ISP_ABORT_NEEDED\n");
248 /* NVRAM settings take effect immediately. */
249 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
250 qla2xxx_wake_dpc(vha);
251 qla2x00_wait_for_chip_reset(vha);
252
253 return count;
254 }
255
256 static struct bin_attribute sysfs_nvram_attr = {
257 .attr = {
258 .name = "nvram",
259 .mode = S_IRUSR | S_IWUSR,
260 },
261 .size = 512,
262 .read = qla2x00_sysfs_read_nvram,
263 .write = qla2x00_sysfs_write_nvram,
264 };
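/*
 * Note that NVRAM writes are only accepted as a complete ha->nvram_size
 * image at offset 0; the handler above re-checksums the buffer, commits it
 * through the isp_ops write/read hooks, and then schedules an ISP abort so
 * the new settings take effect immediately.
 */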
265
266 static ssize_t
267 qla2x00_sysfs_read_optrom(struct file *filp, struct kobject *kobj,
268 struct bin_attribute *bin_attr,
269 char *buf, loff_t off, size_t count)
270 {
271 struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj,
272 struct device, kobj)));
273 struct qla_hw_data *ha = vha->hw;
274 ssize_t rval = 0;
275
276 mutex_lock(&ha->optrom_mutex);
277
278 if (ha->optrom_state != QLA_SREADING)
279 goto out;
280
281 rval = memory_read_from_buffer(buf, count, &off, ha->optrom_buffer,
282 ha->optrom_region_size);
283
284 out:
285 mutex_unlock(&ha->optrom_mutex);
286
287 return rval;
288 }
289
290 static ssize_t
291 qla2x00_sysfs_write_optrom(struct file *filp, struct kobject *kobj,
292 struct bin_attribute *bin_attr,
293 char *buf, loff_t off, size_t count)
294 {
295 struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj,
296 struct device, kobj)));
297 struct qla_hw_data *ha = vha->hw;
298
299 mutex_lock(&ha->optrom_mutex);
300
301 if (ha->optrom_state != QLA_SWRITING) {
302 mutex_unlock(&ha->optrom_mutex);
303 return -EINVAL;
304 }
305 if (off > ha->optrom_region_size) {
306 mutex_unlock(&ha->optrom_mutex);
307 return -ERANGE;
308 }
309 if (off + count > ha->optrom_region_size)
310 count = ha->optrom_region_size - off;
311
312 memcpy(&ha->optrom_buffer[off], buf, count);
313 mutex_unlock(&ha->optrom_mutex);
314
315 return count;
316 }
317
318 static struct bin_attribute sysfs_optrom_attr = {
319 .attr = {
320 .name = "optrom",
321 .mode = S_IRUSR | S_IWUSR,
322 },
323 .size = 0,
324 .read = qla2x00_sysfs_read_optrom,
325 .write = qla2x00_sysfs_write_optrom,
326 };
327
328 static ssize_t
329 qla2x00_sysfs_write_optrom_ctl(struct file *filp, struct kobject *kobj,
330 struct bin_attribute *bin_attr,
331 char *buf, loff_t off, size_t count)
332 {
333 struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj,
334 struct device, kobj)));
335 struct qla_hw_data *ha = vha->hw;
336 uint32_t start = 0;
337 uint32_t size = ha->optrom_size;
338 int val, valid;
339 ssize_t rval = count;
340
341 if (off)
342 return -EINVAL;
343
344 if (unlikely(pci_channel_offline(ha->pdev)))
345 return -EAGAIN;
346
347 if (sscanf(buf, "%d:%x:%x", &val, &start, &size) < 1)
348 return -EINVAL;
349 if (start > ha->optrom_size)
350 return -EINVAL;
351 if (size > ha->optrom_size - start)
352 size = ha->optrom_size - start;
353
354 mutex_lock(&ha->optrom_mutex);
355 if (qla2x00_chip_is_down(vha)) {
356 mutex_unlock(&ha->optrom_mutex);
357 return -EAGAIN;
358 }
359 switch (val) {
360 case 0:
361 if (ha->optrom_state != QLA_SREADING &&
362 ha->optrom_state != QLA_SWRITING) {
363 rval = -EINVAL;
364 goto out;
365 }
366 ha->optrom_state = QLA_SWAITING;
367
368 ql_dbg(ql_dbg_user, vha, 0x7061,
369 "Freeing flash region allocation -- 0x%x bytes.\n",
370 ha->optrom_region_size);
371
372 vfree(ha->optrom_buffer);
373 ha->optrom_buffer = NULL;
374 break;
375 case 1:
376 if (ha->optrom_state != QLA_SWAITING) {
377 rval = -EINVAL;
378 goto out;
379 }
380
381 ha->optrom_region_start = start;
382 ha->optrom_region_size = size;
383
384 ha->optrom_state = QLA_SREADING;
385 ha->optrom_buffer = vzalloc(ha->optrom_region_size);
386 if (ha->optrom_buffer == NULL) {
387 ql_log(ql_log_warn, vha, 0x7062,
388 "Unable to allocate memory for optrom retrieval "
389 "(%x).\n", ha->optrom_region_size);
390
391 ha->optrom_state = QLA_SWAITING;
392 rval = -ENOMEM;
393 goto out;
394 }
395
396 if (qla2x00_wait_for_hba_online(vha) != QLA_SUCCESS) {
397 ql_log(ql_log_warn, vha, 0x7063,
398 "HBA not online, failing flash read.\n");
399 rval = -EAGAIN;
400 goto out;
401 }
402
403 ql_dbg(ql_dbg_user, vha, 0x7064,
404 "Reading flash region -- 0x%x/0x%x.\n",
405 ha->optrom_region_start, ha->optrom_region_size);
406
407 ha->isp_ops->read_optrom(vha, ha->optrom_buffer,
408 ha->optrom_region_start, ha->optrom_region_size);
409 break;
410 case 2:
411 if (ha->optrom_state != QLA_SWAITING) {
412 rval = -EINVAL;
413 goto out;
414 }
415
416 /*
417 * We need to be more restrictive on which FLASH regions are
418 * allowed to be updated via user-space. Regions accessible
419 * via this method include:
420 *
421 * ISP21xx/ISP22xx/ISP23xx type boards:
422 *
423 * 0x000000 -> 0x020000 -- Boot code.
424 *
425 * ISP2322/ISP24xx type boards:
426 *
427 * 0x000000 -> 0x07ffff -- Boot code.
428 * 0x080000 -> 0x0fffff -- Firmware.
429 *
430 * ISP25xx type boards:
431 *
432 * 0x000000 -> 0x07ffff -- Boot code.
433 * 0x080000 -> 0x0fffff -- Firmware.
434 * 0x120000 -> 0x12ffff -- VPD and HBA parameters.
435 *
436 * > ISP25xx type boards:
437 *
438 * None -- should go through BSG.
439 */
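/*
 * ha->flt_region_boot and ha->flt_region_fw below are kept as word
 * addresses (as elsewhere in this file, e.g. the "<< 2" applied to
 * flt_region_vpd), hence the "* 4" to convert them to the byte offsets
 * supplied through optrom_ctl.
 */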
440 valid = 0;
441 if (ha->optrom_size == OPTROM_SIZE_2300 && start == 0)
442 valid = 1;
443 else if (start == (ha->flt_region_boot * 4) ||
444 start == (ha->flt_region_fw * 4))
445 valid = 1;
446 else if (IS_QLA24XX_TYPE(ha) || IS_QLA25XX(ha))
447 valid = 1;
448 if (!valid) {
449 ql_log(ql_log_warn, vha, 0x7065,
450 "Invalid start region 0x%x/0x%x.\n", start, size);
451 rval = -EINVAL;
452 goto out;
453 }
454
455 ha->optrom_region_start = start;
456 ha->optrom_region_size = size;
457
458 ha->optrom_state = QLA_SWRITING;
459 ha->optrom_buffer = vzalloc(ha->optrom_region_size);
460 if (ha->optrom_buffer == NULL) {
461 ql_log(ql_log_warn, vha, 0x7066,
462 "Unable to allocate memory for optrom update "
463 "(%x)\n", ha->optrom_region_size);
464
465 ha->optrom_state = QLA_SWAITING;
466 rval = -ENOMEM;
467 goto out;
468 }
469
470 ql_dbg(ql_dbg_user, vha, 0x7067,
471 "Staging flash region write -- 0x%x/0x%x.\n",
472 ha->optrom_region_start, ha->optrom_region_size);
473
474 break;
475 case 3:
476 if (ha->optrom_state != QLA_SWRITING) {
477 rval = -EINVAL;
478 goto out;
479 }
480
481 if (qla2x00_wait_for_hba_online(vha) != QLA_SUCCESS) {
482 ql_log(ql_log_warn, vha, 0x7068,
483 "HBA not online, failing flash update.\n");
484 rval = -EAGAIN;
485 goto out;
486 }
487
488 ql_dbg(ql_dbg_user, vha, 0x7069,
489 "Writing flash region -- 0x%x/0x%x.\n",
490 ha->optrom_region_start, ha->optrom_region_size);
491
492 ha->isp_ops->write_optrom(vha, ha->optrom_buffer,
493 ha->optrom_region_start, ha->optrom_region_size);
494 break;
495 default:
496 rval = -EINVAL;
497 }
498
499 out:
500 mutex_unlock(&ha->optrom_mutex);
501 return rval;
502 }
503
504 static struct bin_attribute sysfs_optrom_ctl_attr = {
505 .attr = {
506 .name = "optrom_ctl",
507 .mode = S_IWUSR,
508 },
509 .size = 0,
510 .write = qla2x00_sysfs_write_optrom_ctl,
511 };
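/*
 * Option ROM access is a two-file protocol: "optrom_ctl" stages the
 * operation and "optrom" carries the data. The control string is parsed as
 * "<op>:<start>:<size>", op in decimal, start/size in hex. A sketch of a
 * flash read, assuming the usual sysfs location of these attributes:
 *
 *   echo "1:0:20000" > optrom_ctl      # op 1: stage a 0x20000-byte read at 0
 *   dd if=optrom of=flash.bin bs=64k   # fetch the staged data
 *   echo "0:0:0" > optrom_ctl          # op 0: release the staging buffer
 *
 * Op 2 stages a write region (data is then written to "optrom") and op 3
 * commits the staged buffer to flash.
 */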
512
513 static ssize_t
514 qla2x00_sysfs_read_vpd(struct file *filp, struct kobject *kobj,
515 struct bin_attribute *bin_attr,
516 char *buf, loff_t off, size_t count)
517 {
518 struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj,
519 struct device, kobj)));
520 struct qla_hw_data *ha = vha->hw;
521 uint32_t faddr;
522 struct active_regions active_regions = { };
523
524 if (unlikely(pci_channel_offline(ha->pdev)))
525 return -EAGAIN;
526
527 if (!capable(CAP_SYS_ADMIN))
528 return -EINVAL;
529
530 if (!IS_NOCACHE_VPD_TYPE(ha))
531 goto skip;
532
533 faddr = ha->flt_region_vpd << 2;
534
535 if (IS_QLA28XX(ha)) {
536 qla28xx_get_aux_images(vha, &active_regions);
537 if (active_regions.aux.vpd_nvram == QLA27XX_SECONDARY_IMAGE)
538 faddr = ha->flt_region_vpd_sec << 2;
539
540 ql_dbg(ql_dbg_init, vha, 0x7070,
541 "Loading %s nvram image.\n",
542 active_regions.aux.vpd_nvram == QLA27XX_PRIMARY_IMAGE ?
543 "primary" : "secondary");
544 }
545
546 mutex_lock(&ha->optrom_mutex);
547 if (qla2x00_chip_is_down(vha)) {
548 mutex_unlock(&ha->optrom_mutex);
549 return -EAGAIN;
550 }
551
552 ha->isp_ops->read_optrom(vha, ha->vpd, faddr, ha->vpd_size);
553 mutex_unlock(&ha->optrom_mutex);
554
556 skip:
557 return memory_read_from_buffer(buf, count, &off, ha->vpd, ha->vpd_size);
558 }
559
560 static ssize_t
561 qla2x00_sysfs_write_vpd(struct file *filp, struct kobject *kobj,
562 struct bin_attribute *bin_attr,
563 char *buf, loff_t off, size_t count)
564 {
565 struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj,
566 struct device, kobj)));
567 struct qla_hw_data *ha = vha->hw;
568 uint8_t *tmp_data;
569
570 if (unlikely(pci_channel_offline(ha->pdev)))
571 return 0;
572
573 if (qla2x00_chip_is_down(vha))
574 return 0;
575
576 if (!capable(CAP_SYS_ADMIN) || off != 0 || count != ha->vpd_size ||
577 !ha->isp_ops->write_nvram)
578 return 0;
579
580 if (qla2x00_wait_for_hba_online(vha) != QLA_SUCCESS) {
581 ql_log(ql_log_warn, vha, 0x706a,
582 "HBA not online, failing VPD update.\n");
583 return -EAGAIN;
584 }
585
586 mutex_lock(&ha->optrom_mutex);
587 if (qla2x00_chip_is_down(vha)) {
588 mutex_unlock(&ha->optrom_mutex);
589 return -EAGAIN;
590 }
591
592 /* Write NVRAM. */
593 ha->isp_ops->write_nvram(vha, buf, ha->vpd_base, count);
594 ha->isp_ops->read_nvram(vha, ha->vpd, ha->vpd_base, count);
595
596 /* Update flash version information for 4Gb & above. */
597 if (!IS_FWI2_CAPABLE(ha)) {
598 mutex_unlock(&ha->optrom_mutex);
599 return -EINVAL;
600 }
601
602 tmp_data = vmalloc(256);
603 if (!tmp_data) {
604 mutex_unlock(&ha->optrom_mutex);
605 ql_log(ql_log_warn, vha, 0x706b,
606 "Unable to allocate memory for VPD information update.\n");
607 return -ENOMEM;
608 }
609 ha->isp_ops->get_flash_version(vha, tmp_data);
610 vfree(tmp_data);
611
612 mutex_unlock(&ha->optrom_mutex);
613
614 return count;
615 }
616
617 static struct bin_attribute sysfs_vpd_attr = {
618 .attr = {
619 .name = "vpd",
620 .mode = S_IRUSR | S_IWUSR,
621 },
622 .size = 0,
623 .read = qla2x00_sysfs_read_vpd,
624 .write = qla2x00_sysfs_write_vpd,
625 };
626
627 static ssize_t
628 qla2x00_sysfs_read_sfp(struct file *filp, struct kobject *kobj,
629 struct bin_attribute *bin_attr,
630 char *buf, loff_t off, size_t count)
631 {
632 struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj,
633 struct device, kobj)));
634 int rval;
635
636 if (!capable(CAP_SYS_ADMIN) || off != 0 || count < SFP_DEV_SIZE)
637 return 0;
638
639 mutex_lock(&vha->hw->optrom_mutex);
640 if (qla2x00_chip_is_down(vha)) {
641 mutex_unlock(&vha->hw->optrom_mutex);
642 return 0;
643 }
644
645 rval = qla2x00_read_sfp_dev(vha, buf, count);
646 mutex_unlock(&vha->hw->optrom_mutex);
647
648 if (rval)
649 return -EIO;
650
651 return count;
652 }
653
654 static struct bin_attribute sysfs_sfp_attr = {
655 .attr = {
656 .name = "sfp",
657 .mode = S_IRUSR | S_IWUSR,
658 },
659 .size = SFP_DEV_SIZE,
660 .read = qla2x00_sysfs_read_sfp,
661 };
662
663 static ssize_t
664 qla2x00_sysfs_write_reset(struct file *filp, struct kobject *kobj,
665 struct bin_attribute *bin_attr,
666 char *buf, loff_t off, size_t count)
667 {
668 struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj,
669 struct device, kobj)));
670 struct qla_hw_data *ha = vha->hw;
671 struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
672 int type;
673 uint32_t idc_control;
674 uint8_t *tmp_data = NULL;
675
676 if (off != 0)
677 return -EINVAL;
678
679 type = simple_strtol(buf, NULL, 10);
680 switch (type) {
681 case 0x2025c:
682 ql_log(ql_log_info, vha, 0x706e,
683 "Issuing ISP reset.\n");
684
685 scsi_block_requests(vha->host);
686 if (IS_QLA82XX(ha)) {
687 ha->flags.isp82xx_no_md_cap = 1;
688 qla82xx_idc_lock(ha);
689 qla82xx_set_reset_owner(vha);
690 qla82xx_idc_unlock(ha);
691 } else if (IS_QLA8044(ha)) {
692 qla8044_idc_lock(ha);
693 idc_control = qla8044_rd_reg(ha,
694 QLA8044_IDC_DRV_CTRL);
695 qla8044_wr_reg(ha, QLA8044_IDC_DRV_CTRL,
696 (idc_control | GRACEFUL_RESET_BIT1));
697 qla82xx_set_reset_owner(vha);
698 qla8044_idc_unlock(ha);
699 } else {
700 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
701 qla2xxx_wake_dpc(vha);
702 }
703 qla2x00_wait_for_chip_reset(vha);
704 scsi_unblock_requests(vha->host);
705 break;
706 case 0x2025d:
707 if (!IS_QLA81XX(ha) && !IS_QLA83XX(ha))
708 return -EPERM;
709
710 ql_log(ql_log_info, vha, 0x706f,
711 "Issuing MPI reset.\n");
712
713 if (IS_QLA83XX(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha)) {
714 uint32_t idc_control;
715
716 qla83xx_idc_lock(vha, 0);
717 __qla83xx_get_idc_control(vha, &idc_control);
718 idc_control |= QLA83XX_IDC_GRACEFUL_RESET;
719 __qla83xx_set_idc_control(vha, idc_control);
720 qla83xx_wr_reg(vha, QLA83XX_IDC_DEV_STATE,
721 QLA8XXX_DEV_NEED_RESET);
722 qla83xx_idc_audit(vha, IDC_AUDIT_TIMESTAMP);
723 qla83xx_idc_unlock(vha, 0);
724 break;
725 } else {
726 /* Make sure FC side is not in reset */
727 qla2x00_wait_for_hba_online(vha);
728
729 /* Issue MPI reset */
730 scsi_block_requests(vha->host);
731 if (qla81xx_restart_mpi_firmware(vha) != QLA_SUCCESS)
732 ql_log(ql_log_warn, vha, 0x7070,
733 "MPI reset failed.\n");
734 scsi_unblock_requests(vha->host);
735 break;
736 }
737 case 0x2025e:
738 if (!IS_P3P_TYPE(ha) || vha != base_vha) {
739 ql_log(ql_log_info, vha, 0x7071,
740 "FCoE ctx reset not supported.\n");
741 return -EPERM;
742 }
743
744 ql_log(ql_log_info, vha, 0x7072,
745 "Issuing FCoE ctx reset.\n");
746 set_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags);
747 qla2xxx_wake_dpc(vha);
748 qla2x00_wait_for_fcoe_ctx_reset(vha);
749 break;
750 case 0x2025f:
751 if (!IS_QLA8031(ha))
752 return -EPERM;
753 ql_log(ql_log_info, vha, 0x70bc,
754 "Disabling Reset by IDC control\n");
755 qla83xx_idc_lock(vha, 0);
756 __qla83xx_get_idc_control(vha, &idc_control);
757 idc_control |= QLA83XX_IDC_RESET_DISABLED;
758 __qla83xx_set_idc_control(vha, idc_control);
759 qla83xx_idc_unlock(vha, 0);
760 break;
761 case 0x20260:
762 if (!IS_QLA8031(ha))
763 return -EPERM;
764 ql_log(ql_log_info, vha, 0x70bd,
765 "Enabling Reset by IDC control\n");
766 qla83xx_idc_lock(vha, 0);
767 __qla83xx_get_idc_control(vha, &idc_control);
768 idc_control &= ~QLA83XX_IDC_RESET_DISABLED;
769 __qla83xx_set_idc_control(vha, idc_control);
770 qla83xx_idc_unlock(vha, 0);
771 break;
772 case 0x20261:
773 ql_dbg(ql_dbg_user, vha, 0x70e0,
774 "Updating cache versions without reset.\n");
775
776 tmp_data = vmalloc(256);
777 if (!tmp_data) {
778 ql_log(ql_log_warn, vha, 0x70e1,
779 "Unable to allocate memory for VPD information update.\n");
780 return -ENOMEM;
781 }
782 ha->isp_ops->get_flash_version(vha, tmp_data);
783 vfree(tmp_data);
784 break;
785 }
786 return count;
787 }
788
789 static struct bin_attribute sysfs_reset_attr = {
790 .attr = {
791 .name = "reset",
792 .mode = S_IWUSR,
793 },
794 .size = 0,
795 .write = qla2x00_sysfs_write_reset,
796 };
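/*
 * The "reset" attribute accepts the magic codes handled above: 0x2025c
 * (ISP reset), 0x2025d (MPI reset), 0x2025e (FCoE context reset), 0x2025f
 * and 0x20260 (disable/enable reset via IDC control), and 0x20261 (refresh
 * cached flash versions). The value is parsed with base 10, so the decimal
 * form must be written, e.g. 131676 for 0x2025c.
 */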
797
798 static ssize_t
799 qla2x00_issue_logo(struct file *filp, struct kobject *kobj,
800 struct bin_attribute *bin_attr,
801 char *buf, loff_t off, size_t count)
802 {
803 struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj,
804 struct device, kobj)));
805 int type;
806 port_id_t did;
807
808 if (!capable(CAP_SYS_ADMIN))
809 return 0;
810
811 if (unlikely(pci_channel_offline(vha->hw->pdev)))
812 return 0;
813
814 if (qla2x00_chip_is_down(vha))
815 return 0;
816
817 type = simple_strtol(buf, NULL, 10);
818
819 did.b.domain = (type & 0x00ff0000) >> 16;
820 did.b.area = (type & 0x0000ff00) >> 8;
821 did.b.al_pa = (type & 0x000000ff);
822
823 ql_log(ql_log_info, vha, 0xd04d, "portid=%02x%02x%02x done\n",
824 did.b.domain, did.b.area, did.b.al_pa);
825
826 ql_log(ql_log_info, vha, 0x70e4, "%s: %d\n", __func__, type);
827
828 qla24xx_els_dcmd_iocb(vha, ELS_DCMD_LOGO, did);
829 return count;
830 }
831
832 static struct bin_attribute sysfs_issue_logo_attr = {
833 .attr = {
834 .name = "issue_logo",
835 .mode = S_IWUSR,
836 },
837 .size = 0,
838 .write = qla2x00_issue_logo,
839 };
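/*
 * The value written to "issue_logo" encodes the destination port ID as
 * 0xDDAAPP (domain, area, AL_PA) and is likewise parsed in base 10, so a
 * port ID of 0x010200 would be written as its decimal form, 66048.
 */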
840
841 static ssize_t
842 qla2x00_sysfs_read_xgmac_stats(struct file *filp, struct kobject *kobj,
843 struct bin_attribute *bin_attr,
844 char *buf, loff_t off, size_t count)
845 {
846 struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj,
847 struct device, kobj)));
848 struct qla_hw_data *ha = vha->hw;
849 int rval;
850 uint16_t actual_size;
851
852 if (!capable(CAP_SYS_ADMIN) || off != 0 || count > XGMAC_DATA_SIZE)
853 return 0;
854
855 if (unlikely(pci_channel_offline(ha->pdev)))
856 return 0;
857 mutex_lock(&vha->hw->optrom_mutex);
858 if (qla2x00_chip_is_down(vha)) {
859 mutex_unlock(&vha->hw->optrom_mutex);
860 return 0;
861 }
862
863 if (ha->xgmac_data)
864 goto do_read;
865
866 ha->xgmac_data = dma_alloc_coherent(&ha->pdev->dev, XGMAC_DATA_SIZE,
867 &ha->xgmac_data_dma, GFP_KERNEL);
868 if (!ha->xgmac_data) {
869 mutex_unlock(&vha->hw->optrom_mutex);
870 ql_log(ql_log_warn, vha, 0x7076,
871 "Unable to allocate memory for XGMAC read-data.\n");
872 return 0;
873 }
874
875 do_read:
876 actual_size = 0;
877 memset(ha->xgmac_data, 0, XGMAC_DATA_SIZE);
878
879 rval = qla2x00_get_xgmac_stats(vha, ha->xgmac_data_dma,
880 XGMAC_DATA_SIZE, &actual_size);
881
882 mutex_unlock(&vha->hw->optrom_mutex);
883 if (rval != QLA_SUCCESS) {
884 ql_log(ql_log_warn, vha, 0x7077,
885 "Unable to read XGMAC data (%x).\n", rval);
886 count = 0;
887 }
888
889 count = actual_size > count ? count : actual_size;
890 memcpy(buf, ha->xgmac_data, count);
891
892 return count;
893 }
894
895 static struct bin_attribute sysfs_xgmac_stats_attr = {
896 .attr = {
897 .name = "xgmac_stats",
898 .mode = S_IRUSR,
899 },
900 .size = 0,
901 .read = qla2x00_sysfs_read_xgmac_stats,
902 };
903
904 static ssize_t
905 qla2x00_sysfs_read_dcbx_tlv(struct file *filp, struct kobject *kobj,
906 struct bin_attribute *bin_attr,
907 char *buf, loff_t off, size_t count)
908 {
909 struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj,
910 struct device, kobj)));
911 struct qla_hw_data *ha = vha->hw;
912 int rval;
913
914 if (!capable(CAP_SYS_ADMIN) || off != 0 || count > DCBX_TLV_DATA_SIZE)
915 return 0;
916
917 if (ha->dcbx_tlv)
918 goto do_read;
919 mutex_lock(&vha->hw->optrom_mutex);
920 if (qla2x00_chip_is_down(vha)) {
921 mutex_unlock(&vha->hw->optrom_mutex);
922 return 0;
923 }
924
925 ha->dcbx_tlv = dma_alloc_coherent(&ha->pdev->dev, DCBX_TLV_DATA_SIZE,
926 &ha->dcbx_tlv_dma, GFP_KERNEL);
927 if (!ha->dcbx_tlv) {
928 mutex_unlock(&vha->hw->optrom_mutex);
929 ql_log(ql_log_warn, vha, 0x7078,
930 "Unable to allocate memory for DCBX TLV read-data.\n");
931 return -ENOMEM;
932 }
933
934 do_read:
935 memset(ha->dcbx_tlv, 0, DCBX_TLV_DATA_SIZE);
936
937 rval = qla2x00_get_dcbx_params(vha, ha->dcbx_tlv_dma,
938 DCBX_TLV_DATA_SIZE);
939
940 mutex_unlock(&vha->hw->optrom_mutex);
941
942 if (rval != QLA_SUCCESS) {
943 ql_log(ql_log_warn, vha, 0x7079,
944 "Unable to read DCBX TLV (%x).\n", rval);
945 return -EIO;
946 }
947
948 memcpy(buf, ha->dcbx_tlv, count);
949
950 return count;
951 }
952
953 static struct bin_attribute sysfs_dcbx_tlv_attr = {
954 .attr = {
955 .name = "dcbx_tlv",
956 .mode = S_IRUSR,
957 },
958 .size = 0,
959 .read = qla2x00_sysfs_read_dcbx_tlv,
960 };
961
962 static struct sysfs_entry {
963 char *name;
964 struct bin_attribute *attr;
965 int type;
966 } bin_file_entries[] = {
967 { "fw_dump", &sysfs_fw_dump_attr, },
968 { "nvram", &sysfs_nvram_attr, },
969 { "optrom", &sysfs_optrom_attr, },
970 { "optrom_ctl", &sysfs_optrom_ctl_attr, },
971 { "vpd", &sysfs_vpd_attr, 1 },
972 { "sfp", &sysfs_sfp_attr, 1 },
973 { "reset", &sysfs_reset_attr, },
974 { "issue_logo", &sysfs_issue_logo_attr, },
975 { "xgmac_stats", &sysfs_xgmac_stats_attr, 3 },
976 { "dcbx_tlv", &sysfs_dcbx_tlv_attr, 3 },
977 { NULL },
978 };
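/*
 * The 'type' field gates attribute creation/removal below: any non-zero
 * type requires an FWI2-capable adapter, type 2 additionally requires an
 * ISP25xx, and type 3 a CNA-capable adapter.
 */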
979
980 void
981 qla2x00_alloc_sysfs_attr(scsi_qla_host_t *vha)
982 {
983 struct Scsi_Host *host = vha->host;
984 struct sysfs_entry *iter;
985 int ret;
986
987 for (iter = bin_file_entries; iter->name; iter++) {
988 if (iter->type && !IS_FWI2_CAPABLE(vha->hw))
989 continue;
990 if (iter->type == 2 && !IS_QLA25XX(vha->hw))
991 continue;
992 if (iter->type == 3 && !(IS_CNA_CAPABLE(vha->hw)))
993 continue;
994
995 ret = sysfs_create_bin_file(&host->shost_gendev.kobj,
996 iter->attr);
997 if (ret)
998 ql_log(ql_log_warn, vha, 0x00f3,
999 "Unable to create sysfs %s binary attribute (%d).\n",
1000 iter->name, ret);
1001 else
1002 ql_dbg(ql_dbg_init, vha, 0x00f4,
1003 "Successfully created sysfs %s binary attribute.\n",
1004 iter->name);
1005 }
1006 }
1007
1008 void
1009 qla2x00_free_sysfs_attr(scsi_qla_host_t *vha, bool stop_beacon)
1010 {
1011 struct Scsi_Host *host = vha->host;
1012 struct sysfs_entry *iter;
1013 struct qla_hw_data *ha = vha->hw;
1014
1015 for (iter = bin_file_entries; iter->name; iter++) {
1016 if (iter->type && !IS_FWI2_CAPABLE(ha))
1017 continue;
1018 if (iter->type == 2 && !IS_QLA25XX(ha))
1019 continue;
1020 if (iter->type == 3 && !(IS_CNA_CAPABLE(ha)))
1021 continue;
1022 if (iter->type == 0x27 &&
1023 (!IS_QLA27XX(ha) && !IS_QLA28XX(ha)))
1024 continue;
1025
1026 sysfs_remove_bin_file(&host->shost_gendev.kobj,
1027 iter->attr);
1028 }
1029
1030 if (stop_beacon && ha->beacon_blink_led == 1)
1031 ha->isp_ops->beacon_off(vha);
1032 }
1033
1034 /* Scsi_Host attributes. */
1035
1036 static ssize_t
1037 qla2x00_driver_version_show(struct device *dev,
1038 struct device_attribute *attr, char *buf)
1039 {
1040 return scnprintf(buf, PAGE_SIZE, "%s\n", qla2x00_version_str);
1041 }
1042
1043 static ssize_t
1044 qla2x00_fw_version_show(struct device *dev,
1045 struct device_attribute *attr, char *buf)
1046 {
1047 scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
1048 struct qla_hw_data *ha = vha->hw;
1049 char fw_str[128];
1050
1051 return scnprintf(buf, PAGE_SIZE, "%s\n",
1052 ha->isp_ops->fw_version_str(vha, fw_str, sizeof(fw_str)));
1053 }
1054
1055 static ssize_t
1056 qla2x00_serial_num_show(struct device *dev, struct device_attribute *attr,
1057 char *buf)
1058 {
1059 scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
1060 struct qla_hw_data *ha = vha->hw;
1061 uint32_t sn;
1062
1063 if (IS_QLAFX00(vha->hw)) {
1064 return scnprintf(buf, PAGE_SIZE, "%s\n",
1065 vha->hw->mr.serial_num);
1066 } else if (IS_FWI2_CAPABLE(ha)) {
1067 qla2xxx_get_vpd_field(vha, "SN", buf, PAGE_SIZE - 1);
1068 return strlen(strcat(buf, "\n"));
1069 }
1070
1071 sn = ((ha->serial0 & 0x1f) << 16) | (ha->serial2 << 8) | ha->serial1;
1072 return scnprintf(buf, PAGE_SIZE, "%c%05d\n", 'A' + sn / 100000,
1073 sn % 100000);
1074 }
1075
1076 static ssize_t
1077 qla2x00_isp_name_show(struct device *dev, struct device_attribute *attr,
1078 char *buf)
1079 {
1080 scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
1081
1082 return scnprintf(buf, PAGE_SIZE, "ISP%04X\n", vha->hw->pdev->device);
1083 }
1084
1085 static ssize_t
1086 qla2x00_isp_id_show(struct device *dev, struct device_attribute *attr,
1087 char *buf)
1088 {
1089 scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
1090 struct qla_hw_data *ha = vha->hw;
1091
1092 if (IS_QLAFX00(vha->hw))
1093 return scnprintf(buf, PAGE_SIZE, "%s\n",
1094 vha->hw->mr.hw_version);
1095
1096 return scnprintf(buf, PAGE_SIZE, "%04x %04x %04x %04x\n",
1097 ha->product_id[0], ha->product_id[1], ha->product_id[2],
1098 ha->product_id[3]);
1099 }
1100
1101 static ssize_t
1102 qla2x00_model_name_show(struct device *dev, struct device_attribute *attr,
1103 char *buf)
1104 {
1105 scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
1106
1107 return scnprintf(buf, PAGE_SIZE, "%s\n", vha->hw->model_number);
1108 }
1109
1110 static ssize_t
1111 qla2x00_model_desc_show(struct device *dev, struct device_attribute *attr,
1112 char *buf)
1113 {
1114 scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
1115
1116 return scnprintf(buf, PAGE_SIZE, "%s\n", vha->hw->model_desc);
1117 }
1118
1119 static ssize_t
1120 qla2x00_pci_info_show(struct device *dev, struct device_attribute *attr,
1121 char *buf)
1122 {
1123 scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
1124 char pci_info[30];
1125
1126 return scnprintf(buf, PAGE_SIZE, "%s\n",
1127 vha->hw->isp_ops->pci_info_str(vha, pci_info));
1128 }
1129
1130 static ssize_t
1131 qla2x00_link_state_show(struct device *dev, struct device_attribute *attr,
1132 char *buf)
1133 {
1134 scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
1135 struct qla_hw_data *ha = vha->hw;
1136 int len = 0;
1137
1138 if (atomic_read(&vha->loop_state) == LOOP_DOWN ||
1139 atomic_read(&vha->loop_state) == LOOP_DEAD ||
1140 vha->device_flags & DFLG_NO_CABLE)
1141 len = scnprintf(buf, PAGE_SIZE, "Link Down\n");
1142 else if (atomic_read(&vha->loop_state) != LOOP_READY ||
1143 qla2x00_chip_is_down(vha))
1144 len = scnprintf(buf, PAGE_SIZE, "Unknown Link State\n");
1145 else {
1146 len = scnprintf(buf, PAGE_SIZE, "Link Up - ");
1147
1148 switch (ha->current_topology) {
1149 case ISP_CFG_NL:
1150 len += scnprintf(buf + len, PAGE_SIZE-len, "Loop\n");
1151 break;
1152 case ISP_CFG_FL:
1153 len += scnprintf(buf + len, PAGE_SIZE-len, "FL_Port\n");
1154 break;
1155 case ISP_CFG_N:
1156 len += scnprintf(buf + len, PAGE_SIZE-len,
1157 "N_Port to N_Port\n");
1158 break;
1159 case ISP_CFG_F:
1160 len += scnprintf(buf + len, PAGE_SIZE-len, "F_Port\n");
1161 break;
1162 default:
1163 len += scnprintf(buf + len, PAGE_SIZE-len, "Loop\n");
1164 break;
1165 }
1166 }
1167 return len;
1168 }
1169
1170 static ssize_t
1171 qla2x00_zio_show(struct device *dev, struct device_attribute *attr,
1172 char *buf)
1173 {
1174 scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
1175 int len = 0;
1176
1177 switch (vha->hw->zio_mode) {
1178 case QLA_ZIO_MODE_6:
1179 len += scnprintf(buf + len, PAGE_SIZE-len, "Mode 6\n");
1180 break;
1181 case QLA_ZIO_DISABLED:
1182 len += scnprintf(buf + len, PAGE_SIZE-len, "Disabled\n");
1183 break;
1184 }
1185 return len;
1186 }
1187
1188 static ssize_t
1189 qla2x00_zio_store(struct device *dev, struct device_attribute *attr,
1190 const char *buf, size_t count)
1191 {
1192 scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
1193 struct qla_hw_data *ha = vha->hw;
1194 int val = 0;
1195 uint16_t zio_mode;
1196
1197 if (!IS_ZIO_SUPPORTED(ha))
1198 return -ENOTSUPP;
1199
1200 if (sscanf(buf, "%d", &val) != 1)
1201 return -EINVAL;
1202
1203 if (val)
1204 zio_mode = QLA_ZIO_MODE_6;
1205 else
1206 zio_mode = QLA_ZIO_DISABLED;
1207
1208 /* Update per-hba values and queue a reset. */
1209 if (zio_mode != QLA_ZIO_DISABLED || ha->zio_mode != QLA_ZIO_DISABLED) {
1210 ha->zio_mode = zio_mode;
1211 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
1212 }
1213 return strlen(buf);
1214 }
1215
1216 static ssize_t
1217 qla2x00_zio_timer_show(struct device *dev, struct device_attribute *attr,
1218 char *buf)
1219 {
1220 scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
1221
1222 return scnprintf(buf, PAGE_SIZE, "%d us\n", vha->hw->zio_timer * 100);
1223 }
1224
1225 static ssize_t
1226 qla2x00_zio_timer_store(struct device *dev, struct device_attribute *attr,
1227 const char *buf, size_t count)
1228 {
1229 scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
1230 int val = 0;
1231 uint16_t zio_timer;
1232
1233 if (sscanf(buf, "%d", &val) != 1)
1234 return -EINVAL;
1235 if (val > 25500 || val < 100)
1236 return -ERANGE;
1237
1238 zio_timer = (uint16_t)(val / 100);
1239 vha->hw->zio_timer = zio_timer;
1240
1241 return strlen(buf);
1242 }
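/* The ZIO delay timer is stored in 100-microsecond units: the store
 * handler above accepts 100-25500 us and divides by 100 before saving it. */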
1243
1244 static ssize_t
1245 qla_zio_threshold_show(struct device *dev, struct device_attribute *attr,
1246 char *buf)
1247 {
1248 scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
1249
1250 return scnprintf(buf, PAGE_SIZE, "%d exchanges\n",
1251 vha->hw->last_zio_threshold);
1252 }
1253
1254 static ssize_t
1255 qla_zio_threshold_store(struct device *dev, struct device_attribute *attr,
1256 const char *buf, size_t count)
1257 {
1258 scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
1259 int val = 0;
1260
1261 if (vha->hw->zio_mode != QLA_ZIO_MODE_6)
1262 return -EINVAL;
1263 if (sscanf(buf, "%d", &val) != 1)
1264 return -EINVAL;
1265 if (val < 0 || val > 256)
1266 return -ERANGE;
1267
1268 atomic_set(&vha->hw->zio_threshold, val);
1269 return strlen(buf);
1270 }
1271
1272 static ssize_t
1273 qla2x00_beacon_show(struct device *dev, struct device_attribute *attr,
1274 char *buf)
1275 {
1276 scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
1277 int len = 0;
1278
1279 if (vha->hw->beacon_blink_led)
1280 len += scnprintf(buf + len, PAGE_SIZE-len, "Enabled\n");
1281 else
1282 len += scnprintf(buf + len, PAGE_SIZE-len, "Disabled\n");
1283 return len;
1284 }
1285
1286 static ssize_t
1287 qla2x00_beacon_store(struct device *dev, struct device_attribute *attr,
1288 const char *buf, size_t count)
1289 {
1290 scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
1291 struct qla_hw_data *ha = vha->hw;
1292 int val = 0;
1293 int rval;
1294
1295 if (IS_QLA2100(ha) || IS_QLA2200(ha))
1296 return -EPERM;
1297
1298 if (sscanf(buf, "%d", &val) != 1)
1299 return -EINVAL;
1300
1301 mutex_lock(&vha->hw->optrom_mutex);
1302 if (qla2x00_chip_is_down(vha)) {
1303 mutex_unlock(&vha->hw->optrom_mutex);
1304 ql_log(ql_log_warn, vha, 0x707a,
1305 "Abort ISP active -- ignoring beacon request.\n");
1306 return -EBUSY;
1307 }
1308
1309 if (val)
1310 rval = ha->isp_ops->beacon_on(vha);
1311 else
1312 rval = ha->isp_ops->beacon_off(vha);
1313
1314 if (rval != QLA_SUCCESS)
1315 count = 0;
1316
1317 mutex_unlock(&vha->hw->optrom_mutex);
1318
1319 return count;
1320 }
1321
1322 static ssize_t
1323 qla2x00_optrom_bios_version_show(struct device *dev,
1324 struct device_attribute *attr, char *buf)
1325 {
1326 scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
1327 struct qla_hw_data *ha = vha->hw;
1328
1329 return scnprintf(buf, PAGE_SIZE, "%d.%02d\n", ha->bios_revision[1],
1330 ha->bios_revision[0]);
1331 }
1332
1333 static ssize_t
1334 qla2x00_optrom_efi_version_show(struct device *dev,
1335 struct device_attribute *attr, char *buf)
1336 {
1337 scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
1338 struct qla_hw_data *ha = vha->hw;
1339
1340 return scnprintf(buf, PAGE_SIZE, "%d.%02d\n", ha->efi_revision[1],
1341 ha->efi_revision[0]);
1342 }
1343
1344 static ssize_t
1345 qla2x00_optrom_fcode_version_show(struct device *dev,
1346 struct device_attribute *attr, char *buf)
1347 {
1348 scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
1349 struct qla_hw_data *ha = vha->hw;
1350
1351 return scnprintf(buf, PAGE_SIZE, "%d.%02d\n", ha->fcode_revision[1],
1352 ha->fcode_revision[0]);
1353 }
1354
1355 static ssize_t
1356 qla2x00_optrom_fw_version_show(struct device *dev,
1357 struct device_attribute *attr, char *buf)
1358 {
1359 scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
1360 struct qla_hw_data *ha = vha->hw;
1361
1362 return scnprintf(buf, PAGE_SIZE, "%d.%02d.%02d %d\n",
1363 ha->fw_revision[0], ha->fw_revision[1], ha->fw_revision[2],
1364 ha->fw_revision[3]);
1365 }
1366
1367 static ssize_t
1368 qla2x00_optrom_gold_fw_version_show(struct device *dev,
1369 struct device_attribute *attr, char *buf)
1370 {
1371 scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
1372 struct qla_hw_data *ha = vha->hw;
1373
1374 if (!IS_QLA81XX(ha) && !IS_QLA83XX(ha) &&
1375 !IS_QLA27XX(ha) && !IS_QLA28XX(ha))
1376 return scnprintf(buf, PAGE_SIZE, "\n");
1377
1378 return scnprintf(buf, PAGE_SIZE, "%d.%02d.%02d (%d)\n",
1379 ha->gold_fw_version[0], ha->gold_fw_version[1],
1380 ha->gold_fw_version[2], ha->gold_fw_version[3]);
1381 }
1382
1383 static ssize_t
1384 qla2x00_total_isp_aborts_show(struct device *dev,
1385 struct device_attribute *attr, char *buf)
1386 {
1387 scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
1388
1389 return scnprintf(buf, PAGE_SIZE, "%d\n",
1390 vha->qla_stats.total_isp_aborts);
1391 }
1392
1393 static ssize_t
1394 qla24xx_84xx_fw_version_show(struct device *dev,
1395 struct device_attribute *attr, char *buf)
1396 {
1397 int rval = QLA_SUCCESS;
1398 uint16_t status[2] = { 0 };
1399 scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
1400 struct qla_hw_data *ha = vha->hw;
1401
1402 if (!IS_QLA84XX(ha))
1403 return scnprintf(buf, PAGE_SIZE, "\n");
1404
1405 if (!ha->cs84xx->op_fw_version) {
1406 rval = qla84xx_verify_chip(vha, status);
1407
1408 if (!rval && !status[0])
1409 return scnprintf(buf, PAGE_SIZE, "%u\n",
1410 (uint32_t)ha->cs84xx->op_fw_version);
1411 }
1412
1413 return scnprintf(buf, PAGE_SIZE, "\n");
1414 }
1415
1416 static ssize_t
1417 qla2x00_serdes_version_show(struct device *dev, struct device_attribute *attr,
1418 char *buf)
1419 {
1420 scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
1421 struct qla_hw_data *ha = vha->hw;
1422
1423 if (!IS_QLA27XX(ha) && !IS_QLA28XX(ha))
1424 return scnprintf(buf, PAGE_SIZE, "\n");
1425
1426 return scnprintf(buf, PAGE_SIZE, "%d.%02d.%02d\n",
1427 ha->serdes_version[0], ha->serdes_version[1],
1428 ha->serdes_version[2]);
1429 }
1430
1431 static ssize_t
1432 qla2x00_mpi_version_show(struct device *dev, struct device_attribute *attr,
1433 char *buf)
1434 {
1435 scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
1436 struct qla_hw_data *ha = vha->hw;
1437
1438 if (!IS_QLA81XX(ha) && !IS_QLA8031(ha) && !IS_QLA8044(ha) &&
1439 !IS_QLA27XX(ha) && !IS_QLA28XX(ha))
1440 return scnprintf(buf, PAGE_SIZE, "\n");
1441
1442 return scnprintf(buf, PAGE_SIZE, "%d.%02d.%02d (%x)\n",
1443 ha->mpi_version[0], ha->mpi_version[1], ha->mpi_version[2],
1444 ha->mpi_capabilities);
1445 }
1446
1447 static ssize_t
1448 qla2x00_phy_version_show(struct device *dev, struct device_attribute *attr,
1449 char *buf)
1450 {
1451 scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
1452 struct qla_hw_data *ha = vha->hw;
1453
1454 if (!IS_QLA81XX(ha) && !IS_QLA8031(ha))
1455 return scnprintf(buf, PAGE_SIZE, "\n");
1456
1457 return scnprintf(buf, PAGE_SIZE, "%d.%02d.%02d\n",
1458 ha->phy_version[0], ha->phy_version[1], ha->phy_version[2]);
1459 }
1460
1461 static ssize_t
1462 qla2x00_flash_block_size_show(struct device *dev,
1463 struct device_attribute *attr, char *buf)
1464 {
1465 scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
1466 struct qla_hw_data *ha = vha->hw;
1467
1468 return scnprintf(buf, PAGE_SIZE, "0x%x\n", ha->fdt_block_size);
1469 }
1470
1471 static ssize_t
1472 qla2x00_vlan_id_show(struct device *dev, struct device_attribute *attr,
1473 char *buf)
1474 {
1475 scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
1476
1477 if (!IS_CNA_CAPABLE(vha->hw))
1478 return scnprintf(buf, PAGE_SIZE, "\n");
1479
1480 return scnprintf(buf, PAGE_SIZE, "%d\n", vha->fcoe_vlan_id);
1481 }
1482
1483 static ssize_t
1484 qla2x00_vn_port_mac_address_show(struct device *dev,
1485 struct device_attribute *attr, char *buf)
1486 {
1487 scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
1488
1489 if (!IS_CNA_CAPABLE(vha->hw))
1490 return scnprintf(buf, PAGE_SIZE, "\n");
1491
1492 return scnprintf(buf, PAGE_SIZE, "%pMR\n", vha->fcoe_vn_port_mac);
1493 }
1494
1495 static ssize_t
1496 qla2x00_fabric_param_show(struct device *dev, struct device_attribute *attr,
1497 char *buf)
1498 {
1499 scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
1500
1501 return scnprintf(buf, PAGE_SIZE, "%d\n", vha->hw->switch_cap);
1502 }
1503
1504 static ssize_t
1505 qla2x00_thermal_temp_show(struct device *dev,
1506 struct device_attribute *attr, char *buf)
1507 {
1508 scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
1509 uint16_t temp = 0;
1510 int rc;
1511
1512 mutex_lock(&vha->hw->optrom_mutex);
1513 if (qla2x00_chip_is_down(vha)) {
1514 mutex_unlock(&vha->hw->optrom_mutex);
1515 ql_log(ql_log_warn, vha, 0x70dc, "ISP reset active.\n");
1516 goto done;
1517 }
1518
1519 if (vha->hw->flags.eeh_busy) {
1520 mutex_unlock(&vha->hw->optrom_mutex);
1521 ql_log(ql_log_warn, vha, 0x70dd, "PCI EEH busy.\n");
1522 goto done;
1523 }
1524
1525 rc = qla2x00_get_thermal_temp(vha, &temp);
1526 mutex_unlock(&vha->hw->optrom_mutex);
1527 if (rc == QLA_SUCCESS)
1528 return scnprintf(buf, PAGE_SIZE, "%d\n", temp);
1529
1530 done:
1531 return scnprintf(buf, PAGE_SIZE, "\n");
1532 }
1533
1534 static ssize_t
1535 qla2x00_fw_state_show(struct device *dev, struct device_attribute *attr,
1536 char *buf)
1537 {
1538 scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
1539 int rval = QLA_FUNCTION_FAILED;
1540 uint16_t state[6];
1541 uint32_t pstate;
1542
1543 if (IS_QLAFX00(vha->hw)) {
1544 pstate = qlafx00_fw_state_show(dev, attr, buf);
1545 return scnprintf(buf, PAGE_SIZE, "0x%x\n", pstate);
1546 }
1547
1548 mutex_lock(&vha->hw->optrom_mutex);
1549 if (qla2x00_chip_is_down(vha)) {
1550 mutex_unlock(&vha->hw->optrom_mutex);
1551 ql_log(ql_log_warn, vha, 0x707c,
1552 "ISP reset active.\n");
1553 goto out;
1554 } else if (vha->hw->flags.eeh_busy) {
1555 mutex_unlock(&vha->hw->optrom_mutex);
1556 goto out;
1557 }
1558
1559 rval = qla2x00_get_firmware_state(vha, state);
1560 mutex_unlock(&vha->hw->optrom_mutex);
1561 out:
1562 if (rval != QLA_SUCCESS)
1563 memset(state, -1, sizeof(state));
1566
1567 return scnprintf(buf, PAGE_SIZE, "0x%x 0x%x 0x%x 0x%x 0x%x 0x%x\n",
1568 state[0], state[1], state[2], state[3], state[4], state[5]);
1569 }
1570
1571 static ssize_t
1572 qla2x00_diag_requests_show(struct device *dev,
1573 struct device_attribute *attr, char *buf)
1574 {
1575 scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
1576
1577 if (!IS_BIDI_CAPABLE(vha->hw))
1578 return scnprintf(buf, PAGE_SIZE, "\n");
1579
1580 return scnprintf(buf, PAGE_SIZE, "%llu\n", vha->bidi_stats.io_count);
1581 }
1582
1583 static ssize_t
1584 qla2x00_diag_megabytes_show(struct device *dev,
1585 struct device_attribute *attr, char *buf)
1586 {
1587 scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
1588
1589 if (!IS_BIDI_CAPABLE(vha->hw))
1590 return scnprintf(buf, PAGE_SIZE, "\n");
1591
1592 return scnprintf(buf, PAGE_SIZE, "%llu\n",
1593 vha->bidi_stats.transfer_bytes >> 20);
1594 }
1595
1596 static ssize_t
1597 qla2x00_fw_dump_size_show(struct device *dev, struct device_attribute *attr,
1598 char *buf)
1599 {
1600 scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
1601 struct qla_hw_data *ha = vha->hw;
1602 uint32_t size;
1603
1604 if (!ha->fw_dumped)
1605 size = 0;
1606 else if (IS_P3P_TYPE(ha))
1607 size = ha->md_template_size + ha->md_dump_size;
1608 else
1609 size = ha->fw_dump_len;
1610
1611 return scnprintf(buf, PAGE_SIZE, "%d\n", size);
1612 }
1613
1614 static ssize_t
1615 qla2x00_allow_cna_fw_dump_show(struct device *dev,
1616 struct device_attribute *attr, char *buf)
1617 {
1618 scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
1619
1620 if (!IS_P3P_TYPE(vha->hw))
1621 return scnprintf(buf, PAGE_SIZE, "\n");
1622 else
1623 return scnprintf(buf, PAGE_SIZE, "%s\n",
1624 vha->hw->allow_cna_fw_dump ? "true" : "false");
1625 }
1626
1627 static ssize_t
1628 qla2x00_allow_cna_fw_dump_store(struct device *dev,
1629 struct device_attribute *attr, const char *buf, size_t count)
1630 {
1631 scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
1632 int val = 0;
1633
1634 if (!IS_P3P_TYPE(vha->hw))
1635 return -EINVAL;
1636
1637 if (sscanf(buf, "%d", &val) != 1)
1638 return -EINVAL;
1639
1640 vha->hw->allow_cna_fw_dump = val != 0;
1641
1642 return strlen(buf);
1643 }
1644
1645 static ssize_t
1646 qla2x00_pep_version_show(struct device *dev, struct device_attribute *attr,
1647 char *buf)
1648 {
1649 scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
1650 struct qla_hw_data *ha = vha->hw;
1651
1652 if (!IS_QLA27XX(ha) && !IS_QLA28XX(ha))
1653 return scnprintf(buf, PAGE_SIZE, "\n");
1654
1655 return scnprintf(buf, PAGE_SIZE, "%d.%02d.%02d\n",
1656 ha->pep_version[0], ha->pep_version[1], ha->pep_version[2]);
1657 }
1658
1659 static ssize_t
1660 qla2x00_min_supported_speed_show(struct device *dev,
1661 struct device_attribute *attr, char *buf)
1662 {
1663 scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
1664 struct qla_hw_data *ha = vha->hw;
1665
1666 if (!IS_QLA27XX(ha) && !IS_QLA28XX(ha))
1667 return scnprintf(buf, PAGE_SIZE, "\n");
1668
1669 return scnprintf(buf, PAGE_SIZE, "%s\n",
1670 ha->min_supported_speed == 6 ? "64Gbps" :
1671 ha->min_supported_speed == 5 ? "32Gbps" :
1672 ha->min_supported_speed == 4 ? "16Gbps" :
1673 ha->min_supported_speed == 3 ? "8Gbps" :
1674 ha->min_supported_speed == 2 ? "4Gbps" :
1675 ha->min_supported_speed != 0 ? "unknown" : "");
1676 }
1677
1678 static ssize_t
1679 qla2x00_max_supported_speed_show(struct device *dev,
1680 struct device_attribute *attr, char *buf)
1681 {
1682 scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
1683 struct qla_hw_data *ha = vha->hw;
1684
1685 if (!IS_QLA27XX(ha) && !IS_QLA28XX(ha))
1686 return scnprintf(buf, PAGE_SIZE, "\n");
1687
1688 return scnprintf(buf, PAGE_SIZE, "%s\n",
1689 ha->max_supported_speed == 2 ? "64Gbps" :
1690 ha->max_supported_speed == 1 ? "32Gbps" :
1691 ha->max_supported_speed == 0 ? "16Gbps" : "unknown");
1692 }
1693
1694 static ssize_t
1695 qla2x00_port_speed_store(struct device *dev, struct device_attribute *attr,
1696 const char *buf, size_t count)
1697 {
1698 struct scsi_qla_host *vha = shost_priv(dev_to_shost(dev));
1699 ulong type, speed;
1700 int oldspeed, rval;
1701 int mode = QLA_SET_DATA_RATE_LR;
1702 struct qla_hw_data *ha = vha->hw;
1703
1704 if (!IS_QLA27XX(ha) && !IS_QLA28XX(ha)) {
1705 ql_log(ql_log_warn, vha, 0x70d8,
1706 "Speed setting not supported.\n");
1707 return -EINVAL;
1708 }
1709
1710 rval = kstrtol(buf, 10, &type);
1711 if (rval)
1712 return rval;
1713 speed = type;
1714 if (type == 40 || type == 80 || type == 160 ||
1715 type == 320) {
1716 ql_dbg(ql_dbg_user, vha, 0x70d9,
1717 "Setting will be affected after a loss of sync\n");
1718 type = type/10;
1719 mode = QLA_SET_DATA_RATE_NOLR;
1720 }
1721
1722 oldspeed = ha->set_data_rate;
1723
1724 switch (type) {
1725 case 0:
1726 ha->set_data_rate = PORT_SPEED_AUTO;
1727 break;
1728 case 4:
1729 ha->set_data_rate = PORT_SPEED_4GB;
1730 break;
1731 case 8:
1732 ha->set_data_rate = PORT_SPEED_8GB;
1733 break;
1734 case 16:
1735 ha->set_data_rate = PORT_SPEED_16GB;
1736 break;
1737 case 32:
1738 ha->set_data_rate = PORT_SPEED_32GB;
1739 break;
1740 default:
1741 ql_log(ql_log_warn, vha, 0x1199,
1742 "Unrecognized speed setting: %lu. Setting Autoneg\n",
1743 speed);
1744 ha->set_data_rate = PORT_SPEED_AUTO;
1745 }
1746
1747 if (qla2x00_chip_is_down(vha) || (oldspeed == ha->set_data_rate))
1748 return -EINVAL;
1749
1750 ql_log(ql_log_info, vha, 0x70da,
1751 "Setting speed to %lu Gbps.\n", type);
1752
1753 rval = qla2x00_set_data_rate(vha, mode);
1754 if (rval != QLA_SUCCESS)
1755 return -EIO;
1756
1757 return strlen(buf);
1758 }
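/*
 * Accepted port_speed values are 0 (auto), 4, 8, 16 and 32 (Gbps). Writing
 * 40, 80, 160 or 320 selects the same rates via QLA_SET_DATA_RATE_NOLR,
 * i.e. the new rate only takes effect after the next loss of sync.
 */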
1759
1760 static ssize_t
1761 qla2x00_port_speed_show(struct device *dev, struct device_attribute *attr,
1762 char *buf)
1763 {
1764 struct scsi_qla_host *vha = shost_priv(dev_to_shost(dev));
1765 struct qla_hw_data *ha = vha->hw;
1766 ssize_t rval;
1767 char *spd[7] = {"0", "0", "0", "4", "8", "16", "32"};
1768
1769 rval = qla2x00_get_data_rate(vha);
1770 if (rval != QLA_SUCCESS) {
1771 ql_log(ql_log_warn, vha, 0x70db,
1772 "Unable to get port speed rval:%zd\n", rval);
1773 return -EINVAL;
1774 }
1775
1776 ql_log(ql_log_info, vha, 0x70d6,
1777 "port speed:%d\n", ha->link_data_rate);
1778
1779 return scnprintf(buf, PAGE_SIZE, "%s\n", spd[ha->link_data_rate]);
1780 }
1781
1782 /* ----- */
1783
1784 static ssize_t
1785 qlini_mode_show(struct device *dev, struct device_attribute *attr, char *buf)
1786 {
1787 scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
1788 int len = 0;
1789
1790 len += scnprintf(buf + len, PAGE_SIZE-len,
1791 "Supported options: enabled | disabled | dual | exclusive\n");
1792
1793 /* --- */
1794 len += scnprintf(buf + len, PAGE_SIZE-len, "Current selection: ");
1795
1796 switch (vha->qlini_mode) {
1797 case QLA2XXX_INI_MODE_EXCLUSIVE:
1798 len += scnprintf(buf + len, PAGE_SIZE-len,
1799 QLA2XXX_INI_MODE_STR_EXCLUSIVE);
1800 break;
1801 case QLA2XXX_INI_MODE_DISABLED:
1802 len += scnprintf(buf + len, PAGE_SIZE-len,
1803 QLA2XXX_INI_MODE_STR_DISABLED);
1804 break;
1805 case QLA2XXX_INI_MODE_ENABLED:
1806 len += scnprintf(buf + len, PAGE_SIZE-len,
1807 QLA2XXX_INI_MODE_STR_ENABLED);
1808 break;
1809 case QLA2XXX_INI_MODE_DUAL:
1810 len += scnprintf(buf + len, PAGE_SIZE-len,
1811 QLA2XXX_INI_MODE_STR_DUAL);
1812 break;
1813 }
1814 len += scnprintf(buf + len, PAGE_SIZE-len, "\n");
1815
1816 return len;
1817 }
1818
1819 static char *mode_to_str[] = {
1820 "exclusive",
1821 "disabled",
1822 "enabled",
1823 "dual",
1824 };
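/*
 * mode_to_str[] is indexed by the QLA2XXX_INI_MODE_* values stored in
 * vha->qlini_mode (see qla_set_ini_mode() below), so the string order is
 * expected to track that enumeration.
 */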
1825
1826 #define NEED_EXCH_OFFLOAD(_exchg) ((_exchg) > FW_DEF_EXCHANGES_CNT)
1827 static int qla_set_ini_mode(scsi_qla_host_t *vha, int op)
1828 {
1829 int rc = 0;
1830 enum {
1831 NO_ACTION,
1832 MODE_CHANGE_ACCEPT,
1833 MODE_CHANGE_NO_ACTION,
1834 TARGET_STILL_ACTIVE,
1835 };
1836 int action = NO_ACTION;
1837 int set_mode = 0;
1838 u8 eo_toggle = 0; /* exchange offload flipped */
1839
1840 switch (vha->qlini_mode) {
1841 case QLA2XXX_INI_MODE_DISABLED:
1842 switch (op) {
1843 case QLA2XXX_INI_MODE_DISABLED:
1844 if (qla_tgt_mode_enabled(vha)) {
1845 if (NEED_EXCH_OFFLOAD(vha->u_ql2xexchoffld) !=
1846 vha->hw->flags.exchoffld_enabled)
1847 eo_toggle = 1;
1848 if (((vha->ql2xexchoffld !=
1849 vha->u_ql2xexchoffld) &&
1850 NEED_EXCH_OFFLOAD(vha->u_ql2xexchoffld)) ||
1851 eo_toggle) {
1852 /*
1853 * The number of exchange to be offload
1854 * was tweaked or offload option was
1855 * flipped
1856 */
1857 action = MODE_CHANGE_ACCEPT;
1858 } else {
1859 action = MODE_CHANGE_NO_ACTION;
1860 }
1861 } else {
1862 action = MODE_CHANGE_NO_ACTION;
1863 }
1864 break;
1865 case QLA2XXX_INI_MODE_EXCLUSIVE:
1866 if (qla_tgt_mode_enabled(vha)) {
1867 if (NEED_EXCH_OFFLOAD(vha->u_ql2xexchoffld) !=
1868 vha->hw->flags.exchoffld_enabled)
1869 eo_toggle = 1;
1870 if (((vha->ql2xexchoffld !=
1871 vha->u_ql2xexchoffld) &&
1872 NEED_EXCH_OFFLOAD(vha->u_ql2xexchoffld)) ||
1873 eo_toggle) {
1874 /*
1875 * The number of exchange to be offload
1876 * was tweaked or offload option was
1877 * flipped
1878 */
1879 action = MODE_CHANGE_ACCEPT;
1880 } else {
1881 action = MODE_CHANGE_NO_ACTION;
1882 }
1883 } else {
1884 action = MODE_CHANGE_ACCEPT;
1885 }
1886 break;
1887 case QLA2XXX_INI_MODE_DUAL:
1888 action = MODE_CHANGE_ACCEPT;
1889 /* active_mode is target only, reset it to dual */
1890 if (qla_tgt_mode_enabled(vha)) {
1891 set_mode = 1;
1892 action = MODE_CHANGE_ACCEPT;
1893 } else {
1894 action = MODE_CHANGE_NO_ACTION;
1895 }
1896 break;
1897
1898 case QLA2XXX_INI_MODE_ENABLED:
1899 if (qla_tgt_mode_enabled(vha))
1900 action = TARGET_STILL_ACTIVE;
1901 else {
1902 action = MODE_CHANGE_ACCEPT;
1903 set_mode = 1;
1904 }
1905 break;
1906 }
1907 break;
1908
1909 case QLA2XXX_INI_MODE_EXCLUSIVE:
1910 switch (op) {
1911 case QLA2XXX_INI_MODE_EXCLUSIVE:
1912 if (qla_tgt_mode_enabled(vha)) {
1913 if (NEED_EXCH_OFFLOAD(vha->u_ql2xexchoffld) !=
1914 vha->hw->flags.exchoffld_enabled)
1915 eo_toggle = 1;
1916 if (((vha->ql2xexchoffld !=
1917 vha->u_ql2xexchoffld) &&
1918 NEED_EXCH_OFFLOAD(vha->u_ql2xexchoffld)) ||
1919 eo_toggle)
1920 /*
1921 * The number of exchange to be offload
1922 * was tweaked or offload option was
1923 * flipped
1924 */
1925 action = MODE_CHANGE_ACCEPT;
1926 else
1927 action = NO_ACTION;
1928 } else
1929 action = NO_ACTION;
1930
1931 break;
1932
1933 case QLA2XXX_INI_MODE_DISABLED:
1934 if (qla_tgt_mode_enabled(vha)) {
1935 if (NEED_EXCH_OFFLOAD(vha->u_ql2xexchoffld) !=
1936 vha->hw->flags.exchoffld_enabled)
1937 eo_toggle = 1;
1938 if (((vha->ql2xexchoffld !=
1939 vha->u_ql2xexchoffld) &&
1940 NEED_EXCH_OFFLOAD(vha->u_ql2xexchoffld)) ||
1941 eo_toggle)
1942 action = MODE_CHANGE_ACCEPT;
1943 else
1944 action = MODE_CHANGE_NO_ACTION;
1945 } else
1946 action = MODE_CHANGE_NO_ACTION;
1947 break;
1948
1949 case QLA2XXX_INI_MODE_DUAL: /* exclusive -> dual */
1950 if (qla_tgt_mode_enabled(vha)) {
1951 action = MODE_CHANGE_ACCEPT;
1952 set_mode = 1;
1953 } else
1954 action = MODE_CHANGE_ACCEPT;
1955 break;
1956
1957 case QLA2XXX_INI_MODE_ENABLED:
1958 if (qla_tgt_mode_enabled(vha))
1959 action = TARGET_STILL_ACTIVE;
1960 else {
1961 if (vha->hw->flags.fw_started)
1962 action = MODE_CHANGE_NO_ACTION;
1963 else
1964 action = MODE_CHANGE_ACCEPT;
1965 }
1966 break;
1967 }
1968 break;
1969
1970 case QLA2XXX_INI_MODE_ENABLED:
1971 switch (op) {
1972 case QLA2XXX_INI_MODE_ENABLED:
1973 if (NEED_EXCH_OFFLOAD(vha->u_ql2xiniexchg) !=
1974 vha->hw->flags.exchoffld_enabled)
1975 eo_toggle = 1;
1976 if (((vha->ql2xiniexchg != vha->u_ql2xiniexchg) &&
1977 NEED_EXCH_OFFLOAD(vha->u_ql2xiniexchg)) ||
1978 eo_toggle)
1979 action = MODE_CHANGE_ACCEPT;
1980 else
1981 action = NO_ACTION;
1982 break;
1983 case QLA2XXX_INI_MODE_DUAL:
1984 case QLA2XXX_INI_MODE_DISABLED:
1985 action = MODE_CHANGE_ACCEPT;
1986 break;
1987 default:
1988 action = MODE_CHANGE_NO_ACTION;
1989 break;
1990 }
1991 break;
1992
1993 case QLA2XXX_INI_MODE_DUAL:
1994 switch (op) {
1995 case QLA2XXX_INI_MODE_DUAL:
1996 if (qla_tgt_mode_enabled(vha) ||
1997 qla_dual_mode_enabled(vha)) {
1998 if (NEED_EXCH_OFFLOAD(vha->u_ql2xexchoffld +
1999 vha->u_ql2xiniexchg) !=
2000 vha->hw->flags.exchoffld_enabled)
2001 eo_toggle = 1;
2002
2003 if ((((vha->ql2xexchoffld +
2004 vha->ql2xiniexchg) !=
2005 (vha->u_ql2xiniexchg +
2006 vha->u_ql2xexchoffld)) &&
2007 NEED_EXCH_OFFLOAD(vha->u_ql2xiniexchg +
2008 vha->u_ql2xexchoffld)) || eo_toggle)
2009 action = MODE_CHANGE_ACCEPT;
2010 else
2011 action = NO_ACTION;
2012 } else {
2013 if (NEED_EXCH_OFFLOAD(vha->u_ql2xexchoffld +
2014 vha->u_ql2xiniexchg) !=
2015 vha->hw->flags.exchoffld_enabled)
2016 eo_toggle = 1;
2017
2018 if ((((vha->ql2xexchoffld + vha->ql2xiniexchg)
2019 != (vha->u_ql2xiniexchg +
2020 vha->u_ql2xexchoffld)) &&
2021 NEED_EXCH_OFFLOAD(vha->u_ql2xiniexchg +
2022 vha->u_ql2xexchoffld)) || eo_toggle)
2023 action = MODE_CHANGE_NO_ACTION;
2024 else
2025 action = NO_ACTION;
2026 }
2027 break;
2028
2029 case QLA2XXX_INI_MODE_DISABLED:
2030 if (qla_tgt_mode_enabled(vha) ||
2031 qla_dual_mode_enabled(vha)) {
2032 /* turning off initiator mode */
2033 set_mode = 1;
2034 action = MODE_CHANGE_ACCEPT;
2035 } else {
2036 action = MODE_CHANGE_NO_ACTION;
2037 }
2038 break;
2039
2040 case QLA2XXX_INI_MODE_EXCLUSIVE:
2041 if (qla_tgt_mode_enabled(vha) ||
2042 qla_dual_mode_enabled(vha)) {
2043 set_mode = 1;
2044 action = MODE_CHANGE_ACCEPT;
2045 } else {
2046 action = MODE_CHANGE_ACCEPT;
2047 }
2048 break;
2049
2050 case QLA2XXX_INI_MODE_ENABLED:
2051 if (qla_tgt_mode_enabled(vha) ||
2052 qla_dual_mode_enabled(vha)) {
2053 action = TARGET_STILL_ACTIVE;
2054 } else {
2055 action = MODE_CHANGE_ACCEPT;
2056 }
2057 }
2058 break;
2059 }
2060
2061 switch (action) {
2062 case MODE_CHANGE_ACCEPT:
2063 ql_log(ql_log_warn, vha, 0xffff,
2064 "Mode change accepted. From %s to %s, Tgt exchg %d|%d. ini exchg %d|%d\n",
2065 mode_to_str[vha->qlini_mode], mode_to_str[op],
2066 vha->ql2xexchoffld, vha->u_ql2xexchoffld,
2067 vha->ql2xiniexchg, vha->u_ql2xiniexchg);
2068
2069 vha->qlini_mode = op;
2070 vha->ql2xexchoffld = vha->u_ql2xexchoffld;
2071 vha->ql2xiniexchg = vha->u_ql2xiniexchg;
2072 if (set_mode)
2073 qlt_set_mode(vha);
2074 vha->flags.online = 1;
2075 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
2076 break;
2077
2078 case MODE_CHANGE_NO_ACTION:
2079 ql_log(ql_log_warn, vha, 0xffff,
2080 "Mode is set. No action taken. From %s to %s, Tgt exchg %d|%d. ini exchg %d|%d\n",
2081 mode_to_str[vha->qlini_mode], mode_to_str[op],
2082 vha->ql2xexchoffld, vha->u_ql2xexchoffld,
2083 vha->ql2xiniexchg, vha->u_ql2xiniexchg);
2084 vha->qlini_mode = op;
2085 vha->ql2xexchoffld = vha->u_ql2xexchoffld;
2086 vha->ql2xiniexchg = vha->u_ql2xiniexchg;
2087 break;
2088
2089 case TARGET_STILL_ACTIVE:
2090 ql_log(ql_log_warn, vha, 0xffff,
2091 "Target Mode is active. Unable to change Mode.\n");
2092 break;
2093
2094 case NO_ACTION:
2095 default:
2096 ql_log(ql_log_warn, vha, 0xffff,
2097 "Mode unchange. No action taken. %d|%d pct %d|%d.\n",
2098 vha->qlini_mode, op,
2099 vha->ql2xexchoffld, vha->u_ql2xexchoffld);
2100 break;
2101 }
2102
2103 return rc;
2104 }
2105
2106 static ssize_t
2107 qlini_mode_store(struct device *dev, struct device_attribute *attr,
2108 const char *buf, size_t count)
2109 {
2110 scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
2111 int ini;
2112
2113 if (!buf)
2114 return -EINVAL;
2115
2116 if (strncasecmp(QLA2XXX_INI_MODE_STR_EXCLUSIVE, buf,
2117 strlen(QLA2XXX_INI_MODE_STR_EXCLUSIVE)) == 0)
2118 ini = QLA2XXX_INI_MODE_EXCLUSIVE;
2119 else if (strncasecmp(QLA2XXX_INI_MODE_STR_DISABLED, buf,
2120 strlen(QLA2XXX_INI_MODE_STR_DISABLED)) == 0)
2121 ini = QLA2XXX_INI_MODE_DISABLED;
2122 else if (strncasecmp(QLA2XXX_INI_MODE_STR_ENABLED, buf,
2123 strlen(QLA2XXX_INI_MODE_STR_ENABLED)) == 0)
2124 ini = QLA2XXX_INI_MODE_ENABLED;
2125 else if (strncasecmp(QLA2XXX_INI_MODE_STR_DUAL, buf,
2126 strlen(QLA2XXX_INI_MODE_STR_DUAL)) == 0)
2127 ini = QLA2XXX_INI_MODE_DUAL;
2128 else
2129 return -EINVAL;
2130
2131 qla_set_ini_mode(vha, ini);
2132 return strlen(buf);
2133 }
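/*
 * Example usage (a sketch; the accepted strings are whatever the
 * QLA2XXX_INI_MODE_STR_* macros expand to, matched case-insensitively,
 * and the host number varies per system):
 *
 *   echo dual > /sys/class/scsi_host/host0/qlini_mode
 *
 * qla_set_ini_mode() then evaluates the requested transition against the
 * current mode (the switch above) and either accepts the change (scheduling
 * an ISP abort), records it with no action, leaves the mode unchanged, or
 * rejects it because target mode is still active.
 */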
2134
2135 static ssize_t
2136 ql2xexchoffld_show(struct device *dev, struct device_attribute *attr,
2137 char *buf)
2138 {
2139 scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
2140 int len = 0;
2141
2142 len += scnprintf(buf + len, PAGE_SIZE-len,
2143 "target exchange: new %d : current: %d\n\n",
2144 vha->u_ql2xexchoffld, vha->ql2xexchoffld);
2145
2146 len += scnprintf(buf + len, PAGE_SIZE-len,
2147 "Please (re)set operating mode via \"/sys/class/scsi_host/host%ld/qlini_mode\" to load new setting.\n",
2148 vha->host_no);
2149
2150 return len;
2151 }
2152
2153 static ssize_t
2154 ql2xexchoffld_store(struct device *dev, struct device_attribute *attr,
2155 const char *buf, size_t count)
2156 {
2157 scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
2158 int val = 0;
2159
2160 if (sscanf(buf, "%d", &val) != 1)
2161 return -EINVAL;
2162
2163 if (val > FW_MAX_EXCHANGES_CNT)
2164 val = FW_MAX_EXCHANGES_CNT;
2165 else if (val < 0)
2166 val = 0;
2167
2168 vha->u_ql2xexchoffld = val;
2169 return strlen(buf);
2170 }
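/*
 * Example (a sketch; host number varies):
 *
 *   echo 2048 > /sys/class/scsi_host/host0/ql2xexchoffld
 *
 * The value is clamped to [0, FW_MAX_EXCHANGES_CNT] and only staged in
 * vha->u_ql2xexchoffld; as ql2xexchoffld_show() notes, it takes effect
 * the next time qlini_mode is written.
 */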
2171
2172 static ssize_t
2173 ql2xiniexchg_show(struct device *dev, struct device_attribute *attr,
2174 char *buf)
2175 {
2176 scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
2177 int len = 0;
2178
2179 len += scnprintf(buf + len, PAGE_SIZE-len,
2180 "target exchange: new %d : current: %d\n\n",
2181 vha->u_ql2xiniexchg, vha->ql2xiniexchg);
2182
2183 len += scnprintf(buf + len, PAGE_SIZE-len,
2184 "Please (re)set operating mode via \"/sys/class/scsi_host/host%ld/qlini_mode\" to load new setting.\n",
2185 vha->host_no);
2186
2187 return len;
2188 }
2189
2190 static ssize_t
2191 ql2xiniexchg_store(struct device *dev, struct device_attribute *attr,
2192 const char *buf, size_t count)
2193 {
2194 scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
2195 int val = 0;
2196
2197 if (sscanf(buf, "%d", &val) != 1)
2198 return -EINVAL;
2199
2200 if (val > FW_MAX_EXCHANGES_CNT)
2201 val = FW_MAX_EXCHANGES_CNT;
2202 else if (val < 0)
2203 val = 0;
2204
2205 vha->u_ql2xiniexchg = val;
2206 return strlen(buf);
2207 }
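/*
 * As with ql2xexchoffld above, the new ql2xiniexchg value is staged in
 * vha->u_ql2xiniexchg and applied when qlini_mode is (re)written.
 */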
2208
2209 static ssize_t
2210 qla2x00_dif_bundle_statistics_show(struct device *dev,
2211 struct device_attribute *attr, char *buf)
2212 {
2213 scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
2214 struct qla_hw_data *ha = vha->hw;
2215
2216 return scnprintf(buf, PAGE_SIZE,
2217 "cross=%llu read=%llu write=%llu kalloc=%llu dma_alloc=%llu unusable=%u\n",
2218 ha->dif_bundle_crossed_pages, ha->dif_bundle_reads,
2219 ha->dif_bundle_writes, ha->dif_bundle_kallocs,
2220 ha->dif_bundle_dma_allocs, ha->pool.unusable.count);
2221 }
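/*
 * Read-only counters, e.g. (host number varies):
 *
 *   cat /sys/class/scsi_host/host0/dif_bundle_statistics
 */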
2222
2223 static ssize_t
2224 qla2x00_fw_attr_show(struct device *dev,
2225 struct device_attribute *attr, char *buf)
2226 {
2227 scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
2228 struct qla_hw_data *ha = vha->hw;
2229
2230 if (!IS_QLA27XX(ha) && !IS_QLA28XX(ha))
2231 return scnprintf(buf, PAGE_SIZE, "\n");
2232
2233 return scnprintf(buf, PAGE_SIZE, "%llx\n",
2234 (uint64_t)ha->fw_attributes_ext[1] << 48 |
2235 (uint64_t)ha->fw_attributes_ext[0] << 32 |
2236 (uint64_t)ha->fw_attributes_h << 16 |
2237 (uint64_t)ha->fw_attributes);
2238 }
2239
2240 static ssize_t
2241 qla2x00_port_no_show(struct device *dev, struct device_attribute *attr,
2242 char *buf)
2243 {
2244 scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
2245
2246 return scnprintf(buf, PAGE_SIZE, "%u\n", vha->hw->port_no);
2247 }
2248
2249 static DEVICE_ATTR(driver_version, S_IRUGO, qla2x00_driver_version_show, NULL);
2250 static DEVICE_ATTR(fw_version, S_IRUGO, qla2x00_fw_version_show, NULL);
2251 static DEVICE_ATTR(serial_num, S_IRUGO, qla2x00_serial_num_show, NULL);
2252 static DEVICE_ATTR(isp_name, S_IRUGO, qla2x00_isp_name_show, NULL);
2253 static DEVICE_ATTR(isp_id, S_IRUGO, qla2x00_isp_id_show, NULL);
2254 static DEVICE_ATTR(model_name, S_IRUGO, qla2x00_model_name_show, NULL);
2255 static DEVICE_ATTR(model_desc, S_IRUGO, qla2x00_model_desc_show, NULL);
2256 static DEVICE_ATTR(pci_info, S_IRUGO, qla2x00_pci_info_show, NULL);
2257 static DEVICE_ATTR(link_state, S_IRUGO, qla2x00_link_state_show, NULL);
2258 static DEVICE_ATTR(zio, S_IRUGO | S_IWUSR, qla2x00_zio_show, qla2x00_zio_store);
2259 static DEVICE_ATTR(zio_timer, S_IRUGO | S_IWUSR, qla2x00_zio_timer_show,
2260 qla2x00_zio_timer_store);
2261 static DEVICE_ATTR(beacon, S_IRUGO | S_IWUSR, qla2x00_beacon_show,
2262 qla2x00_beacon_store);
2263 static DEVICE_ATTR(optrom_bios_version, S_IRUGO,
2264 qla2x00_optrom_bios_version_show, NULL);
2265 static DEVICE_ATTR(optrom_efi_version, S_IRUGO,
2266 qla2x00_optrom_efi_version_show, NULL);
2267 static DEVICE_ATTR(optrom_fcode_version, S_IRUGO,
2268 qla2x00_optrom_fcode_version_show, NULL);
2269 static DEVICE_ATTR(optrom_fw_version, S_IRUGO, qla2x00_optrom_fw_version_show,
2270 NULL);
2271 static DEVICE_ATTR(optrom_gold_fw_version, S_IRUGO,
2272 qla2x00_optrom_gold_fw_version_show, NULL);
2273 static DEVICE_ATTR(84xx_fw_version, S_IRUGO, qla24xx_84xx_fw_version_show,
2274 NULL);
2275 static DEVICE_ATTR(total_isp_aborts, S_IRUGO, qla2x00_total_isp_aborts_show,
2276 NULL);
2277 static DEVICE_ATTR(serdes_version, 0444, qla2x00_serdes_version_show, NULL);
2278 static DEVICE_ATTR(mpi_version, S_IRUGO, qla2x00_mpi_version_show, NULL);
2279 static DEVICE_ATTR(phy_version, S_IRUGO, qla2x00_phy_version_show, NULL);
2280 static DEVICE_ATTR(flash_block_size, S_IRUGO, qla2x00_flash_block_size_show,
2281 NULL);
2282 static DEVICE_ATTR(vlan_id, S_IRUGO, qla2x00_vlan_id_show, NULL);
2283 static DEVICE_ATTR(vn_port_mac_address, S_IRUGO,
2284 qla2x00_vn_port_mac_address_show, NULL);
2285 static DEVICE_ATTR(fabric_param, S_IRUGO, qla2x00_fabric_param_show, NULL);
2286 static DEVICE_ATTR(fw_state, S_IRUGO, qla2x00_fw_state_show, NULL);
2287 static DEVICE_ATTR(thermal_temp, S_IRUGO, qla2x00_thermal_temp_show, NULL);
2288 static DEVICE_ATTR(diag_requests, S_IRUGO, qla2x00_diag_requests_show, NULL);
2289 static DEVICE_ATTR(diag_megabytes, S_IRUGO, qla2x00_diag_megabytes_show, NULL);
2290 static DEVICE_ATTR(fw_dump_size, S_IRUGO, qla2x00_fw_dump_size_show, NULL);
2291 static DEVICE_ATTR(allow_cna_fw_dump, S_IRUGO | S_IWUSR,
2292 qla2x00_allow_cna_fw_dump_show,
2293 qla2x00_allow_cna_fw_dump_store);
2294 static DEVICE_ATTR(pep_version, S_IRUGO, qla2x00_pep_version_show, NULL);
2295 static DEVICE_ATTR(min_supported_speed, 0444,
2296 qla2x00_min_supported_speed_show, NULL);
2297 static DEVICE_ATTR(max_supported_speed, 0444,
2298 qla2x00_max_supported_speed_show, NULL);
2299 static DEVICE_ATTR(zio_threshold, 0644,
2300 qla_zio_threshold_show,
2301 qla_zio_threshold_store);
2302 static DEVICE_ATTR_RW(qlini_mode);
2303 static DEVICE_ATTR_RW(ql2xexchoffld);
2304 static DEVICE_ATTR_RW(ql2xiniexchg);
2305 static DEVICE_ATTR(dif_bundle_statistics, 0444,
2306 qla2x00_dif_bundle_statistics_show, NULL);
2307 static DEVICE_ATTR(port_speed, 0644, qla2x00_port_speed_show,
2308 qla2x00_port_speed_store);
2309 static DEVICE_ATTR(port_no, 0444, qla2x00_port_no_show, NULL);
2310 static DEVICE_ATTR(fw_attr, 0444, qla2x00_fw_attr_show, NULL);
2311
2312
2313 struct device_attribute *qla2x00_host_attrs[] = {
2314 &dev_attr_driver_version,
2315 &dev_attr_fw_version,
2316 &dev_attr_serial_num,
2317 &dev_attr_isp_name,
2318 &dev_attr_isp_id,
2319 &dev_attr_model_name,
2320 &dev_attr_model_desc,
2321 &dev_attr_pci_info,
2322 &dev_attr_link_state,
2323 &dev_attr_zio,
2324 &dev_attr_zio_timer,
2325 &dev_attr_beacon,
2326 &dev_attr_optrom_bios_version,
2327 &dev_attr_optrom_efi_version,
2328 &dev_attr_optrom_fcode_version,
2329 &dev_attr_optrom_fw_version,
2330 &dev_attr_84xx_fw_version,
2331 &dev_attr_total_isp_aborts,
2332 &dev_attr_serdes_version,
2333 &dev_attr_mpi_version,
2334 &dev_attr_phy_version,
2335 &dev_attr_flash_block_size,
2336 &dev_attr_vlan_id,
2337 &dev_attr_vn_port_mac_address,
2338 &dev_attr_fabric_param,
2339 &dev_attr_fw_state,
2340 &dev_attr_optrom_gold_fw_version,
2341 &dev_attr_thermal_temp,
2342 &dev_attr_diag_requests,
2343 &dev_attr_diag_megabytes,
2344 &dev_attr_fw_dump_size,
2345 &dev_attr_allow_cna_fw_dump,
2346 &dev_attr_pep_version,
2347 &dev_attr_min_supported_speed,
2348 &dev_attr_max_supported_speed,
2349 &dev_attr_zio_threshold,
2350 &dev_attr_dif_bundle_statistics,
2351 &dev_attr_port_speed,
2352 &dev_attr_port_no,
2353 &dev_attr_fw_attr,
2354 NULL, /* reserve for qlini_mode */
2355 NULL, /* reserve for ql2xiniexchg */
2356 NULL, /* reserve for ql2xexchoffld */
2357 NULL,
2358 };
2359
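/*
 * qla_insert_tgt_attrs() - fill the NULL slots reserved at the tail of
 * qla2x00_host_attrs[] above with the target-mode related attributes
 * (qlini_mode, ql2xiniexchg, ql2xexchoffld).
 */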
2360 void qla_insert_tgt_attrs(void)
2361 {
2362 struct device_attribute **attr;
2363
2364 /* advance to empty slot */
2365 for (attr = &qla2x00_host_attrs[0]; *attr; ++attr)
2366 continue;
2367
2368 *attr = &dev_attr_qlini_mode;
2369 attr++;
2370 *attr = &dev_attr_ql2xiniexchg;
2371 attr++;
2372 *attr = &dev_attr_ql2xexchoffld;
2373 }
2374
2375 /* Host attributes. */
2376
2377 static void
2378 qla2x00_get_host_port_id(struct Scsi_Host *shost)
2379 {
2380 scsi_qla_host_t *vha = shost_priv(shost);
2381
2382 fc_host_port_id(shost) = vha->d_id.b.domain << 16 |
2383 vha->d_id.b.area << 8 | vha->d_id.b.al_pa;
2384 }
2385
2386 static void
2387 qla2x00_get_host_speed(struct Scsi_Host *shost)
2388 {
2389 scsi_qla_host_t *vha = shost_priv(shost);
2390 u32 speed;
2391
2392 if (IS_QLAFX00(vha->hw)) {
2393 qlafx00_get_host_speed(shost);
2394 return;
2395 }
2396
2397 switch (vha->hw->link_data_rate) {
2398 case PORT_SPEED_1GB:
2399 speed = FC_PORTSPEED_1GBIT;
2400 break;
2401 case PORT_SPEED_2GB:
2402 speed = FC_PORTSPEED_2GBIT;
2403 break;
2404 case PORT_SPEED_4GB:
2405 speed = FC_PORTSPEED_4GBIT;
2406 break;
2407 case PORT_SPEED_8GB:
2408 speed = FC_PORTSPEED_8GBIT;
2409 break;
2410 case PORT_SPEED_10GB:
2411 speed = FC_PORTSPEED_10GBIT;
2412 break;
2413 case PORT_SPEED_16GB:
2414 speed = FC_PORTSPEED_16GBIT;
2415 break;
2416 case PORT_SPEED_32GB:
2417 speed = FC_PORTSPEED_32GBIT;
2418 break;
2419 case PORT_SPEED_64GB:
2420 speed = FC_PORTSPEED_64GBIT;
2421 break;
2422 default:
2423 speed = FC_PORTSPEED_UNKNOWN;
2424 break;
2425 }
2426
2427 fc_host_speed(shost) = speed;
2428 }
2429
2430 static void
2431 qla2x00_get_host_port_type(struct Scsi_Host *shost)
2432 {
2433 scsi_qla_host_t *vha = shost_priv(shost);
2434 uint32_t port_type;
2435
2436 if (vha->vp_idx) {
2437 fc_host_port_type(shost) = FC_PORTTYPE_NPIV;
2438 return;
2439 }
2440 switch (vha->hw->current_topology) {
2441 case ISP_CFG_NL:
2442 port_type = FC_PORTTYPE_LPORT;
2443 break;
2444 case ISP_CFG_FL:
2445 port_type = FC_PORTTYPE_NLPORT;
2446 break;
2447 case ISP_CFG_N:
2448 port_type = FC_PORTTYPE_PTP;
2449 break;
2450 case ISP_CFG_F:
2451 port_type = FC_PORTTYPE_NPORT;
2452 break;
2453 default:
2454 port_type = FC_PORTTYPE_UNKNOWN;
2455 break;
2456 }
2457
2458 fc_host_port_type(shost) = port_type;
2459 }
2460
2461 static void
2462 qla2x00_get_starget_node_name(struct scsi_target *starget)
2463 {
2464 struct Scsi_Host *host = dev_to_shost(starget->dev.parent);
2465 scsi_qla_host_t *vha = shost_priv(host);
2466 fc_port_t *fcport;
2467 u64 node_name = 0;
2468
2469 list_for_each_entry(fcport, &vha->vp_fcports, list) {
2470 if (fcport->rport &&
2471 starget->id == fcport->rport->scsi_target_id) {
2472 node_name = wwn_to_u64(fcport->node_name);
2473 break;
2474 }
2475 }
2476
2477 fc_starget_node_name(starget) = node_name;
2478 }
2479
2480 static void
2481 qla2x00_get_starget_port_name(struct scsi_target *starget)
2482 {
2483 struct Scsi_Host *host = dev_to_shost(starget->dev.parent);
2484 scsi_qla_host_t *vha = shost_priv(host);
2485 fc_port_t *fcport;
2486 u64 port_name = 0;
2487
2488 list_for_each_entry(fcport, &vha->vp_fcports, list) {
2489 if (fcport->rport &&
2490 starget->id == fcport->rport->scsi_target_id) {
2491 port_name = wwn_to_u64(fcport->port_name);
2492 break;
2493 }
2494 }
2495
2496 fc_starget_port_name(starget) = port_name;
2497 }
2498
2499 static void
2500 qla2x00_get_starget_port_id(struct scsi_target *starget)
2501 {
2502 struct Scsi_Host *host = dev_to_shost(starget->dev.parent);
2503 scsi_qla_host_t *vha = shost_priv(host);
2504 fc_port_t *fcport;
2505 uint32_t port_id = ~0U;
2506
2507 list_for_each_entry(fcport, &vha->vp_fcports, list) {
2508 if (fcport->rport &&
2509 starget->id == fcport->rport->scsi_target_id) {
2510 port_id = fcport->d_id.b.domain << 16 |
2511 fcport->d_id.b.area << 8 | fcport->d_id.b.al_pa;
2512 break;
2513 }
2514 }
2515
2516 fc_starget_port_id(starget) = port_id;
2517 }
2518
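/* A requested dev_loss_tmo of 0 is bumped to 1 so the timeout is never zero. */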
2519 static inline void
2520 qla2x00_set_rport_loss_tmo(struct fc_rport *rport, uint32_t timeout)
2521 {
2522 rport->dev_loss_tmo = timeout ? timeout : 1;
2523 }
2524
2525 static void
2526 qla2x00_dev_loss_tmo_callbk(struct fc_rport *rport)
2527 {
2528 struct Scsi_Host *host = rport_to_shost(rport);
2529 fc_port_t *fcport = *(fc_port_t **)rport->dd_data;
2530 unsigned long flags;
2531
2532 if (!fcport)
2533 return;
2534
2535 /* Now that the rport has been deleted, set the fcport state to
2536  * FCS_DEVICE_DEAD. */
2537 qla2x00_set_fcport_state(fcport, FCS_DEVICE_DEAD);
2538
2539 /*
2540 * Transport has effectively 'deleted' the rport, clear
2541 * all local references.
2542 */
2543 spin_lock_irqsave(host->host_lock, flags);
2544 fcport->rport = fcport->drport = NULL;
2545 *((fc_port_t **)rport->dd_data) = NULL;
2546 spin_unlock_irqrestore(host->host_lock, flags);
2547
2548 if (test_bit(ABORT_ISP_ACTIVE, &fcport->vha->dpc_flags))
2549 return;
2550
2551 if (unlikely(pci_channel_offline(fcport->vha->hw->pdev))) {
2552 qla2x00_abort_all_cmds(fcport->vha, DID_NO_CONNECT << 16);
2553 return;
2554 }
2555 }
2556
2557 static void
2558 qla2x00_terminate_rport_io(struct fc_rport *rport)
2559 {
2560 fc_port_t *fcport = *(fc_port_t **)rport->dd_data;
2561
2562 if (!fcport)
2563 return;
2564
2565 if (test_bit(UNLOADING, &fcport->vha->dpc_flags))
2566 return;
2567
2568 if (test_bit(ABORT_ISP_ACTIVE, &fcport->vha->dpc_flags))
2569 return;
2570
2571 if (unlikely(pci_channel_offline(fcport->vha->hw->pdev))) {
2572 qla2x00_abort_all_cmds(fcport->vha, DID_NO_CONNECT << 16);
2573 return;
2574 }
2575 /*
2576 * At this point all of the fcport's software states are cleared.
2577 * Perform any final cleanup of firmware resources (PCBs and XCBs).
2578 */
2579 if (fcport->loop_id != FC_NO_LOOP_ID) {
2580 if (IS_FWI2_CAPABLE(fcport->vha->hw))
2581 fcport->vha->hw->isp_ops->fabric_logout(fcport->vha,
2582 fcport->loop_id, fcport->d_id.b.domain,
2583 fcport->d_id.b.area, fcport->d_id.b.al_pa);
2584 else
2585 qla2x00_port_logout(fcport->vha, fcport);
2586 }
2587 }
2588
2589 static int
2590 qla2x00_issue_lip(struct Scsi_Host *shost)
2591 {
2592 scsi_qla_host_t *vha = shost_priv(shost);
2593
2594 if (IS_QLAFX00(vha->hw))
2595 return 0;
2596
2597 qla2x00_loop_reset(vha);
2598 return 0;
2599 }
2600
2601 static struct fc_host_statistics *
2602 qla2x00_get_fc_host_stats(struct Scsi_Host *shost)
2603 {
2604 scsi_qla_host_t *vha = shost_priv(shost);
2605 struct qla_hw_data *ha = vha->hw;
2606 struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
2607 int rval;
2608 struct link_statistics *stats;
2609 dma_addr_t stats_dma;
2610 struct fc_host_statistics *p = &vha->fc_host_stat;
2611
2612 memset(p, -1, sizeof(*p));
2613
2614 if (IS_QLAFX00(vha->hw))
2615 goto done;
2616
2617 if (test_bit(UNLOADING, &vha->dpc_flags))
2618 goto done;
2619
2620 if (unlikely(pci_channel_offline(ha->pdev)))
2621 goto done;
2622
2623 if (qla2x00_chip_is_down(vha))
2624 goto done;
2625
2626 stats = dma_alloc_coherent(&ha->pdev->dev, sizeof(*stats), &stats_dma,
2627 GFP_KERNEL);
2628 if (!stats) {
2629 ql_log(ql_log_warn, vha, 0x707d,
2630 "Failed to allocate memory for stats.\n");
2631 goto done;
2632 }
2633
2634 rval = QLA_FUNCTION_FAILED;
2635 if (IS_FWI2_CAPABLE(ha)) {
2636 rval = qla24xx_get_isp_stats(base_vha, stats, stats_dma, 0);
2637 } else if (atomic_read(&base_vha->loop_state) == LOOP_READY &&
2638 !ha->dpc_active) {
2639 /* Must be in a 'READY' state for statistics retrieval. */
2640 rval = qla2x00_get_link_status(base_vha, base_vha->loop_id,
2641 stats, stats_dma);
2642 }
2643
2644 if (rval != QLA_SUCCESS)
2645 goto done_free;
2646
2647 p->link_failure_count = stats->link_fail_cnt;
2648 p->loss_of_sync_count = stats->loss_sync_cnt;
2649 p->loss_of_signal_count = stats->loss_sig_cnt;
2650 p->prim_seq_protocol_err_count = stats->prim_seq_err_cnt;
2651 p->invalid_tx_word_count = stats->inval_xmit_word_cnt;
2652 p->invalid_crc_count = stats->inval_crc_cnt;
2653 if (IS_FWI2_CAPABLE(ha)) {
2654 p->lip_count = stats->lip_cnt;
2655 p->tx_frames = stats->tx_frames;
2656 p->rx_frames = stats->rx_frames;
2657 p->dumped_frames = stats->discarded_frames;
2658 p->nos_count = stats->nos_rcvd;
2659 p->error_frames =
2660 stats->dropped_frames + stats->discarded_frames;
2661 p->rx_words = vha->qla_stats.input_bytes;
2662 p->tx_words = vha->qla_stats.output_bytes;
2663 }
2664 p->fcp_control_requests = vha->qla_stats.control_requests;
2665 p->fcp_input_requests = vha->qla_stats.input_requests;
2666 p->fcp_output_requests = vha->qla_stats.output_requests;
2667 p->fcp_input_megabytes = vha->qla_stats.input_bytes >> 20;
2668 p->fcp_output_megabytes = vha->qla_stats.output_bytes >> 20;
2669 p->seconds_since_last_reset =
2670 get_jiffies_64() - vha->qla_stats.jiffies_at_last_reset;
2671 do_div(p->seconds_since_last_reset, HZ);
2672
2673 done_free:
2674 dma_free_coherent(&ha->pdev->dev, sizeof(struct link_statistics),
2675 stats, stats_dma);
2676 done:
2677 return p;
2678 }
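/*
 * The translated counters surface through the FC transport class, e.g.
 * (a sketch; host number varies):
 *
 *   cat /sys/class/fc_host/host0/statistics/link_failure_count
 */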
2679
2680 static void
2681 qla2x00_reset_host_stats(struct Scsi_Host *shost)
2682 {
2683 scsi_qla_host_t *vha = shost_priv(shost);
2684 struct qla_hw_data *ha = vha->hw;
2685 struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
2686 struct link_statistics *stats;
2687 dma_addr_t stats_dma;
2688
2689 memset(&vha->qla_stats, 0, sizeof(vha->qla_stats));
2690 memset(&vha->fc_host_stat, 0, sizeof(vha->fc_host_stat));
2691
2692 vha->qla_stats.jiffies_at_last_reset = get_jiffies_64();
2693
2694 if (IS_FWI2_CAPABLE(ha)) {
2695 stats = dma_alloc_coherent(&ha->pdev->dev,
2696 sizeof(*stats), &stats_dma, GFP_KERNEL);
2697 if (!stats) {
2698 ql_log(ql_log_warn, vha, 0x70d7,
2699 "Failed to allocate memory for stats.\n");
2700 return;
2701 }
2702
2703 /* reset firmware statistics */
2704 qla24xx_get_isp_stats(base_vha, stats, stats_dma, BIT_0);
2705
2706 dma_free_coherent(&ha->pdev->dev, sizeof(*stats),
2707 stats, stats_dma);
2708 }
2709 }
2710
2711 static void
2712 qla2x00_get_host_symbolic_name(struct Scsi_Host *shost)
2713 {
2714 scsi_qla_host_t *vha = shost_priv(shost);
2715
2716 qla2x00_get_sym_node_name(vha, fc_host_symbolic_name(shost),
2717 sizeof(fc_host_symbolic_name(shost)));
2718 }
2719
2720 static void
2721 qla2x00_set_host_system_hostname(struct Scsi_Host *shost)
2722 {
2723 scsi_qla_host_t *vha = shost_priv(shost);
2724
2725 set_bit(REGISTER_FDMI_NEEDED, &vha->dpc_flags);
2726 }
2727
2728 static void
2729 qla2x00_get_host_fabric_name(struct Scsi_Host *shost)
2730 {
2731 scsi_qla_host_t *vha = shost_priv(shost);
2732 static const uint8_t node_name[WWN_SIZE] = {
2733 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF
2734 };
2735 u64 fabric_name = wwn_to_u64(node_name);
2736
2737 if (vha->device_flags & SWITCH_FOUND)
2738 fabric_name = wwn_to_u64(vha->fabric_node_name);
2739
2740 fc_host_fabric_name(shost) = fabric_name;
2741 }
2742
2743 static void
2744 qla2x00_get_host_port_state(struct Scsi_Host *shost)
2745 {
2746 scsi_qla_host_t *vha = shost_priv(shost);
2747 struct scsi_qla_host *base_vha = pci_get_drvdata(vha->hw->pdev);
2748
2749 if (!base_vha->flags.online) {
2750 fc_host_port_state(shost) = FC_PORTSTATE_OFFLINE;
2751 return;
2752 }
2753
2754 switch (atomic_read(&base_vha->loop_state)) {
2755 case LOOP_UPDATE:
2756 fc_host_port_state(shost) = FC_PORTSTATE_DIAGNOSTICS;
2757 break;
2758 case LOOP_DOWN:
2759 if (test_bit(LOOP_RESYNC_NEEDED, &base_vha->dpc_flags))
2760 fc_host_port_state(shost) = FC_PORTSTATE_DIAGNOSTICS;
2761 else
2762 fc_host_port_state(shost) = FC_PORTSTATE_LINKDOWN;
2763 break;
2764 case LOOP_DEAD:
2765 fc_host_port_state(shost) = FC_PORTSTATE_LINKDOWN;
2766 break;
2767 case LOOP_READY:
2768 fc_host_port_state(shost) = FC_PORTSTATE_ONLINE;
2769 break;
2770 default:
2771 fc_host_port_state(shost) = FC_PORTSTATE_UNKNOWN;
2772 break;
2773 }
2774 }
2775
2776 static int
2777 qla24xx_vport_create(struct fc_vport *fc_vport, bool disable)
2778 {
2779 int ret = 0;
2780 uint8_t qos = 0;
2781 scsi_qla_host_t *base_vha = shost_priv(fc_vport->shost);
2782 scsi_qla_host_t *vha = NULL;
2783 struct qla_hw_data *ha = base_vha->hw;
2784 int cnt;
2785 struct req_que *req = ha->req_q_map[0];
2786 struct qla_qpair *qpair;
2787
2788 ret = qla24xx_vport_create_req_sanity_check(fc_vport);
2789 if (ret) {
2790 ql_log(ql_log_warn, vha, 0x707e,
2791 "Vport sanity check failed, status %x\n", ret);
2792 return (ret);
2793 }
2794
2795 vha = qla24xx_create_vhost(fc_vport);
2796 if (vha == NULL) {
2797 ql_log(ql_log_warn, vha, 0x707f, "Vport create host failed.\n");
2798 return FC_VPORT_FAILED;
2799 }
2800 if (disable) {
2801 atomic_set(&vha->vp_state, VP_OFFLINE);
2802 fc_vport_set_state(fc_vport, FC_VPORT_DISABLED);
2803 } else
2804 atomic_set(&vha->vp_state, VP_FAILED);
2805
2806 /* ready to create vport */
2807 ql_log(ql_log_info, vha, 0x7080,
2808 "VP entry id %d assigned.\n", vha->vp_idx);
2809
2810 /* initialize vport states */
2811 atomic_set(&vha->loop_state, LOOP_DOWN);
2812 vha->vp_err_state = VP_ERR_PORTDWN;
2813 vha->vp_prev_err_state = VP_ERR_UNKWN;
2814 /* Check if physical ha port is Up */
2815 if (atomic_read(&base_vha->loop_state) == LOOP_DOWN ||
2816 atomic_read(&base_vha->loop_state) == LOOP_DEAD) {
2817 /* Don't retry or attempt login of this virtual port */
2818 ql_dbg(ql_dbg_user, vha, 0x7081,
2819 "Vport loop state is not UP.\n");
2820 atomic_set(&vha->loop_state, LOOP_DEAD);
2821 if (!disable)
2822 fc_vport_set_state(fc_vport, FC_VPORT_LINKDOWN);
2823 }
2824
2825 if (IS_T10_PI_CAPABLE(ha) && ql2xenabledif) {
2826 if (ha->fw_attributes & BIT_4) {
2827 int prot = 0, guard;
2828
2829 vha->flags.difdix_supported = 1;
2830 ql_dbg(ql_dbg_user, vha, 0x7082,
2831 "Registered for DIF/DIX type 1 and 3 protection.\n");
2832 if (ql2xenabledif == 1)
2833 prot = SHOST_DIX_TYPE0_PROTECTION;
2834 scsi_host_set_prot(vha->host,
2835 prot | SHOST_DIF_TYPE1_PROTECTION
2836 | SHOST_DIF_TYPE2_PROTECTION
2837 | SHOST_DIF_TYPE3_PROTECTION
2838 | SHOST_DIX_TYPE1_PROTECTION
2839 | SHOST_DIX_TYPE2_PROTECTION
2840 | SHOST_DIX_TYPE3_PROTECTION);
2841
2842 guard = SHOST_DIX_GUARD_CRC;
2843
2844 if (IS_PI_IPGUARD_CAPABLE(ha) &&
2845 (ql2xenabledif > 1 || IS_PI_DIFB_DIX0_CAPABLE(ha)))
2846 guard |= SHOST_DIX_GUARD_IP;
2847
2848 scsi_host_set_guard(vha->host, guard);
2849 } else
2850 vha->flags.difdix_supported = 0;
2851 }
2852
2853 if (scsi_add_host_with_dma(vha->host, &fc_vport->dev,
2854 &ha->pdev->dev)) {
2855 ql_dbg(ql_dbg_user, vha, 0x7083,
2856 "scsi_add_host failure for VP[%d].\n", vha->vp_idx);
2857 goto vport_create_failed_2;
2858 }
2859
2860 /* initialize attributes */
2861 fc_host_dev_loss_tmo(vha->host) = ha->port_down_retry_count;
2862 fc_host_node_name(vha->host) = wwn_to_u64(vha->node_name);
2863 fc_host_port_name(vha->host) = wwn_to_u64(vha->port_name);
2864 fc_host_supported_classes(vha->host) =
2865 fc_host_supported_classes(base_vha->host);
2866 fc_host_supported_speeds(vha->host) =
2867 fc_host_supported_speeds(base_vha->host);
2868
2869 qlt_vport_create(vha, ha);
2870 qla24xx_vport_disable(fc_vport, disable);
2871
2872 if (!ql2xmqsupport || !ha->npiv_info)
2873 goto vport_queue;
2874
2875 /* Create a request queue in QoS mode for the vport */
2876 for (cnt = 0; cnt < ha->nvram_npiv_size; cnt++) {
2877 if (memcmp(ha->npiv_info[cnt].port_name, vha->port_name, 8) == 0
2878 && memcmp(ha->npiv_info[cnt].node_name, vha->node_name,
2879 8) == 0) {
2880 qos = ha->npiv_info[cnt].q_qos;
2881 break;
2882 }
2883 }
2884
2885 if (qos) {
2886 qpair = qla2xxx_create_qpair(vha, qos, vha->vp_idx, true);
2887 if (!qpair)
2888 ql_log(ql_log_warn, vha, 0x7084,
2889 "Can't create qpair for VP[%d]\n",
2890 vha->vp_idx);
2891 else {
2892 ql_dbg(ql_dbg_multiq, vha, 0xc001,
2893 "Queue pair: %d Qos: %d) created for VP[%d]\n",
2894 qpair->id, qos, vha->vp_idx);
2895 ql_dbg(ql_dbg_user, vha, 0x7085,
2896 "Queue Pair: %d Qos: %d) created for VP[%d]\n",
2897 qpair->id, qos, vha->vp_idx);
2898 req = qpair->req;
2899 vha->qpair = qpair;
2900 }
2901 }
2902
2903 vport_queue:
2904 vha->req = req;
2905 return 0;
2906
2907 vport_create_failed_2:
2908 qla24xx_disable_vp(vha);
2909 qla24xx_deallocate_vp_id(vha);
2910 scsi_host_put(vha->host);
2911 return FC_VPORT_FAILED;
2912 }
2913
2914 static int
2915 qla24xx_vport_delete(struct fc_vport *fc_vport)
2916 {
2917 scsi_qla_host_t *vha = fc_vport->dd_data;
2918 struct qla_hw_data *ha = vha->hw;
2919 uint16_t id = vha->vp_idx;
2920
2921 while (test_bit(LOOP_RESYNC_ACTIVE, &vha->dpc_flags) ||
2922 test_bit(FCPORT_UPDATE_NEEDED, &vha->dpc_flags))
2923 msleep(1000);
2924
2925 qla_nvme_delete(vha);
2926
2927 qla24xx_disable_vp(vha);
2928 qla2x00_wait_for_sess_deletion(vha);
2929
2930 vha->flags.delete_progress = 1;
2931
2932 qlt_remove_target(ha, vha);
2933
2934 fc_remove_host(vha->host);
2935
2936 scsi_remove_host(vha->host);
2937
2938 /* Allow the timer to run and drain queued items when removing the vp */
2939 qla24xx_deallocate_vp_id(vha);
2940
2941 if (vha->timer_active) {
2942 qla2x00_vp_stop_timer(vha);
2943 ql_dbg(ql_dbg_user, vha, 0x7086,
2944 "Timer for the VP[%d] has stopped\n", vha->vp_idx);
2945 }
2946
2947 qla2x00_free_fcports(vha);
2948
2949 mutex_lock(&ha->vport_lock);
2950 ha->cur_vport_count--;
2951 clear_bit(vha->vp_idx, ha->vp_idx_map);
2952 mutex_unlock(&ha->vport_lock);
2953
2954 dma_free_coherent(&ha->pdev->dev, vha->gnl.size, vha->gnl.l,
2955 vha->gnl.ldma);
2956
2957 vfree(vha->scan.l);
2958
2959 if (vha->qpair && vha->qpair->vp_idx == vha->vp_idx) {
2960 if (qla2xxx_delete_qpair(vha, vha->qpair) != QLA_SUCCESS)
2961 ql_log(ql_log_warn, vha, 0x7087,
2962 "Queue Pair delete failed.\n");
2963 }
2964
2965 ql_log(ql_log_info, vha, 0x7088, "VP[%d] deleted.\n", id);
2966 scsi_host_put(vha->host);
2967 return 0;
2968 }
2969
2970 static int
2971 qla24xx_vport_disable(struct fc_vport *fc_vport, bool disable)
2972 {
2973 scsi_qla_host_t *vha = fc_vport->dd_data;
2974
2975 if (disable)
2976 qla24xx_disable_vp(vha);
2977 else
2978 qla24xx_enable_vp(vha);
2979
2980 return 0;
2981 }
2982
2983 struct fc_function_template qla2xxx_transport_functions = {
2984
2985 .show_host_node_name = 1,
2986 .show_host_port_name = 1,
2987 .show_host_supported_classes = 1,
2988 .show_host_supported_speeds = 1,
2989
2990 .get_host_port_id = qla2x00_get_host_port_id,
2991 .show_host_port_id = 1,
2992 .get_host_speed = qla2x00_get_host_speed,
2993 .show_host_speed = 1,
2994 .get_host_port_type = qla2x00_get_host_port_type,
2995 .show_host_port_type = 1,
2996 .get_host_symbolic_name = qla2x00_get_host_symbolic_name,
2997 .show_host_symbolic_name = 1,
2998 .set_host_system_hostname = qla2x00_set_host_system_hostname,
2999 .show_host_system_hostname = 1,
3000 .get_host_fabric_name = qla2x00_get_host_fabric_name,
3001 .show_host_fabric_name = 1,
3002 .get_host_port_state = qla2x00_get_host_port_state,
3003 .show_host_port_state = 1,
3004
3005 .dd_fcrport_size = sizeof(struct fc_port *),
3006 .show_rport_supported_classes = 1,
3007
3008 .get_starget_node_name = qla2x00_get_starget_node_name,
3009 .show_starget_node_name = 1,
3010 .get_starget_port_name = qla2x00_get_starget_port_name,
3011 .show_starget_port_name = 1,
3012 .get_starget_port_id = qla2x00_get_starget_port_id,
3013 .show_starget_port_id = 1,
3014
3015 .set_rport_dev_loss_tmo = qla2x00_set_rport_loss_tmo,
3016 .show_rport_dev_loss_tmo = 1,
3017
3018 .issue_fc_host_lip = qla2x00_issue_lip,
3019 .dev_loss_tmo_callbk = qla2x00_dev_loss_tmo_callbk,
3020 .terminate_rport_io = qla2x00_terminate_rport_io,
3021 .get_fc_host_stats = qla2x00_get_fc_host_stats,
3022 .reset_fc_host_stats = qla2x00_reset_host_stats,
3023
3024 .vport_create = qla24xx_vport_create,
3025 .vport_disable = qla24xx_vport_disable,
3026 .vport_delete = qla24xx_vport_delete,
3027 .bsg_request = qla24xx_bsg_request,
3028 .bsg_timeout = qla24xx_bsg_timeout,
3029 };
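/*
 * With vport_create/vport_delete wired into the template above, the FC
 * transport exposes NPIV controls under fc_host. A sketch (the WWNs are
 * illustrative; see scsi_transport_fc for the exact "wwpn:wwnn" format):
 *
 *   echo '2100001b329b0001:2000001b329b0001' > \
 *       /sys/class/fc_host/host0/vport_create
 */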
3030
3031 struct fc_function_template qla2xxx_transport_vport_functions = {
3032
3033 .show_host_node_name = 1,
3034 .show_host_port_name = 1,
3035 .show_host_supported_classes = 1,
3036
3037 .get_host_port_id = qla2x00_get_host_port_id,
3038 .show_host_port_id = 1,
3039 .get_host_speed = qla2x00_get_host_speed,
3040 .show_host_speed = 1,
3041 .get_host_port_type = qla2x00_get_host_port_type,
3042 .show_host_port_type = 1,
3043 .get_host_symbolic_name = qla2x00_get_host_symbolic_name,
3044 .show_host_symbolic_name = 1,
3045 .set_host_system_hostname = qla2x00_set_host_system_hostname,
3046 .show_host_system_hostname = 1,
3047 .get_host_fabric_name = qla2x00_get_host_fabric_name,
3048 .show_host_fabric_name = 1,
3049 .get_host_port_state = qla2x00_get_host_port_state,
3050 .show_host_port_state = 1,
3051
3052 .dd_fcrport_size = sizeof(struct fc_port *),
3053 .show_rport_supported_classes = 1,
3054
3055 .get_starget_node_name = qla2x00_get_starget_node_name,
3056 .show_starget_node_name = 1,
3057 .get_starget_port_name = qla2x00_get_starget_port_name,
3058 .show_starget_port_name = 1,
3059 .get_starget_port_id = qla2x00_get_starget_port_id,
3060 .show_starget_port_id = 1,
3061
3062 .set_rport_dev_loss_tmo = qla2x00_set_rport_loss_tmo,
3063 .show_rport_dev_loss_tmo = 1,
3064
3065 .issue_fc_host_lip = qla2x00_issue_lip,
3066 .dev_loss_tmo_callbk = qla2x00_dev_loss_tmo_callbk,
3067 .terminate_rport_io = qla2x00_terminate_rport_io,
3068 .get_fc_host_stats = qla2x00_get_fc_host_stats,
3069 .reset_fc_host_stats = qla2x00_reset_host_stats,
3070
3071 .bsg_request = qla24xx_bsg_request,
3072 .bsg_timeout = qla24xx_bsg_timeout,
3073 };
3074
3075 void
3076 qla2x00_init_host_attr(scsi_qla_host_t *vha)
3077 {
3078 struct qla_hw_data *ha = vha->hw;
3079 u32 speeds = FC_PORTSPEED_UNKNOWN;
3080
3081 fc_host_dev_loss_tmo(vha->host) = ha->port_down_retry_count;
3082 fc_host_node_name(vha->host) = wwn_to_u64(vha->node_name);
3083 fc_host_port_name(vha->host) = wwn_to_u64(vha->port_name);
3084 fc_host_supported_classes(vha->host) = ha->base_qpair->enable_class_2 ?
3085 (FC_COS_CLASS2|FC_COS_CLASS3) : FC_COS_CLASS3;
3086 fc_host_max_npiv_vports(vha->host) = ha->max_npiv_vports;
3087 fc_host_npiv_vports_inuse(vha->host) = ha->cur_vport_count;
3088
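/*
 * Build the advertised speed mask. For 27xx/28xx parts the
 * firmware-reported max/min_supported_speed values appear to encode
 * 0=16Gb, 1=32Gb, 2=64Gb (max) and 2=4Gb ... 6=64Gb (min); the ladder
 * below assumes that encoding.
 */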
3089 if (IS_CNA_CAPABLE(ha))
3090 speeds = FC_PORTSPEED_10GBIT;
3091 else if (IS_QLA28XX(ha) || IS_QLA27XX(ha)) {
3092 if (ha->max_supported_speed == 2) {
3093 if (ha->min_supported_speed <= 6)
3094 speeds |= FC_PORTSPEED_64GBIT;
3095 }
3096 if (ha->max_supported_speed == 2 ||
3097 ha->max_supported_speed == 1) {
3098 if (ha->min_supported_speed <= 5)
3099 speeds |= FC_PORTSPEED_32GBIT;
3100 }
3101 if (ha->max_supported_speed == 2 ||
3102 ha->max_supported_speed == 1 ||
3103 ha->max_supported_speed == 0) {
3104 if (ha->min_supported_speed <= 4)
3105 speeds |= FC_PORTSPEED_16GBIT;
3106 }
3107 if (ha->max_supported_speed == 1 ||
3108 ha->max_supported_speed == 0) {
3109 if (ha->min_supported_speed <= 3)
3110 speeds |= FC_PORTSPEED_8GBIT;
3111 }
3112 if (ha->max_supported_speed == 0) {
3113 if (ha->min_supported_speed <= 2)
3114 speeds |= FC_PORTSPEED_4GBIT;
3115 }
3116 } else if (IS_QLA2031(ha))
3117 speeds = FC_PORTSPEED_16GBIT|FC_PORTSPEED_8GBIT|
3118 FC_PORTSPEED_4GBIT;
3119 else if (IS_QLA25XX(ha) || IS_QLAFX00(ha))
3120 speeds = FC_PORTSPEED_8GBIT|FC_PORTSPEED_4GBIT|
3121 FC_PORTSPEED_2GBIT|FC_PORTSPEED_1GBIT;
3122 else if (IS_QLA24XX_TYPE(ha))
3123 speeds = FC_PORTSPEED_4GBIT|FC_PORTSPEED_2GBIT|
3124 FC_PORTSPEED_1GBIT;
3125 else if (IS_QLA23XX(ha))
3126 speeds = FC_PORTSPEED_2GBIT|FC_PORTSPEED_1GBIT;
3127 else
3128 speeds = FC_PORTSPEED_1GBIT;
3129
3130 fc_host_supported_speeds(vha->host) = speeds;
3131 }