/*
 *    driver for Microsemi PQI-based storage controllers
 *    Copyright (c) 2016 Microsemi Corporation
 *    Copyright (c) 2016 PMC-Sierra, Inc.
 *
 *    This program is free software; you can redistribute it and/or modify
 *    it under the terms of the GNU General Public License as published by
 *    the Free Software Foundation; version 2 of the License.
 *
 *    This program is distributed in the hope that it will be useful,
 *    but WITHOUT ANY WARRANTY; without even the implied warranty of
 *    MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 *    NON INFRINGEMENT.  See the GNU General Public License for more details.
 *
 *    Questions/Comments/Bugfixes to esc.storagedev@microsemi.com
 *
 */
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/pci.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/sched.h>
#include <linux/rtc.h>
#include <linux/bcd.h>
#include <linux/cciss_ioctl.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_eh.h>
#include <scsi/scsi_transport_sas.h>
#include <asm/unaligned.h>

#include "smartpqi.h"
#include "smartpqi_sis.h"
#if !defined(BUILD_TIMESTAMP)
#define BUILD_TIMESTAMP
#endif
#define DRIVER_VERSION		"0.9.9-100"
#define DRIVER_MAJOR		0
#define DRIVER_MINOR		9
#define DRIVER_RELEASE		9
#define DRIVER_REVISION		100

#define DRIVER_NAME		"Microsemi PQI Driver (v" DRIVER_VERSION ")"
#define DRIVER_NAME_SHORT	"smartpqi"

MODULE_AUTHOR("Microsemi");
MODULE_DESCRIPTION("Driver for Microsemi Smart Family Controller version "
	DRIVER_VERSION);
MODULE_SUPPORTED_DEVICE("Microsemi Smart Family Controllers");
MODULE_VERSION(DRIVER_VERSION);
MODULE_LICENSE("GPL");
#define PQI_ENABLE_MULTI_QUEUE_SUPPORT	0

static char *hpe_branded_controller = "HPE Smart Array Controller";
static char *microsemi_branded_controller = "Microsemi Smart Family Controller";
static void pqi_take_ctrl_offline(struct pqi_ctrl_info *ctrl_info);
static int pqi_scan_scsi_devices(struct pqi_ctrl_info *ctrl_info);
static void pqi_scan_start(struct Scsi_Host *shost);
static void pqi_start_io(struct pqi_ctrl_info *ctrl_info,
	struct pqi_queue_group *queue_group, enum pqi_io_path path,
	struct pqi_io_request *io_request);
static int pqi_submit_raid_request_synchronous(struct pqi_ctrl_info *ctrl_info,
	struct pqi_iu_header *request, unsigned int flags,
	struct pqi_raid_error_info *error_info, unsigned long timeout_msecs);
static int pqi_aio_submit_io(struct pqi_ctrl_info *ctrl_info,
	struct scsi_cmnd *scmd, u32 aio_handle, u8 *cdb,
	unsigned int cdb_length, struct pqi_queue_group *queue_group,
	struct pqi_encryption_info *encryption_info);

/* for flags argument to pqi_submit_raid_request_synchronous() */
#define PQI_SYNC_FLAGS_INTERRUPTABLE	0x1

static struct scsi_transport_template *pqi_sas_transport_template;

static atomic_t pqi_controller_count = ATOMIC_INIT(0);
static int pqi_disable_device_id_wildcards;
module_param_named(disable_device_id_wildcards,
	pqi_disable_device_id_wildcards, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(disable_device_id_wildcards,
	"Disable device ID wildcards.");
static char *raid_levels[] = {
	"RAID-0",
	"RAID-4",
	"RAID-1(1+0)",
	"RAID-5",
	"RAID-5+1",
	"RAID-ADG",
	"RAID-1(ADM)",
};

static char *pqi_raid_level_to_string(u8 raid_level)
{
	if (raid_level < ARRAY_SIZE(raid_levels))
		return raid_levels[raid_level];

	return "";
}

#define SA_RAID_0		0
#define SA_RAID_4		1
#define SA_RAID_1		2	/* also used for RAID 10 */
#define SA_RAID_5		3	/* also used for RAID 50 */
#define SA_RAID_51		4
#define SA_RAID_6		5	/* also used for RAID 60 */
#define SA_RAID_ADM		6	/* also used for RAID 1+0 ADM */
#define SA_RAID_MAX		SA_RAID_ADM
#define SA_RAID_UNKNOWN		0xff
static inline void pqi_scsi_done(struct scsi_cmnd *scmd)
{
	scmd->scsi_done(scmd);
}

static inline bool pqi_scsi3addr_equal(u8 *scsi3addr1, u8 *scsi3addr2)
{
	return memcmp(scsi3addr1, scsi3addr2, 8) == 0;
}

static inline struct pqi_ctrl_info *shost_to_hba(struct Scsi_Host *shost)
{
	void *hostdata = shost_priv(shost);

	return *((struct pqi_ctrl_info **)hostdata);
}

static inline bool pqi_is_logical_device(struct pqi_scsi_dev *device)
{
	return !device->is_physical_device;
}

static inline bool pqi_ctrl_offline(struct pqi_ctrl_info *ctrl_info)
{
	return !ctrl_info->controller_online;
}

static inline void pqi_check_ctrl_health(struct pqi_ctrl_info *ctrl_info)
{
	if (ctrl_info->controller_online)
		if (!sis_is_firmware_running(ctrl_info))
			pqi_take_ctrl_offline(ctrl_info);
}

static inline bool pqi_is_hba_lunid(u8 *scsi3addr)
{
	return pqi_scsi3addr_equal(scsi3addr, RAID_CTLR_LUNID);
}

static inline enum pqi_ctrl_mode pqi_get_ctrl_mode(
	struct pqi_ctrl_info *ctrl_info)
{
	return sis_read_driver_scratch(ctrl_info);
}

static inline void pqi_save_ctrl_mode(struct pqi_ctrl_info *ctrl_info,
	enum pqi_ctrl_mode mode)
{
	sis_write_driver_scratch(ctrl_info, mode);
}

#define PQI_RESCAN_WORK_INTERVAL	(10 * HZ)

static inline void pqi_schedule_rescan_worker(struct pqi_ctrl_info *ctrl_info)
{
	schedule_delayed_work(&ctrl_info->rescan_work,
		PQI_RESCAN_WORK_INTERVAL);
}
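
/*
 * Map a single contiguous kernel buffer for DMA and describe it with one
 * CISS SG descriptor, marking it as the last (and only) element.
 */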
static int pqi_map_single(struct pci_dev *pci_dev,
	struct pqi_sg_descriptor *sg_descriptor, void *buffer,
	size_t buffer_length, int data_direction)
{
	dma_addr_t bus_address;

	if (!buffer || buffer_length == 0 || data_direction == PCI_DMA_NONE)
		return 0;

	bus_address = pci_map_single(pci_dev, buffer, buffer_length,
		data_direction);
	if (pci_dma_mapping_error(pci_dev, bus_address))
		return -ENOMEM;

	put_unaligned_le64((u64)bus_address, &sg_descriptor->address);
	put_unaligned_le32(buffer_length, &sg_descriptor->length);
	put_unaligned_le32(CISS_SG_LAST, &sg_descriptor->flags);

	return 0;
}
static void pqi_pci_unmap(struct pci_dev *pci_dev,
	struct pqi_sg_descriptor *descriptors, int num_descriptors,
	int data_direction)
{
	int i;

	if (data_direction == PCI_DMA_NONE)
		return;

	for (i = 0; i < num_descriptors; i++)
		pci_unmap_single(pci_dev,
			(dma_addr_t)get_unaligned_le64(&descriptors[i].address),
			get_unaligned_le32(&descriptors[i].length),
			data_direction);
}
static int pqi_build_raid_path_request(struct pqi_ctrl_info *ctrl_info,
	struct pqi_raid_path_request *request, u8 cmd,
	u8 *scsi3addr, void *buffer, size_t buffer_length,
	u16 vpd_page, int *pci_direction)
{
	u8 *cdb;
	int pci_dir;

	memset(request, 0, sizeof(*request));

	request->header.iu_type = PQI_REQUEST_IU_RAID_PATH_IO;
	put_unaligned_le16(offsetof(struct pqi_raid_path_request,
		sg_descriptors[1]) - PQI_REQUEST_HEADER_LENGTH,
		&request->header.iu_length);
	put_unaligned_le32(buffer_length, &request->buffer_length);
	memcpy(request->lun_number, scsi3addr, sizeof(request->lun_number));
	request->task_attribute = SOP_TASK_ATTRIBUTE_SIMPLE;
	request->additional_cdb_bytes_usage = SOP_ADDITIONAL_CDB_BYTES_0;

	cdb = request->cdb;

	switch (cmd) {
	case INQUIRY:
		request->data_direction = SOP_READ_FLAG;
		cdb[0] = INQUIRY;
		if (vpd_page & VPD_PAGE) {
			cdb[1] = 0x1;
			cdb[2] = (u8)vpd_page;
		}
		cdb[4] = (u8)buffer_length;
		break;
	case CISS_REPORT_LOG:
	case CISS_REPORT_PHYS:
		request->data_direction = SOP_READ_FLAG;
		cdb[0] = cmd;
		if (cmd == CISS_REPORT_PHYS)
			cdb[1] = CISS_REPORT_PHYS_EXTENDED;
		else
			cdb[1] = CISS_REPORT_LOG_EXTENDED;
		put_unaligned_be32(buffer_length, &cdb[6]);
		break;
	case CISS_GET_RAID_MAP:
		request->data_direction = SOP_READ_FLAG;
		cdb[0] = CISS_READ;
		cdb[1] = CISS_GET_RAID_MAP;
		put_unaligned_be32(buffer_length, &cdb[6]);
		break;
	case SA_CACHE_FLUSH:
		request->data_direction = SOP_WRITE_FLAG;
		cdb[0] = BMIC_WRITE;
		cdb[6] = BMIC_CACHE_FLUSH;
		put_unaligned_be16(buffer_length, &cdb[7]);
		break;
	case BMIC_IDENTIFY_CONTROLLER:
	case BMIC_IDENTIFY_PHYSICAL_DEVICE:
		request->data_direction = SOP_READ_FLAG;
		cdb[0] = BMIC_READ;
		cdb[6] = cmd;
		put_unaligned_be16(buffer_length, &cdb[7]);
		break;
	case BMIC_WRITE_HOST_WELLNESS:
		request->data_direction = SOP_WRITE_FLAG;
		cdb[0] = BMIC_WRITE;
		cdb[6] = cmd;
		put_unaligned_be16(buffer_length, &cdb[7]);
		break;
	default:
		dev_err(&ctrl_info->pci_dev->dev, "unknown command 0x%x\n",
			cmd);
		break;
	}

	switch (request->data_direction) {
	case SOP_READ_FLAG:
		pci_dir = PCI_DMA_FROMDEVICE;
		break;
	case SOP_WRITE_FLAG:
		pci_dir = PCI_DMA_TODEVICE;
		break;
	case SOP_NO_DIRECTION_FLAG:
		pci_dir = PCI_DMA_NONE;
		break;
	default:
		pci_dir = PCI_DMA_BIDIRECTIONAL;
		break;
	}

	*pci_direction = pci_dir;

	return pqi_map_single(ctrl_info->pci_dev, &request->sg_descriptors[0],
		buffer, buffer_length, pci_dir);
}
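
/*
 * Grab a free element from the pre-allocated I/O request pool.  The starting
 * slot hint is read without locking ("benignly racy"); the refcount test
 * below guarantees each element is handed to only one caller at a time.
 */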
static struct pqi_io_request *pqi_alloc_io_request(
	struct pqi_ctrl_info *ctrl_info)
{
	struct pqi_io_request *io_request;
	u16 i = ctrl_info->next_io_request_slot;	/* benignly racy */

	while (1) {
		io_request = &ctrl_info->io_request_pool[i];
		if (atomic_inc_return(&io_request->refcount) == 1)
			break;
		atomic_dec(&io_request->refcount);
		i = (i + 1) % ctrl_info->max_io_slots;
	}

	/* benignly racy */
	ctrl_info->next_io_request_slot = (i + 1) % ctrl_info->max_io_slots;

	io_request->scmd = NULL;
	io_request->status = 0;
	io_request->error_info = NULL;

	return io_request;
}

static void pqi_free_io_request(struct pqi_io_request *io_request)
{
	atomic_dec(&io_request->refcount);
}
static int pqi_identify_controller(struct pqi_ctrl_info *ctrl_info,
	struct bmic_identify_controller *buffer)
{
	int rc;
	int pci_direction;
	struct pqi_raid_path_request request;

	rc = pqi_build_raid_path_request(ctrl_info, &request,
		BMIC_IDENTIFY_CONTROLLER, RAID_CTLR_LUNID, buffer,
		sizeof(*buffer), 0, &pci_direction);
	if (rc)
		return rc;

	rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header, 0,
		NULL, NO_TIMEOUT);

	pqi_pci_unmap(ctrl_info->pci_dev, request.sg_descriptors, 1,
		pci_direction);

	return rc;
}
static int pqi_scsi_inquiry(struct pqi_ctrl_info *ctrl_info,
	u8 *scsi3addr, u16 vpd_page, void *buffer, size_t buffer_length)
{
	int rc;
	int pci_direction;
	struct pqi_raid_path_request request;

	rc = pqi_build_raid_path_request(ctrl_info, &request,
		INQUIRY, scsi3addr, buffer, buffer_length, vpd_page,
		&pci_direction);
	if (rc)
		return rc;

	rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header, 0,
		NULL, NO_TIMEOUT);

	pqi_pci_unmap(ctrl_info->pci_dev, request.sg_descriptors, 1,
		pci_direction);

	return rc;
}
static int pqi_identify_physical_device(struct pqi_ctrl_info *ctrl_info,
	struct pqi_scsi_dev *device,
	struct bmic_identify_physical_device *buffer,
	size_t buffer_length)
{
	int rc;
	int pci_direction;
	u16 bmic_device_index;
	struct pqi_raid_path_request request;

	rc = pqi_build_raid_path_request(ctrl_info, &request,
		BMIC_IDENTIFY_PHYSICAL_DEVICE, RAID_CTLR_LUNID, buffer,
		buffer_length, 0, &pci_direction);
	if (rc)
		return rc;

	bmic_device_index = CISS_GET_DRIVE_NUMBER(device->scsi3addr);
	request.cdb[2] = (u8)bmic_device_index;
	request.cdb[9] = (u8)(bmic_device_index >> 8);

	rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header,
		0, NULL, NO_TIMEOUT);

	pqi_pci_unmap(ctrl_info->pci_dev, request.sg_descriptors, 1,
		pci_direction);

	return rc;
}
#define SA_CACHE_FLUSH_BUFFER_LENGTH	4
#define PQI_FLUSH_CACHE_TIMEOUT		(30 * 1000)

static int pqi_flush_cache(struct pqi_ctrl_info *ctrl_info)
{
	int rc;
	struct pqi_raid_path_request request;
	int pci_direction;
	u8 *buffer;

	/*
	 * Don't bother trying to flush the cache if the controller is
	 * locked up.
	 */
	if (pqi_ctrl_offline(ctrl_info))
		return -ENXIO;

	buffer = kzalloc(SA_CACHE_FLUSH_BUFFER_LENGTH, GFP_KERNEL);
	if (!buffer)
		return -ENOMEM;

	rc = pqi_build_raid_path_request(ctrl_info, &request,
		SA_CACHE_FLUSH, RAID_CTLR_LUNID, buffer,
		SA_CACHE_FLUSH_BUFFER_LENGTH, 0, &pci_direction);
	if (rc)
		goto out;

	rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header,
		0, NULL, PQI_FLUSH_CACHE_TIMEOUT);

	pqi_pci_unmap(ctrl_info->pci_dev, request.sg_descriptors, 1,
		pci_direction);

out:
	kfree(buffer);

	return rc;
}
static int pqi_write_host_wellness(struct pqi_ctrl_info *ctrl_info,
	void *buffer, size_t buffer_length)
{
	int rc;
	struct pqi_raid_path_request request;
	int pci_direction;

	rc = pqi_build_raid_path_request(ctrl_info, &request,
		BMIC_WRITE_HOST_WELLNESS, RAID_CTLR_LUNID, buffer,
		buffer_length, 0, &pci_direction);
	if (rc)
		return rc;

	rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header,
		0, NULL, NO_TIMEOUT);

	pqi_pci_unmap(ctrl_info->pci_dev, request.sg_descriptors, 1,
		pci_direction);

	return rc;
}
struct bmic_host_wellness_driver_version {
	u8	start_tag[4];
	u8	driver_version_tag[2];
	__le16	driver_version_length;
	char	driver_version[32];
	u8	end_tag[2];
};

static int pqi_write_driver_version_to_host_wellness(
	struct pqi_ctrl_info *ctrl_info)
{
	int rc;
	struct bmic_host_wellness_driver_version *buffer;
	size_t buffer_length;

	buffer_length = sizeof(*buffer);

	buffer = kmalloc(buffer_length, GFP_KERNEL);
	if (!buffer)
		return -ENOMEM;

	buffer->start_tag[0] = '<';
	buffer->start_tag[1] = 'H';
	buffer->start_tag[2] = 'W';
	buffer->start_tag[3] = '>';
	buffer->driver_version_tag[0] = 'D';
	buffer->driver_version_tag[1] = 'V';
	put_unaligned_le16(sizeof(buffer->driver_version),
		&buffer->driver_version_length);
	strncpy(buffer->driver_version, DRIVER_VERSION,
		sizeof(buffer->driver_version) - 1);
	buffer->driver_version[sizeof(buffer->driver_version) - 1] = '\0';
	buffer->end_tag[0] = 'Z';
	buffer->end_tag[1] = 'Z';

	rc = pqi_write_host_wellness(ctrl_info, buffer, buffer_length);

	kfree(buffer);

	return rc;
}
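
/*
 * The BMIC "host wellness" time buffer carries the current host time in
 * BCD-encoded fields (hour, minute, second, month, day, century, year),
 * framed by the same '<HW>' ... 'ZZ' tags used for the driver version.
 */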
struct bmic_host_wellness_time {
	u8	start_tag[4];
	u8	time_tag[2];
	__le16	time_length;
	u8	time[8];
	u8	dont_write_tag[2];
	u8	end_tag[2];
};

static int pqi_write_current_time_to_host_wellness(
	struct pqi_ctrl_info *ctrl_info)
{
	int rc;
	struct bmic_host_wellness_time *buffer;
	size_t buffer_length;
	time64_t local_time;
	unsigned int year;
	struct timeval time;
	struct rtc_time tm;

	buffer_length = sizeof(*buffer);

	buffer = kmalloc(buffer_length, GFP_KERNEL);
	if (!buffer)
		return -ENOMEM;

	buffer->start_tag[0] = '<';
	buffer->start_tag[1] = 'H';
	buffer->start_tag[2] = 'W';
	buffer->start_tag[3] = '>';
	buffer->time_tag[0] = 'T';
	buffer->time_tag[1] = 'D';
	put_unaligned_le16(sizeof(buffer->time),
		&buffer->time_length);

	do_gettimeofday(&time);
	local_time = time.tv_sec - (sys_tz.tz_minuteswest * 60);
	rtc_time64_to_tm(local_time, &tm);
	year = tm.tm_year + 1900;

	buffer->time[0] = bin2bcd(tm.tm_hour);
	buffer->time[1] = bin2bcd(tm.tm_min);
	buffer->time[2] = bin2bcd(tm.tm_sec);
	buffer->time[4] = bin2bcd(tm.tm_mon + 1);
	buffer->time[5] = bin2bcd(tm.tm_mday);
	buffer->time[6] = bin2bcd(year / 100);
	buffer->time[7] = bin2bcd(year % 100);

	buffer->dont_write_tag[0] = 'D';
	buffer->dont_write_tag[1] = 'W';
	buffer->end_tag[0] = 'Z';
	buffer->end_tag[1] = 'Z';

	rc = pqi_write_host_wellness(ctrl_info, buffer, buffer_length);

	kfree(buffer);

	return rc;
}
#define PQI_UPDATE_TIME_WORK_INTERVAL	(24UL * 60 * 60 * HZ)

static void pqi_update_time_worker(struct work_struct *work)
{
	int rc;
	struct pqi_ctrl_info *ctrl_info;

	ctrl_info = container_of(to_delayed_work(work), struct pqi_ctrl_info,
		update_time_work);
	if (!ctrl_info) {
		printk("%s: NULL controller pointer.\n", __func__);
		return;
	}

	rc = pqi_write_current_time_to_host_wellness(ctrl_info);
	if (rc)
		dev_warn(&ctrl_info->pci_dev->dev,
			"error updating time on controller\n");

	schedule_delayed_work(&ctrl_info->update_time_work,
		PQI_UPDATE_TIME_WORK_INTERVAL);
}

static inline void pqi_schedule_update_time_worker(
	struct pqi_ctrl_info *ctrl_info)
{
	schedule_delayed_work(&ctrl_info->update_time_work, 120);
}
static int pqi_report_luns(struct pqi_ctrl_info *ctrl_info, u8 cmd,
	void *buffer, size_t buffer_length)
{
	int rc;
	int pci_direction;
	struct pqi_raid_path_request request;

	rc = pqi_build_raid_path_request(ctrl_info, &request,
		cmd, RAID_CTLR_LUNID, buffer, buffer_length, 0, &pci_direction);
	if (rc)
		return rc;

	rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header, 0,
		NULL, NO_TIMEOUT);

	pqi_pci_unmap(ctrl_info->pci_dev, request.sg_descriptors, 1,
		pci_direction);

	return rc;
}
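
/*
 * Issue CISS_REPORT_PHYS/CISS_REPORT_LOG in two passes: a header-sized read
 * to learn the LUN list length, then a full-sized read.  If the list grew
 * between the two passes, the full-sized read is repeated with the new
 * length.
 */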
static int pqi_report_phys_logical_luns(struct pqi_ctrl_info *ctrl_info, u8 cmd,
	void **buffer)
{
	int rc;
	size_t lun_list_length;
	size_t lun_data_length;
	size_t new_lun_list_length;
	void *lun_data = NULL;
	struct report_lun_header *report_lun_header;

	report_lun_header = kmalloc(sizeof(*report_lun_header), GFP_KERNEL);
	if (!report_lun_header) {
		rc = -ENOMEM;
		goto out;
	}

	rc = pqi_report_luns(ctrl_info, cmd, report_lun_header,
		sizeof(*report_lun_header));
	if (rc)
		goto out;

	lun_list_length = get_unaligned_be32(&report_lun_header->list_length);

again:
	lun_data_length = sizeof(struct report_lun_header) + lun_list_length;

	lun_data = kmalloc(lun_data_length, GFP_KERNEL);
	if (!lun_data) {
		rc = -ENOMEM;
		goto out;
	}

	if (lun_list_length == 0) {
		memcpy(lun_data, report_lun_header, sizeof(*report_lun_header));
		goto out;
	}

	rc = pqi_report_luns(ctrl_info, cmd, lun_data, lun_data_length);
	if (rc)
		goto out;

	new_lun_list_length = get_unaligned_be32(
		&((struct report_lun_header *)lun_data)->list_length);

	if (new_lun_list_length > lun_list_length) {
		lun_list_length = new_lun_list_length;
		kfree(lun_data);
		goto again;
	}

out:
	kfree(report_lun_header);

	if (rc) {
		kfree(lun_data);
		lun_data = NULL;
	}

	*buffer = lun_data;

	return rc;
}
static inline int pqi_report_phys_luns(struct pqi_ctrl_info *ctrl_info,
	void **buffer)
{
	return pqi_report_phys_logical_luns(ctrl_info, CISS_REPORT_PHYS,
		buffer);
}

static inline int pqi_report_logical_luns(struct pqi_ctrl_info *ctrl_info,
	void **buffer)
{
	return pqi_report_phys_logical_luns(ctrl_info, CISS_REPORT_LOG, buffer);
}
static int pqi_get_device_lists(struct pqi_ctrl_info *ctrl_info,
	struct report_phys_lun_extended **physdev_list,
	struct report_log_lun_extended **logdev_list)
{
	int rc;
	size_t logdev_list_length;
	size_t logdev_data_length;
	struct report_log_lun_extended *internal_logdev_list;
	struct report_log_lun_extended *logdev_data;
	struct report_lun_header report_lun_header;

	rc = pqi_report_phys_luns(ctrl_info, (void **)physdev_list);
	if (rc)
		dev_err(&ctrl_info->pci_dev->dev,
			"report physical LUNs failed\n");

	rc = pqi_report_logical_luns(ctrl_info, (void **)logdev_list);
	if (rc)
		dev_err(&ctrl_info->pci_dev->dev,
			"report logical LUNs failed\n");

	/*
	 * Tack the controller itself onto the end of the logical device list.
	 */

	logdev_data = *logdev_list;

	if (logdev_data) {
		logdev_list_length =
			get_unaligned_be32(&logdev_data->header.list_length);
	} else {
		memset(&report_lun_header, 0, sizeof(report_lun_header));
		logdev_data =
			(struct report_log_lun_extended *)&report_lun_header;
		logdev_list_length = 0;
	}

	logdev_data_length = sizeof(struct report_lun_header) +
		logdev_list_length;

	internal_logdev_list = kmalloc(logdev_data_length +
		sizeof(struct report_log_lun_extended), GFP_KERNEL);
	if (!internal_logdev_list) {
		kfree(*logdev_list);
		*logdev_list = NULL;
		return -ENOMEM;
	}

	memcpy(internal_logdev_list, logdev_data, logdev_data_length);
	memset((u8 *)internal_logdev_list + logdev_data_length, 0,
		sizeof(struct report_log_lun_extended_entry));
	put_unaligned_be32(logdev_list_length +
		sizeof(struct report_log_lun_extended_entry),
		&internal_logdev_list->header.list_length);

	kfree(*logdev_list);
	*logdev_list = internal_logdev_list;

	return 0;
}
static inline void pqi_set_bus_target_lun(struct pqi_scsi_dev *device,
	int bus, int target, int lun)
{
	device->bus = bus;
	device->target = target;
	device->lun = lun;
}

static void pqi_assign_bus_target_lun(struct pqi_scsi_dev *device)
{
	u8 *scsi3addr;
	u32 lunid;

	scsi3addr = device->scsi3addr;
	lunid = get_unaligned_le32(scsi3addr);

	if (pqi_is_hba_lunid(scsi3addr)) {
		/* The specified device is the controller. */
		pqi_set_bus_target_lun(device, PQI_HBA_BUS, 0, lunid & 0x3fff);
		device->target_lun_valid = true;
		return;
	}

	if (pqi_is_logical_device(device)) {
		pqi_set_bus_target_lun(device, PQI_RAID_VOLUME_BUS, 0,
			lunid & 0x3fff);
		device->target_lun_valid = true;
		return;
	}

	/*
	 * Defer target and LUN assignment for non-controller physical devices
	 * because the SAS transport layer will make these assignments later.
	 */
	pqi_set_bus_target_lun(device, PQI_PHYSICAL_DEVICE_BUS, 0, 0);
}
static void pqi_get_raid_level(struct pqi_ctrl_info *ctrl_info,
	struct pqi_scsi_dev *device)
{
	int rc;
	u8 raid_level;
	u8 *buffer;

	raid_level = SA_RAID_UNKNOWN;

	buffer = kmalloc(64, GFP_KERNEL);
	if (buffer) {
		rc = pqi_scsi_inquiry(ctrl_info, device->scsi3addr,
			VPD_PAGE | CISS_VPD_LV_DEVICE_GEOMETRY, buffer, 64);
		if (rc == 0) {
			raid_level = buffer[8];
			if (raid_level > SA_RAID_MAX)
				raid_level = SA_RAID_UNKNOWN;
		}
		kfree(buffer);
	}

	device->raid_level = raid_level;
}
static int pqi_validate_raid_map(struct pqi_ctrl_info *ctrl_info,
	struct pqi_scsi_dev *device, struct raid_map *raid_map)
{
	char *err_msg;
	u32 raid_map_size;
	u32 r5or6_blocks_per_row;
	unsigned int num_phys_disks;
	unsigned int num_raid_map_entries;

	raid_map_size = get_unaligned_le32(&raid_map->structure_size);

	if (raid_map_size < offsetof(struct raid_map, disk_data)) {
		err_msg = "RAID map too small";
		goto bad_raid_map;
	}

	if (raid_map_size > sizeof(*raid_map)) {
		err_msg = "RAID map too large";
		goto bad_raid_map;
	}

	num_phys_disks = get_unaligned_le16(&raid_map->layout_map_count) *
		(get_unaligned_le16(&raid_map->data_disks_per_row) +
		get_unaligned_le16(&raid_map->metadata_disks_per_row));
	num_raid_map_entries = num_phys_disks *
		get_unaligned_le16(&raid_map->row_cnt);

	if (num_raid_map_entries > RAID_MAP_MAX_ENTRIES) {
		err_msg = "invalid number of map entries in RAID map";
		goto bad_raid_map;
	}

	if (device->raid_level == SA_RAID_1) {
		if (get_unaligned_le16(&raid_map->layout_map_count) != 2) {
			err_msg = "invalid RAID-1 map";
			goto bad_raid_map;
		}
	} else if (device->raid_level == SA_RAID_ADM) {
		if (get_unaligned_le16(&raid_map->layout_map_count) != 3) {
			err_msg = "invalid RAID-1(ADM) map";
			goto bad_raid_map;
		}
	} else if ((device->raid_level == SA_RAID_5 ||
		device->raid_level == SA_RAID_6) &&
		get_unaligned_le16(&raid_map->layout_map_count) > 1) {
		/* RAID 50/60 */
		r5or6_blocks_per_row =
			get_unaligned_le16(&raid_map->strip_size) *
			get_unaligned_le16(&raid_map->data_disks_per_row);
		if (r5or6_blocks_per_row == 0) {
			err_msg = "invalid RAID-5 or RAID-6 map";
			goto bad_raid_map;
		}
	}

	return 0;

bad_raid_map:
	dev_warn(&ctrl_info->pci_dev->dev, "%s\n", err_msg);

	return -EINVAL;
}
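
/*
 * Fetch the RAID map for a logical volume and sanity-check it before it is
 * used for RAID bypass (AIO) request routing.
 */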
static int pqi_get_raid_map(struct pqi_ctrl_info *ctrl_info,
	struct pqi_scsi_dev *device)
{
	int rc;
	int pci_direction;
	struct pqi_raid_path_request request;
	struct raid_map *raid_map;

	raid_map = kmalloc(sizeof(*raid_map), GFP_KERNEL);
	if (!raid_map)
		return -ENOMEM;

	rc = pqi_build_raid_path_request(ctrl_info, &request,
		CISS_GET_RAID_MAP, device->scsi3addr, raid_map,
		sizeof(*raid_map), 0, &pci_direction);
	if (rc)
		goto error;

	rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header, 0,
		NULL, NO_TIMEOUT);

	pqi_pci_unmap(ctrl_info->pci_dev, request.sg_descriptors, 1,
		pci_direction);

	if (rc)
		goto error;

	rc = pqi_validate_raid_map(ctrl_info, device, raid_map);
	if (rc)
		goto error;

	device->raid_map = raid_map;

	return 0;

error:
	kfree(raid_map);

	return rc;
}
static void pqi_get_offload_status(struct pqi_ctrl_info *ctrl_info,
	struct pqi_scsi_dev *device)
{
	int rc;
	u8 *buffer;
	u8 offload_status;

	buffer = kmalloc(64, GFP_KERNEL);
	if (!buffer)
		return;

	rc = pqi_scsi_inquiry(ctrl_info, device->scsi3addr,
		VPD_PAGE | CISS_VPD_LV_OFFLOAD_STATUS, buffer, 64);
	if (rc)
		goto out;

#define OFFLOAD_STATUS_BYTE	4
#define OFFLOAD_CONFIGURED_BIT	0x1
#define OFFLOAD_ENABLED_BIT	0x2

	offload_status = buffer[OFFLOAD_STATUS_BYTE];
	device->offload_configured =
		!!(offload_status & OFFLOAD_CONFIGURED_BIT);
	if (device->offload_configured) {
		device->offload_enabled_pending =
			!!(offload_status & OFFLOAD_ENABLED_BIT);
		if (pqi_get_raid_map(ctrl_info, device))
			device->offload_enabled_pending = false;
	}

out:
	kfree(buffer);
}
/*
 * Use vendor-specific VPD to determine online/offline status of a volume.
 */
static void pqi_get_volume_status(struct pqi_ctrl_info *ctrl_info,
	struct pqi_scsi_dev *device)
{
	int rc;
	size_t page_length;
	u8 volume_status = CISS_LV_STATUS_UNAVAILABLE;
	bool volume_offline = true;
	u32 volume_flags;
	struct ciss_vpd_logical_volume_status *vpd;

	vpd = kmalloc(sizeof(*vpd), GFP_KERNEL);
	if (!vpd)
		goto no_buffer;

	rc = pqi_scsi_inquiry(ctrl_info, device->scsi3addr,
		VPD_PAGE | CISS_VPD_LV_STATUS, vpd, sizeof(*vpd));
	if (rc)
		goto out;

	page_length = offsetof(struct ciss_vpd_logical_volume_status,
		volume_status) + vpd->page_length;
	if (page_length < sizeof(*vpd))
		goto out;

	volume_status = vpd->volume_status;
	volume_flags = get_unaligned_be32(&vpd->flags);
	volume_offline = (volume_flags & CISS_LV_FLAGS_NO_HOST_IO) != 0;

out:
	kfree(vpd);
no_buffer:
	device->volume_status = volume_status;
	device->volume_offline = volume_offline;
}
static int pqi_get_device_info(struct pqi_ctrl_info *ctrl_info,
	struct pqi_scsi_dev *device)
{
	int rc;
	u8 *buffer;

	buffer = kmalloc(64, GFP_KERNEL);
	if (!buffer)
		return -ENOMEM;

	/* Send an inquiry to the device to see what it is. */
	rc = pqi_scsi_inquiry(ctrl_info, device->scsi3addr, 0, buffer, 64);
	if (rc)
		goto out;

	scsi_sanitize_inquiry_string(&buffer[8], 8);
	scsi_sanitize_inquiry_string(&buffer[16], 16);

	device->devtype = buffer[0] & 0x1f;
	memcpy(device->vendor, &buffer[8],
		sizeof(device->vendor));
	memcpy(device->model, &buffer[16],
		sizeof(device->model));

	if (pqi_is_logical_device(device) && device->devtype == TYPE_DISK) {
		pqi_get_raid_level(ctrl_info, device);
		pqi_get_offload_status(ctrl_info, device);
		pqi_get_volume_status(ctrl_info, device);
	}

out:
	kfree(buffer);

	return rc;
}
static void pqi_get_physical_disk_info(struct pqi_ctrl_info *ctrl_info,
	struct pqi_scsi_dev *device,
	struct bmic_identify_physical_device *id_phys)
{
	int rc;

	memset(id_phys, 0, sizeof(*id_phys));

	rc = pqi_identify_physical_device(ctrl_info, device,
		id_phys, sizeof(*id_phys));
	if (rc) {
		device->queue_depth = PQI_PHYSICAL_DISK_DEFAULT_MAX_QUEUE_DEPTH;
		return;
	}

	device->queue_depth =
		get_unaligned_le16(&id_phys->current_queue_depth_limit);
	device->device_type = id_phys->device_type;
	device->active_path_index = id_phys->active_path_number;
	device->path_map = id_phys->redundant_path_present_map;
	memcpy(&device->box,
		&id_phys->alternate_paths_phys_box_on_port,
		sizeof(device->box));
	memcpy(&device->phys_connector,
		&id_phys->alternate_paths_phys_connector,
		sizeof(device->phys_connector));
	device->bay = id_phys->phys_bay_in_box;
}
static void pqi_show_volume_status(struct pqi_ctrl_info *ctrl_info,
	struct pqi_scsi_dev *device)
{
	char *status;
	static const char unknown_state_str[] =
		"Volume is in an unknown state (%u)";
	char unknown_state_buffer[sizeof(unknown_state_str) + 10];

	switch (device->volume_status) {
	case CISS_LV_OK:
		status = "Volume online";
		break;
	case CISS_LV_FAILED:
		status = "Volume failed";
		break;
	case CISS_LV_NOT_CONFIGURED:
		status = "Volume not configured";
		break;
	case CISS_LV_DEGRADED:
		status = "Volume degraded";
		break;
	case CISS_LV_READY_FOR_RECOVERY:
		status = "Volume ready for recovery operation";
		break;
	case CISS_LV_UNDERGOING_RECOVERY:
		status = "Volume undergoing recovery";
		break;
	case CISS_LV_WRONG_PHYSICAL_DRIVE_REPLACED:
		status = "Wrong physical drive was replaced";
		break;
	case CISS_LV_PHYSICAL_DRIVE_CONNECTION_PROBLEM:
		status = "A physical drive not properly connected";
		break;
	case CISS_LV_HARDWARE_OVERHEATING:
		status = "Hardware is overheating";
		break;
	case CISS_LV_HARDWARE_HAS_OVERHEATED:
		status = "Hardware has overheated";
		break;
	case CISS_LV_UNDERGOING_EXPANSION:
		status = "Volume undergoing expansion";
		break;
	case CISS_LV_NOT_AVAILABLE:
		status = "Volume waiting for transforming volume";
		break;
	case CISS_LV_QUEUED_FOR_EXPANSION:
		status = "Volume queued for expansion";
		break;
	case CISS_LV_DISABLED_SCSI_ID_CONFLICT:
		status = "Volume disabled due to SCSI ID conflict";
		break;
	case CISS_LV_EJECTED:
		status = "Volume has been ejected";
		break;
	case CISS_LV_UNDERGOING_ERASE:
		status = "Volume undergoing background erase";
		break;
	case CISS_LV_READY_FOR_PREDICTIVE_SPARE_REBUILD:
		status = "Volume ready for predictive spare rebuild";
		break;
	case CISS_LV_UNDERGOING_RPI:
		status = "Volume undergoing rapid parity initialization";
		break;
	case CISS_LV_PENDING_RPI:
		status = "Volume queued for rapid parity initialization";
		break;
	case CISS_LV_ENCRYPTED_NO_KEY:
		status = "Encrypted volume inaccessible - key not present";
		break;
	case CISS_LV_UNDERGOING_ENCRYPTION:
		status = "Volume undergoing encryption process";
		break;
	case CISS_LV_UNDERGOING_ENCRYPTION_REKEYING:
		status = "Volume undergoing encryption re-keying process";
		break;
	case CISS_LV_ENCRYPTED_IN_NON_ENCRYPTED_CONTROLLER:
		status = "Encrypted volume inaccessible - disabled on ctrl";
		break;
	case CISS_LV_PENDING_ENCRYPTION:
		status = "Volume pending migration to encrypted state";
		break;
	case CISS_LV_PENDING_ENCRYPTION_REKEYING:
		status = "Volume pending encryption rekeying";
		break;
	case CISS_LV_NOT_SUPPORTED:
		status = "Volume not supported on this controller";
		break;
	case CISS_LV_STATUS_UNAVAILABLE:
		status = "Volume status not available";
		break;
	default:
		snprintf(unknown_state_buffer, sizeof(unknown_state_buffer),
			unknown_state_str, device->volume_status);
		status = unknown_state_buffer;
		break;
	}

	dev_info(&ctrl_info->pci_dev->dev,
		"scsi %d:%d:%d:%d %s\n",
		ctrl_info->scsi_host->host_no,
		device->bus, device->target, device->lun, status);
}
static struct pqi_scsi_dev *pqi_find_disk_by_aio_handle(
	struct pqi_ctrl_info *ctrl_info, u32 aio_handle)
{
	struct pqi_scsi_dev *device;

	list_for_each_entry(device, &ctrl_info->scsi_device_list,
		scsi_device_list_entry) {
		if (device->devtype != TYPE_DISK && device->devtype != TYPE_ZBC)
			continue;
		if (pqi_is_logical_device(device))
			continue;
		if (device->aio_handle == aio_handle)
			return device;
	}

	return NULL;
}
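
/*
 * A logical drive's queue depth is the sum of the queue depths of the
 * physical disks referenced by its RAID map; fall back to the default if
 * any member disk cannot be found.
 */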
static void pqi_update_logical_drive_queue_depth(
	struct pqi_ctrl_info *ctrl_info, struct pqi_scsi_dev *logical_drive)
{
	unsigned int i;
	struct raid_map *raid_map;
	struct raid_map_disk_data *disk_data;
	struct pqi_scsi_dev *phys_disk;
	unsigned int num_phys_disks;
	unsigned int num_raid_map_entries;
	unsigned int queue_depth;

	logical_drive->queue_depth = PQI_LOGICAL_DRIVE_DEFAULT_MAX_QUEUE_DEPTH;

	raid_map = logical_drive->raid_map;
	if (!raid_map)
		return;

	disk_data = raid_map->disk_data;
	num_phys_disks = get_unaligned_le16(&raid_map->layout_map_count) *
		(get_unaligned_le16(&raid_map->data_disks_per_row) +
		get_unaligned_le16(&raid_map->metadata_disks_per_row));
	num_raid_map_entries = num_phys_disks *
		get_unaligned_le16(&raid_map->row_cnt);

	queue_depth = 0;
	for (i = 0; i < num_raid_map_entries; i++) {
		phys_disk = pqi_find_disk_by_aio_handle(ctrl_info,
			disk_data[i].aio_handle);
		if (!phys_disk) {
			dev_warn(&ctrl_info->pci_dev->dev,
				"failed to find physical disk for logical drive %016llx\n",
				get_unaligned_be64(logical_drive->scsi3addr));
			logical_drive->offload_enabled = false;
			logical_drive->offload_enabled_pending = false;
			kfree(raid_map);
			logical_drive->raid_map = NULL;
			return;
		}

		queue_depth += phys_disk->queue_depth;
	}

	logical_drive->queue_depth = queue_depth;
}
static void pqi_update_all_logical_drive_queue_depths(
	struct pqi_ctrl_info *ctrl_info)
{
	struct pqi_scsi_dev *device;

	list_for_each_entry(device, &ctrl_info->scsi_device_list,
		scsi_device_list_entry) {
		if (device->devtype != TYPE_DISK && device->devtype != TYPE_ZBC)
			continue;
		if (!pqi_is_logical_device(device))
			continue;
		pqi_update_logical_drive_queue_depth(ctrl_info, device);
	}
}
static void pqi_rescan_worker(struct work_struct *work)
{
	struct pqi_ctrl_info *ctrl_info;

	ctrl_info = container_of(to_delayed_work(work), struct pqi_ctrl_info,
		rescan_work);

	pqi_scan_scsi_devices(ctrl_info);
}
static int pqi_add_device(struct pqi_ctrl_info *ctrl_info,
	struct pqi_scsi_dev *device)
{
	int rc;

	if (pqi_is_logical_device(device))
		rc = scsi_add_device(ctrl_info->scsi_host, device->bus,
			device->target, device->lun);
	else
		rc = pqi_add_sas_device(ctrl_info->sas_host, device);

	return rc;
}

static inline void pqi_remove_device(struct pqi_ctrl_info *ctrl_info,
	struct pqi_scsi_dev *device)
{
	if (pqi_is_logical_device(device))
		scsi_remove_device(device->sdev);
	else
		pqi_remove_sas_device(device);
}
/* Assumes the SCSI device list lock is held. */

static struct pqi_scsi_dev *pqi_find_scsi_dev(struct pqi_ctrl_info *ctrl_info,
	int bus, int target, int lun)
{
	struct pqi_scsi_dev *device;

	list_for_each_entry(device, &ctrl_info->scsi_device_list,
		scsi_device_list_entry)
		if (device->bus == bus && device->target == target &&
			device->lun == lun)
			return device;

	return NULL;
}
static inline bool pqi_device_equal(struct pqi_scsi_dev *dev1,
	struct pqi_scsi_dev *dev2)
{
	if (dev1->is_physical_device != dev2->is_physical_device)
		return false;

	if (dev1->is_physical_device)
		return dev1->wwid == dev2->wwid;

	return memcmp(dev1->volume_id, dev2->volume_id,
		sizeof(dev1->volume_id)) == 0;
}
enum pqi_find_result {
	DEVICE_NOT_FOUND,
	DEVICE_CHANGED,
	DEVICE_SAME,
};

static enum pqi_find_result pqi_scsi_find_entry(struct pqi_ctrl_info *ctrl_info,
	struct pqi_scsi_dev *device_to_find,
	struct pqi_scsi_dev **matching_device)
{
	struct pqi_scsi_dev *device;

	list_for_each_entry(device, &ctrl_info->scsi_device_list,
		scsi_device_list_entry) {
		if (pqi_scsi3addr_equal(device_to_find->scsi3addr,
			device->scsi3addr)) {
			*matching_device = device;
			if (pqi_device_equal(device_to_find, device)) {
				if (device_to_find->volume_offline)
					return DEVICE_CHANGED;
				return DEVICE_SAME;
			}
			return DEVICE_CHANGED;
		}
	}

	return DEVICE_NOT_FOUND;
}
static void pqi_dev_info(struct pqi_ctrl_info *ctrl_info,
	char *action, struct pqi_scsi_dev *device)
{
	dev_info(&ctrl_info->pci_dev->dev,
		"%s scsi %d:%d:%d:%d: %s %.8s %.16s %-12s SSDSmartPathCap%c En%c Exp%c qd=%d\n",
		action,
		ctrl_info->scsi_host->host_no,
		device->bus,
		device->target,
		device->lun,
		scsi_device_type(device->devtype),
		device->vendor,
		device->model,
		pqi_raid_level_to_string(device->raid_level),
		device->offload_configured ? '+' : '-',
		device->offload_enabled_pending ? '+' : '-',
		device->expose_device ? '+' : '-',
		device->queue_depth);
}
/* Assumes the SCSI device list lock is held. */

static void pqi_scsi_update_device(struct pqi_scsi_dev *existing_device,
	struct pqi_scsi_dev *new_device)
{
	existing_device->devtype = new_device->devtype;
	existing_device->device_type = new_device->device_type;
	existing_device->bus = new_device->bus;
	if (new_device->target_lun_valid) {
		existing_device->target = new_device->target;
		existing_device->lun = new_device->lun;
		existing_device->target_lun_valid = true;
	}

	/* By definition, the scsi3addr and wwid fields are already the same. */

	existing_device->is_physical_device = new_device->is_physical_device;
	existing_device->expose_device = new_device->expose_device;
	existing_device->no_uld_attach = new_device->no_uld_attach;
	existing_device->aio_enabled = new_device->aio_enabled;
	memcpy(existing_device->vendor, new_device->vendor,
		sizeof(existing_device->vendor));
	memcpy(existing_device->model, new_device->model,
		sizeof(existing_device->model));
	existing_device->sas_address = new_device->sas_address;
	existing_device->raid_level = new_device->raid_level;
	existing_device->queue_depth = new_device->queue_depth;
	existing_device->aio_handle = new_device->aio_handle;
	existing_device->volume_status = new_device->volume_status;
	existing_device->active_path_index = new_device->active_path_index;
	existing_device->path_map = new_device->path_map;
	existing_device->bay = new_device->bay;
	memcpy(existing_device->box, new_device->box,
		sizeof(existing_device->box));
	memcpy(existing_device->phys_connector, new_device->phys_connector,
		sizeof(existing_device->phys_connector));
	existing_device->offload_configured = new_device->offload_configured;
	existing_device->offload_enabled = false;
	existing_device->offload_enabled_pending =
		new_device->offload_enabled_pending;
	existing_device->offload_to_mirror = 0;
	kfree(existing_device->raid_map);
	existing_device->raid_map = new_device->raid_map;

	/* To prevent this from being freed later. */
	new_device->raid_map = NULL;
}

static inline void pqi_free_device(struct pqi_scsi_dev *device)
{
	if (device) {
		kfree(device->raid_map);
		kfree(device);
	}
}
/*
 * Called when exposing a new device to the OS fails in order to re-adjust
 * our internal SCSI device list to match the SCSI ML's view.
 */

static inline void pqi_fixup_botched_add(struct pqi_ctrl_info *ctrl_info,
	struct pqi_scsi_dev *device)
{
	unsigned long flags;

	spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);
	list_del(&device->scsi_device_list_entry);
	spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);

	/* Allow the device structure to be freed later. */
	device->keep_device = false;
}
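
/*
 * Reconcile the driver's internal device list with a freshly discovered
 * device list: devices are matched by SCSI3 address, updated in place when
 * unchanged, queued for removal when gone, and queued for exposure when new.
 */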
static void pqi_update_device_list(struct pqi_ctrl_info *ctrl_info,
	struct pqi_scsi_dev *new_device_list[], unsigned int num_new_devices)
{
	int rc;
	unsigned int i;
	unsigned long flags;
	enum pqi_find_result find_result;
	struct pqi_scsi_dev *device;
	struct pqi_scsi_dev *next;
	struct pqi_scsi_dev *matching_device;
	struct list_head add_list;
	struct list_head delete_list;

	INIT_LIST_HEAD(&add_list);
	INIT_LIST_HEAD(&delete_list);

	/*
	 * The idea here is to do as little work as possible while holding the
	 * spinlock.  That's why we go to great pains to defer anything other
	 * than updating the internal device list until after we release the
	 * spinlock.
	 */

	spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);

	/* Assume that all devices in the existing list have gone away. */
	list_for_each_entry(device, &ctrl_info->scsi_device_list,
		scsi_device_list_entry)
		device->device_gone = true;

	for (i = 0; i < num_new_devices; i++) {
		device = new_device_list[i];

		find_result = pqi_scsi_find_entry(ctrl_info, device,
			&matching_device);

		switch (find_result) {
		case DEVICE_SAME:
			/*
			 * The newly found device is already in the existing
			 * device list.
			 */
			device->new_device = false;
			matching_device->device_gone = false;
			pqi_scsi_update_device(matching_device, device);
			break;
		case DEVICE_NOT_FOUND:
			/*
			 * The newly found device is NOT in the existing device
			 * list.
			 */
			device->new_device = true;
			break;
		case DEVICE_CHANGED:
			/*
			 * The original device has gone away and we need to add
			 * the new device.
			 */
			device->new_device = true;
			break;
		default:
			WARN_ON(find_result);
			break;
		}
	}

	/* Process all devices that have gone away. */
	list_for_each_entry_safe(device, next, &ctrl_info->scsi_device_list,
		scsi_device_list_entry) {
		if (device->device_gone) {
			list_del(&device->scsi_device_list_entry);
			list_add_tail(&device->delete_list_entry, &delete_list);
		}
	}

	/* Process all new devices. */
	for (i = 0; i < num_new_devices; i++) {
		device = new_device_list[i];
		if (!device->new_device)
			continue;
		if (device->volume_offline)
			continue;
		list_add_tail(&device->scsi_device_list_entry,
			&ctrl_info->scsi_device_list);
		list_add_tail(&device->add_list_entry, &add_list);
		/* To prevent this device structure from being freed later. */
		device->keep_device = true;
	}

	pqi_update_all_logical_drive_queue_depths(ctrl_info);

	list_for_each_entry(device, &ctrl_info->scsi_device_list,
		scsi_device_list_entry)
		device->offload_enabled =
			device->offload_enabled_pending;

	spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);

	/* Remove all devices that have gone away. */
	list_for_each_entry_safe(device, next, &delete_list,
		delete_list_entry) {
		if (device->sdev)
			pqi_remove_device(ctrl_info, device);
		if (device->volume_offline) {
			pqi_dev_info(ctrl_info, "offline", device);
			pqi_show_volume_status(ctrl_info, device);
		} else {
			pqi_dev_info(ctrl_info, "removed", device);
		}
		list_del(&device->delete_list_entry);
		pqi_free_device(device);
	}

	/*
	 * Notify the SCSI ML if the queue depth of any existing device has
	 * changed.
	 */
	list_for_each_entry(device, &ctrl_info->scsi_device_list,
		scsi_device_list_entry) {
		if (device->sdev && device->queue_depth !=
			device->advertised_queue_depth) {
			device->advertised_queue_depth = device->queue_depth;
			scsi_change_queue_depth(device->sdev,
				device->advertised_queue_depth);
		}
	}

	/* Expose any new devices. */
	list_for_each_entry_safe(device, next, &add_list, add_list_entry) {
		if (device->expose_device && !device->sdev) {
			rc = pqi_add_device(ctrl_info, device);
			if (rc) {
				dev_warn(&ctrl_info->pci_dev->dev,
					"scsi %d:%d:%d:%d addition failed, device not added\n",
					ctrl_info->scsi_host->host_no,
					device->bus, device->target,
					device->lun);
				pqi_fixup_botched_add(ctrl_info, device);
				continue;
			}
		}
		pqi_dev_info(ctrl_info, "added", device);
	}
}
static bool pqi_is_supported_device(struct pqi_scsi_dev *device)
{
	bool is_supported = false;

	switch (device->devtype) {
	case TYPE_DISK:
	case TYPE_ZBC:
	case TYPE_TAPE:
	case TYPE_MEDIUM_CHANGER:
	case TYPE_ENCLOSURE:
		is_supported = true;
		break;
	case TYPE_RAID:
		/*
		 * Only support the HBA controller itself as a RAID
		 * controller.  If it's a RAID controller other than
		 * the HBA itself (an external RAID controller, MSA500
		 * or similar), we don't support it.
		 */
		if (pqi_is_hba_lunid(device->scsi3addr))
			is_supported = true;
		break;
	}

	return is_supported;
}
static inline bool pqi_skip_device(u8 *scsi3addr,
	struct report_phys_lun_extended_entry *phys_lun_ext_entry)
{
	u8 device_flags;

	if (!MASKED_DEVICE(scsi3addr))
		return false;

	/* The device is masked. */

	device_flags = phys_lun_ext_entry->device_flags;

	if (device_flags & REPORT_PHYS_LUN_DEV_FLAG_NON_DISK) {
		/*
		 * It's a non-disk device.  We ignore all devices of this type
		 * when they're masked.
		 */
		return true;
	}

	return false;
}
static inline bool pqi_expose_device(struct pqi_scsi_dev *device)
{
	/* Expose all devices except for physical devices that are masked. */
	if (device->is_physical_device && MASKED_DEVICE(device->scsi3addr))
		return false;

	return true;
}
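
/*
 * Rebuild the set of known devices from the controller's physical and
 * logical LUN reports, then hand the combined list to
 * pqi_update_device_list() for reconciliation with the OS view.
 */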
static int pqi_update_scsi_devices(struct pqi_ctrl_info *ctrl_info)
{
	int i;
	int rc;
	struct list_head new_device_list_head;
	struct report_phys_lun_extended *physdev_list = NULL;
	struct report_log_lun_extended *logdev_list = NULL;
	struct report_phys_lun_extended_entry *phys_lun_ext_entry;
	struct report_log_lun_extended_entry *log_lun_ext_entry;
	struct bmic_identify_physical_device *id_phys = NULL;
	unsigned int num_physicals;
	unsigned int num_logicals;
	struct pqi_scsi_dev **new_device_list = NULL;
	struct pqi_scsi_dev *device;
	struct pqi_scsi_dev *next;
	unsigned int num_new_devices;
	unsigned int num_valid_devices;
	bool is_physical_device;
	u8 *scsi3addr;
	static char *out_of_memory_msg =
		"out of memory, device discovery stopped";

	INIT_LIST_HEAD(&new_device_list_head);

	rc = pqi_get_device_lists(ctrl_info, &physdev_list, &logdev_list);
	if (rc)
		goto out;

	if (physdev_list)
		num_physicals =
			get_unaligned_be32(&physdev_list->header.list_length)
				/ sizeof(physdev_list->lun_entries[0]);
	else
		num_physicals = 0;

	if (logdev_list)
		num_logicals =
			get_unaligned_be32(&logdev_list->header.list_length)
				/ sizeof(logdev_list->lun_entries[0]);
	else
		num_logicals = 0;

	if (num_physicals) {
		/*
		 * We need this buffer for calls to pqi_get_physical_disk_info()
		 * below.  We allocate it here instead of inside
		 * pqi_get_physical_disk_info() because it's a fairly large
		 * buffer.
		 */
		id_phys = kmalloc(sizeof(*id_phys), GFP_KERNEL);
		if (!id_phys) {
			dev_warn(&ctrl_info->pci_dev->dev, "%s\n",
				out_of_memory_msg);
			rc = -ENOMEM;
			goto out;
		}
	}

	num_new_devices = num_physicals + num_logicals;

	new_device_list = kmalloc(sizeof(*new_device_list) *
		num_new_devices, GFP_KERNEL);
	if (!new_device_list) {
		dev_warn(&ctrl_info->pci_dev->dev, "%s\n", out_of_memory_msg);
		rc = -ENOMEM;
		goto out;
	}

	for (i = 0; i < num_new_devices; i++) {
		device = kzalloc(sizeof(*device), GFP_KERNEL);
		if (!device) {
			dev_warn(&ctrl_info->pci_dev->dev, "%s\n",
				out_of_memory_msg);
			rc = -ENOMEM;
			goto out;
		}
		list_add_tail(&device->new_device_list_entry,
			&new_device_list_head);
	}

	device = NULL;
	num_valid_devices = 0;

	for (i = 0; i < num_new_devices; i++) {

		if (i < num_physicals) {
			is_physical_device = true;
			phys_lun_ext_entry = &physdev_list->lun_entries[i];
			log_lun_ext_entry = NULL;
			scsi3addr = phys_lun_ext_entry->lunid;
		} else {
			is_physical_device = false;
			phys_lun_ext_entry = NULL;
			log_lun_ext_entry =
				&logdev_list->lun_entries[i - num_physicals];
			scsi3addr = log_lun_ext_entry->lunid;
		}

		if (is_physical_device &&
			pqi_skip_device(scsi3addr, phys_lun_ext_entry))
			continue;

		if (device)
			device = list_next_entry(device, new_device_list_entry);
		else
			device = list_first_entry(&new_device_list_head,
				struct pqi_scsi_dev, new_device_list_entry);

		memcpy(device->scsi3addr, scsi3addr, sizeof(device->scsi3addr));
		device->is_physical_device = is_physical_device;
		device->raid_level = SA_RAID_UNKNOWN;

		/* Gather information about the device. */
		rc = pqi_get_device_info(ctrl_info, device);
		if (rc == -ENOMEM) {
			dev_warn(&ctrl_info->pci_dev->dev, "%s\n",
				out_of_memory_msg);
			goto out;
		}
		if (rc) {
			dev_warn(&ctrl_info->pci_dev->dev,
				"obtaining device info failed, skipping device %016llx\n",
				get_unaligned_be64(device->scsi3addr));
			rc = 0;
			continue;
		}

		if (!pqi_is_supported_device(device))
			continue;

		pqi_assign_bus_target_lun(device);

		device->expose_device = pqi_expose_device(device);

		if (device->is_physical_device) {
			device->wwid = phys_lun_ext_entry->wwid;
			if ((phys_lun_ext_entry->device_flags &
				REPORT_PHYS_LUN_DEV_FLAG_AIO_ENABLED) &&
				phys_lun_ext_entry->aio_handle)
				device->aio_enabled = true;
		} else {
			memcpy(device->volume_id, log_lun_ext_entry->volume_id,
				sizeof(device->volume_id));
		}

		switch (device->devtype) {
		case TYPE_DISK:
		case TYPE_ZBC:
		case TYPE_ENCLOSURE:
			if (device->is_physical_device) {
				device->sas_address =
					get_unaligned_be64(&device->wwid);
				if (device->devtype == TYPE_DISK ||
					device->devtype == TYPE_ZBC) {
					device->aio_handle =
						phys_lun_ext_entry->aio_handle;
					pqi_get_physical_disk_info(ctrl_info,
						device, id_phys);
				}
			}
			break;
		}

		new_device_list[num_valid_devices++] = device;
	}

	pqi_update_device_list(ctrl_info, new_device_list, num_valid_devices);

out:
	list_for_each_entry_safe(device, next, &new_device_list_head,
		new_device_list_entry) {
		if (device->keep_device)
			continue;
		list_del(&device->new_device_list_entry);
		pqi_free_device(device);
	}

	kfree(new_device_list);
	kfree(physdev_list);
	kfree(logdev_list);
	kfree(id_phys);

	return rc;
}
static void pqi_remove_all_scsi_devices(struct pqi_ctrl_info *ctrl_info)
{
	unsigned long flags;
	struct pqi_scsi_dev *device;
	struct pqi_scsi_dev *next;

	spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);

	list_for_each_entry_safe(device, next, &ctrl_info->scsi_device_list,
		scsi_device_list_entry) {
		if (device->sdev)
			pqi_remove_device(ctrl_info, device);
		list_del(&device->scsi_device_list_entry);
		pqi_free_device(device);
	}

	spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
}
static int pqi_scan_scsi_devices(struct pqi_ctrl_info *ctrl_info)
{
	int rc;

	if (pqi_ctrl_offline(ctrl_info))
		return -ENXIO;

	mutex_lock(&ctrl_info->scan_mutex);

	rc = pqi_update_scsi_devices(ctrl_info);
	if (rc)
		pqi_schedule_rescan_worker(ctrl_info);

	mutex_unlock(&ctrl_info->scan_mutex);

	return rc;
}

static void pqi_scan_start(struct Scsi_Host *shost)
{
	pqi_scan_scsi_devices(shost_to_hba(shost));
}

/* Returns TRUE if scan is finished. */

static int pqi_scan_finished(struct Scsi_Host *shost,
	unsigned long elapsed_time)
{
	struct pqi_ctrl_info *ctrl_info;

	ctrl_info = shost_priv(shost);

	return !mutex_is_locked(&ctrl_info->scan_mutex);
}
static inline void pqi_set_encryption_info(
	struct pqi_encryption_info *encryption_info, struct raid_map *raid_map,
	u64 first_block)
{
	u32 volume_blk_size;

	/*
	 * Set the encryption tweak values based on logical block address.
	 * If the block size is 512, the tweak value is equal to the LBA.
	 * For other block sizes, tweak value is (LBA * block size) / 512.
	 */
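	/*
	 * Worked example (illustrative numbers only): with a 4096-byte
	 * volume block size, LBA 100 becomes a tweak of
	 * (100 * 4096) / 512 = 800.
	 */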
	volume_blk_size = get_unaligned_le32(&raid_map->volume_blk_size);
	if (volume_blk_size != 512)
		first_block = (first_block * volume_blk_size) / 512;

	encryption_info->data_encryption_key_index =
		get_unaligned_le16(&raid_map->data_encryption_key_index);
	encryption_info->encrypt_tweak_lower = lower_32_bits(first_block);
	encryption_info->encrypt_tweak_upper = upper_32_bits(first_block);
}
/*
 * Attempt to perform offload RAID mapping for a logical volume I/O.
 */

#define PQI_RAID_BYPASS_INELIGIBLE	1

static int pqi_raid_bypass_submit_scsi_cmd(struct pqi_ctrl_info *ctrl_info,
	struct pqi_scsi_dev *device, struct scsi_cmnd *scmd,
	struct pqi_queue_group *queue_group)
{
	struct raid_map *raid_map;
	bool is_write = false;
	u32 map_index;
	u64 first_block;
	u64 last_block;
	u32 block_cnt;
	u32 blocks_per_row;
	u64 first_row;
	u64 last_row;
	u32 first_row_offset;
	u32 last_row_offset;
	u32 first_column;
	u32 last_column;
	u64 r0_first_row;
	u64 r0_last_row;
	u32 r5or6_blocks_per_row;
	u64 r5or6_first_row;
	u64 r5or6_last_row;
	u32 r5or6_first_row_offset;
	u32 r5or6_last_row_offset;
	u32 r5or6_first_column;
	u32 r5or6_last_column;
	u16 data_disks_per_row;
	u32 total_disks_per_row;
	u16 layout_map_count;
	u32 stripesize;
	u16 strip_size;
	u32 first_group;
	u32 last_group;
	u32 current_group;
	u32 map_row;
	u32 aio_handle;
	u64 disk_block;
	u32 disk_block_cnt;
	u8 cdb[16];
	u8 cdb_length;
	int offload_to_mirror;
	struct pqi_encryption_info *encryption_info_ptr;
	struct pqi_encryption_info encryption_info;
#if BITS_PER_LONG == 32
	u64 tmpdiv;
#endif

	/* Check for valid opcode, get LBA and block count. */
	switch (scmd->cmnd[0]) {
	case WRITE_6:
		is_write = true;
	case READ_6:
		first_block = (u64)get_unaligned_be16(&scmd->cmnd[2]);
		block_cnt = (u32)scmd->cmnd[4];
		break;
	case WRITE_10:
		is_write = true;
	case READ_10:
		first_block = (u64)get_unaligned_be32(&scmd->cmnd[2]);
		block_cnt = (u32)get_unaligned_be16(&scmd->cmnd[7]);
		break;
	case WRITE_12:
		is_write = true;
	case READ_12:
		first_block = (u64)get_unaligned_be32(&scmd->cmnd[2]);
		block_cnt = get_unaligned_be32(&scmd->cmnd[6]);
		break;
	case WRITE_16:
		is_write = true;
	case READ_16:
		first_block = get_unaligned_be64(&scmd->cmnd[2]);
		block_cnt = get_unaligned_be32(&scmd->cmnd[10]);
		break;
	default:
		/* Process via normal I/O path. */
		return PQI_RAID_BYPASS_INELIGIBLE;
	}

	/* Check for write to non-RAID-0. */
	if (is_write && device->raid_level != SA_RAID_0)
		return PQI_RAID_BYPASS_INELIGIBLE;

	if (unlikely(block_cnt == 0))
		return PQI_RAID_BYPASS_INELIGIBLE;

	last_block = first_block + block_cnt - 1;
	raid_map = device->raid_map;

	/* Check for invalid block or wraparound. */
	if (last_block >= get_unaligned_le64(&raid_map->volume_blk_cnt) ||
		last_block < first_block)
		return PQI_RAID_BYPASS_INELIGIBLE;

	data_disks_per_row = get_unaligned_le16(&raid_map->data_disks_per_row);
	strip_size = get_unaligned_le16(&raid_map->strip_size);
	layout_map_count = get_unaligned_le16(&raid_map->layout_map_count);

	/* Calculate stripe information for the request. */
	blocks_per_row = data_disks_per_row * strip_size;
#if BITS_PER_LONG == 32
	tmpdiv = first_block;
	do_div(tmpdiv, blocks_per_row);
	first_row = tmpdiv;
	tmpdiv = last_block;
	do_div(tmpdiv, blocks_per_row);
	last_row = tmpdiv;
	first_row_offset = (u32)(first_block - (first_row * blocks_per_row));
	last_row_offset = (u32)(last_block - (last_row * blocks_per_row));
	tmpdiv = first_row_offset;
	do_div(tmpdiv, strip_size);
	first_column = tmpdiv;
	tmpdiv = last_row_offset;
	do_div(tmpdiv, strip_size);
	last_column = tmpdiv;
#else
	first_row = first_block / blocks_per_row;
	last_row = last_block / blocks_per_row;
	first_row_offset = (u32)(first_block - (first_row * blocks_per_row));
	last_row_offset = (u32)(last_block - (last_row * blocks_per_row));
	first_column = first_row_offset / strip_size;
	last_column = last_row_offset / strip_size;
#endif

	/* If this isn't a single row/column then give to the controller. */
	if (first_row != last_row || first_column != last_column)
		return PQI_RAID_BYPASS_INELIGIBLE;

	/* Proceeding with driver mapping. */
	total_disks_per_row = data_disks_per_row +
		get_unaligned_le16(&raid_map->metadata_disks_per_row);
	map_row = ((u32)(first_row >> raid_map->parity_rotation_shift)) %
		get_unaligned_le16(&raid_map->row_cnt);
	map_index = (map_row * total_disks_per_row) + first_column;

	if (device->raid_level == SA_RAID_1) {
		if (device->offload_to_mirror)
			map_index += data_disks_per_row;
		device->offload_to_mirror = !device->offload_to_mirror;
	} else if (device->raid_level == SA_RAID_ADM) {
		/*
		 * Handles N-way mirrors (R1-ADM) and R10 with # of drives
		 * divisible by 3.
		 */
		offload_to_mirror = device->offload_to_mirror;
		if (offload_to_mirror == 0) {
			/* use physical disk in the first mirrored group. */
			map_index %= data_disks_per_row;
		} else {
			do {
				/*
				 * Determine mirror group that map_index
				 * indicates.
				 */
				current_group = map_index / data_disks_per_row;

				if (offload_to_mirror != current_group) {
					if (current_group <
						layout_map_count - 1) {
						/*
						 * Select raid index from
						 * next group.
						 */
						map_index += data_disks_per_row;
						current_group++;
					} else {
						/*
						 * Select raid index from first
						 * group.
						 */
						map_index %= data_disks_per_row;
						current_group = 0;
					}
				}
			} while (offload_to_mirror != current_group);
		}

		/* Set mirror group to use next time. */
		offload_to_mirror =
			(offload_to_mirror >= layout_map_count - 1) ?
				0 : offload_to_mirror + 1;
		WARN_ON(offload_to_mirror >= layout_map_count);
		device->offload_to_mirror = offload_to_mirror;
		/*
		 * Avoid direct use of device->offload_to_mirror within this
		 * function since multiple threads might simultaneously
		 * increment it beyond the range of device->layout_map_count -1.
		 */
	} else if ((device->raid_level == SA_RAID_5 ||
		device->raid_level == SA_RAID_6) && layout_map_count > 1) {
		/* RAID 50/60 */
		/* Verify first and last block are in same RAID group */
		r5or6_blocks_per_row = strip_size * data_disks_per_row;
		stripesize = r5or6_blocks_per_row * layout_map_count;
#if BITS_PER_LONG == 32
		tmpdiv = first_block;
		first_group = do_div(tmpdiv, stripesize);
		tmpdiv = first_group;
		do_div(tmpdiv, r5or6_blocks_per_row);
		first_group = tmpdiv;
		tmpdiv = last_block;
		last_group = do_div(tmpdiv, stripesize);
		tmpdiv = last_group;
		do_div(tmpdiv, r5or6_blocks_per_row);
		last_group = tmpdiv;
#else
		first_group = (first_block % stripesize) / r5or6_blocks_per_row;
		last_group = (last_block % stripesize) / r5or6_blocks_per_row;
#endif
		if (first_group != last_group)
			return PQI_RAID_BYPASS_INELIGIBLE;

		/* Verify request is in a single row of RAID 5/6 */
#if BITS_PER_LONG == 32
		tmpdiv = first_block;
		do_div(tmpdiv, stripesize);
		first_row = r5or6_first_row = r0_first_row = tmpdiv;
		tmpdiv = last_block;
		do_div(tmpdiv, stripesize);
		r5or6_last_row = r0_last_row = tmpdiv;
#else
		first_row = r5or6_first_row = r0_first_row =
			first_block / stripesize;
		r5or6_last_row = r0_last_row = last_block / stripesize;
#endif
		if (r5or6_first_row != r5or6_last_row)
			return PQI_RAID_BYPASS_INELIGIBLE;

		/* Verify request is in a single column */
#if BITS_PER_LONG == 32
		tmpdiv = first_block;
		first_row_offset = do_div(tmpdiv, stripesize);
		tmpdiv = first_row_offset;
		first_row_offset = (u32)do_div(tmpdiv, r5or6_blocks_per_row);
		r5or6_first_row_offset = first_row_offset;
		tmpdiv = last_block;
		r5or6_last_row_offset = do_div(tmpdiv, stripesize);
		tmpdiv = r5or6_last_row_offset;
		r5or6_last_row_offset = do_div(tmpdiv, r5or6_blocks_per_row);
		tmpdiv = r5or6_first_row_offset;
		do_div(tmpdiv, strip_size);
		first_column = r5or6_first_column = tmpdiv;
		tmpdiv = r5or6_last_row_offset;
		do_div(tmpdiv, strip_size);
		r5or6_last_column = tmpdiv;
#else
		first_row_offset = r5or6_first_row_offset =
			(u32)((first_block % stripesize) %
			r5or6_blocks_per_row);

		r5or6_last_row_offset =
			(u32)((last_block % stripesize) %
			r5or6_blocks_per_row);

		first_column = r5or6_first_row_offset / strip_size;
		r5or6_first_column = first_column;
		r5or6_last_column = r5or6_last_row_offset / strip_size;
#endif
		if (r5or6_first_column != r5or6_last_column)
			return PQI_RAID_BYPASS_INELIGIBLE;

		/* Request is eligible */
		map_row =
			((u32)(first_row >> raid_map->parity_rotation_shift)) %
			get_unaligned_le16(&raid_map->row_cnt);

		map_index = (first_group *
			(get_unaligned_le16(&raid_map->row_cnt) *
			total_disks_per_row)) +
			(map_row * total_disks_per_row) + first_column;
	}

	if (unlikely(map_index >= RAID_MAP_MAX_ENTRIES))
		return PQI_RAID_BYPASS_INELIGIBLE;

	aio_handle = raid_map->disk_data[map_index].aio_handle;
	disk_block = get_unaligned_le64(&raid_map->disk_starting_blk) +
		first_row * strip_size +
		(first_row_offset - first_column * strip_size);
	disk_block_cnt = block_cnt;

	/* Handle differing logical/physical block sizes. */
	if (raid_map->phys_blk_shift) {
		disk_block <<= raid_map->phys_blk_shift;
		disk_block_cnt <<= raid_map->phys_blk_shift;
	}

	if (unlikely(disk_block_cnt > 0xffff))
		return PQI_RAID_BYPASS_INELIGIBLE;

	/* Build the new CDB for the physical disk I/O. */
	if (disk_block > 0xffffffff) {
		cdb[0] = is_write ? WRITE_16 : READ_16;
		cdb[1] = 0;
		put_unaligned_be64(disk_block, &cdb[2]);
		put_unaligned_be32(disk_block_cnt, &cdb[10]);
		cdb[14] = 0;
		cdb[15] = 0;
		cdb_length = 16;
	} else {
		cdb[0] = is_write ? WRITE_10 : READ_10;
		cdb[1] = 0;
		put_unaligned_be32((u32)disk_block, &cdb[2]);
		cdb[6] = 0;
		put_unaligned_be16((u16)disk_block_cnt, &cdb[7]);
		cdb[9] = 0;
		cdb_length = 10;
	}

	if (get_unaligned_le16(&raid_map->flags) &
		RAID_MAP_ENCRYPTION_ENABLED) {
		pqi_set_encryption_info(&encryption_info, raid_map,
			first_block);
		encryption_info_ptr = &encryption_info;
	} else {
		encryption_info_ptr = NULL;
	}

	return pqi_aio_submit_io(ctrl_info, scmd, aio_handle,
		cdb, cdb_length, queue_group, encryption_info_ptr);
}
#define PQI_STATUS_IDLE		0x0

#define PQI_CREATE_ADMIN_QUEUE_PAIR	1
#define PQI_DELETE_ADMIN_QUEUE_PAIR	2

#define PQI_DEVICE_STATE_POWER_ON_AND_RESET		0x0
#define PQI_DEVICE_STATE_STATUS_AVAILABLE		0x1
#define PQI_DEVICE_STATE_ALL_REGISTERS_READY		0x2
#define PQI_DEVICE_STATE_ADMIN_QUEUE_PAIR_READY		0x3
#define PQI_DEVICE_STATE_ERROR				0x4

#define PQI_MODE_READY_TIMEOUT_SECS		30
#define PQI_MODE_READY_POLL_INTERVAL_MSECS	1
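/*
 * Poll the controller's PQI registers in three stages: first for the
 * PQI device signature, then for the IDLE function-and-status code,
 * and finally for the "all registers ready" device state.  All three
 * stages share the same overall PQI_MODE_READY_TIMEOUT_SECS deadline.
 */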
static int pqi_wait_for_pqi_mode_ready(struct pqi_ctrl_info *ctrl_info)
{
	struct pqi_device_registers __iomem *pqi_registers;
	unsigned long timeout;
	u64 signature;
	u8 status;

	pqi_registers = ctrl_info->pqi_registers;
	timeout = (PQI_MODE_READY_TIMEOUT_SECS * HZ) + jiffies;

	while (1) {
		signature = readq(&pqi_registers->signature);
		if (memcmp(&signature, PQI_DEVICE_SIGNATURE,
			sizeof(signature)) == 0)
			break;
		if (time_after(jiffies, timeout)) {
			dev_err(&ctrl_info->pci_dev->dev,
				"timed out waiting for PQI signature\n");
			return -ETIMEDOUT;
		}
		msleep(PQI_MODE_READY_POLL_INTERVAL_MSECS);
	}

	while (1) {
		status = readb(&pqi_registers->function_and_status_code);
		if (status == PQI_STATUS_IDLE)
			break;
		if (time_after(jiffies, timeout)) {
			dev_err(&ctrl_info->pci_dev->dev,
				"timed out waiting for PQI IDLE\n");
			return -ETIMEDOUT;
		}
		msleep(PQI_MODE_READY_POLL_INTERVAL_MSECS);
	}

	while (1) {
		if (readl(&pqi_registers->device_status) ==
			PQI_DEVICE_STATE_ALL_REGISTERS_READY)
			break;
		if (time_after(jiffies, timeout)) {
			dev_err(&ctrl_info->pci_dev->dev,
				"timed out waiting for PQI all registers ready\n");
			return -ETIMEDOUT;
		}
		msleep(PQI_MODE_READY_POLL_INTERVAL_MSECS);
	}

	return 0;
}
static inline void pqi_aio_path_disabled(struct pqi_io_request *io_request)
{
	struct pqi_scsi_dev *device;

	device = io_request->scmd->device->hostdata;
	device->offload_enabled = false;
}

static inline void pqi_take_device_offline(struct scsi_device *sdev)
{
	struct pqi_ctrl_info *ctrl_info;
	struct pqi_scsi_dev *device;

	if (scsi_device_online(sdev)) {
		scsi_device_set_state(sdev, SDEV_OFFLINE);
		ctrl_info = shost_to_hba(sdev->host);
		schedule_delayed_work(&ctrl_info->rescan_work, 0);
		device = sdev->hostdata;
		dev_err(&ctrl_info->pci_dev->dev, "offlined scsi %d:%d:%d:%d\n",
			ctrl_info->scsi_host->host_no, device->bus,
			device->target, device->lun);
	}
}
static void pqi_process_raid_io_error(struct pqi_io_request *io_request)
{
	u8 scsi_status;
	u8 host_byte;
	struct scsi_cmnd *scmd;
	struct pqi_raid_error_info *error_info;
	size_t sense_data_length;
	int residual_count;
	int xfer_count;
	struct scsi_sense_hdr sshdr;

	scmd = io_request->scmd;
	error_info = io_request->error_info;
	scsi_status = error_info->status;
	host_byte = DID_OK;

	if (error_info->data_out_result == PQI_DATA_IN_OUT_UNDERFLOW) {
		xfer_count =
			get_unaligned_le32(&error_info->data_out_transferred);
		residual_count = scsi_bufflen(scmd) - xfer_count;
		scsi_set_resid(scmd, residual_count);
		if (xfer_count < scmd->underflow)
			host_byte = DID_SOFT_ERROR;
	}

	sense_data_length = get_unaligned_le16(&error_info->sense_data_length);
	if (sense_data_length == 0)
		sense_data_length =
			get_unaligned_le16(&error_info->response_data_length);
	if (sense_data_length) {
		if (sense_data_length > sizeof(error_info->data))
			sense_data_length = sizeof(error_info->data);

		if (scsi_status == SAM_STAT_CHECK_CONDITION &&
			scsi_normalize_sense(error_info->data,
				sense_data_length, &sshdr) &&
				sshdr.sense_key == HARDWARE_ERROR &&
				sshdr.asc == 0x3e &&
				sshdr.ascq == 0x1) {
			pqi_take_device_offline(scmd->device);
			host_byte = DID_NO_CONNECT;
		}

		if (sense_data_length > SCSI_SENSE_BUFFERSIZE)
			sense_data_length = SCSI_SENSE_BUFFERSIZE;
		memcpy(scmd->sense_buffer, error_info->data,
			sense_data_length);
	}

	scmd->result = scsi_status;
	set_host_byte(scmd, host_byte);
}
static void pqi_process_aio_io_error(struct pqi_io_request *io_request)
{
	u8 scsi_status;
	u8 host_byte;
	struct scsi_cmnd *scmd;
	struct pqi_aio_error_info *error_info;
	size_t sense_data_length;
	int residual_count;
	int xfer_count;
	bool device_offline;

	scmd = io_request->scmd;
	error_info = io_request->error_info;
	host_byte = DID_OK;
	sense_data_length = 0;
	device_offline = false;

	switch (error_info->service_response) {
	case PQI_AIO_SERV_RESPONSE_COMPLETE:
		scsi_status = error_info->status;
		break;
	case PQI_AIO_SERV_RESPONSE_FAILURE:
		switch (error_info->status) {
		case PQI_AIO_STATUS_IO_ABORTED:
			scsi_status = SAM_STAT_TASK_ABORTED;
			break;
		case PQI_AIO_STATUS_UNDERRUN:
			scsi_status = SAM_STAT_GOOD;
			residual_count = get_unaligned_le32(
						&error_info->residual_count);
			scsi_set_resid(scmd, residual_count);
			xfer_count = scsi_bufflen(scmd) - residual_count;
			if (xfer_count < scmd->underflow)
				host_byte = DID_SOFT_ERROR;
			break;
		case PQI_AIO_STATUS_OVERRUN:
			scsi_status = SAM_STAT_GOOD;
			break;
		case PQI_AIO_STATUS_AIO_PATH_DISABLED:
			pqi_aio_path_disabled(io_request);
			scsi_status = SAM_STAT_GOOD;
			io_request->status = -EAGAIN;
			break;
		case PQI_AIO_STATUS_NO_PATH_TO_DEVICE:
		case PQI_AIO_STATUS_INVALID_DEVICE:
			device_offline = true;
			pqi_take_device_offline(scmd->device);
			host_byte = DID_NO_CONNECT;
			scsi_status = SAM_STAT_CHECK_CONDITION;
			break;
		case PQI_AIO_STATUS_IO_ERROR:
		default:
			scsi_status = SAM_STAT_CHECK_CONDITION;
			break;
		}
		break;
	case PQI_AIO_SERV_RESPONSE_TMF_COMPLETE:
	case PQI_AIO_SERV_RESPONSE_TMF_SUCCEEDED:
		scsi_status = SAM_STAT_GOOD;
		break;
	case PQI_AIO_SERV_RESPONSE_TMF_REJECTED:
	case PQI_AIO_SERV_RESPONSE_TMF_INCORRECT_LUN:
	default:
		scsi_status = SAM_STAT_CHECK_CONDITION;
		break;
	}

	if (error_info->data_present) {
		sense_data_length =
			get_unaligned_le16(&error_info->data_length);
		if (sense_data_length) {
			if (sense_data_length > sizeof(error_info->data))
				sense_data_length = sizeof(error_info->data);
			if (sense_data_length > SCSI_SENSE_BUFFERSIZE)
				sense_data_length = SCSI_SENSE_BUFFERSIZE;
			memcpy(scmd->sense_buffer, error_info->data,
				sense_data_length);
		}
	}

	if (device_offline && sense_data_length == 0)
		scsi_build_sense_buffer(0, scmd->sense_buffer, HARDWARE_ERROR,
			0x3e, 0x1);

	scmd->result = scsi_status;
	set_host_byte(scmd, host_byte);
}
static void pqi_process_io_error(unsigned int iu_type,
	struct pqi_io_request *io_request)
{
	switch (iu_type) {
	case PQI_RESPONSE_IU_RAID_PATH_IO_ERROR:
		pqi_process_raid_io_error(io_request);
		break;
	case PQI_RESPONSE_IU_AIO_PATH_IO_ERROR:
		pqi_process_aio_io_error(io_request);
		break;
	}
}

static int pqi_interpret_task_management_response(
	struct pqi_task_management_response *response)
{
	int rc;

	switch (response->response_code) {
	case SOP_TMF_COMPLETE:
	case SOP_TMF_FUNCTION_SUCCEEDED:
		rc = 0;
		break;
	default:
		rc = -EIO;
		break;
	}

	return rc;
}
static unsigned int pqi_process_io_intr(struct pqi_ctrl_info *ctrl_info,
	struct pqi_queue_group *queue_group)
{
	unsigned int num_responses;
	pqi_index_t oq_pi;
	pqi_index_t oq_ci;
	struct pqi_io_request *io_request;
	struct pqi_io_response *response;
	u16 request_id;

	num_responses = 0;
	oq_ci = queue_group->oq_ci_copy;

	while (1) {
		oq_pi = *queue_group->oq_pi;
		if (oq_pi == oq_ci)
			break;

		num_responses++;
		response = queue_group->oq_element_array +
			(oq_ci * PQI_OPERATIONAL_OQ_ELEMENT_LENGTH);

		request_id = get_unaligned_le16(&response->request_id);
		WARN_ON(request_id >= ctrl_info->max_io_slots);

		io_request = &ctrl_info->io_request_pool[request_id];
		WARN_ON(atomic_read(&io_request->refcount) == 0);

		switch (response->header.iu_type) {
		case PQI_RESPONSE_IU_RAID_PATH_IO_SUCCESS:
		case PQI_RESPONSE_IU_AIO_PATH_IO_SUCCESS:
		case PQI_RESPONSE_IU_GENERAL_MANAGEMENT:
			break;
		case PQI_RESPONSE_IU_TASK_MANAGEMENT:
			io_request->status =
				pqi_interpret_task_management_response(
					(void *)response);
			break;
		case PQI_RESPONSE_IU_AIO_PATH_DISABLED:
			pqi_aio_path_disabled(io_request);
			io_request->status = -EAGAIN;
			break;
		case PQI_RESPONSE_IU_RAID_PATH_IO_ERROR:
		case PQI_RESPONSE_IU_AIO_PATH_IO_ERROR:
			io_request->error_info = ctrl_info->error_buffer +
				(get_unaligned_le16(&response->error_index) *
				PQI_ERROR_BUFFER_ELEMENT_LENGTH);
			pqi_process_io_error(response->header.iu_type,
				io_request);
			break;
		default:
			dev_err(&ctrl_info->pci_dev->dev,
				"unexpected IU type: 0x%x\n",
				response->header.iu_type);
			WARN_ON(response->header.iu_type);
			break;
		}

		io_request->io_complete_callback(io_request,
			io_request->context);

		/*
		 * Note that the I/O request structure CANNOT BE TOUCHED after
		 * returning from the I/O completion callback!
		 */

		oq_ci = (oq_ci + 1) % ctrl_info->num_elements_per_oq;
	}

	if (num_responses) {
		queue_group->oq_ci_copy = oq_ci;
		writel(oq_ci, queue_group->oq_ci);
	}

	return num_responses;
}

static inline unsigned int pqi_num_elements_free(unsigned int pi,
	unsigned int ci, unsigned int elements_in_queue)
{
	unsigned int num_elements_used;

	if (pi >= ci)
		num_elements_used = pi - ci;
	else
		num_elements_used = elements_in_queue - ci + pi;

	return elements_in_queue - num_elements_used - 1;
}
#define PQI_EVENT_ACK_TIMEOUT	30

static void pqi_start_event_ack(struct pqi_ctrl_info *ctrl_info,
	struct pqi_event_acknowledge_request *iu, size_t iu_length)
{
	pqi_index_t iq_pi;
	pqi_index_t iq_ci;
	unsigned long flags;
	void *next_element;
	unsigned long timeout;
	struct pqi_queue_group *queue_group;

	queue_group = &ctrl_info->queue_groups[PQI_DEFAULT_QUEUE_GROUP];
	put_unaligned_le16(queue_group->oq_id, &iu->header.response_queue_id);

	timeout = (PQI_EVENT_ACK_TIMEOUT * HZ) + jiffies;

	while (1) {
		spin_lock_irqsave(&queue_group->submit_lock[RAID_PATH], flags);

		iq_pi = queue_group->iq_pi_copy[RAID_PATH];
		iq_ci = *queue_group->iq_ci[RAID_PATH];

		if (pqi_num_elements_free(iq_pi, iq_ci,
			ctrl_info->num_elements_per_iq))
			break;

		spin_unlock_irqrestore(
			&queue_group->submit_lock[RAID_PATH], flags);

		if (time_after(jiffies, timeout)) {
			dev_err(&ctrl_info->pci_dev->dev,
				"sending event acknowledge timed out\n");
			return;
		}
	}

	next_element = queue_group->iq_element_array[RAID_PATH] +
		(iq_pi * PQI_OPERATIONAL_IQ_ELEMENT_LENGTH);

	memcpy(next_element, iu, iu_length);

	iq_pi = (iq_pi + 1) % ctrl_info->num_elements_per_iq;

	queue_group->iq_pi_copy[RAID_PATH] = iq_pi;

	/*
	 * This write notifies the controller that an IU is available to be
	 * processed.
	 */
	writel(iq_pi, queue_group->iq_pi[RAID_PATH]);

	spin_unlock_irqrestore(&queue_group->submit_lock[RAID_PATH], flags);

	/*
	 * We have to special-case this type of request because the firmware
	 * does not generate an interrupt when this type of request completes.
	 * Therefore, we have to poll until we see that the firmware has
	 * consumed the request before we move on.
	 */
	timeout = (PQI_EVENT_ACK_TIMEOUT * HZ) + jiffies;

	while (1) {
		if (*queue_group->iq_ci[RAID_PATH] == iq_pi)
			break;
		if (time_after(jiffies, timeout)) {
			dev_err(&ctrl_info->pci_dev->dev,
				"completing event acknowledge timed out\n");
			break;
		}
		usleep_range(1000, 2000);
	}
}

static void pqi_acknowledge_event(struct pqi_ctrl_info *ctrl_info,
	struct pqi_event *event)
{
	struct pqi_event_acknowledge_request request;

	memset(&request, 0, sizeof(request));

	request.header.iu_type = PQI_REQUEST_IU_ACKNOWLEDGE_VENDOR_EVENT;
	put_unaligned_le16(sizeof(request) - PQI_REQUEST_HEADER_LENGTH,
		&request.header.iu_length);
	request.event_type = event->event_type;
	request.event_id = event->event_id;
	request.additional_event_id = event->additional_event_id;

	pqi_start_event_ack(ctrl_info, &request, sizeof(request));
}
static void pqi_event_worker(struct work_struct *work)
{
	unsigned int i;
	struct pqi_ctrl_info *ctrl_info;
	struct pqi_event *pending_event;
	bool got_non_heartbeat_event = false;

	ctrl_info = container_of(work, struct pqi_ctrl_info, event_work);

	pending_event = ctrl_info->pending_events;
	for (i = 0; i < PQI_NUM_SUPPORTED_EVENTS; i++) {
		if (pending_event->pending) {
			pending_event->pending = false;
			pqi_acknowledge_event(ctrl_info, pending_event);
			if (i != PQI_EVENT_HEARTBEAT)
				got_non_heartbeat_event = true;
		}
		pending_event++;
	}

	if (got_non_heartbeat_event)
		pqi_schedule_rescan_worker(ctrl_info);
}
static void pqi_take_ctrl_offline(struct pqi_ctrl_info *ctrl_info)
{
	unsigned int i;
	unsigned int path;
	struct pqi_queue_group *queue_group;
	unsigned long flags;
	struct pqi_io_request *io_request;
	struct pqi_io_request *next;
	struct scsi_cmnd *scmd;

	ctrl_info->controller_online = false;
	dev_err(&ctrl_info->pci_dev->dev, "controller offline\n");

	for (i = 0; i < ctrl_info->num_queue_groups; i++) {
		queue_group = &ctrl_info->queue_groups[i];

		for (path = 0; path < 2; path++) {
			spin_lock_irqsave(
				&queue_group->submit_lock[path], flags);

			list_for_each_entry_safe(io_request, next,
				&queue_group->request_list[path],
				request_list_entry) {

				scmd = io_request->scmd;
				if (scmd) {
					set_host_byte(scmd, DID_NO_CONNECT);
					pqi_scsi_done(scmd);
				}

				list_del(&io_request->request_list_entry);
			}

			spin_unlock_irqrestore(
				&queue_group->submit_lock[path], flags);
		}
	}
}
#define PQI_HEARTBEAT_TIMER_INTERVAL	(5 * HZ)
#define PQI_MAX_HEARTBEAT_REQUESTS	5

static void pqi_heartbeat_timer_handler(unsigned long data)
{
	int num_interrupts;
	struct pqi_ctrl_info *ctrl_info = (struct pqi_ctrl_info *)data;

	num_interrupts = atomic_read(&ctrl_info->num_interrupts);

	if (num_interrupts == ctrl_info->previous_num_interrupts) {
		ctrl_info->num_heartbeats_requested++;
		if (ctrl_info->num_heartbeats_requested >
			PQI_MAX_HEARTBEAT_REQUESTS) {
			pqi_take_ctrl_offline(ctrl_info);
			return;
		}
		ctrl_info->pending_events[PQI_EVENT_HEARTBEAT].pending = true;
		schedule_work(&ctrl_info->event_work);
	} else {
		ctrl_info->num_heartbeats_requested = 0;
	}

	ctrl_info->previous_num_interrupts = num_interrupts;
	mod_timer(&ctrl_info->heartbeat_timer,
		jiffies + PQI_HEARTBEAT_TIMER_INTERVAL);
}

static void pqi_start_heartbeat_timer(struct pqi_ctrl_info *ctrl_info)
{
	ctrl_info->previous_num_interrupts =
		atomic_read(&ctrl_info->num_interrupts);

	init_timer(&ctrl_info->heartbeat_timer);
	ctrl_info->heartbeat_timer.expires =
		jiffies + PQI_HEARTBEAT_TIMER_INTERVAL;
	ctrl_info->heartbeat_timer.data = (unsigned long)ctrl_info;
	ctrl_info->heartbeat_timer.function = pqi_heartbeat_timer_handler;
	add_timer(&ctrl_info->heartbeat_timer);
	ctrl_info->heartbeat_timer_started = true;
}

static inline void pqi_stop_heartbeat_timer(struct pqi_ctrl_info *ctrl_info)
{
	if (ctrl_info->heartbeat_timer_started)
		del_timer_sync(&ctrl_info->heartbeat_timer);
}
static int pqi_event_type_to_event_index(unsigned int event_type)
{
	int index;

	switch (event_type) {
	case PQI_EVENT_TYPE_HEARTBEAT:
		index = PQI_EVENT_HEARTBEAT;
		break;
	case PQI_EVENT_TYPE_HOTPLUG:
		index = PQI_EVENT_HOTPLUG;
		break;
	case PQI_EVENT_TYPE_HARDWARE:
		index = PQI_EVENT_HARDWARE;
		break;
	case PQI_EVENT_TYPE_PHYSICAL_DEVICE:
		index = PQI_EVENT_PHYSICAL_DEVICE;
		break;
	case PQI_EVENT_TYPE_LOGICAL_DEVICE:
		index = PQI_EVENT_LOGICAL_DEVICE;
		break;
	case PQI_EVENT_TYPE_AIO_STATE_CHANGE:
		index = PQI_EVENT_AIO_STATE_CHANGE;
		break;
	case PQI_EVENT_TYPE_AIO_CONFIG_CHANGE:
		index = PQI_EVENT_AIO_CONFIG_CHANGE;
		break;
	default:
		index = -1;
		break;
	}

	return index;
}
static unsigned int pqi_process_event_intr(struct pqi_ctrl_info *ctrl_info)
{
	unsigned int num_events;
	pqi_index_t oq_pi;
	pqi_index_t oq_ci;
	struct pqi_event_queue *event_queue;
	struct pqi_event_response *response;
	struct pqi_event *pending_event;
	bool need_delayed_work;
	int event_index;

	event_queue = &ctrl_info->event_queue;
	num_events = 0;
	need_delayed_work = false;
	oq_ci = event_queue->oq_ci_copy;

	while (1) {
		oq_pi = *event_queue->oq_pi;
		if (oq_pi == oq_ci)
			break;

		num_events++;
		response = event_queue->oq_element_array +
			(oq_ci * PQI_EVENT_OQ_ELEMENT_LENGTH);

		event_index =
			pqi_event_type_to_event_index(response->event_type);

		if (event_index >= 0) {
			if (response->request_acknowlege) {
				pending_event =
					&ctrl_info->pending_events[event_index];
				pending_event->event_type =
					response->event_type;
				pending_event->event_id = response->event_id;
				pending_event->additional_event_id =
					response->additional_event_id;
				if (event_index != PQI_EVENT_HEARTBEAT) {
					pending_event->pending = true;
					need_delayed_work = true;
				}
			}
		}

		oq_ci = (oq_ci + 1) % PQI_NUM_EVENT_QUEUE_ELEMENTS;
	}

	if (num_events) {
		event_queue->oq_ci_copy = oq_ci;
		writel(oq_ci, event_queue->oq_ci);

		if (need_delayed_work)
			schedule_work(&ctrl_info->event_work);
	}

	return num_events;
}
static irqreturn_t pqi_irq_handler(int irq, void *data)
{
	struct pqi_ctrl_info *ctrl_info;
	struct pqi_queue_group *queue_group;
	unsigned int num_responses_handled;

	queue_group = data;
	ctrl_info = queue_group->ctrl_info;

	if (!ctrl_info || !queue_group->oq_ci)
		return IRQ_NONE;

	num_responses_handled = pqi_process_io_intr(ctrl_info, queue_group);

	if (irq == ctrl_info->event_irq)
		num_responses_handled += pqi_process_event_intr(ctrl_info);

	if (num_responses_handled)
		atomic_inc(&ctrl_info->num_interrupts);

	pqi_start_io(ctrl_info, queue_group, RAID_PATH, NULL);
	pqi_start_io(ctrl_info, queue_group, AIO_PATH, NULL);

	return IRQ_HANDLED;
}
static int pqi_request_irqs(struct pqi_ctrl_info *ctrl_info)
{
	int i;
	int rc;

	ctrl_info->event_irq = ctrl_info->msix_vectors[0];

	for (i = 0; i < ctrl_info->num_msix_vectors_enabled; i++) {
		rc = request_irq(ctrl_info->msix_vectors[i],
			pqi_irq_handler, 0,
			DRIVER_NAME_SHORT, ctrl_info->intr_data[i]);
		if (rc) {
			dev_err(&ctrl_info->pci_dev->dev,
				"irq %u init failed with error %d\n",
				ctrl_info->msix_vectors[i], rc);
			return rc;
		}
		ctrl_info->num_msix_vectors_initialized++;
	}

	return 0;
}

static void pqi_free_irqs(struct pqi_ctrl_info *ctrl_info)
{
	int i;

	for (i = 0; i < ctrl_info->num_msix_vectors_initialized; i++)
		free_irq(ctrl_info->msix_vectors[i],
			ctrl_info->intr_data[i]);
}

static int pqi_enable_msix_interrupts(struct pqi_ctrl_info *ctrl_info)
{
	unsigned int i;
	int max_vectors;
	int num_vectors_enabled;
	struct msix_entry msix_entries[PQI_MAX_MSIX_VECTORS];

	max_vectors = ctrl_info->num_queue_groups;

	for (i = 0; i < max_vectors; i++)
		msix_entries[i].entry = i;

	num_vectors_enabled = pci_enable_msix_range(ctrl_info->pci_dev,
		msix_entries, PQI_MIN_MSIX_VECTORS, max_vectors);

	if (num_vectors_enabled < 0) {
		dev_err(&ctrl_info->pci_dev->dev,
			"MSI-X init failed with error %d\n",
			num_vectors_enabled);
		return num_vectors_enabled;
	}

	ctrl_info->num_msix_vectors_enabled = num_vectors_enabled;
	for (i = 0; i < num_vectors_enabled; i++) {
		ctrl_info->msix_vectors[i] = msix_entries[i].vector;
		ctrl_info->intr_data[i] = &ctrl_info->queue_groups[i];
	}

	return 0;
}

static void pqi_irq_set_affinity_hint(struct pqi_ctrl_info *ctrl_info)
{
	int i;
	int rc;
	int cpu;

	cpu = cpumask_first(cpu_online_mask);
	for (i = 0; i < ctrl_info->num_msix_vectors_initialized; i++) {
		rc = irq_set_affinity_hint(ctrl_info->msix_vectors[i],
			get_cpu_mask(cpu));
		if (rc)
			dev_err(&ctrl_info->pci_dev->dev,
				"error %d setting affinity hint for irq vector %u\n",
				rc, ctrl_info->msix_vectors[i]);
		cpu = cpumask_next(cpu, cpu_online_mask);
	}
}

static void pqi_irq_unset_affinity_hint(struct pqi_ctrl_info *ctrl_info)
{
	int i;

	for (i = 0; i < ctrl_info->num_msix_vectors_initialized; i++)
		irq_set_affinity_hint(ctrl_info->msix_vectors[i], NULL);
}
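/*
 * All operational queue element arrays and queue indexes are carved out
 * of a single DMA-coherent allocation: two inbound element arrays (RAID
 * and AIO paths) plus one outbound element array per queue group, the
 * event queue element array, and one aligned pqi_index_t per queue index.
 */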
static int pqi_alloc_operational_queues(struct pqi_ctrl_info *ctrl_info)
{
	unsigned int i;
	size_t alloc_length;
	size_t element_array_length_per_iq;
	size_t element_array_length_per_oq;
	void *element_array;
	void *next_queue_index;
	void *aligned_pointer;
	unsigned int num_inbound_queues;
	unsigned int num_outbound_queues;
	unsigned int num_queue_indexes;
	struct pqi_queue_group *queue_group;

	element_array_length_per_iq =
		PQI_OPERATIONAL_IQ_ELEMENT_LENGTH *
		ctrl_info->num_elements_per_iq;
	element_array_length_per_oq =
		PQI_OPERATIONAL_OQ_ELEMENT_LENGTH *
		ctrl_info->num_elements_per_oq;
	num_inbound_queues = ctrl_info->num_queue_groups * 2;
	num_outbound_queues = ctrl_info->num_queue_groups;
	num_queue_indexes = (ctrl_info->num_queue_groups * 3) + 1;

	aligned_pointer = NULL;

	for (i = 0; i < num_inbound_queues; i++) {
		aligned_pointer = PTR_ALIGN(aligned_pointer,
			PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT);
		aligned_pointer += element_array_length_per_iq;
	}

	for (i = 0; i < num_outbound_queues; i++) {
		aligned_pointer = PTR_ALIGN(aligned_pointer,
			PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT);
		aligned_pointer += element_array_length_per_oq;
	}

	aligned_pointer = PTR_ALIGN(aligned_pointer,
		PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT);
	aligned_pointer += PQI_NUM_EVENT_QUEUE_ELEMENTS *
		PQI_EVENT_OQ_ELEMENT_LENGTH;

	for (i = 0; i < num_queue_indexes; i++) {
		aligned_pointer = PTR_ALIGN(aligned_pointer,
			PQI_OPERATIONAL_INDEX_ALIGNMENT);
		aligned_pointer += sizeof(pqi_index_t);
	}

	alloc_length = (size_t)aligned_pointer +
		PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT;

	ctrl_info->queue_memory_base =
		dma_zalloc_coherent(&ctrl_info->pci_dev->dev,
			alloc_length,
			&ctrl_info->queue_memory_base_dma_handle, GFP_KERNEL);

	if (!ctrl_info->queue_memory_base) {
		dev_err(&ctrl_info->pci_dev->dev,
			"failed to allocate memory for PQI admin queues\n");
		return -ENOMEM;
	}

	ctrl_info->queue_memory_length = alloc_length;

	element_array = PTR_ALIGN(ctrl_info->queue_memory_base,
		PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT);

	for (i = 0; i < ctrl_info->num_queue_groups; i++) {
		queue_group = &ctrl_info->queue_groups[i];
		queue_group->iq_element_array[RAID_PATH] = element_array;
		queue_group->iq_element_array_bus_addr[RAID_PATH] =
			ctrl_info->queue_memory_base_dma_handle +
			(element_array - ctrl_info->queue_memory_base);
		element_array += element_array_length_per_iq;
		element_array = PTR_ALIGN(element_array,
			PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT);
		queue_group->iq_element_array[AIO_PATH] = element_array;
		queue_group->iq_element_array_bus_addr[AIO_PATH] =
			ctrl_info->queue_memory_base_dma_handle +
			(element_array - ctrl_info->queue_memory_base);
		element_array += element_array_length_per_iq;
		element_array = PTR_ALIGN(element_array,
			PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT);
	}

	for (i = 0; i < ctrl_info->num_queue_groups; i++) {
		queue_group = &ctrl_info->queue_groups[i];
		queue_group->oq_element_array = element_array;
		queue_group->oq_element_array_bus_addr =
			ctrl_info->queue_memory_base_dma_handle +
			(element_array - ctrl_info->queue_memory_base);
		element_array += element_array_length_per_oq;
		element_array = PTR_ALIGN(element_array,
			PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT);
	}

	ctrl_info->event_queue.oq_element_array = element_array;
	ctrl_info->event_queue.oq_element_array_bus_addr =
		ctrl_info->queue_memory_base_dma_handle +
		(element_array - ctrl_info->queue_memory_base);
	element_array += PQI_NUM_EVENT_QUEUE_ELEMENTS *
		PQI_EVENT_OQ_ELEMENT_LENGTH;

	next_queue_index = PTR_ALIGN(element_array,
		PQI_OPERATIONAL_INDEX_ALIGNMENT);

	for (i = 0; i < ctrl_info->num_queue_groups; i++) {
		queue_group = &ctrl_info->queue_groups[i];
		queue_group->iq_ci[RAID_PATH] = next_queue_index;
		queue_group->iq_ci_bus_addr[RAID_PATH] =
			ctrl_info->queue_memory_base_dma_handle +
			(next_queue_index - ctrl_info->queue_memory_base);
		next_queue_index += sizeof(pqi_index_t);
		next_queue_index = PTR_ALIGN(next_queue_index,
			PQI_OPERATIONAL_INDEX_ALIGNMENT);
		queue_group->iq_ci[AIO_PATH] = next_queue_index;
		queue_group->iq_ci_bus_addr[AIO_PATH] =
			ctrl_info->queue_memory_base_dma_handle +
			(next_queue_index - ctrl_info->queue_memory_base);
		next_queue_index += sizeof(pqi_index_t);
		next_queue_index = PTR_ALIGN(next_queue_index,
			PQI_OPERATIONAL_INDEX_ALIGNMENT);
		queue_group->oq_pi = next_queue_index;
		queue_group->oq_pi_bus_addr =
			ctrl_info->queue_memory_base_dma_handle +
			(next_queue_index - ctrl_info->queue_memory_base);
		next_queue_index += sizeof(pqi_index_t);
		next_queue_index = PTR_ALIGN(next_queue_index,
			PQI_OPERATIONAL_INDEX_ALIGNMENT);
	}

	ctrl_info->event_queue.oq_pi = next_queue_index;
	ctrl_info->event_queue.oq_pi_bus_addr =
		ctrl_info->queue_memory_base_dma_handle +
		(next_queue_index - ctrl_info->queue_memory_base);

	return 0;
}
static void pqi_init_operational_queues(struct pqi_ctrl_info *ctrl_info)
{
	unsigned int i;
	u16 next_iq_id = PQI_MIN_OPERATIONAL_QUEUE_ID;
	u16 next_oq_id = PQI_MIN_OPERATIONAL_QUEUE_ID;

	/*
	 * Initialize the backpointers to the controller structure in
	 * each operational queue group structure.
	 */
	for (i = 0; i < ctrl_info->num_queue_groups; i++)
		ctrl_info->queue_groups[i].ctrl_info = ctrl_info;

	/*
	 * Assign IDs to all operational queues. Note that the IDs
	 * assigned to operational IQs are independent of the IDs
	 * assigned to operational OQs.
	 */
	ctrl_info->event_queue.oq_id = next_oq_id++;
	for (i = 0; i < ctrl_info->num_queue_groups; i++) {
		ctrl_info->queue_groups[i].iq_id[RAID_PATH] = next_iq_id++;
		ctrl_info->queue_groups[i].iq_id[AIO_PATH] = next_iq_id++;
		ctrl_info->queue_groups[i].oq_id = next_oq_id++;
	}

	/*
	 * Assign MSI-X table entry indexes to all queues. Note that the
	 * interrupt for the event queue is shared with the first queue group.
	 */
	ctrl_info->event_queue.int_msg_num = 0;
	for (i = 0; i < ctrl_info->num_queue_groups; i++)
		ctrl_info->queue_groups[i].int_msg_num = i;

	for (i = 0; i < ctrl_info->num_queue_groups; i++) {
		spin_lock_init(&ctrl_info->queue_groups[i].submit_lock[0]);
		spin_lock_init(&ctrl_info->queue_groups[i].submit_lock[1]);
		INIT_LIST_HEAD(&ctrl_info->queue_groups[i].request_list[0]);
		INIT_LIST_HEAD(&ctrl_info->queue_groups[i].request_list[1]);
	}
}
static int pqi_alloc_admin_queues(struct pqi_ctrl_info *ctrl_info)
{
	size_t alloc_length;
	struct pqi_admin_queues_aligned *admin_queues_aligned;
	struct pqi_admin_queues *admin_queues;

	alloc_length = sizeof(struct pqi_admin_queues_aligned) +
		PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT;

	ctrl_info->admin_queue_memory_base =
		dma_zalloc_coherent(&ctrl_info->pci_dev->dev,
			alloc_length,
			&ctrl_info->admin_queue_memory_base_dma_handle,
			GFP_KERNEL);

	if (!ctrl_info->admin_queue_memory_base)
		return -ENOMEM;

	ctrl_info->admin_queue_memory_length = alloc_length;

	admin_queues = &ctrl_info->admin_queues;
	admin_queues_aligned = PTR_ALIGN(ctrl_info->admin_queue_memory_base,
		PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT);
	admin_queues->iq_element_array =
		&admin_queues_aligned->iq_element_array;
	admin_queues->oq_element_array =
		&admin_queues_aligned->oq_element_array;
	admin_queues->iq_ci = &admin_queues_aligned->iq_ci;
	admin_queues->oq_pi = &admin_queues_aligned->oq_pi;

	admin_queues->iq_element_array_bus_addr =
		ctrl_info->admin_queue_memory_base_dma_handle +
		(admin_queues->iq_element_array -
		ctrl_info->admin_queue_memory_base);
	admin_queues->oq_element_array_bus_addr =
		ctrl_info->admin_queue_memory_base_dma_handle +
		(admin_queues->oq_element_array -
		ctrl_info->admin_queue_memory_base);
	admin_queues->iq_ci_bus_addr =
		ctrl_info->admin_queue_memory_base_dma_handle +
		((void *)admin_queues->iq_ci -
		ctrl_info->admin_queue_memory_base);
	admin_queues->oq_pi_bus_addr =
		ctrl_info->admin_queue_memory_base_dma_handle +
		((void *)admin_queues->oq_pi -
		ctrl_info->admin_queue_memory_base);

	return 0;
}
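/*
 * Creating the admin queue pair is done through PCI registers only:
 * the element array and index addresses are programmed, the create
 * command is written to the function-and-status-code register, and that
 * register is then polled until the controller reports IDLE.
 */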
#define PQI_ADMIN_QUEUE_CREATE_TIMEOUT_JIFFIES		HZ
#define PQI_ADMIN_QUEUE_CREATE_POLL_INTERVAL_MSECS	1

static int pqi_create_admin_queues(struct pqi_ctrl_info *ctrl_info)
{
	struct pqi_device_registers __iomem *pqi_registers;
	struct pqi_admin_queues *admin_queues;
	unsigned long timeout;
	u8 status;
	u32 reg;

	pqi_registers = ctrl_info->pqi_registers;
	admin_queues = &ctrl_info->admin_queues;

	writeq((u64)admin_queues->iq_element_array_bus_addr,
		&pqi_registers->admin_iq_element_array_addr);
	writeq((u64)admin_queues->oq_element_array_bus_addr,
		&pqi_registers->admin_oq_element_array_addr);
	writeq((u64)admin_queues->iq_ci_bus_addr,
		&pqi_registers->admin_iq_ci_addr);
	writeq((u64)admin_queues->oq_pi_bus_addr,
		&pqi_registers->admin_oq_pi_addr);

	reg = PQI_ADMIN_IQ_NUM_ELEMENTS |
		(PQI_ADMIN_OQ_NUM_ELEMENTS) << 8 |
		(admin_queues->int_msg_num << 16);
	writel(reg, &pqi_registers->admin_iq_num_elements);
	writel(PQI_CREATE_ADMIN_QUEUE_PAIR,
		&pqi_registers->function_and_status_code);

	timeout = PQI_ADMIN_QUEUE_CREATE_TIMEOUT_JIFFIES + jiffies;
	while (1) {
		status = readb(&pqi_registers->function_and_status_code);
		if (status == PQI_STATUS_IDLE)
			break;
		if (time_after(jiffies, timeout))
			return -ETIMEDOUT;
		msleep(PQI_ADMIN_QUEUE_CREATE_POLL_INTERVAL_MSECS);
	}

	/*
	 * The offset registers are not initialized to the correct
	 * offsets until *after* the create admin queue pair command
	 * completes successfully.
	 */
	admin_queues->iq_pi = ctrl_info->iomem_base +
		PQI_DEVICE_REGISTERS_OFFSET +
		readq(&pqi_registers->admin_iq_pi_offset);
	admin_queues->oq_ci = ctrl_info->iomem_base +
		PQI_DEVICE_REGISTERS_OFFSET +
		readq(&pqi_registers->admin_oq_ci_offset);

	return 0;
}
static void pqi_submit_admin_request(struct pqi_ctrl_info *ctrl_info,
	struct pqi_general_admin_request *request)
{
	struct pqi_admin_queues *admin_queues;
	void *next_element;
	pqi_index_t iq_pi;

	admin_queues = &ctrl_info->admin_queues;
	iq_pi = admin_queues->iq_pi_copy;

	next_element = admin_queues->iq_element_array +
		(iq_pi * PQI_ADMIN_IQ_ELEMENT_LENGTH);

	memcpy(next_element, request, sizeof(*request));

	iq_pi = (iq_pi + 1) % PQI_ADMIN_IQ_NUM_ELEMENTS;
	admin_queues->iq_pi_copy = iq_pi;

	/*
	 * This write notifies the controller that an IU is available to be
	 * processed.
	 */
	writel(iq_pi, admin_queues->iq_pi);
}

static int pqi_poll_for_admin_response(struct pqi_ctrl_info *ctrl_info,
	struct pqi_general_admin_response *response)
{
	struct pqi_admin_queues *admin_queues;
	pqi_index_t oq_pi;
	pqi_index_t oq_ci;
	unsigned long timeout;

	admin_queues = &ctrl_info->admin_queues;
	oq_ci = admin_queues->oq_ci_copy;

	timeout = (3 * HZ) + jiffies;

	while (1) {
		oq_pi = *admin_queues->oq_pi;
		if (oq_pi != oq_ci)
			break;
		if (time_after(jiffies, timeout)) {
			dev_err(&ctrl_info->pci_dev->dev,
				"timed out waiting for admin response\n");
			return -ETIMEDOUT;
		}
		usleep_range(1000, 2000);
	}

	memcpy(response, admin_queues->oq_element_array +
		(oq_ci * PQI_ADMIN_OQ_ELEMENT_LENGTH), sizeof(*response));

	oq_ci = (oq_ci + 1) % PQI_ADMIN_OQ_NUM_ELEMENTS;
	admin_queues->oq_ci_copy = oq_ci;
	writel(oq_ci, admin_queues->oq_ci);

	return 0;
}
static void pqi_start_io(struct pqi_ctrl_info *ctrl_info,
	struct pqi_queue_group *queue_group, enum pqi_io_path path,
	struct pqi_io_request *io_request)
{
	struct pqi_io_request *next;
	void *next_element;
	pqi_index_t iq_pi;
	pqi_index_t iq_ci;
	size_t iu_length;
	unsigned long flags;
	unsigned int num_elements_needed;
	unsigned int num_elements_to_end_of_queue;
	size_t copy_count;
	struct pqi_iu_header *request;

	spin_lock_irqsave(&queue_group->submit_lock[path], flags);

	if (io_request)
		list_add_tail(&io_request->request_list_entry,
			&queue_group->request_list[path]);

	iq_pi = queue_group->iq_pi_copy[path];

	list_for_each_entry_safe(io_request, next,
		&queue_group->request_list[path], request_list_entry) {

		request = io_request->iu;

		iu_length = get_unaligned_le16(&request->iu_length) +
			PQI_REQUEST_HEADER_LENGTH;
		num_elements_needed =
			DIV_ROUND_UP(iu_length,
				PQI_OPERATIONAL_IQ_ELEMENT_LENGTH);

		iq_ci = *queue_group->iq_ci[path];

		if (num_elements_needed > pqi_num_elements_free(iq_pi, iq_ci,
			ctrl_info->num_elements_per_iq))
			break;

		put_unaligned_le16(queue_group->oq_id,
			&request->response_queue_id);

		next_element = queue_group->iq_element_array[path] +
			(iq_pi * PQI_OPERATIONAL_IQ_ELEMENT_LENGTH);

		num_elements_to_end_of_queue =
			ctrl_info->num_elements_per_iq - iq_pi;

		if (num_elements_needed <= num_elements_to_end_of_queue) {
			memcpy(next_element, request, iu_length);
		} else {
			copy_count = num_elements_to_end_of_queue *
				PQI_OPERATIONAL_IQ_ELEMENT_LENGTH;
			memcpy(next_element, request, copy_count);
			memcpy(queue_group->iq_element_array[path],
				(u8 *)request + copy_count,
				iu_length - copy_count);
		}

		iq_pi = (iq_pi + num_elements_needed) %
			ctrl_info->num_elements_per_iq;

		list_del(&io_request->request_list_entry);
	}

	if (iq_pi != queue_group->iq_pi_copy[path]) {
		queue_group->iq_pi_copy[path] = iq_pi;
		/*
		 * This write notifies the controller that one or more IUs are
		 * available to be processed.
		 */
		writel(iq_pi, queue_group->iq_pi[path]);
	}

	spin_unlock_irqrestore(&queue_group->submit_lock[path], flags);
}
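/*
 * Synchronous RAID-path submission: the request is issued on the default
 * queue group and the caller sleeps on a completion that is signalled
 * from the I/O completion callback.
 */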
static void pqi_raid_synchronous_complete(struct pqi_io_request *io_request,
	void *context)
{
	struct completion *waiting = context;

	complete(waiting);
}

static int pqi_submit_raid_request_synchronous_with_io_request(
	struct pqi_ctrl_info *ctrl_info, struct pqi_io_request *io_request,
	unsigned long timeout_msecs)
{
	int rc = 0;
	DECLARE_COMPLETION_ONSTACK(wait);

	io_request->io_complete_callback = pqi_raid_synchronous_complete;
	io_request->context = &wait;

	pqi_start_io(ctrl_info,
		&ctrl_info->queue_groups[PQI_DEFAULT_QUEUE_GROUP], RAID_PATH,
		io_request);

	if (timeout_msecs == NO_TIMEOUT) {
		wait_for_completion_io(&wait);
	} else {
		if (!wait_for_completion_io_timeout(&wait,
			msecs_to_jiffies(timeout_msecs))) {
			dev_warn(&ctrl_info->pci_dev->dev,
				"command timed out\n");
			rc = -ETIMEDOUT;
		}
	}

	return rc;
}
static int pqi_submit_raid_request_synchronous(struct pqi_ctrl_info *ctrl_info,
	struct pqi_iu_header *request, unsigned int flags,
	struct pqi_raid_error_info *error_info, unsigned long timeout_msecs)
{
	int rc = 0;
	struct pqi_io_request *io_request;
	unsigned long start_jiffies;
	unsigned long msecs_blocked;
	size_t iu_length;

	/*
	 * Note that specifying PQI_SYNC_FLAGS_INTERRUPTABLE and a timeout value
	 * are mutually exclusive.
	 */

	if (flags & PQI_SYNC_FLAGS_INTERRUPTABLE) {
		if (down_interruptible(&ctrl_info->sync_request_sem))
			return -ERESTARTSYS;
	} else {
		if (timeout_msecs == NO_TIMEOUT) {
			down(&ctrl_info->sync_request_sem);
		} else {
			start_jiffies = jiffies;
			if (down_timeout(&ctrl_info->sync_request_sem,
				msecs_to_jiffies(timeout_msecs)))
				return -ETIMEDOUT;
			msecs_blocked =
				jiffies_to_msecs(jiffies - start_jiffies);
			if (msecs_blocked >= timeout_msecs)
				return -ETIMEDOUT;
			timeout_msecs -= msecs_blocked;
		}
	}

	io_request = pqi_alloc_io_request(ctrl_info);

	put_unaligned_le16(io_request->index,
		&(((struct pqi_raid_path_request *)request)->request_id));

	if (request->iu_type == PQI_REQUEST_IU_RAID_PATH_IO)
		((struct pqi_raid_path_request *)request)->error_index =
			((struct pqi_raid_path_request *)request)->request_id;

	iu_length = get_unaligned_le16(&request->iu_length) +
		PQI_REQUEST_HEADER_LENGTH;
	memcpy(io_request->iu, request, iu_length);

	rc = pqi_submit_raid_request_synchronous_with_io_request(ctrl_info,
		io_request, timeout_msecs);

	if (error_info) {
		if (io_request->error_info)
			memcpy(error_info, io_request->error_info,
				sizeof(*error_info));
		else
			memset(error_info, 0, sizeof(*error_info));
	} else if (rc == 0 && io_request->error_info) {
		u8 scsi_status;
		struct pqi_raid_error_info *raid_error_info;

		raid_error_info = io_request->error_info;
		scsi_status = raid_error_info->status;

		if (scsi_status == SAM_STAT_CHECK_CONDITION &&
			raid_error_info->data_out_result ==
			PQI_DATA_IN_OUT_UNDERFLOW)
			scsi_status = SAM_STAT_GOOD;

		if (scsi_status != SAM_STAT_GOOD)
			rc = -EIO;
	}

	pqi_free_io_request(io_request);

	up(&ctrl_info->sync_request_sem);

	return rc;
}
static int pqi_validate_admin_response(
	struct pqi_general_admin_response *response, u8 expected_function_code)
{
	if (response->header.iu_type != PQI_RESPONSE_IU_GENERAL_ADMIN)
		return -EINVAL;

	if (get_unaligned_le16(&response->header.iu_length) !=
		PQI_GENERAL_ADMIN_IU_LENGTH)
		return -EINVAL;

	if (response->function_code != expected_function_code)
		return -EINVAL;

	if (response->status != PQI_GENERAL_ADMIN_STATUS_SUCCESS)
		return -EINVAL;

	return 0;
}

static int pqi_submit_admin_request_synchronous(
	struct pqi_ctrl_info *ctrl_info,
	struct pqi_general_admin_request *request,
	struct pqi_general_admin_response *response)
{
	int rc;

	pqi_submit_admin_request(ctrl_info, request);

	rc = pqi_poll_for_admin_response(ctrl_info, response);

	if (rc == 0)
		rc = pqi_validate_admin_response(response,
			request->function_code);

	return rc;
}
static int pqi_report_device_capability(struct pqi_ctrl_info *ctrl_info)
{
	int rc;
	struct pqi_general_admin_request request;
	struct pqi_general_admin_response response;
	struct pqi_device_capability *capability;
	struct pqi_iu_layer_descriptor *sop_iu_layer_descriptor;

	capability = kmalloc(sizeof(*capability), GFP_KERNEL);
	if (!capability)
		return -ENOMEM;

	memset(&request, 0, sizeof(request));

	request.header.iu_type = PQI_REQUEST_IU_GENERAL_ADMIN;
	put_unaligned_le16(PQI_GENERAL_ADMIN_IU_LENGTH,
		&request.header.iu_length);
	request.function_code =
		PQI_GENERAL_ADMIN_FUNCTION_REPORT_DEVICE_CAPABILITY;
	put_unaligned_le32(sizeof(*capability),
		&request.data.report_device_capability.buffer_length);

	rc = pqi_map_single(ctrl_info->pci_dev,
		&request.data.report_device_capability.sg_descriptor,
		capability, sizeof(*capability),
		PCI_DMA_FROMDEVICE);
	if (rc)
		goto out;

	rc = pqi_submit_admin_request_synchronous(ctrl_info, &request,
		&response);

	pqi_pci_unmap(ctrl_info->pci_dev,
		&request.data.report_device_capability.sg_descriptor, 1,
		PCI_DMA_FROMDEVICE);

	if (rc)
		goto out;

	if (response.status != PQI_GENERAL_ADMIN_STATUS_SUCCESS) {
		rc = -EIO;
		goto out;
	}

	ctrl_info->max_inbound_queues =
		get_unaligned_le16(&capability->max_inbound_queues);
	ctrl_info->max_elements_per_iq =
		get_unaligned_le16(&capability->max_elements_per_iq);
	ctrl_info->max_iq_element_length =
		get_unaligned_le16(&capability->max_iq_element_length)
		* 16;
	ctrl_info->max_outbound_queues =
		get_unaligned_le16(&capability->max_outbound_queues);
	ctrl_info->max_elements_per_oq =
		get_unaligned_le16(&capability->max_elements_per_oq);
	ctrl_info->max_oq_element_length =
		get_unaligned_le16(&capability->max_oq_element_length)
		* 16;

	sop_iu_layer_descriptor =
		&capability->iu_layer_descriptors[PQI_PROTOCOL_SOP];

	ctrl_info->max_inbound_iu_length_per_firmware =
		get_unaligned_le16(
			&sop_iu_layer_descriptor->max_inbound_iu_length);
	ctrl_info->inbound_spanning_supported =
		sop_iu_layer_descriptor->inbound_spanning_supported;
	ctrl_info->outbound_spanning_supported =
		sop_iu_layer_descriptor->outbound_spanning_supported;

out:
	kfree(capability);

	return rc;
}
static int pqi_validate_device_capability(struct pqi_ctrl_info *ctrl_info)
{
	if (ctrl_info->max_iq_element_length <
		PQI_OPERATIONAL_IQ_ELEMENT_LENGTH) {
		dev_err(&ctrl_info->pci_dev->dev,
			"max. inbound queue element length of %d is less than the required length of %d\n",
			ctrl_info->max_iq_element_length,
			PQI_OPERATIONAL_IQ_ELEMENT_LENGTH);
		return -EINVAL;
	}

	if (ctrl_info->max_oq_element_length <
		PQI_OPERATIONAL_OQ_ELEMENT_LENGTH) {
		dev_err(&ctrl_info->pci_dev->dev,
			"max. outbound queue element length of %d is less than the required length of %d\n",
			ctrl_info->max_oq_element_length,
			PQI_OPERATIONAL_OQ_ELEMENT_LENGTH);
		return -EINVAL;
	}

	if (ctrl_info->max_inbound_iu_length_per_firmware <
		PQI_OPERATIONAL_IQ_ELEMENT_LENGTH) {
		dev_err(&ctrl_info->pci_dev->dev,
			"max. inbound IU length of %u is less than the min. required length of %d\n",
			ctrl_info->max_inbound_iu_length_per_firmware,
			PQI_OPERATIONAL_IQ_ELEMENT_LENGTH);
		return -EINVAL;
	}

	if (!ctrl_info->inbound_spanning_supported) {
		dev_err(&ctrl_info->pci_dev->dev,
			"the controller does not support inbound spanning\n");
		return -EINVAL;
	}

	if (ctrl_info->outbound_spanning_supported) {
		dev_err(&ctrl_info->pci_dev->dev,
			"the controller supports outbound spanning but this driver does not\n");
		return -EINVAL;
	}

	return 0;
}
static int pqi_delete_operational_queue(struct pqi_ctrl_info *ctrl_info,
	bool inbound_queue, u16 queue_id)
{
	struct pqi_general_admin_request request;
	struct pqi_general_admin_response response;

	memset(&request, 0, sizeof(request));
	request.header.iu_type = PQI_REQUEST_IU_GENERAL_ADMIN;
	put_unaligned_le16(PQI_GENERAL_ADMIN_IU_LENGTH,
		&request.header.iu_length);
	if (inbound_queue)
		request.function_code =
			PQI_GENERAL_ADMIN_FUNCTION_DELETE_IQ;
	else
		request.function_code =
			PQI_GENERAL_ADMIN_FUNCTION_DELETE_OQ;
	put_unaligned_le16(queue_id,
		&request.data.delete_operational_queue.queue_id);

	return pqi_submit_admin_request_synchronous(ctrl_info, &request,
		&response);
}
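/*
 * The event queue is an outbound queue created before any of the I/O
 * queue groups; its consumer-index register offset only becomes valid
 * after the CREATE_OQ admin command completes successfully.
 */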
static int pqi_create_event_queue(struct pqi_ctrl_info *ctrl_info)
{
	int rc;
	struct pqi_event_queue *event_queue;
	struct pqi_general_admin_request request;
	struct pqi_general_admin_response response;

	event_queue = &ctrl_info->event_queue;

	/*
	 * Create OQ (Outbound Queue - device to host queue) to dedicate
	 * to events.
	 */
	memset(&request, 0, sizeof(request));
	request.header.iu_type = PQI_REQUEST_IU_GENERAL_ADMIN;
	put_unaligned_le16(PQI_GENERAL_ADMIN_IU_LENGTH,
		&request.header.iu_length);
	request.function_code = PQI_GENERAL_ADMIN_FUNCTION_CREATE_OQ;
	put_unaligned_le16(event_queue->oq_id,
		&request.data.create_operational_oq.queue_id);
	put_unaligned_le64((u64)event_queue->oq_element_array_bus_addr,
		&request.data.create_operational_oq.element_array_addr);
	put_unaligned_le64((u64)event_queue->oq_pi_bus_addr,
		&request.data.create_operational_oq.pi_addr);
	put_unaligned_le16(PQI_NUM_EVENT_QUEUE_ELEMENTS,
		&request.data.create_operational_oq.num_elements);
	put_unaligned_le16(PQI_EVENT_OQ_ELEMENT_LENGTH / 16,
		&request.data.create_operational_oq.element_length);
	request.data.create_operational_oq.queue_protocol = PQI_PROTOCOL_SOP;
	put_unaligned_le16(event_queue->int_msg_num,
		&request.data.create_operational_oq.int_msg_num);

	rc = pqi_submit_admin_request_synchronous(ctrl_info, &request,
		&response);
	if (rc)
		return rc;

	event_queue->oq_ci = ctrl_info->iomem_base +
		PQI_DEVICE_REGISTERS_OFFSET +
		get_unaligned_le64(
			&response.data.create_operational_oq.oq_ci_offset);

	return 0;
}
static int pqi_create_queue_group(struct pqi_ctrl_info *ctrl_info)
{
	int rc;
	unsigned int i;
	struct pqi_queue_group *queue_group;
	struct pqi_general_admin_request request;
	struct pqi_general_admin_response response;

	i = ctrl_info->num_active_queue_groups;
	queue_group = &ctrl_info->queue_groups[i];

	/*
	 * Create IQ (Inbound Queue - host to device queue) for
	 * RAID path.
	 */
	memset(&request, 0, sizeof(request));
	request.header.iu_type = PQI_REQUEST_IU_GENERAL_ADMIN;
	put_unaligned_le16(PQI_GENERAL_ADMIN_IU_LENGTH,
		&request.header.iu_length);
	request.function_code = PQI_GENERAL_ADMIN_FUNCTION_CREATE_IQ;
	put_unaligned_le16(queue_group->iq_id[RAID_PATH],
		&request.data.create_operational_iq.queue_id);
	put_unaligned_le64(
		(u64)queue_group->iq_element_array_bus_addr[RAID_PATH],
		&request.data.create_operational_iq.element_array_addr);
	put_unaligned_le64((u64)queue_group->iq_ci_bus_addr[RAID_PATH],
		&request.data.create_operational_iq.ci_addr);
	put_unaligned_le16(ctrl_info->num_elements_per_iq,
		&request.data.create_operational_iq.num_elements);
	put_unaligned_le16(PQI_OPERATIONAL_IQ_ELEMENT_LENGTH / 16,
		&request.data.create_operational_iq.element_length);
	request.data.create_operational_iq.queue_protocol = PQI_PROTOCOL_SOP;

	rc = pqi_submit_admin_request_synchronous(ctrl_info, &request,
		&response);
	if (rc) {
		dev_err(&ctrl_info->pci_dev->dev,
			"error creating inbound RAID queue\n");
		return rc;
	}

	queue_group->iq_pi[RAID_PATH] = ctrl_info->iomem_base +
		PQI_DEVICE_REGISTERS_OFFSET +
		get_unaligned_le64(
			&response.data.create_operational_iq.iq_pi_offset);

	/*
	 * Create IQ (Inbound Queue - host to device queue) for
	 * Advanced I/O (AIO) path.
	 */
	memset(&request, 0, sizeof(request));
	request.header.iu_type = PQI_REQUEST_IU_GENERAL_ADMIN;
	put_unaligned_le16(PQI_GENERAL_ADMIN_IU_LENGTH,
		&request.header.iu_length);
	request.function_code = PQI_GENERAL_ADMIN_FUNCTION_CREATE_IQ;
	put_unaligned_le16(queue_group->iq_id[AIO_PATH],
		&request.data.create_operational_iq.queue_id);
	put_unaligned_le64((u64)queue_group->
		iq_element_array_bus_addr[AIO_PATH],
		&request.data.create_operational_iq.element_array_addr);
	put_unaligned_le64((u64)queue_group->iq_ci_bus_addr[AIO_PATH],
		&request.data.create_operational_iq.ci_addr);
	put_unaligned_le16(ctrl_info->num_elements_per_iq,
		&request.data.create_operational_iq.num_elements);
	put_unaligned_le16(PQI_OPERATIONAL_IQ_ELEMENT_LENGTH / 16,
		&request.data.create_operational_iq.element_length);
	request.data.create_operational_iq.queue_protocol = PQI_PROTOCOL_SOP;

	rc = pqi_submit_admin_request_synchronous(ctrl_info, &request,
		&response);
	if (rc) {
		dev_err(&ctrl_info->pci_dev->dev,
			"error creating inbound AIO queue\n");
		goto delete_inbound_queue_raid;
	}

	queue_group->iq_pi[AIO_PATH] = ctrl_info->iomem_base +
		PQI_DEVICE_REGISTERS_OFFSET +
		get_unaligned_le64(
			&response.data.create_operational_iq.iq_pi_offset);

	/*
	 * Designate the 2nd IQ as the AIO path. By default, all IQs are
	 * assumed to be for RAID path I/O unless we change the queue's
	 * property.
	 */
	memset(&request, 0, sizeof(request));
	request.header.iu_type = PQI_REQUEST_IU_GENERAL_ADMIN;
	put_unaligned_le16(PQI_GENERAL_ADMIN_IU_LENGTH,
		&request.header.iu_length);
	request.function_code = PQI_GENERAL_ADMIN_FUNCTION_CHANGE_IQ_PROPERTY;
	put_unaligned_le16(queue_group->iq_id[AIO_PATH],
		&request.data.change_operational_iq_properties.queue_id);
	put_unaligned_le32(PQI_IQ_PROPERTY_IS_AIO_QUEUE,
		&request.data.change_operational_iq_properties.vendor_specific);

	rc = pqi_submit_admin_request_synchronous(ctrl_info, &request,
		&response);
	if (rc) {
		dev_err(&ctrl_info->pci_dev->dev,
			"error changing queue property\n");
		goto delete_inbound_queue_aio;
	}

	/*
	 * Create OQ (Outbound Queue - device to host queue).
	 */
	memset(&request, 0, sizeof(request));
	request.header.iu_type = PQI_REQUEST_IU_GENERAL_ADMIN;
	put_unaligned_le16(PQI_GENERAL_ADMIN_IU_LENGTH,
		&request.header.iu_length);
	request.function_code = PQI_GENERAL_ADMIN_FUNCTION_CREATE_OQ;
	put_unaligned_le16(queue_group->oq_id,
		&request.data.create_operational_oq.queue_id);
	put_unaligned_le64((u64)queue_group->oq_element_array_bus_addr,
		&request.data.create_operational_oq.element_array_addr);
	put_unaligned_le64((u64)queue_group->oq_pi_bus_addr,
		&request.data.create_operational_oq.pi_addr);
	put_unaligned_le16(ctrl_info->num_elements_per_oq,
		&request.data.create_operational_oq.num_elements);
	put_unaligned_le16(PQI_OPERATIONAL_OQ_ELEMENT_LENGTH / 16,
		&request.data.create_operational_oq.element_length);
	request.data.create_operational_oq.queue_protocol = PQI_PROTOCOL_SOP;
	put_unaligned_le16(queue_group->int_msg_num,
		&request.data.create_operational_oq.int_msg_num);

	rc = pqi_submit_admin_request_synchronous(ctrl_info, &request,
		&response);
	if (rc) {
		dev_err(&ctrl_info->pci_dev->dev,
			"error creating outbound queue\n");
		goto delete_inbound_queue_aio;
	}

	queue_group->oq_ci = ctrl_info->iomem_base +
		PQI_DEVICE_REGISTERS_OFFSET +
		get_unaligned_le64(
			&response.data.create_operational_oq.oq_ci_offset);

	ctrl_info->num_active_queue_groups++;

	return 0;

delete_inbound_queue_aio:
	pqi_delete_operational_queue(ctrl_info, true,
		queue_group->iq_id[AIO_PATH]);

delete_inbound_queue_raid:
	pqi_delete_operational_queue(ctrl_info, true,
		queue_group->iq_id[RAID_PATH]);

	return rc;
}
static int pqi_create_queues(struct pqi_ctrl_info *ctrl_info)
{
	int rc;
	unsigned int i;

	rc = pqi_create_event_queue(ctrl_info);
	if (rc) {
		dev_err(&ctrl_info->pci_dev->dev,
			"error creating event queue\n");
		return rc;
	}

	for (i = 0; i < ctrl_info->num_queue_groups; i++) {
		rc = pqi_create_queue_group(ctrl_info);
		if (rc) {
			dev_err(&ctrl_info->pci_dev->dev,
				"error creating queue group number %u/%u\n",
				i, ctrl_info->num_queue_groups);
			return rc;
		}
	}

	return 0;
}

#define PQI_REPORT_EVENT_CONFIG_BUFFER_LENGTH	\
	(offsetof(struct pqi_event_config, descriptors) + \
	(PQI_MAX_EVENT_DESCRIPTORS * sizeof(struct pqi_event_descriptor)))
static int pqi_configure_events(struct pqi_ctrl_info *ctrl_info)
{
	int rc;
	unsigned int i;
	struct pqi_event_config *event_config;
	struct pqi_general_management_request request;

	event_config = kmalloc(PQI_REPORT_EVENT_CONFIG_BUFFER_LENGTH,
		GFP_KERNEL);
	if (!event_config)
		return -ENOMEM;

	memset(&request, 0, sizeof(request));

	request.header.iu_type = PQI_REQUEST_IU_REPORT_VENDOR_EVENT_CONFIG;
	put_unaligned_le16(offsetof(struct pqi_general_management_request,
		data.report_event_configuration.sg_descriptors[1]) -
		PQI_REQUEST_HEADER_LENGTH, &request.header.iu_length);
	put_unaligned_le32(PQI_REPORT_EVENT_CONFIG_BUFFER_LENGTH,
		&request.data.report_event_configuration.buffer_length);

	rc = pqi_map_single(ctrl_info->pci_dev,
		request.data.report_event_configuration.sg_descriptors,
		event_config, PQI_REPORT_EVENT_CONFIG_BUFFER_LENGTH,
		PCI_DMA_FROMDEVICE);
	if (rc)
		goto out;

	rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header,
		0, NULL, NO_TIMEOUT);

	pqi_pci_unmap(ctrl_info->pci_dev,
		request.data.report_event_configuration.sg_descriptors, 1,
		PCI_DMA_FROMDEVICE);

	if (rc)
		goto out;

	for (i = 0; i < event_config->num_event_descriptors; i++)
		put_unaligned_le16(ctrl_info->event_queue.oq_id,
			&event_config->descriptors[i].oq_id);

	memset(&request, 0, sizeof(request));

	request.header.iu_type = PQI_REQUEST_IU_SET_VENDOR_EVENT_CONFIG;
	put_unaligned_le16(offsetof(struct pqi_general_management_request,
		data.report_event_configuration.sg_descriptors[1]) -
		PQI_REQUEST_HEADER_LENGTH, &request.header.iu_length);
	put_unaligned_le32(PQI_REPORT_EVENT_CONFIG_BUFFER_LENGTH,
		&request.data.report_event_configuration.buffer_length);

	rc = pqi_map_single(ctrl_info->pci_dev,
		request.data.report_event_configuration.sg_descriptors,
		event_config, PQI_REPORT_EVENT_CONFIG_BUFFER_LENGTH,
		PCI_DMA_TODEVICE);
	if (rc)
		goto out;

	rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header, 0,
		NULL, NO_TIMEOUT);

	pqi_pci_unmap(ctrl_info->pci_dev,
		request.data.report_event_configuration.sg_descriptors, 1,
		PCI_DMA_TODEVICE);

out:
	kfree(event_config);

	return rc;
}
static void pqi_free_all_io_requests(struct pqi_ctrl_info *ctrl_info)
{
	unsigned int i;
	struct device *dev;
	size_t sg_chain_buffer_length;
	struct pqi_io_request *io_request;

	if (!ctrl_info->io_request_pool)
		return;

	dev = &ctrl_info->pci_dev->dev;
	sg_chain_buffer_length = ctrl_info->sg_chain_buffer_length;
	io_request = ctrl_info->io_request_pool;

	for (i = 0; i < ctrl_info->max_io_slots; i++) {
		kfree(io_request->iu);
		if (!io_request->sg_chain_buffer)
			break;
		dma_free_coherent(dev, sg_chain_buffer_length,
			io_request->sg_chain_buffer,
			io_request->sg_chain_buffer_dma_handle);
		io_request++;
	}

	kfree(ctrl_info->io_request_pool);
	ctrl_info->io_request_pool = NULL;
}

static inline int pqi_alloc_error_buffer(struct pqi_ctrl_info *ctrl_info)
{
	ctrl_info->error_buffer = dma_zalloc_coherent(&ctrl_info->pci_dev->dev,
		ctrl_info->error_buffer_length,
		&ctrl_info->error_buffer_dma_handle, GFP_KERNEL);

	if (!ctrl_info->error_buffer)
		return -ENOMEM;

	return 0;
}
static int pqi_alloc_io_resources(struct pqi_ctrl_info *ctrl_info)
{
	unsigned int i;
	void *sg_chain_buffer;
	size_t sg_chain_buffer_length;
	dma_addr_t sg_chain_buffer_dma_handle;
	struct device *dev;
	struct pqi_io_request *io_request;

	ctrl_info->io_request_pool = kzalloc(ctrl_info->max_io_slots *
		sizeof(ctrl_info->io_request_pool[0]), GFP_KERNEL);

	if (!ctrl_info->io_request_pool) {
		dev_err(&ctrl_info->pci_dev->dev,
			"failed to allocate I/O request pool\n");
		goto error;
	}

	dev = &ctrl_info->pci_dev->dev;
	sg_chain_buffer_length = ctrl_info->sg_chain_buffer_length;
	io_request = ctrl_info->io_request_pool;

	for (i = 0; i < ctrl_info->max_io_slots; i++) {
		io_request->iu =
			kmalloc(ctrl_info->max_inbound_iu_length, GFP_KERNEL);

		if (!io_request->iu) {
			dev_err(&ctrl_info->pci_dev->dev,
				"failed to allocate IU buffers\n");
			goto error;
		}

		sg_chain_buffer = dma_alloc_coherent(dev,
			sg_chain_buffer_length, &sg_chain_buffer_dma_handle,
			GFP_KERNEL);

		if (!sg_chain_buffer) {
			dev_err(&ctrl_info->pci_dev->dev,
				"failed to allocate PQI scatter-gather chain buffers\n");
			goto error;
		}

		io_request->index = i;
		io_request->sg_chain_buffer = sg_chain_buffer;
		io_request->sg_chain_buffer_dma_handle =
			sg_chain_buffer_dma_handle;
		io_request++;
	}

	return 0;

error:
	pqi_free_all_io_requests(ctrl_info);

	return -ENOMEM;
}
/*
 * Calculate required resources that are sized based on max. outstanding
 * requests and max. transfer size.
 */
static void pqi_calculate_io_resources(struct pqi_ctrl_info *ctrl_info)
{
	u32 max_transfer_size;
	u32 max_sg_entries;

	ctrl_info->scsi_ml_can_queue =
		ctrl_info->max_outstanding_requests - PQI_RESERVED_IO_SLOTS;
	ctrl_info->max_io_slots = ctrl_info->max_outstanding_requests;

	ctrl_info->error_buffer_length =
		ctrl_info->max_io_slots * PQI_ERROR_BUFFER_ELEMENT_LENGTH;

	max_transfer_size =
		min(ctrl_info->max_transfer_size, PQI_MAX_TRANSFER_SIZE);

	max_sg_entries = max_transfer_size / PAGE_SIZE;

	/* +1 to cover when the buffer is not page-aligned. */
	max_sg_entries++;

	max_sg_entries = min(ctrl_info->max_sg_entries, max_sg_entries);

	max_transfer_size = (max_sg_entries - 1) * PAGE_SIZE;

	ctrl_info->sg_chain_buffer_length =
		max_sg_entries * sizeof(struct pqi_sg_descriptor);
	ctrl_info->sg_tablesize = max_sg_entries;
	ctrl_info->max_sectors = max_transfer_size / 512;
}
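/*
 * The number of queue groups is bounded by the number of online CPUs,
 * the number of MSI-X vectors, and the controller's inbound/outbound
 * queue limits (two IQs and one OQ per group, plus the dedicated
 * event OQ).
 */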
static void pqi_calculate_queue_resources(struct pqi_ctrl_info *ctrl_info)
{
	int num_cpus;
	int max_queue_groups;
	int num_queue_groups;
	u16 num_elements_per_iq;
	u16 num_elements_per_oq;

	max_queue_groups = min(ctrl_info->max_inbound_queues / 2,
		ctrl_info->max_outbound_queues - 1);
	max_queue_groups = min(max_queue_groups, PQI_MAX_QUEUE_GROUPS);

	num_cpus = num_online_cpus();
	num_queue_groups = min(num_cpus, ctrl_info->max_msix_vectors);
	num_queue_groups = min(num_queue_groups, max_queue_groups);

	ctrl_info->num_queue_groups = num_queue_groups;

	/*
	 * Make sure that the max. inbound IU length is an even multiple
	 * of our inbound element length.
	 */
	ctrl_info->max_inbound_iu_length =
		(ctrl_info->max_inbound_iu_length_per_firmware /
		PQI_OPERATIONAL_IQ_ELEMENT_LENGTH) *
		PQI_OPERATIONAL_IQ_ELEMENT_LENGTH;

	num_elements_per_iq =
		(ctrl_info->max_inbound_iu_length /
		PQI_OPERATIONAL_IQ_ELEMENT_LENGTH);

	/* Add one because one element in each queue is unusable. */
	num_elements_per_iq++;

	num_elements_per_iq = min(num_elements_per_iq,
		ctrl_info->max_elements_per_iq);

	num_elements_per_oq = ((num_elements_per_iq - 1) * 2) + 1;
	num_elements_per_oq = min(num_elements_per_oq,
		ctrl_info->max_elements_per_oq);

	ctrl_info->num_elements_per_iq = num_elements_per_iq;
	ctrl_info->num_elements_per_oq = num_elements_per_oq;

	ctrl_info->max_sg_per_iu =
		((ctrl_info->max_inbound_iu_length -
		PQI_OPERATIONAL_IQ_ELEMENT_LENGTH) /
		sizeof(struct pqi_sg_descriptor)) +
		PQI_MAX_EMBEDDED_SG_DESCRIPTORS;
}
static inline void pqi_set_sg_descriptor(
	struct pqi_sg_descriptor *sg_descriptor, struct scatterlist *sg)
{
	u64 address = (u64)sg_dma_address(sg);
	unsigned int length = sg_dma_len(sg);

	put_unaligned_le64(address, &sg_descriptor->address);
	put_unaligned_le32(length, &sg_descriptor->length);
	put_unaligned_le32(0, &sg_descriptor->flags);
}
static int pqi_build_raid_sg_list(struct pqi_ctrl_info *ctrl_info,
	struct pqi_raid_path_request *request, struct scsi_cmnd *scmd,
	struct pqi_io_request *io_request)
{
	int i;
	u16 iu_length;
	int sg_count;
	bool chained;
	unsigned int num_sg_in_iu;
	unsigned int max_sg_per_iu;
	struct scatterlist *sg;
	struct pqi_sg_descriptor *sg_descriptor;

	sg_count = scsi_dma_map(scmd);
	if (sg_count < 0)
		return sg_count;

	iu_length = offsetof(struct pqi_raid_path_request, sg_descriptors) -
		PQI_REQUEST_HEADER_LENGTH;
	num_sg_in_iu = 0;

	if (sg_count == 0)
		goto out;

	sg = scsi_sglist(scmd);
	sg_descriptor = request->sg_descriptors;
	max_sg_per_iu = ctrl_info->max_sg_per_iu - 1;
	chained = false;
	i = 0;

	while (1) {
		pqi_set_sg_descriptor(sg_descriptor, sg);
		if (!chained)
			num_sg_in_iu++;
		i++;
		if (i == sg_count)
			break;
		sg_descriptor++;
		if (i == max_sg_per_iu) {
			put_unaligned_le64(
				(u64)io_request->sg_chain_buffer_dma_handle,
				&sg_descriptor->address);
			put_unaligned_le32((sg_count - num_sg_in_iu)
				* sizeof(*sg_descriptor),
				&sg_descriptor->length);
			put_unaligned_le32(CISS_SG_CHAIN,
				&sg_descriptor->flags);
			chained = true;
			num_sg_in_iu++;
			sg_descriptor = io_request->sg_chain_buffer;
		}
		sg = sg_next(sg);
	}

	put_unaligned_le32(CISS_SG_LAST, &sg_descriptor->flags);
	request->partial = chained;
	iu_length += num_sg_in_iu * sizeof(*sg_descriptor);

out:
	put_unaligned_le16(iu_length, &request->header.iu_length);

	return 0;
}
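/*
 * SG chaining sketch for the builder above: the request itself has room
 * for max_sg_per_iu descriptors, the last of which is held in reserve.
 * Once that many entries have been filled, the reserved slot is rewritten
 * as a CISS_SG_CHAIN descriptor pointing (by DMA address) at
 * io_request->sg_chain_buffer, and the remaining entries continue in that
 * chain buffer.  The final descriptor, embedded or chained, is tagged
 * CISS_SG_LAST, and request->partial records whether chaining occurred.
 */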
static int pqi_build_aio_sg_list(struct pqi_ctrl_info *ctrl_info,
	struct pqi_aio_path_request *request, struct scsi_cmnd *scmd,
	struct pqi_io_request *io_request)
{
	int i;
	u16 iu_length;
	int sg_count;
	bool chained;
	unsigned int num_sg_in_iu;
	unsigned int max_sg_per_iu;
	struct scatterlist *sg;
	struct pqi_sg_descriptor *sg_descriptor;

	sg_count = scsi_dma_map(scmd);
	if (sg_count < 0)
		return sg_count;

	iu_length = offsetof(struct pqi_aio_path_request, sg_descriptors) -
		PQI_REQUEST_HEADER_LENGTH;
	num_sg_in_iu = 0;

	if (sg_count == 0)
		goto out;

	sg = scsi_sglist(scmd);
	sg_descriptor = request->sg_descriptors;
	max_sg_per_iu = ctrl_info->max_sg_per_iu - 1;
	chained = false;
	i = 0;

	while (1) {
		pqi_set_sg_descriptor(sg_descriptor, sg);
		if (!chained)
			num_sg_in_iu++;
		i++;
		if (i == sg_count)
			break;
		sg_descriptor++;
		if (i == max_sg_per_iu) {
			put_unaligned_le64(
				(u64)io_request->sg_chain_buffer_dma_handle,
				&sg_descriptor->address);
			put_unaligned_le32((sg_count - num_sg_in_iu)
				* sizeof(*sg_descriptor),
				&sg_descriptor->length);
			put_unaligned_le32(CISS_SG_CHAIN,
				&sg_descriptor->flags);
			chained = true;
			num_sg_in_iu++;
			sg_descriptor = io_request->sg_chain_buffer;
		}
		sg = sg_next(sg);
	}

	put_unaligned_le32(CISS_SG_LAST, &sg_descriptor->flags);
	request->partial = chained;
	iu_length += num_sg_in_iu * sizeof(*sg_descriptor);

out:
	put_unaligned_le16(iu_length, &request->header.iu_length);
	request->num_sg_descriptors = num_sg_in_iu;

	return 0;
}
static void pqi_raid_io_complete(struct pqi_io_request *io_request,
	void *context)
{
	struct scsi_cmnd *scmd;

	scmd = io_request->scmd;
	pqi_free_io_request(io_request);
	scsi_dma_unmap(scmd);
	pqi_scsi_done(scmd);
}
static int pqi_raid_submit_scsi_cmd(struct pqi_ctrl_info *ctrl_info,
	struct pqi_scsi_dev *device, struct scsi_cmnd *scmd,
	struct pqi_queue_group *queue_group)
{
	int rc;
	size_t cdb_length;
	struct pqi_io_request *io_request;
	struct pqi_raid_path_request *request;

	io_request = pqi_alloc_io_request(ctrl_info);
	io_request->io_complete_callback = pqi_raid_io_complete;
	io_request->scmd = scmd;

	scmd->host_scribble = (unsigned char *)io_request;

	request = io_request->iu;
	memset(request, 0,
		offsetof(struct pqi_raid_path_request, sg_descriptors));

	request->header.iu_type = PQI_REQUEST_IU_RAID_PATH_IO;
	put_unaligned_le32(scsi_bufflen(scmd), &request->buffer_length);
	request->task_attribute = SOP_TASK_ATTRIBUTE_SIMPLE;
	put_unaligned_le16(io_request->index, &request->request_id);
	request->error_index = request->request_id;
	memcpy(request->lun_number, device->scsi3addr,
		sizeof(request->lun_number));

	cdb_length = min_t(size_t, scmd->cmd_len, sizeof(request->cdb));
	memcpy(request->cdb, scmd->cmnd, cdb_length);

	switch (cdb_length) {
	case 6:
	case 10:
	case 12:
	case 16:
		/* No bytes in the Additional CDB bytes field */
		request->additional_cdb_bytes_usage =
			SOP_ADDITIONAL_CDB_BYTES_0;
		break;
	case 20:
		/* 4 bytes in the Additional cdb field */
		request->additional_cdb_bytes_usage =
			SOP_ADDITIONAL_CDB_BYTES_4;
		break;
	case 24:
		/* 8 bytes in the Additional cdb field */
		request->additional_cdb_bytes_usage =
			SOP_ADDITIONAL_CDB_BYTES_8;
		break;
	case 28:
		/* 12 bytes in the Additional cdb field */
		request->additional_cdb_bytes_usage =
			SOP_ADDITIONAL_CDB_BYTES_12;
		break;
	case 32:
	default:
		/* 16 bytes in the Additional cdb field */
		request->additional_cdb_bytes_usage =
			SOP_ADDITIONAL_CDB_BYTES_16;
		break;
	}

	switch (scmd->sc_data_direction) {
	case DMA_TO_DEVICE:
		request->data_direction = SOP_READ_FLAG;
		break;
	case DMA_FROM_DEVICE:
		request->data_direction = SOP_WRITE_FLAG;
		break;
	case DMA_NONE:
		request->data_direction = SOP_NO_DIRECTION_FLAG;
		break;
	case DMA_BIDIRECTIONAL:
		request->data_direction = SOP_BIDIRECTIONAL;
		break;
	default:
		dev_err(&ctrl_info->pci_dev->dev,
			"unknown data direction: %d\n",
			scmd->sc_data_direction);
		WARN_ON(scmd->sc_data_direction);
		break;
	}

	rc = pqi_build_raid_sg_list(ctrl_info, request, scmd, io_request);
	if (rc) {
		pqi_free_io_request(io_request);
		return SCSI_MLQUEUE_HOST_BUSY;
	}

	pqi_start_io(ctrl_info, queue_group, RAID_PATH, io_request);

	return 0;
}
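/*
 * Note on the bookkeeping above: request_id is the io_request slot index,
 * and error_index is set to the same value, so error information for this
 * command is intended to be associated with that slot's element in the
 * error buffer (which was sized earlier as
 * max_io_slots * PQI_ERROR_BUFFER_ELEMENT_LENGTH).
 */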
static void pqi_aio_io_complete(struct pqi_io_request *io_request,
	void *context)
{
	struct scsi_cmnd *scmd;

	scmd = io_request->scmd;
	scsi_dma_unmap(scmd);
	if (io_request->status == -EAGAIN)
		set_host_byte(scmd, DID_IMM_RETRY);
	pqi_free_io_request(io_request);
	pqi_scsi_done(scmd);
}
static inline int pqi_aio_submit_scsi_cmd(struct pqi_ctrl_info *ctrl_info,
	struct pqi_scsi_dev *device, struct scsi_cmnd *scmd,
	struct pqi_queue_group *queue_group)
{
	return pqi_aio_submit_io(ctrl_info, scmd, device->aio_handle,
		scmd->cmnd, scmd->cmd_len, queue_group, NULL);
}
static int pqi_aio_submit_io(struct pqi_ctrl_info *ctrl_info,
	struct scsi_cmnd *scmd, u32 aio_handle, u8 *cdb,
	unsigned int cdb_length, struct pqi_queue_group *queue_group,
	struct pqi_encryption_info *encryption_info)
{
	int rc;
	struct pqi_io_request *io_request;
	struct pqi_aio_path_request *request;

	io_request = pqi_alloc_io_request(ctrl_info);
	io_request->io_complete_callback = pqi_aio_io_complete;
	io_request->scmd = scmd;

	scmd->host_scribble = (unsigned char *)io_request;

	request = io_request->iu;
	memset(request, 0,
		offsetof(struct pqi_raid_path_request, sg_descriptors));

	request->header.iu_type = PQI_REQUEST_IU_AIO_PATH_IO;
	put_unaligned_le32(aio_handle, &request->nexus_id);
	put_unaligned_le32(scsi_bufflen(scmd), &request->buffer_length);
	request->task_attribute = SOP_TASK_ATTRIBUTE_SIMPLE;
	put_unaligned_le16(io_request->index, &request->request_id);
	request->error_index = request->request_id;
	if (cdb_length > sizeof(request->cdb))
		cdb_length = sizeof(request->cdb);
	request->cdb_length = cdb_length;
	memcpy(request->cdb, cdb, cdb_length);

	switch (scmd->sc_data_direction) {
	case DMA_TO_DEVICE:
		request->data_direction = SOP_READ_FLAG;
		break;
	case DMA_FROM_DEVICE:
		request->data_direction = SOP_WRITE_FLAG;
		break;
	case DMA_NONE:
		request->data_direction = SOP_NO_DIRECTION_FLAG;
		break;
	case DMA_BIDIRECTIONAL:
		request->data_direction = SOP_BIDIRECTIONAL;
		break;
	default:
		dev_err(&ctrl_info->pci_dev->dev,
			"unknown data direction: %d\n",
			scmd->sc_data_direction);
		WARN_ON(scmd->sc_data_direction);
		break;
	}

	if (encryption_info) {
		request->encryption_enable = true;
		put_unaligned_le16(encryption_info->data_encryption_key_index,
			&request->data_encryption_key_index);
		put_unaligned_le32(encryption_info->encrypt_tweak_lower,
			&request->encrypt_tweak_lower);
		put_unaligned_le32(encryption_info->encrypt_tweak_upper,
			&request->encrypt_tweak_upper);
	}

	rc = pqi_build_aio_sg_list(ctrl_info, request, scmd, io_request);
	if (rc) {
		pqi_free_io_request(io_request);
		return SCSI_MLQUEUE_HOST_BUSY;
	}

	pqi_start_io(ctrl_info, queue_group, AIO_PATH, io_request);

	return 0;
}
static int pqi_scsi_queue_command(struct Scsi_Host *shost,
	struct scsi_cmnd *scmd)
{
	int rc;
	struct pqi_ctrl_info *ctrl_info;
	struct pqi_scsi_dev *device;
	u16 hwq;
	struct pqi_queue_group *queue_group;
	bool raid_bypassed;

	device = scmd->device->hostdata;
	ctrl_info = shost_to_hba(shost);

	if (pqi_ctrl_offline(ctrl_info)) {
		set_host_byte(scmd, DID_NO_CONNECT);
		pqi_scsi_done(scmd);
		return 0;
	}

	hwq = blk_mq_unique_tag_to_hwq(blk_mq_unique_tag(scmd->request));
	if (hwq >= ctrl_info->num_queue_groups)
		hwq = 0;

	queue_group = &ctrl_info->queue_groups[hwq];

	if (pqi_is_logical_device(device)) {
		raid_bypassed = false;
		if (device->offload_enabled &&
			scmd->request->cmd_type == REQ_TYPE_FS) {
			rc = pqi_raid_bypass_submit_scsi_cmd(ctrl_info, device,
				scmd, queue_group);
			if (rc == 0 ||
				rc == SCSI_MLQUEUE_HOST_BUSY ||
				rc == SAM_STAT_CHECK_CONDITION ||
				rc == SAM_STAT_RESERVATION_CONFLICT)
				raid_bypassed = true;
		}
		if (!raid_bypassed)
			rc = pqi_raid_submit_scsi_cmd(ctrl_info, device, scmd,
				queue_group);
	} else {
		if (device->aio_enabled)
			rc = pqi_aio_submit_scsi_cmd(ctrl_info, device, scmd,
				queue_group);
		else
			rc = pqi_raid_submit_scsi_cmd(ctrl_info, device, scmd,
				queue_group);
	}

	return rc;
}
static void pqi_lun_reset_complete(struct pqi_io_request *io_request,
	void *context)
{
	struct completion *waiting = context;

	complete(waiting);
}

#define PQI_LUN_RESET_TIMEOUT_SECS	10

static int pqi_wait_for_lun_reset_completion(struct pqi_ctrl_info *ctrl_info,
	struct pqi_scsi_dev *device, struct completion *wait)
{
	int rc;
	unsigned int wait_secs = 0;

	while (1) {
		if (wait_for_completion_io_timeout(wait,
			PQI_LUN_RESET_TIMEOUT_SECS * HZ)) {
			rc = 0;
			break;
		}

		pqi_check_ctrl_health(ctrl_info);
		if (pqi_ctrl_offline(ctrl_info)) {
			rc = -ETIMEDOUT;
			break;
		}

		wait_secs += PQI_LUN_RESET_TIMEOUT_SECS;

		dev_err(&ctrl_info->pci_dev->dev,
			"resetting scsi %d:%d:%d:%d - waiting %u seconds\n",
			ctrl_info->scsi_host->host_no, device->bus,
			device->target, device->lun, wait_secs);
	}

	return rc;
}
static int pqi_lun_reset(struct pqi_ctrl_info *ctrl_info,
	struct pqi_scsi_dev *device)
{
	int rc;
	struct pqi_io_request *io_request;
	DECLARE_COMPLETION_ONSTACK(wait);
	struct pqi_task_management_request *request;

	down(&ctrl_info->lun_reset_sem);

	io_request = pqi_alloc_io_request(ctrl_info);
	io_request->io_complete_callback = pqi_lun_reset_complete;
	io_request->context = &wait;

	request = io_request->iu;
	memset(request, 0, sizeof(*request));

	request->header.iu_type = PQI_REQUEST_IU_TASK_MANAGEMENT;
	put_unaligned_le16(sizeof(*request) - PQI_REQUEST_HEADER_LENGTH,
		&request->header.iu_length);
	put_unaligned_le16(io_request->index, &request->request_id);
	memcpy(request->lun_number, device->scsi3addr,
		sizeof(request->lun_number));
	request->task_management_function = SOP_TASK_MANAGEMENT_LUN_RESET;

	pqi_start_io(ctrl_info,
		&ctrl_info->queue_groups[PQI_DEFAULT_QUEUE_GROUP], RAID_PATH,
		io_request);

	rc = pqi_wait_for_lun_reset_completion(ctrl_info, device, &wait);
	if (rc == 0)
		rc = io_request->status;

	pqi_free_io_request(io_request);
	up(&ctrl_info->lun_reset_sem);

	return rc;
}
/* Performs a reset at the LUN level. */

static int pqi_device_reset(struct pqi_ctrl_info *ctrl_info,
	struct pqi_scsi_dev *device)
{
	int rc;

	pqi_check_ctrl_health(ctrl_info);
	if (pqi_ctrl_offline(ctrl_info))
		return FAILED;

	rc = pqi_lun_reset(ctrl_info, device);

	return rc == 0 ? SUCCESS : FAILED;
}
static int pqi_eh_device_reset_handler(struct scsi_cmnd *scmd)
{
	int rc;
	struct pqi_ctrl_info *ctrl_info;
	struct pqi_scsi_dev *device;

	ctrl_info = shost_to_hba(scmd->device->host);
	device = scmd->device->hostdata;

	dev_err(&ctrl_info->pci_dev->dev,
		"resetting scsi %d:%d:%d:%d\n",
		ctrl_info->scsi_host->host_no,
		device->bus, device->target, device->lun);

	rc = pqi_device_reset(ctrl_info, device);

	dev_err(&ctrl_info->pci_dev->dev,
		"reset of scsi %d:%d:%d:%d: %s\n",
		ctrl_info->scsi_host->host_no,
		device->bus, device->target, device->lun,
		rc == SUCCESS ? "SUCCESS" : "FAILED");

	return rc;
}
static int pqi_slave_alloc(struct scsi_device *sdev)
{
	struct pqi_scsi_dev *device;
	unsigned long flags;
	struct pqi_ctrl_info *ctrl_info;
	struct scsi_target *starget;
	struct sas_rphy *rphy;

	ctrl_info = shost_to_hba(sdev->host);

	spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);

	if (sdev_channel(sdev) == PQI_PHYSICAL_DEVICE_BUS) {
		starget = scsi_target(sdev);
		rphy = target_to_rphy(starget);
		device = pqi_find_device_by_sas_rphy(ctrl_info, rphy);
		if (device) {
			device->target = sdev_id(sdev);
			device->lun = sdev->lun;
			device->target_lun_valid = true;
		}
	} else {
		device = pqi_find_scsi_dev(ctrl_info, sdev_channel(sdev),
			sdev_id(sdev), sdev->lun);
	}

	if (device && device->expose_device) {
		sdev->hostdata = device;
		device->sdev = sdev;
		if (device->queue_depth) {
			device->advertised_queue_depth = device->queue_depth;
			scsi_change_queue_depth(sdev,
				device->advertised_queue_depth);
		}
	}

	spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);

	return 0;
}
static int pqi_slave_configure(struct scsi_device *sdev)
{
	struct pqi_scsi_dev *device;

	device = sdev->hostdata;
	if (!device->expose_device)
		sdev->no_uld_attach = true;

	return 0;
}
static int pqi_getpciinfo_ioctl(struct pqi_ctrl_info *ctrl_info,
	void __user *arg)
{
	struct pci_dev *pci_dev;
	u32 subsystem_vendor;
	u32 subsystem_device;
	cciss_pci_info_struct pciinfo;

	if (!arg)
		return -EINVAL;

	pci_dev = ctrl_info->pci_dev;

	pciinfo.domain = pci_domain_nr(pci_dev->bus);
	pciinfo.bus = pci_dev->bus->number;
	pciinfo.dev_fn = pci_dev->devfn;
	subsystem_vendor = pci_dev->subsystem_vendor;
	subsystem_device = pci_dev->subsystem_device;
	pciinfo.board_id = ((subsystem_device << 16) & 0xffff0000) |
		subsystem_vendor;

	if (copy_to_user(arg, &pciinfo, sizeof(pciinfo)))
		return -EFAULT;

	return 0;
}
static int pqi_getdrivver_ioctl(void __user *arg)
{
	u32 version;

	if (!arg)
		return -EINVAL;

	version = (DRIVER_MAJOR << 28) | (DRIVER_MINOR << 24) |
		(DRIVER_RELEASE << 16) | DRIVER_REVISION;

	if (copy_to_user(arg, &version, sizeof(version)))
		return -EFAULT;

	return 0;
}
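/*
 * The packed version word above places the major number in bits 31:28,
 * the minor in 27:24, the release in 23:16, and the revision in 15:0.
 * Purely illustrative example: major 1, minor 2, release 3, revision 4
 * packs to (1 << 28) | (2 << 24) | (3 << 16) | 4 = 0x12030004.
 */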
struct ciss_error_info {
	u8 scsi_status;
	int command_status;
	size_t sense_data_length;
};

static void pqi_error_info_to_ciss(struct pqi_raid_error_info *pqi_error_info,
	struct ciss_error_info *ciss_error_info)
{
	int ciss_cmd_status;
	size_t sense_data_length;

	switch (pqi_error_info->data_out_result) {
	case PQI_DATA_IN_OUT_GOOD:
		ciss_cmd_status = CISS_CMD_STATUS_SUCCESS;
		break;
	case PQI_DATA_IN_OUT_UNDERFLOW:
		ciss_cmd_status = CISS_CMD_STATUS_DATA_UNDERRUN;
		break;
	case PQI_DATA_IN_OUT_BUFFER_OVERFLOW:
		ciss_cmd_status = CISS_CMD_STATUS_DATA_OVERRUN;
		break;
	case PQI_DATA_IN_OUT_PROTOCOL_ERROR:
	case PQI_DATA_IN_OUT_BUFFER_ERROR:
	case PQI_DATA_IN_OUT_BUFFER_OVERFLOW_DESCRIPTOR_AREA:
	case PQI_DATA_IN_OUT_BUFFER_OVERFLOW_BRIDGE:
	case PQI_DATA_IN_OUT_ERROR:
		ciss_cmd_status = CISS_CMD_STATUS_PROTOCOL_ERROR;
		break;
	case PQI_DATA_IN_OUT_HARDWARE_ERROR:
	case PQI_DATA_IN_OUT_PCIE_FABRIC_ERROR:
	case PQI_DATA_IN_OUT_PCIE_COMPLETION_TIMEOUT:
	case PQI_DATA_IN_OUT_PCIE_COMPLETER_ABORT_RECEIVED:
	case PQI_DATA_IN_OUT_PCIE_UNSUPPORTED_REQUEST_RECEIVED:
	case PQI_DATA_IN_OUT_PCIE_ECRC_CHECK_FAILED:
	case PQI_DATA_IN_OUT_PCIE_UNSUPPORTED_REQUEST:
	case PQI_DATA_IN_OUT_PCIE_ACS_VIOLATION:
	case PQI_DATA_IN_OUT_PCIE_TLP_PREFIX_BLOCKED:
	case PQI_DATA_IN_OUT_PCIE_POISONED_MEMORY_READ:
		ciss_cmd_status = CISS_CMD_STATUS_HARDWARE_ERROR;
		break;
	case PQI_DATA_IN_OUT_UNSOLICITED_ABORT:
		ciss_cmd_status = CISS_CMD_STATUS_UNSOLICITED_ABORT;
		break;
	case PQI_DATA_IN_OUT_ABORTED:
		ciss_cmd_status = CISS_CMD_STATUS_ABORTED;
		break;
	case PQI_DATA_IN_OUT_TIMEOUT:
		ciss_cmd_status = CISS_CMD_STATUS_TIMEOUT;
		break;
	default:
		ciss_cmd_status = CISS_CMD_STATUS_TARGET_STATUS;
		break;
	}

	sense_data_length =
		get_unaligned_le16(&pqi_error_info->sense_data_length);
	if (sense_data_length == 0)
		sense_data_length =
			get_unaligned_le16(&pqi_error_info->response_data_length);
	if (sense_data_length)
		if (sense_data_length > sizeof(pqi_error_info->data))
			sense_data_length = sizeof(pqi_error_info->data);

	ciss_error_info->scsi_status = pqi_error_info->status;
	ciss_error_info->command_status = ciss_cmd_status;
	ciss_error_info->sense_data_length = sense_data_length;
}
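/*
 * Note on the translation above: if the firmware reports no SCSI sense
 * data, the response data length is used in its place, and either way the
 * length is capped at sizeof(pqi_error_info->data) before the caller
 * copies it out to the CCISS ioctl structure.
 */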
static int pqi_passthru_ioctl(struct pqi_ctrl_info *ctrl_info, void __user *arg)
{
	int rc;
	char *kernel_buffer = NULL;
	u16 iu_length;
	size_t sense_data_length;
	IOCTL_Command_struct iocommand;
	struct pqi_raid_path_request request;
	struct pqi_raid_error_info pqi_error_info;
	struct ciss_error_info ciss_error_info;

	if (pqi_ctrl_offline(ctrl_info))
		return -ENXIO;
	if (!arg)
		return -EINVAL;
	if (!capable(CAP_SYS_RAWIO))
		return -EPERM;
	if (copy_from_user(&iocommand, arg, sizeof(iocommand)))
		return -EFAULT;
	if (iocommand.buf_size < 1 &&
		iocommand.Request.Type.Direction != XFER_NONE)
		return -EINVAL;
	if (iocommand.Request.CDBLen > sizeof(request.cdb))
		return -EINVAL;
	if (iocommand.Request.Type.Type != TYPE_CMD)
		return -EINVAL;

	switch (iocommand.Request.Type.Direction) {
	case XFER_NONE:
	case XFER_WRITE:
	case XFER_READ:
		break;
	default:
		return -EINVAL;
	}

	if (iocommand.buf_size > 0) {
		kernel_buffer = kmalloc(iocommand.buf_size, GFP_KERNEL);
		if (!kernel_buffer)
			return -ENOMEM;
		if (iocommand.Request.Type.Direction & XFER_WRITE) {
			if (copy_from_user(kernel_buffer, iocommand.buf,
				iocommand.buf_size)) {
				rc = -EFAULT;
				goto out;
			}
		} else {
			memset(kernel_buffer, 0, iocommand.buf_size);
		}
	}

	memset(&request, 0, sizeof(request));

	request.header.iu_type = PQI_REQUEST_IU_RAID_PATH_IO;
	iu_length = offsetof(struct pqi_raid_path_request, sg_descriptors) -
		PQI_REQUEST_HEADER_LENGTH;
	memcpy(request.lun_number, iocommand.LUN_info.LunAddrBytes,
		sizeof(request.lun_number));
	memcpy(request.cdb, iocommand.Request.CDB, iocommand.Request.CDBLen);
	request.additional_cdb_bytes_usage = SOP_ADDITIONAL_CDB_BYTES_0;

	switch (iocommand.Request.Type.Direction) {
	case XFER_NONE:
		request.data_direction = SOP_NO_DIRECTION_FLAG;
		break;
	case XFER_WRITE:
		request.data_direction = SOP_WRITE_FLAG;
		break;
	case XFER_READ:
		request.data_direction = SOP_READ_FLAG;
		break;
	}

	request.task_attribute = SOP_TASK_ATTRIBUTE_SIMPLE;

	if (iocommand.buf_size > 0) {
		put_unaligned_le32(iocommand.buf_size, &request.buffer_length);

		rc = pqi_map_single(ctrl_info->pci_dev,
			&request.sg_descriptors[0], kernel_buffer,
			iocommand.buf_size, PCI_DMA_BIDIRECTIONAL);
		if (rc)
			goto out;

		iu_length += sizeof(request.sg_descriptors[0]);
	}

	put_unaligned_le16(iu_length, &request.header.iu_length);

	rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header,
		PQI_SYNC_FLAGS_INTERRUPTABLE, &pqi_error_info, NO_TIMEOUT);

	if (iocommand.buf_size > 0)
		pqi_pci_unmap(ctrl_info->pci_dev, request.sg_descriptors, 1,
			PCI_DMA_BIDIRECTIONAL);

	memset(&iocommand.error_info, 0, sizeof(iocommand.error_info));

	if (rc == 0) {
		pqi_error_info_to_ciss(&pqi_error_info, &ciss_error_info);
		iocommand.error_info.ScsiStatus = ciss_error_info.scsi_status;
		iocommand.error_info.CommandStatus =
			ciss_error_info.command_status;
		sense_data_length = ciss_error_info.sense_data_length;
		if (sense_data_length) {
			if (sense_data_length >
				sizeof(iocommand.error_info.SenseInfo))
				sense_data_length =
					sizeof(iocommand.error_info.SenseInfo);
			memcpy(iocommand.error_info.SenseInfo,
				pqi_error_info.data, sense_data_length);
			iocommand.error_info.SenseLen = sense_data_length;
		}
	}

	if (copy_to_user(arg, &iocommand, sizeof(iocommand))) {
		rc = -EFAULT;
		goto out;
	}

	if (rc == 0 && iocommand.buf_size > 0 &&
		(iocommand.Request.Type.Direction & XFER_READ)) {
		if (copy_to_user(iocommand.buf, kernel_buffer,
			iocommand.buf_size)) {
			rc = -EFAULT;
		}
	}

out:
	kfree(kernel_buffer);

	return rc;
}
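/*
 * Rough flow of the CCISS passthrough above: validate the user request,
 * optionally allocate and fill a bounce buffer, build a single RAID-path
 * IU with at most one SG descriptor mapped over that buffer, submit it
 * synchronously, translate any PQI error info into CCISS form, and copy
 * the command structure (and, for reads, the data buffer) back to user
 * space.
 */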
static int pqi_ioctl(struct scsi_device *sdev, int cmd, void __user *arg)
{
	int rc;
	struct pqi_ctrl_info *ctrl_info;

	ctrl_info = shost_to_hba(sdev->host);

	switch (cmd) {
	case CCISS_DEREGDISK:
	case CCISS_REGNEWDISK:
	case CCISS_REGNEWD:
		rc = pqi_scan_scsi_devices(ctrl_info);
		break;
	case CCISS_GETPCIINFO:
		rc = pqi_getpciinfo_ioctl(ctrl_info, arg);
		break;
	case CCISS_GETDRIVVER:
		rc = pqi_getdrivver_ioctl(arg);
		break;
	case CCISS_PASSTHRU:
		rc = pqi_passthru_ioctl(ctrl_info, arg);
		break;
	default:
		rc = -EINVAL;
		break;
	}

	return rc;
}
static ssize_t pqi_version_show(struct device *dev,
	struct device_attribute *attr, char *buffer)
{
	ssize_t count = 0;
	struct Scsi_Host *shost;
	struct pqi_ctrl_info *ctrl_info;

	shost = class_to_shost(dev);
	ctrl_info = shost_to_hba(shost);

	count += snprintf(buffer + count, PAGE_SIZE - count,
		"  driver: %s\n", DRIVER_VERSION BUILD_TIMESTAMP);

	count += snprintf(buffer + count, PAGE_SIZE - count,
		"firmware: %s\n", ctrl_info->firmware_version);

	return count;
}
static ssize_t pqi_host_rescan_store(struct device *dev,
	struct device_attribute *attr, const char *buffer, size_t count)
{
	struct Scsi_Host *shost = class_to_shost(dev);

	pqi_scan_start(shost);

	return count;
}

static DEVICE_ATTR(version, S_IRUGO, pqi_version_show, NULL);
static DEVICE_ATTR(rescan, S_IWUSR, NULL, pqi_host_rescan_store);

static struct device_attribute *pqi_shost_attrs[] = {
	&dev_attr_version,
	&dev_attr_rescan,
	NULL
};
static ssize_t pqi_sas_address_show(struct device *dev,
	struct device_attribute *attr, char *buffer)
{
	struct pqi_ctrl_info *ctrl_info;
	struct scsi_device *sdev;
	struct pqi_scsi_dev *device;
	unsigned long flags;
	u64 sas_address;

	sdev = to_scsi_device(dev);
	ctrl_info = shost_to_hba(sdev->host);

	spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);

	device = sdev->hostdata;
	if (pqi_is_logical_device(device)) {
		spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock,
			flags);
		return -ENODEV;
	}
	sas_address = device->sas_address;

	spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);

	return snprintf(buffer, PAGE_SIZE, "0x%016llx\n", sas_address);
}
static ssize_t pqi_ssd_smart_path_enabled_show(struct device *dev,
	struct device_attribute *attr, char *buffer)
{
	struct pqi_ctrl_info *ctrl_info;
	struct scsi_device *sdev;
	struct pqi_scsi_dev *device;
	unsigned long flags;

	sdev = to_scsi_device(dev);
	ctrl_info = shost_to_hba(sdev->host);

	spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);

	device = sdev->hostdata;
	buffer[0] = device->offload_enabled ? '1' : '0';
	buffer[1] = '\n';
	buffer[2] = '\0';

	spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);

	return 2;
}

static DEVICE_ATTR(sas_address, S_IRUGO, pqi_sas_address_show, NULL);
static DEVICE_ATTR(ssd_smart_path_enabled, S_IRUGO,
	pqi_ssd_smart_path_enabled_show, NULL);

static struct device_attribute *pqi_sdev_attrs[] = {
	&dev_attr_sas_address,
	&dev_attr_ssd_smart_path_enabled,
	NULL
};
static struct scsi_host_template pqi_driver_template = {
	.module = THIS_MODULE,
	.name = DRIVER_NAME_SHORT,
	.proc_name = DRIVER_NAME_SHORT,
	.queuecommand = pqi_scsi_queue_command,
	.scan_start = pqi_scan_start,
	.scan_finished = pqi_scan_finished,
	.this_id = -1,
	.use_clustering = ENABLE_CLUSTERING,
	.eh_device_reset_handler = pqi_eh_device_reset_handler,
	.ioctl = pqi_ioctl,
	.slave_alloc = pqi_slave_alloc,
	.slave_configure = pqi_slave_configure,
	.sdev_attrs = pqi_sdev_attrs,
	.shost_attrs = pqi_shost_attrs,
};
static int pqi_register_scsi(struct pqi_ctrl_info *ctrl_info)
{
	int rc;
	struct Scsi_Host *shost;

	shost = scsi_host_alloc(&pqi_driver_template, sizeof(ctrl_info));
	if (!shost) {
		dev_err(&ctrl_info->pci_dev->dev,
			"scsi_host_alloc failed for controller %u\n",
			ctrl_info->ctrl_id);
		return -ENOMEM;
	}

	shost->n_io_port = 0;
	shost->this_id = -1;
	shost->max_channel = PQI_MAX_BUS;
	shost->max_cmd_len = MAX_COMMAND_SIZE;
	shost->max_lun = ~0;
	shost->max_id = ~0;
	shost->max_sectors = ctrl_info->max_sectors;
	shost->can_queue = ctrl_info->scsi_ml_can_queue;
	shost->cmd_per_lun = shost->can_queue;
	shost->sg_tablesize = ctrl_info->sg_tablesize;
	shost->transportt = pqi_sas_transport_template;
	shost->irq = ctrl_info->msix_vectors[0];
	shost->unique_id = shost->irq;
	shost->nr_hw_queues = ctrl_info->num_queue_groups;
	shost->hostdata[0] = (unsigned long)ctrl_info;

	rc = scsi_add_host(shost, &ctrl_info->pci_dev->dev);
	if (rc) {
		dev_err(&ctrl_info->pci_dev->dev,
			"scsi_add_host failed for controller %u\n",
			ctrl_info->ctrl_id);
		goto free_host;
	}

	rc = pqi_add_sas_host(shost, ctrl_info);
	if (rc) {
		dev_err(&ctrl_info->pci_dev->dev,
			"add SAS host failed for controller %u\n",
			ctrl_info->ctrl_id);
		goto remove_host;
	}

	ctrl_info->scsi_host = shost;

	return 0;

remove_host:
	scsi_remove_host(shost);
free_host:
	scsi_host_put(shost);

	return rc;
}
static void pqi_unregister_scsi(struct pqi_ctrl_info *ctrl_info)
{
	struct Scsi_Host *shost;

	pqi_delete_sas_host(ctrl_info);

	shost = ctrl_info->scsi_host;
	if (!shost)
		return;

	scsi_remove_host(shost);
	scsi_host_put(shost);
}
#define PQI_RESET_ACTION_RESET		0x1

#define PQI_RESET_TYPE_NO_RESET		0x0
#define PQI_RESET_TYPE_SOFT_RESET	0x1
#define PQI_RESET_TYPE_FIRM_RESET	0x2
#define PQI_RESET_TYPE_HARD_RESET	0x3

static int pqi_reset(struct pqi_ctrl_info *ctrl_info)
{
	int rc;
	u32 reset_params;

	reset_params = (PQI_RESET_ACTION_RESET << 5) |
		PQI_RESET_TYPE_HARD_RESET;

	writel(reset_params,
		&ctrl_info->pqi_registers->device_reset);

	rc = pqi_wait_for_pqi_mode_ready(ctrl_info);
	if (rc)
		dev_err(&ctrl_info->pci_dev->dev,
			"PQI reset failed\n");

	return rc;
}
static int pqi_get_ctrl_firmware_version(struct pqi_ctrl_info *ctrl_info)
{
	int rc;
	struct bmic_identify_controller *identify;

	identify = kmalloc(sizeof(*identify), GFP_KERNEL);
	if (!identify)
		return -ENOMEM;

	rc = pqi_identify_controller(ctrl_info, identify);
	if (rc)
		goto out;

	memcpy(ctrl_info->firmware_version, identify->firmware_version,
		sizeof(identify->firmware_version));
	ctrl_info->firmware_version[sizeof(identify->firmware_version)] = '\0';
	snprintf(ctrl_info->firmware_version +
		strlen(ctrl_info->firmware_version),
		sizeof(ctrl_info->firmware_version),
		"-%u", get_unaligned_le16(&identify->firmware_build_number));

out:
	kfree(identify);

	return rc;
}
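/*
 * The resulting string is "<firmware version>-<build number>"; for
 * example (hypothetical values), a reported version of "1.29" with build
 * number 112 ends up as "1.29-112".
 */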
static int pqi_kdump_init(struct pqi_ctrl_info *ctrl_info)
{
	if (!sis_is_firmware_running(ctrl_info))
		return -ENXIO;

	if (pqi_get_ctrl_mode(ctrl_info) == PQI_MODE) {
		sis_disable_msix(ctrl_info);
		if (pqi_reset(ctrl_info) == 0)
			sis_reenable_sis_mode(ctrl_info);
	}

	return 0;
}
static int pqi_ctrl_init(struct pqi_ctrl_info *ctrl_info)
{
	int rc;

	if (reset_devices) {
		rc = pqi_kdump_init(ctrl_info);
		if (rc)
			return rc;
	}

	/*
	 * When the controller comes out of reset, it is always running
	 * in legacy SIS mode.  This is so that it can be compatible
	 * with legacy drivers shipped with OSes.  So we have to talk
	 * to it using SIS commands at first.  Once we are satisfied
	 * that the controller supports PQI, we transition it into PQI
	 * mode.
	 */

	/*
	 * Wait until the controller is ready to start accepting SIS
	 * commands.
	 */
	rc = sis_wait_for_ctrl_ready(ctrl_info);
	if (rc) {
		dev_err(&ctrl_info->pci_dev->dev,
			"error initializing SIS interface\n");
		return rc;
	}

	/*
	 * Get the controller properties.  This allows us to determine
	 * whether or not it supports PQI mode.
	 */
	rc = sis_get_ctrl_properties(ctrl_info);
	if (rc) {
		dev_err(&ctrl_info->pci_dev->dev,
			"error obtaining controller properties\n");
		return rc;
	}

	rc = sis_get_pqi_capabilities(ctrl_info);
	if (rc) {
		dev_err(&ctrl_info->pci_dev->dev,
			"error obtaining controller capabilities\n");
		return rc;
	}

	if (ctrl_info->max_outstanding_requests > PQI_MAX_OUTSTANDING_REQUESTS)
		ctrl_info->max_outstanding_requests =
			PQI_MAX_OUTSTANDING_REQUESTS;

	pqi_calculate_io_resources(ctrl_info);

	rc = pqi_alloc_error_buffer(ctrl_info);
	if (rc) {
		dev_err(&ctrl_info->pci_dev->dev,
			"failed to allocate PQI error buffer\n");
		return rc;
	}

	/*
	 * If the function we are about to call succeeds, the
	 * controller will transition from legacy SIS mode
	 * into PQI mode.
	 */
	rc = sis_init_base_struct_addr(ctrl_info);
	if (rc) {
		dev_err(&ctrl_info->pci_dev->dev,
			"error initializing PQI mode\n");
		return rc;
	}

	/* Wait for the controller to complete the SIS -> PQI transition. */
	rc = pqi_wait_for_pqi_mode_ready(ctrl_info);
	if (rc) {
		dev_err(&ctrl_info->pci_dev->dev,
			"transition to PQI mode failed\n");
		return rc;
	}

	/* From here on, we are running in PQI mode. */
	ctrl_info->pqi_mode_enabled = true;
	pqi_save_ctrl_mode(ctrl_info, PQI_MODE);

	rc = pqi_alloc_admin_queues(ctrl_info);
	if (rc) {
		dev_err(&ctrl_info->pci_dev->dev,
			"error allocating admin queues\n");
		return rc;
	}

	rc = pqi_create_admin_queues(ctrl_info);
	if (rc) {
		dev_err(&ctrl_info->pci_dev->dev,
			"error creating admin queues\n");
		return rc;
	}

	rc = pqi_report_device_capability(ctrl_info);
	if (rc) {
		dev_err(&ctrl_info->pci_dev->dev,
			"obtaining device capability failed\n");
		return rc;
	}

	rc = pqi_validate_device_capability(ctrl_info);
	if (rc)
		return rc;

	pqi_calculate_queue_resources(ctrl_info);

	rc = pqi_enable_msix_interrupts(ctrl_info);
	if (rc)
		return rc;

	if (ctrl_info->num_msix_vectors_enabled < ctrl_info->num_queue_groups) {
		ctrl_info->max_msix_vectors =
			ctrl_info->num_msix_vectors_enabled;
		pqi_calculate_queue_resources(ctrl_info);
	}

	rc = pqi_alloc_io_resources(ctrl_info);
	if (rc)
		return rc;

	rc = pqi_alloc_operational_queues(ctrl_info);
	if (rc)
		return rc;

	pqi_init_operational_queues(ctrl_info);

	rc = pqi_request_irqs(ctrl_info);
	if (rc)
		return rc;

	pqi_irq_set_affinity_hint(ctrl_info);

	rc = pqi_create_queues(ctrl_info);
	if (rc)
		return rc;

	sis_enable_msix(ctrl_info);

	rc = pqi_configure_events(ctrl_info);
	if (rc) {
		dev_err(&ctrl_info->pci_dev->dev,
			"error configuring events\n");
		return rc;
	}

	pqi_start_heartbeat_timer(ctrl_info);

	ctrl_info->controller_online = true;

	/* Register with the SCSI subsystem. */
	rc = pqi_register_scsi(ctrl_info);
	if (rc)
		return rc;

	rc = pqi_get_ctrl_firmware_version(ctrl_info);
	if (rc) {
		dev_err(&ctrl_info->pci_dev->dev,
			"error obtaining firmware version\n");
		return rc;
	}

	rc = pqi_write_driver_version_to_host_wellness(ctrl_info);
	if (rc) {
		dev_err(&ctrl_info->pci_dev->dev,
			"error updating host wellness\n");
		return rc;
	}

	pqi_schedule_update_time_worker(ctrl_info);

	pqi_scan_scsi_devices(ctrl_info);

	return 0;
}
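/*
 * Summary of the bring-up sequence above: SIS handshake (wait for ready,
 * read controller properties and PQI capabilities), size the I/O
 * resources, switch the controller from SIS to PQI mode, create the admin
 * queues, negotiate device capability, size and create the operational
 * queues and MSI-X interrupts, enable events and the heartbeat timer, and
 * finally register with the SCSI midlayer and kick off the first device
 * scan.
 */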
static int pqi_pci_init(struct pqi_ctrl_info *ctrl_info)
{
	int rc;
	u64 mask;

	rc = pci_enable_device(ctrl_info->pci_dev);
	if (rc) {
		dev_err(&ctrl_info->pci_dev->dev,
			"failed to enable PCI device\n");
		return rc;
	}

	if (sizeof(dma_addr_t) > 4)
		mask = DMA_BIT_MASK(64);
	else
		mask = DMA_BIT_MASK(32);

	rc = dma_set_mask(&ctrl_info->pci_dev->dev, mask);
	if (rc) {
		dev_err(&ctrl_info->pci_dev->dev, "failed to set DMA mask\n");
		goto disable_device;
	}

	rc = pci_request_regions(ctrl_info->pci_dev, DRIVER_NAME_SHORT);
	if (rc) {
		dev_err(&ctrl_info->pci_dev->dev,
			"failed to obtain PCI resources\n");
		goto disable_device;
	}

	ctrl_info->iomem_base = ioremap_nocache(pci_resource_start(
		ctrl_info->pci_dev, 0),
		sizeof(struct pqi_ctrl_registers));
	if (!ctrl_info->iomem_base) {
		dev_err(&ctrl_info->pci_dev->dev,
			"failed to map memory for controller registers\n");
		rc = -ENOMEM;
		goto release_regions;
	}

	ctrl_info->registers = ctrl_info->iomem_base;
	ctrl_info->pqi_registers = &ctrl_info->registers->pqi_registers;

	/* Enable bus mastering. */
	pci_set_master(ctrl_info->pci_dev);

	pci_set_drvdata(ctrl_info->pci_dev, ctrl_info);

	return 0;

release_regions:
	pci_release_regions(ctrl_info->pci_dev);
disable_device:
	pci_disable_device(ctrl_info->pci_dev);

	return rc;
}
static void pqi_cleanup_pci_init(struct pqi_ctrl_info *ctrl_info)
{
	iounmap(ctrl_info->iomem_base);
	pci_release_regions(ctrl_info->pci_dev);
	pci_disable_device(ctrl_info->pci_dev);
	pci_set_drvdata(ctrl_info->pci_dev, NULL);
}
static struct pqi_ctrl_info *pqi_alloc_ctrl_info(int numa_node)
{
	struct pqi_ctrl_info *ctrl_info;

	ctrl_info = kzalloc_node(sizeof(struct pqi_ctrl_info),
		GFP_KERNEL, numa_node);
	if (!ctrl_info)
		return NULL;

	mutex_init(&ctrl_info->scan_mutex);

	INIT_LIST_HEAD(&ctrl_info->scsi_device_list);
	spin_lock_init(&ctrl_info->scsi_device_list_lock);

	INIT_WORK(&ctrl_info->event_work, pqi_event_worker);
	atomic_set(&ctrl_info->num_interrupts, 0);

	INIT_DELAYED_WORK(&ctrl_info->rescan_work, pqi_rescan_worker);
	INIT_DELAYED_WORK(&ctrl_info->update_time_work, pqi_update_time_worker);

	sema_init(&ctrl_info->sync_request_sem,
		PQI_RESERVED_IO_SLOTS_SYNCHRONOUS_REQUESTS);
	sema_init(&ctrl_info->lun_reset_sem, PQI_RESERVED_IO_SLOTS_LUN_RESET);

	ctrl_info->ctrl_id = atomic_inc_return(&pqi_controller_count) - 1;
	ctrl_info->max_msix_vectors = PQI_MAX_MSIX_VECTORS;

	return ctrl_info;
}
static inline void pqi_free_ctrl_info(struct pqi_ctrl_info *ctrl_info)
{
	kfree(ctrl_info);
}

static void pqi_free_interrupts(struct pqi_ctrl_info *ctrl_info)
{
	pqi_irq_unset_affinity_hint(ctrl_info);
	pqi_free_irqs(ctrl_info);
	if (ctrl_info->num_msix_vectors_enabled)
		pci_disable_msix(ctrl_info->pci_dev);
}
*ctrl_info
)
5587 pqi_stop_heartbeat_timer(ctrl_info
);
5588 pqi_free_interrupts(ctrl_info
);
5589 if (ctrl_info
->queue_memory_base
)
5590 dma_free_coherent(&ctrl_info
->pci_dev
->dev
,
5591 ctrl_info
->queue_memory_length
,
5592 ctrl_info
->queue_memory_base
,
5593 ctrl_info
->queue_memory_base_dma_handle
);
5594 if (ctrl_info
->admin_queue_memory_base
)
5595 dma_free_coherent(&ctrl_info
->pci_dev
->dev
,
5596 ctrl_info
->admin_queue_memory_length
,
5597 ctrl_info
->admin_queue_memory_base
,
5598 ctrl_info
->admin_queue_memory_base_dma_handle
);
5599 pqi_free_all_io_requests(ctrl_info
);
5600 if (ctrl_info
->error_buffer
)
5601 dma_free_coherent(&ctrl_info
->pci_dev
->dev
,
5602 ctrl_info
->error_buffer_length
,
5603 ctrl_info
->error_buffer
,
5604 ctrl_info
->error_buffer_dma_handle
);
5605 if (ctrl_info
->iomem_base
)
5606 pqi_cleanup_pci_init(ctrl_info
);
5607 pqi_free_ctrl_info(ctrl_info
);
static void pqi_remove_ctrl(struct pqi_ctrl_info *ctrl_info)
{
	int rc;

	if (ctrl_info->controller_online) {
		cancel_delayed_work_sync(&ctrl_info->rescan_work);
		cancel_delayed_work_sync(&ctrl_info->update_time_work);
		pqi_remove_all_scsi_devices(ctrl_info);
		pqi_unregister_scsi(ctrl_info);
		ctrl_info->controller_online = false;
	}
	if (ctrl_info->pqi_mode_enabled) {
		sis_disable_msix(ctrl_info);
		rc = pqi_reset(ctrl_info);
		if (rc == 0)
			sis_reenable_sis_mode(ctrl_info);
	}
	pqi_free_ctrl_resources(ctrl_info);
}
static void pqi_print_ctrl_info(struct pci_dev *pdev,
	const struct pci_device_id *id)
{
	char *ctrl_description;

	if (id->driver_data) {
		ctrl_description = (char *)id->driver_data;
	} else {
		switch (id->subvendor) {
		case PCI_VENDOR_ID_HP:
			ctrl_description = hpe_branded_controller;
			break;
		case PCI_VENDOR_ID_ADAPTEC2:
		default:
			ctrl_description = microsemi_branded_controller;
			break;
		}
	}

	dev_info(&pdev->dev, "%s found\n", ctrl_description);
}
static int pqi_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	int rc;
	int node;
	struct pqi_ctrl_info *ctrl_info;

	pqi_print_ctrl_info(pdev, id);

	if (pqi_disable_device_id_wildcards &&
		id->subvendor == PCI_ANY_ID &&
		id->subdevice == PCI_ANY_ID) {
		dev_warn(&pdev->dev,
			"controller not probed because device ID wildcards are disabled\n");
		return -ENODEV;
	}

	if (id->subvendor == PCI_ANY_ID || id->subdevice == PCI_ANY_ID)
		dev_warn(&pdev->dev,
			"controller device ID matched using wildcards\n");

	node = dev_to_node(&pdev->dev);
	if (node == NUMA_NO_NODE)
		set_dev_node(&pdev->dev, 0);

	ctrl_info = pqi_alloc_ctrl_info(node);
	if (!ctrl_info) {
		dev_err(&pdev->dev,
			"failed to allocate controller info block\n");
		return -ENOMEM;
	}

	ctrl_info->pci_dev = pdev;

	rc = pqi_pci_init(ctrl_info);
	if (rc)
		goto error;

	rc = pqi_ctrl_init(ctrl_info);
	if (rc)
		goto error;

	return 0;

error:
	pqi_remove_ctrl(ctrl_info);

	return rc;
}
static void pqi_pci_remove(struct pci_dev *pdev)
{
	struct pqi_ctrl_info *ctrl_info;

	ctrl_info = pci_get_drvdata(pdev);
	if (!ctrl_info)
		return;

	pqi_remove_ctrl(ctrl_info);
}
static void pqi_shutdown(struct pci_dev *pdev)
{
	int rc;
	struct pqi_ctrl_info *ctrl_info;

	ctrl_info = pci_get_drvdata(pdev);
	if (!ctrl_info)
		goto error;

	/*
	 * Write all data in the controller's battery-backed cache to
	 * storage.
	 */
	rc = pqi_flush_cache(ctrl_info);
	if (rc == 0)
		return;

error:
	dev_warn(&pdev->dev,
		"unable to flush controller cache\n");
}
/* Define the PCI IDs for the controllers that we support. */
static const struct pci_device_id pqi_pci_id_table[] = {
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, PCI_VENDOR_ID_ADAPTEC2, 0x0110) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, PCI_VENDOR_ID_HP, 0x0600) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, PCI_VENDOR_ID_HP, 0x0601) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, PCI_VENDOR_ID_HP, 0x0602) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, PCI_VENDOR_ID_HP, 0x0603) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, PCI_VENDOR_ID_HP, 0x0650) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, PCI_VENDOR_ID_HP, 0x0651) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, PCI_VENDOR_ID_HP, 0x0652) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, PCI_VENDOR_ID_HP, 0x0653) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, PCI_VENDOR_ID_HP, 0x0654) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, PCI_VENDOR_ID_HP, 0x0655) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, PCI_VENDOR_ID_HP, 0x0700) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, PCI_VENDOR_ID_HP, 0x0701) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, PCI_VENDOR_ID_ADAPTEC2, 0x0800) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, PCI_VENDOR_ID_ADAPTEC2, 0x0801) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, PCI_VENDOR_ID_ADAPTEC2, 0x0802) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, PCI_VENDOR_ID_ADAPTEC2, 0x0803) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, PCI_VENDOR_ID_ADAPTEC2, 0x0804) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, PCI_VENDOR_ID_ADAPTEC2, 0x0805) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, PCI_VENDOR_ID_ADAPTEC2, 0x0900) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, PCI_VENDOR_ID_ADAPTEC2, 0x0901) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, PCI_VENDOR_ID_ADAPTEC2, 0x0902) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, PCI_VENDOR_ID_ADAPTEC2, 0x0903) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, PCI_VENDOR_ID_ADAPTEC2, 0x0904) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, PCI_VENDOR_ID_ADAPTEC2, 0x0905) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, PCI_VENDOR_ID_ADAPTEC2, 0x0906) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, PCI_VENDOR_ID_HP, 0x1001) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, PCI_VENDOR_ID_HP, 0x1100) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, PCI_VENDOR_ID_HP, 0x1101) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, PCI_VENDOR_ID_HP, 0x1102) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, PCI_VENDOR_ID_HP, 0x1150) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, PCI_ANY_ID, PCI_ANY_ID) },
	{ 0 }
};

MODULE_DEVICE_TABLE(pci, pqi_pci_id_table);
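/*
 * Note: the PCI_ANY_ID/PCI_ANY_ID entry is deliberately last; the PCI
 * core matches table entries in order, so explicitly listed subsystem IDs
 * are preferred and the wildcard (which can be turned off via the
 * disable_device_id_wildcards module parameter) only catches boards not
 * listed above.
 */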
static struct pci_driver pqi_pci_driver = {
	.name = DRIVER_NAME_SHORT,
	.id_table = pqi_pci_id_table,
	.probe = pqi_pci_probe,
	.remove = pqi_pci_remove,
	.shutdown = pqi_shutdown,
};

static int __init pqi_init(void)
{
	int rc;

	pr_info(DRIVER_NAME "\n");

	pqi_sas_transport_template =
		sas_attach_transport(&pqi_sas_transport_functions);
	if (!pqi_sas_transport_template)
		return -ENODEV;

	rc = pci_register_driver(&pqi_pci_driver);
	if (rc)
		sas_release_transport(pqi_sas_transport_template);

	return rc;
}

static void __exit pqi_cleanup(void)
{
	pci_unregister_driver(&pqi_pci_driver);
	sas_release_transport(pqi_sas_transport_template);
}

module_init(pqi_init);
module_exit(pqi_cleanup);
static void __attribute__((unused)) verify_structures(void)
{
	BUILD_BUG_ON(offsetof(struct pqi_ctrl_registers, sis_host_to_ctrl_doorbell) != 0x20);
	BUILD_BUG_ON(offsetof(struct pqi_ctrl_registers, sis_interrupt_mask) != 0x34);
	BUILD_BUG_ON(offsetof(struct pqi_ctrl_registers, sis_ctrl_to_host_doorbell) != 0x9c);
	BUILD_BUG_ON(offsetof(struct pqi_ctrl_registers, sis_ctrl_to_host_doorbell_clear) != 0xa0);
	BUILD_BUG_ON(offsetof(struct pqi_ctrl_registers, sis_driver_scratch) != 0xb0);
	BUILD_BUG_ON(offsetof(struct pqi_ctrl_registers, sis_firmware_status) != 0xbc);
	BUILD_BUG_ON(offsetof(struct pqi_ctrl_registers, sis_mailbox) != 0x1000);
	BUILD_BUG_ON(offsetof(struct pqi_ctrl_registers, pqi_registers) != 0x4000);

	BUILD_BUG_ON(offsetof(struct pqi_iu_header, iu_type) != 0x0);
	BUILD_BUG_ON(offsetof(struct pqi_iu_header, iu_length) != 0x2);
	BUILD_BUG_ON(offsetof(struct pqi_iu_header, response_queue_id) != 0x4);
	BUILD_BUG_ON(offsetof(struct pqi_iu_header, work_area) != 0x6);
	BUILD_BUG_ON(sizeof(struct pqi_iu_header) != 0x8);

	BUILD_BUG_ON(offsetof(struct pqi_aio_error_info, service_response) != 0x1);
	BUILD_BUG_ON(offsetof(struct pqi_aio_error_info, data_present) != 0x2);
	BUILD_BUG_ON(offsetof(struct pqi_aio_error_info, residual_count) != 0x4);
	BUILD_BUG_ON(offsetof(struct pqi_aio_error_info, data_length) != 0x8);
	BUILD_BUG_ON(sizeof(struct pqi_aio_error_info) != 0x10c);

	BUILD_BUG_ON(offsetof(struct pqi_raid_error_info, data_in_result) != 0x0);
	BUILD_BUG_ON(offsetof(struct pqi_raid_error_info, data_out_result) != 0x1);
	BUILD_BUG_ON(offsetof(struct pqi_raid_error_info, status_qualifier) != 0x6);
	BUILD_BUG_ON(offsetof(struct pqi_raid_error_info, sense_data_length) != 0x8);
	BUILD_BUG_ON(offsetof(struct pqi_raid_error_info, response_data_length) != 0xa);
	BUILD_BUG_ON(offsetof(struct pqi_raid_error_info, data_in_transferred) != 0xc);
	BUILD_BUG_ON(offsetof(struct pqi_raid_error_info, data_out_transferred) != 0x10);
	BUILD_BUG_ON(sizeof(struct pqi_raid_error_info) != 0x114);

	BUILD_BUG_ON(offsetof(struct pqi_device_registers, function_and_status_code) != 0x8);
	BUILD_BUG_ON(offsetof(struct pqi_device_registers, max_admin_iq_elements) != 0x10);
	BUILD_BUG_ON(offsetof(struct pqi_device_registers, max_admin_oq_elements) != 0x11);
	BUILD_BUG_ON(offsetof(struct pqi_device_registers, admin_iq_element_length) != 0x12);
	BUILD_BUG_ON(offsetof(struct pqi_device_registers, admin_oq_element_length) != 0x13);
	BUILD_BUG_ON(offsetof(struct pqi_device_registers, max_reset_timeout) != 0x14);
	BUILD_BUG_ON(offsetof(struct pqi_device_registers, legacy_intx_status) != 0x18);
	BUILD_BUG_ON(offsetof(struct pqi_device_registers, legacy_intx_mask_set) != 0x1c);
	BUILD_BUG_ON(offsetof(struct pqi_device_registers, legacy_intx_mask_clear) != 0x20);
	BUILD_BUG_ON(offsetof(struct pqi_device_registers, device_status) != 0x40);
	BUILD_BUG_ON(offsetof(struct pqi_device_registers, admin_iq_pi_offset) != 0x48);
	BUILD_BUG_ON(offsetof(struct pqi_device_registers, admin_oq_ci_offset) != 0x50);
	BUILD_BUG_ON(offsetof(struct pqi_device_registers, admin_iq_element_array_addr) != 0x58);
	BUILD_BUG_ON(offsetof(struct pqi_device_registers, admin_oq_element_array_addr) != 0x60);
	BUILD_BUG_ON(offsetof(struct pqi_device_registers, admin_iq_ci_addr) != 0x68);
	BUILD_BUG_ON(offsetof(struct pqi_device_registers, admin_oq_pi_addr) != 0x70);
	BUILD_BUG_ON(offsetof(struct pqi_device_registers, admin_iq_num_elements) != 0x78);
	BUILD_BUG_ON(offsetof(struct pqi_device_registers, admin_oq_num_elements) != 0x79);
	BUILD_BUG_ON(offsetof(struct pqi_device_registers, admin_queue_int_msg_num) != 0x7a);
	BUILD_BUG_ON(offsetof(struct pqi_device_registers, device_error) != 0x80);
	BUILD_BUG_ON(offsetof(struct pqi_device_registers, error_details) != 0x88);
	BUILD_BUG_ON(offsetof(struct pqi_device_registers, device_reset) != 0x90);
	BUILD_BUG_ON(offsetof(struct pqi_device_registers, power_action) != 0x94);
	BUILD_BUG_ON(sizeof(struct pqi_device_registers) != 0x100);

	BUILD_BUG_ON(offsetof(struct pqi_general_admin_request, header.iu_type) != 0);
	BUILD_BUG_ON(offsetof(struct pqi_general_admin_request, header.iu_length) != 2);
	BUILD_BUG_ON(offsetof(struct pqi_general_admin_request, header.work_area) != 6);
	BUILD_BUG_ON(offsetof(struct pqi_general_admin_request, function_code) != 10);
	BUILD_BUG_ON(offsetof(struct pqi_general_admin_request, data.report_device_capability.buffer_length) != 44);
	BUILD_BUG_ON(offsetof(struct pqi_general_admin_request, data.report_device_capability.sg_descriptor) != 48);
	BUILD_BUG_ON(offsetof(struct pqi_general_admin_request, data.create_operational_iq.queue_id) != 12);
	BUILD_BUG_ON(offsetof(struct pqi_general_admin_request, data.create_operational_iq.element_array_addr) != 16);
	BUILD_BUG_ON(offsetof(struct pqi_general_admin_request, data.create_operational_iq.ci_addr) != 24);
	BUILD_BUG_ON(offsetof(struct pqi_general_admin_request, data.create_operational_iq.num_elements) != 32);
	BUILD_BUG_ON(offsetof(struct pqi_general_admin_request, data.create_operational_iq.element_length) != 34);
	BUILD_BUG_ON(offsetof(struct pqi_general_admin_request, data.create_operational_iq.queue_protocol) != 36);
	BUILD_BUG_ON(offsetof(struct pqi_general_admin_request, data.create_operational_oq.queue_id) != 12);
	BUILD_BUG_ON(offsetof(struct pqi_general_admin_request, data.create_operational_oq.element_array_addr) != 16);
	BUILD_BUG_ON(offsetof(struct pqi_general_admin_request, data.create_operational_oq.pi_addr) != 24);
	BUILD_BUG_ON(offsetof(struct pqi_general_admin_request, data.create_operational_oq.num_elements) != 32);
	BUILD_BUG_ON(offsetof(struct pqi_general_admin_request, data.create_operational_oq.element_length) != 34);
	BUILD_BUG_ON(offsetof(struct pqi_general_admin_request, data.create_operational_oq.queue_protocol) != 36);
	BUILD_BUG_ON(offsetof(struct pqi_general_admin_request, data.create_operational_oq.int_msg_num) != 40);
	BUILD_BUG_ON(offsetof(struct pqi_general_admin_request, data.create_operational_oq.coalescing_count) != 42);
	BUILD_BUG_ON(offsetof(struct pqi_general_admin_request, data.create_operational_oq.min_coalescing_time) != 44);
	BUILD_BUG_ON(offsetof(struct pqi_general_admin_request, data.create_operational_oq.max_coalescing_time) != 48);
	BUILD_BUG_ON(offsetof(struct pqi_general_admin_request, data.delete_operational_queue.queue_id) != 12);
	BUILD_BUG_ON(sizeof(struct pqi_general_admin_request) != 64);
	BUILD_BUG_ON(FIELD_SIZEOF(struct pqi_general_admin_request, data.create_operational_iq) != 64 - 11);
	BUILD_BUG_ON(FIELD_SIZEOF(struct pqi_general_admin_request, data.create_operational_oq) != 64 - 11);
	BUILD_BUG_ON(FIELD_SIZEOF(struct pqi_general_admin_request, data.delete_operational_queue) != 64 - 11);

	BUILD_BUG_ON(offsetof(struct pqi_general_admin_response, header.iu_type) != 0);
	BUILD_BUG_ON(offsetof(struct pqi_general_admin_response, header.iu_length) != 2);
	BUILD_BUG_ON(offsetof(struct pqi_general_admin_response, header.work_area) != 6);
	BUILD_BUG_ON(offsetof(struct pqi_general_admin_response, function_code) != 10);
	BUILD_BUG_ON(offsetof(struct pqi_general_admin_response, data.create_operational_iq.status_descriptor) != 12);
	BUILD_BUG_ON(offsetof(struct pqi_general_admin_response, data.create_operational_iq.iq_pi_offset) != 16);
	BUILD_BUG_ON(offsetof(struct pqi_general_admin_response, data.create_operational_oq.status_descriptor) != 12);
	BUILD_BUG_ON(offsetof(struct pqi_general_admin_response, data.create_operational_oq.oq_ci_offset) != 16);
	BUILD_BUG_ON(sizeof(struct pqi_general_admin_response) != 64);

	BUILD_BUG_ON(offsetof(struct pqi_raid_path_request, header.iu_type) != 0);
	BUILD_BUG_ON(offsetof(struct pqi_raid_path_request, header.iu_length) != 2);
	BUILD_BUG_ON(offsetof(struct pqi_raid_path_request, header.response_queue_id) != 4);
	BUILD_BUG_ON(offsetof(struct pqi_raid_path_request, header.work_area) != 6);
	BUILD_BUG_ON(offsetof(struct pqi_raid_path_request, buffer_length) != 12);
	BUILD_BUG_ON(offsetof(struct pqi_raid_path_request, protocol_specific) != 24);
	BUILD_BUG_ON(offsetof(struct pqi_raid_path_request, error_index) != 27);
	BUILD_BUG_ON(offsetof(struct pqi_raid_path_request, sg_descriptors) != 64);
	BUILD_BUG_ON(sizeof(struct pqi_raid_path_request) != PQI_OPERATIONAL_IQ_ELEMENT_LENGTH);

	BUILD_BUG_ON(offsetof(struct pqi_aio_path_request, header.iu_type) != 0);
	BUILD_BUG_ON(offsetof(struct pqi_aio_path_request, header.iu_length) != 2);
	BUILD_BUG_ON(offsetof(struct pqi_aio_path_request, header.response_queue_id) != 4);
	BUILD_BUG_ON(offsetof(struct pqi_aio_path_request, header.work_area) != 6);
	BUILD_BUG_ON(offsetof(struct pqi_aio_path_request, buffer_length) != 16);
	BUILD_BUG_ON(offsetof(struct pqi_aio_path_request, data_encryption_key_index) != 22);
	BUILD_BUG_ON(offsetof(struct pqi_aio_path_request, encrypt_tweak_lower) != 24);
	BUILD_BUG_ON(offsetof(struct pqi_aio_path_request, encrypt_tweak_upper) != 28);
	BUILD_BUG_ON(offsetof(struct pqi_aio_path_request, error_index) != 48);
	BUILD_BUG_ON(offsetof(struct pqi_aio_path_request, num_sg_descriptors) != 50);
	BUILD_BUG_ON(offsetof(struct pqi_aio_path_request, sg_descriptors) != 64);
	BUILD_BUG_ON(sizeof(struct pqi_aio_path_request) != PQI_OPERATIONAL_IQ_ELEMENT_LENGTH);

	BUILD_BUG_ON(offsetof(struct pqi_io_response, header.iu_type) != 0);
	BUILD_BUG_ON(offsetof(struct pqi_io_response, header.iu_length) != 2);
	BUILD_BUG_ON(offsetof(struct pqi_io_response, error_index) != 10);

	BUILD_BUG_ON(offsetof(struct pqi_general_management_request, header.iu_type) != 0);
	BUILD_BUG_ON(offsetof(struct pqi_general_management_request, header.iu_length) != 2);
	BUILD_BUG_ON(offsetof(struct pqi_general_management_request, header.response_queue_id) != 4);
	BUILD_BUG_ON(offsetof(struct pqi_general_management_request, data.report_event_configuration.buffer_length) != 12);
	BUILD_BUG_ON(offsetof(struct pqi_general_management_request, data.report_event_configuration.sg_descriptors) != 16);
	BUILD_BUG_ON(offsetof(struct pqi_general_management_request, data.set_event_configuration.global_event_oq_id) != 10);
	BUILD_BUG_ON(offsetof(struct pqi_general_management_request, data.set_event_configuration.buffer_length) != 12);
	BUILD_BUG_ON(offsetof(struct pqi_general_management_request, data.set_event_configuration.sg_descriptors) != 16);

	BUILD_BUG_ON(offsetof(struct pqi_iu_layer_descriptor, max_inbound_iu_length) != 6);
	BUILD_BUG_ON(offsetof(struct pqi_iu_layer_descriptor, max_outbound_iu_length) != 14);
	BUILD_BUG_ON(sizeof(struct pqi_iu_layer_descriptor) != 16);

	BUILD_BUG_ON(offsetof(struct pqi_device_capability, iq_arbitration_priority_support_bitmask) != 8);
	BUILD_BUG_ON(offsetof(struct pqi_device_capability, maximum_aw_a) != 9);
	BUILD_BUG_ON(offsetof(struct pqi_device_capability, maximum_aw_b) != 10);
	BUILD_BUG_ON(offsetof(struct pqi_device_capability, maximum_aw_c) != 11);
	BUILD_BUG_ON(offsetof(struct pqi_device_capability, max_inbound_queues) != 16);
	BUILD_BUG_ON(offsetof(struct pqi_device_capability, max_elements_per_iq) != 18);
	BUILD_BUG_ON(offsetof(struct pqi_device_capability, max_iq_element_length) != 24);
	BUILD_BUG_ON(offsetof(struct pqi_device_capability, min_iq_element_length) != 26);
	BUILD_BUG_ON(offsetof(struct pqi_device_capability, max_outbound_queues) != 30);
	BUILD_BUG_ON(offsetof(struct pqi_device_capability, max_elements_per_oq) != 32);
	BUILD_BUG_ON(offsetof(struct pqi_device_capability, intr_coalescing_time_granularity) != 34);
	BUILD_BUG_ON(offsetof(struct pqi_device_capability, max_oq_element_length) != 36);
	BUILD_BUG_ON(offsetof(struct pqi_device_capability, min_oq_element_length) != 38);
	BUILD_BUG_ON(offsetof(struct pqi_device_capability, iu_layer_descriptors) != 64);
	BUILD_BUG_ON(sizeof(struct pqi_device_capability) != 576);

	BUILD_BUG_ON(sizeof(struct pqi_event_descriptor) != 4);

	BUILD_BUG_ON(offsetof(struct pqi_event_config, num_event_descriptors) != 2);

	BUILD_BUG_ON(offsetof(struct pqi_event_response, header.iu_type) != 0);
	BUILD_BUG_ON(offsetof(struct pqi_event_response, header.iu_length) != 2);
	BUILD_BUG_ON(offsetof(struct pqi_event_response, additional_event_id) != 12);
	BUILD_BUG_ON(sizeof(struct pqi_event_response) != 32);

	BUILD_BUG_ON(offsetof(struct pqi_event_acknowledge_request, header.iu_type) != 0);
	BUILD_BUG_ON(offsetof(struct pqi_event_acknowledge_request, header.iu_length) != 2);
	BUILD_BUG_ON(offsetof(struct pqi_event_acknowledge_request, additional_event_id) != 12);
	BUILD_BUG_ON(sizeof(struct pqi_event_acknowledge_request) != 16);

	BUILD_BUG_ON(offsetof(struct pqi_task_management_request, header.iu_type) != 0);
	BUILD_BUG_ON(offsetof(struct pqi_task_management_request, header.iu_length) != 2);
	BUILD_BUG_ON(offsetof(struct pqi_task_management_request, protocol_specific) != 24);
	BUILD_BUG_ON(offsetof(struct pqi_task_management_request, outbound_queue_id_to_manage) != 26);
	BUILD_BUG_ON(offsetof(struct pqi_task_management_request, request_id_to_manage) != 28);
	BUILD_BUG_ON(offsetof(struct pqi_task_management_request, task_management_function) != 30);
	BUILD_BUG_ON(sizeof(struct pqi_task_management_request) != 32);

	BUILD_BUG_ON(offsetof(struct pqi_task_management_response, header.iu_type) != 0);
	BUILD_BUG_ON(offsetof(struct pqi_task_management_response, header.iu_length) != 2);
	BUILD_BUG_ON(offsetof(struct pqi_task_management_response, additional_response_info) != 12);
	BUILD_BUG_ON(offsetof(struct pqi_task_management_response, response_code) != 15);
	BUILD_BUG_ON(sizeof(struct pqi_task_management_response) != 16);

	BUILD_BUG_ON(offsetof(struct bmic_identify_controller, configured_logical_drive_count) != 0);
	BUILD_BUG_ON(offsetof(struct bmic_identify_controller, configuration_signature) != 1);
	BUILD_BUG_ON(offsetof(struct bmic_identify_controller, firmware_version) != 5);
	BUILD_BUG_ON(offsetof(struct bmic_identify_controller, extended_logical_unit_count) != 154);
	BUILD_BUG_ON(offsetof(struct bmic_identify_controller, firmware_build_number) != 190);
	BUILD_BUG_ON(offsetof(struct bmic_identify_controller, controller_mode) != 292);

	BUILD_BUG_ON(PQI_ADMIN_IQ_NUM_ELEMENTS > 255);
	BUILD_BUG_ON(PQI_ADMIN_OQ_NUM_ELEMENTS > 255);
	BUILD_BUG_ON(PQI_ADMIN_IQ_ELEMENT_LENGTH %
		PQI_QUEUE_ELEMENT_LENGTH_ALIGNMENT != 0);
	BUILD_BUG_ON(PQI_ADMIN_OQ_ELEMENT_LENGTH %
		PQI_QUEUE_ELEMENT_LENGTH_ALIGNMENT != 0);
	BUILD_BUG_ON(PQI_OPERATIONAL_IQ_ELEMENT_LENGTH > 1048560);
	BUILD_BUG_ON(PQI_OPERATIONAL_IQ_ELEMENT_LENGTH %
		PQI_QUEUE_ELEMENT_LENGTH_ALIGNMENT != 0);
	BUILD_BUG_ON(PQI_OPERATIONAL_OQ_ELEMENT_LENGTH > 1048560);
	BUILD_BUG_ON(PQI_OPERATIONAL_OQ_ELEMENT_LENGTH %
		PQI_QUEUE_ELEMENT_LENGTH_ALIGNMENT != 0);

	BUILD_BUG_ON(PQI_RESERVED_IO_SLOTS >= PQI_MAX_OUTSTANDING_REQUESTS);
}