/*
 *    driver for Microsemi PQI-based storage controllers
 *    Copyright (c) 2016 Microsemi Corporation
 *    Copyright (c) 2016 PMC-Sierra, Inc.
 *
 *    This program is free software; you can redistribute it and/or modify
 *    it under the terms of the GNU General Public License as published by
 *    the Free Software Foundation; version 2 of the License.
 *
 *    This program is distributed in the hope that it will be useful,
 *    but WITHOUT ANY WARRANTY; without even the implied warranty of
 *    MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 *    NON INFRINGEMENT.  See the GNU General Public License for more details.
 *
 *    Questions/Comments/Bugfixes to esc.storagedev@microsemi.com
 *
 */
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/pci.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/sched.h>
#include <linux/rtc.h>
#include <linux/bcd.h>
#include <linux/cciss_ioctl.h>
#include <linux/blk-mq-pci.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_eh.h>
#include <scsi/scsi_transport_sas.h>
#include <asm/unaligned.h>

#include "smartpqi_sis.h"
#if !defined(BUILD_TIMESTAMP)
#define BUILD_TIMESTAMP
#endif
#define DRIVER_VERSION		"0.9.13-370"
#define DRIVER_MAJOR		0
#define DRIVER_MINOR		9
#define DRIVER_RELEASE		13
#define DRIVER_REVISION		370

#define DRIVER_NAME		"Microsemi PQI Driver (v" DRIVER_VERSION ")"
#define DRIVER_NAME_SHORT	"smartpqi"
MODULE_AUTHOR("Microsemi");
MODULE_DESCRIPTION("Driver for Microsemi Smart Family Controller version "
	DRIVER_VERSION);
MODULE_SUPPORTED_DEVICE("Microsemi Smart Family Controllers");
MODULE_VERSION(DRIVER_VERSION);
MODULE_LICENSE("GPL");
#define PQI_ENABLE_MULTI_QUEUE_SUPPORT	0

static char *hpe_branded_controller = "HPE Smart Array Controller";
static char *microsemi_branded_controller = "Microsemi Smart Family Controller";
static void pqi_take_ctrl_offline(struct pqi_ctrl_info *ctrl_info);
static int pqi_scan_scsi_devices(struct pqi_ctrl_info *ctrl_info);
static void pqi_scan_start(struct Scsi_Host *shost);
static void pqi_start_io(struct pqi_ctrl_info *ctrl_info,
	struct pqi_queue_group *queue_group, enum pqi_io_path path,
	struct pqi_io_request *io_request);
static int pqi_submit_raid_request_synchronous(struct pqi_ctrl_info *ctrl_info,
	struct pqi_iu_header *request, unsigned int flags,
	struct pqi_raid_error_info *error_info, unsigned long timeout_msecs);
static int pqi_aio_submit_io(struct pqi_ctrl_info *ctrl_info,
	struct scsi_cmnd *scmd, u32 aio_handle, u8 *cdb,
	unsigned int cdb_length, struct pqi_queue_group *queue_group,
	struct pqi_encryption_info *encryption_info);
/* for flags argument to pqi_submit_raid_request_synchronous() */
#define PQI_SYNC_FLAGS_INTERRUPTABLE	0x1

static struct scsi_transport_template *pqi_sas_transport_template;

static atomic_t pqi_controller_count = ATOMIC_INIT(0);

static int pqi_disable_device_id_wildcards;
module_param_named(disable_device_id_wildcards,
	pqi_disable_device_id_wildcards, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(disable_device_id_wildcards,
	"Disable device ID wildcards.");
static char *raid_levels[] = {

static char *pqi_raid_level_to_string(u8 raid_level)
{
	if (raid_level < ARRAY_SIZE(raid_levels))
		return raid_levels[raid_level];
#define SA_RAID_1		2	/* also used for RAID 10 */
#define SA_RAID_5		3	/* also used for RAID 50 */
#define SA_RAID_6		5	/* also used for RAID 60 */
#define SA_RAID_ADM		6	/* also used for RAID 1+0 ADM */
#define SA_RAID_MAX		SA_RAID_ADM
#define SA_RAID_UNKNOWN		0xff

static inline void pqi_scsi_done(struct scsi_cmnd *scmd)
{
	scmd->scsi_done(scmd);
}
static inline bool pqi_scsi3addr_equal(u8 *scsi3addr1, u8 *scsi3addr2)
{
	return memcmp(scsi3addr1, scsi3addr2, 8) == 0;
}

static inline struct pqi_ctrl_info *shost_to_hba(struct Scsi_Host *shost)
{
	void *hostdata = shost_priv(shost);

	return *((struct pqi_ctrl_info **)hostdata);
}
static inline bool pqi_is_logical_device(struct pqi_scsi_dev *device)
{
	return !device->is_physical_device;
}

static inline bool pqi_ctrl_offline(struct pqi_ctrl_info *ctrl_info)
{
	return !ctrl_info->controller_online;
}

static inline void pqi_check_ctrl_health(struct pqi_ctrl_info *ctrl_info)
{
	if (ctrl_info->controller_online)
		if (!sis_is_firmware_running(ctrl_info))
			pqi_take_ctrl_offline(ctrl_info);
}

static inline bool pqi_is_hba_lunid(u8 *scsi3addr)
{
	return pqi_scsi3addr_equal(scsi3addr, RAID_CTLR_LUNID);
}
static inline enum pqi_ctrl_mode pqi_get_ctrl_mode(
	struct pqi_ctrl_info *ctrl_info)
{
	return sis_read_driver_scratch(ctrl_info);
}

static inline void pqi_save_ctrl_mode(struct pqi_ctrl_info *ctrl_info,
	enum pqi_ctrl_mode mode)
{
	sis_write_driver_scratch(ctrl_info, mode);
}

#define PQI_RESCAN_WORK_INTERVAL	(10 * HZ)

static inline void pqi_schedule_rescan_worker(struct pqi_ctrl_info *ctrl_info)
{
	schedule_delayed_work(&ctrl_info->rescan_work,
		PQI_RESCAN_WORK_INTERVAL);
}
static int pqi_map_single(struct pci_dev *pci_dev,
	struct pqi_sg_descriptor *sg_descriptor, void *buffer,
	size_t buffer_length, int data_direction)
{
	dma_addr_t bus_address;

	if (!buffer || buffer_length == 0 || data_direction == PCI_DMA_NONE)
		return 0;

	bus_address = pci_map_single(pci_dev, buffer, buffer_length,
		data_direction);
	if (pci_dma_mapping_error(pci_dev, bus_address))
		return -ENOMEM;

	put_unaligned_le64((u64)bus_address, &sg_descriptor->address);
	put_unaligned_le32(buffer_length, &sg_descriptor->length);
	put_unaligned_le32(CISS_SG_LAST, &sg_descriptor->flags);

	return 0;
}
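/*
 * Note (added for clarity): pqi_map_single() fills exactly one SG descriptor
 * and marks it CISS_SG_LAST, so the callers in this file follow the pattern
 * build request -> map buffer -> submit synchronously -> unmap, calling
 * pqi_pci_unmap() below with num_descriptors = 1.
 */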
static void pqi_pci_unmap(struct pci_dev *pci_dev,
	struct pqi_sg_descriptor *descriptors, int num_descriptors,
	int data_direction)
{
	int i;

	if (data_direction == PCI_DMA_NONE)
		return;

	for (i = 0; i < num_descriptors; i++)
		pci_unmap_single(pci_dev,
			(dma_addr_t)get_unaligned_le64(&descriptors[i].address),
			get_unaligned_le32(&descriptors[i].length),
			data_direction);
}
214 static int pqi_build_raid_path_request(struct pqi_ctrl_info
*ctrl_info
,
215 struct pqi_raid_path_request
*request
, u8 cmd
,
216 u8
*scsi3addr
, void *buffer
, size_t buffer_length
,
217 u16 vpd_page
, int *pci_direction
)
222 memset(request
, 0, sizeof(*request
));
224 request
->header
.iu_type
= PQI_REQUEST_IU_RAID_PATH_IO
;
225 put_unaligned_le16(offsetof(struct pqi_raid_path_request
,
226 sg_descriptors
[1]) - PQI_REQUEST_HEADER_LENGTH
,
227 &request
->header
.iu_length
);
228 put_unaligned_le32(buffer_length
, &request
->buffer_length
);
229 memcpy(request
->lun_number
, scsi3addr
, sizeof(request
->lun_number
));
230 request
->task_attribute
= SOP_TASK_ATTRIBUTE_SIMPLE
;
231 request
->additional_cdb_bytes_usage
= SOP_ADDITIONAL_CDB_BYTES_0
;
237 request
->data_direction
= SOP_READ_FLAG
;
239 if (vpd_page
& VPD_PAGE
) {
241 cdb
[2] = (u8
)vpd_page
;
243 cdb
[4] = (u8
)buffer_length
;
245 case CISS_REPORT_LOG
:
246 case CISS_REPORT_PHYS
:
247 request
->data_direction
= SOP_READ_FLAG
;
249 if (cmd
== CISS_REPORT_PHYS
)
250 cdb
[1] = CISS_REPORT_PHYS_EXTENDED
;
252 cdb
[1] = CISS_REPORT_LOG_EXTENDED
;
253 put_unaligned_be32(buffer_length
, &cdb
[6]);
255 case CISS_GET_RAID_MAP
:
256 request
->data_direction
= SOP_READ_FLAG
;
258 cdb
[1] = CISS_GET_RAID_MAP
;
259 put_unaligned_be32(buffer_length
, &cdb
[6]);
262 request
->data_direction
= SOP_WRITE_FLAG
;
264 cdb
[6] = BMIC_CACHE_FLUSH
;
265 put_unaligned_be16(buffer_length
, &cdb
[7]);
267 case BMIC_IDENTIFY_CONTROLLER
:
268 case BMIC_IDENTIFY_PHYSICAL_DEVICE
:
269 request
->data_direction
= SOP_READ_FLAG
;
272 put_unaligned_be16(buffer_length
, &cdb
[7]);
274 case BMIC_WRITE_HOST_WELLNESS
:
275 request
->data_direction
= SOP_WRITE_FLAG
;
278 put_unaligned_be16(buffer_length
, &cdb
[7]);
281 dev_err(&ctrl_info
->pci_dev
->dev
, "unknown command 0x%c\n",
287 switch (request
->data_direction
) {
289 pci_dir
= PCI_DMA_FROMDEVICE
;
292 pci_dir
= PCI_DMA_TODEVICE
;
294 case SOP_NO_DIRECTION_FLAG
:
295 pci_dir
= PCI_DMA_NONE
;
298 pci_dir
= PCI_DMA_BIDIRECTIONAL
;
302 *pci_direction
= pci_dir
;
304 return pqi_map_single(ctrl_info
->pci_dev
, &request
->sg_descriptors
[0],
305 buffer
, buffer_length
, pci_dir
);
static struct pqi_io_request *pqi_alloc_io_request(
	struct pqi_ctrl_info *ctrl_info)
{
	struct pqi_io_request *io_request;
	u16 i = ctrl_info->next_io_request_slot;	/* benignly racy */

	while (1) {
		io_request = &ctrl_info->io_request_pool[i];
		if (atomic_inc_return(&io_request->refcount) == 1)
			break;
		atomic_dec(&io_request->refcount);
		i = (i + 1) % ctrl_info->max_io_slots;
	}

	ctrl_info->next_io_request_slot = (i + 1) % ctrl_info->max_io_slots;

	io_request->scmd = NULL;
	io_request->status = 0;
	io_request->error_info = NULL;

	return io_request;
}
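/*
 * Note (added for clarity): the slot index above is read without locking;
 * two CPUs may start from the same slot, but atomic_inc_return() on the
 * refcount guarantees that only one of them wins a given pqi_io_request,
 * and the loser simply advances to the next slot.  A stale
 * next_io_request_slot is therefore harmless, hence "benignly racy".
 */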
static void pqi_free_io_request(struct pqi_io_request *io_request)
{
	atomic_dec(&io_request->refcount);
}
337 static int pqi_identify_controller(struct pqi_ctrl_info
*ctrl_info
,
338 struct bmic_identify_controller
*buffer
)
342 struct pqi_raid_path_request request
;
344 rc
= pqi_build_raid_path_request(ctrl_info
, &request
,
345 BMIC_IDENTIFY_CONTROLLER
, RAID_CTLR_LUNID
, buffer
,
346 sizeof(*buffer
), 0, &pci_direction
);
350 rc
= pqi_submit_raid_request_synchronous(ctrl_info
, &request
.header
, 0,
353 pqi_pci_unmap(ctrl_info
->pci_dev
, request
.sg_descriptors
, 1,
359 static int pqi_scsi_inquiry(struct pqi_ctrl_info
*ctrl_info
,
360 u8
*scsi3addr
, u16 vpd_page
, void *buffer
, size_t buffer_length
)
364 struct pqi_raid_path_request request
;
366 rc
= pqi_build_raid_path_request(ctrl_info
, &request
,
367 INQUIRY
, scsi3addr
, buffer
, buffer_length
, vpd_page
,
372 rc
= pqi_submit_raid_request_synchronous(ctrl_info
, &request
.header
, 0,
375 pqi_pci_unmap(ctrl_info
->pci_dev
, request
.sg_descriptors
, 1,
381 static int pqi_identify_physical_device(struct pqi_ctrl_info
*ctrl_info
,
382 struct pqi_scsi_dev
*device
,
383 struct bmic_identify_physical_device
*buffer
,
384 size_t buffer_length
)
388 u16 bmic_device_index
;
389 struct pqi_raid_path_request request
;
391 rc
= pqi_build_raid_path_request(ctrl_info
, &request
,
392 BMIC_IDENTIFY_PHYSICAL_DEVICE
, RAID_CTLR_LUNID
, buffer
,
393 buffer_length
, 0, &pci_direction
);
397 bmic_device_index
= CISS_GET_DRIVE_NUMBER(device
->scsi3addr
);
398 request
.cdb
[2] = (u8
)bmic_device_index
;
399 request
.cdb
[9] = (u8
)(bmic_device_index
>> 8);
401 rc
= pqi_submit_raid_request_synchronous(ctrl_info
, &request
.header
,
402 0, NULL
, NO_TIMEOUT
);
404 pqi_pci_unmap(ctrl_info
->pci_dev
, request
.sg_descriptors
, 1,
410 #define SA_CACHE_FLUSH_BUFFER_LENGTH 4
412 static int pqi_flush_cache(struct pqi_ctrl_info
*ctrl_info
)
415 struct pqi_raid_path_request request
;
420 * Don't bother trying to flush the cache if the controller is
423 if (pqi_ctrl_offline(ctrl_info
))
426 buffer
= kzalloc(SA_CACHE_FLUSH_BUFFER_LENGTH
, GFP_KERNEL
);
430 rc
= pqi_build_raid_path_request(ctrl_info
, &request
,
431 SA_CACHE_FLUSH
, RAID_CTLR_LUNID
, buffer
,
432 SA_CACHE_FLUSH_BUFFER_LENGTH
, 0, &pci_direction
);
436 rc
= pqi_submit_raid_request_synchronous(ctrl_info
, &request
.header
,
437 0, NULL
, NO_TIMEOUT
);
439 pqi_pci_unmap(ctrl_info
->pci_dev
, request
.sg_descriptors
, 1,
448 static int pqi_write_host_wellness(struct pqi_ctrl_info
*ctrl_info
,
449 void *buffer
, size_t buffer_length
)
452 struct pqi_raid_path_request request
;
455 rc
= pqi_build_raid_path_request(ctrl_info
, &request
,
456 BMIC_WRITE_HOST_WELLNESS
, RAID_CTLR_LUNID
, buffer
,
457 buffer_length
, 0, &pci_direction
);
461 rc
= pqi_submit_raid_request_synchronous(ctrl_info
, &request
.header
,
462 0, NULL
, NO_TIMEOUT
);
464 pqi_pci_unmap(ctrl_info
->pci_dev
, request
.sg_descriptors
, 1,
472 struct bmic_host_wellness_driver_version
{
474 u8 driver_version_tag
[2];
475 __le16 driver_version_length
;
476 char driver_version
[32];
482 static int pqi_write_driver_version_to_host_wellness(
483 struct pqi_ctrl_info
*ctrl_info
)
486 struct bmic_host_wellness_driver_version
*buffer
;
487 size_t buffer_length
;
489 buffer_length
= sizeof(*buffer
);
491 buffer
= kmalloc(buffer_length
, GFP_KERNEL
);
495 buffer
->start_tag
[0] = '<';
496 buffer
->start_tag
[1] = 'H';
497 buffer
->start_tag
[2] = 'W';
498 buffer
->start_tag
[3] = '>';
499 buffer
->driver_version_tag
[0] = 'D';
500 buffer
->driver_version_tag
[1] = 'V';
501 put_unaligned_le16(sizeof(buffer
->driver_version
),
502 &buffer
->driver_version_length
);
503 strncpy(buffer
->driver_version
, DRIVER_VERSION
,
504 sizeof(buffer
->driver_version
) - 1);
505 buffer
->driver_version
[sizeof(buffer
->driver_version
) - 1] = '\0';
506 buffer
->end_tag
[0] = 'Z';
507 buffer
->end_tag
[1] = 'Z';
509 rc
= pqi_write_host_wellness(ctrl_info
, buffer
, buffer_length
);
struct bmic_host_wellness_time {
	u8 dont_write_tag[2];

static int pqi_write_current_time_to_host_wellness(
	struct pqi_ctrl_info *ctrl_info)
{
	struct bmic_host_wellness_time *buffer;
	size_t buffer_length;

	buffer_length = sizeof(*buffer);

	buffer = kmalloc(buffer_length, GFP_KERNEL);

	buffer->start_tag[0] = '<';
	buffer->start_tag[1] = 'H';
	buffer->start_tag[2] = 'W';
	buffer->start_tag[3] = '>';
	buffer->time_tag[0] = 'T';
	buffer->time_tag[1] = 'D';
	put_unaligned_le16(sizeof(buffer->time),
		&buffer->time_length);

	do_gettimeofday(&time);
	local_time = time.tv_sec - (sys_tz.tz_minuteswest * 60);
	rtc_time64_to_tm(local_time, &tm);
	year = tm.tm_year + 1900;

	buffer->time[0] = bin2bcd(tm.tm_hour);
	buffer->time[1] = bin2bcd(tm.tm_min);
	buffer->time[2] = bin2bcd(tm.tm_sec);
	buffer->time[4] = bin2bcd(tm.tm_mon + 1);
	buffer->time[5] = bin2bcd(tm.tm_mday);
	buffer->time[6] = bin2bcd(year / 100);
	buffer->time[7] = bin2bcd(year % 100);

	buffer->dont_write_tag[0] = 'D';
	buffer->dont_write_tag[1] = 'W';
	buffer->end_tag[0] = 'Z';
	buffer->end_tag[1] = 'Z';

	rc = pqi_write_host_wellness(ctrl_info, buffer, buffer_length);
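/*
 * Worked example of the BCD encoding above (illustrative date, not from any
 * controller): 14:09:26 on 2016-07-05 is stored as time[0..2] = 0x14, 0x09,
 * 0x26 and time[4..7] = 0x07, 0x05, 0x20, 0x16, since bin2bcd(20) == 0x20
 * and bin2bcd(16) == 0x16 for the century/year split of year = 2016.
 */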
581 #define PQI_UPDATE_TIME_WORK_INTERVAL (24UL * 60 * 60 * HZ)
583 static void pqi_update_time_worker(struct work_struct
*work
)
586 struct pqi_ctrl_info
*ctrl_info
;
588 ctrl_info
= container_of(to_delayed_work(work
), struct pqi_ctrl_info
,
591 rc
= pqi_write_current_time_to_host_wellness(ctrl_info
);
593 dev_warn(&ctrl_info
->pci_dev
->dev
,
594 "error updating time on controller\n");
596 schedule_delayed_work(&ctrl_info
->update_time_work
,
597 PQI_UPDATE_TIME_WORK_INTERVAL
);
600 static inline void pqi_schedule_update_time_worker(
601 struct pqi_ctrl_info
*ctrl_info
)
603 schedule_delayed_work(&ctrl_info
->update_time_work
, 0);
606 static int pqi_report_luns(struct pqi_ctrl_info
*ctrl_info
, u8 cmd
,
607 void *buffer
, size_t buffer_length
)
611 struct pqi_raid_path_request request
;
613 rc
= pqi_build_raid_path_request(ctrl_info
, &request
,
614 cmd
, RAID_CTLR_LUNID
, buffer
, buffer_length
, 0, &pci_direction
);
618 rc
= pqi_submit_raid_request_synchronous(ctrl_info
, &request
.header
, 0,
621 pqi_pci_unmap(ctrl_info
->pci_dev
, request
.sg_descriptors
, 1,
627 static int pqi_report_phys_logical_luns(struct pqi_ctrl_info
*ctrl_info
, u8 cmd
,
631 size_t lun_list_length
;
632 size_t lun_data_length
;
633 size_t new_lun_list_length
;
634 void *lun_data
= NULL
;
635 struct report_lun_header
*report_lun_header
;
637 report_lun_header
= kmalloc(sizeof(*report_lun_header
), GFP_KERNEL
);
638 if (!report_lun_header
) {
643 rc
= pqi_report_luns(ctrl_info
, cmd
, report_lun_header
,
644 sizeof(*report_lun_header
));
648 lun_list_length
= get_unaligned_be32(&report_lun_header
->list_length
);
651 lun_data_length
= sizeof(struct report_lun_header
) + lun_list_length
;
653 lun_data
= kmalloc(lun_data_length
, GFP_KERNEL
);
659 if (lun_list_length
== 0) {
660 memcpy(lun_data
, report_lun_header
, sizeof(*report_lun_header
));
664 rc
= pqi_report_luns(ctrl_info
, cmd
, lun_data
, lun_data_length
);
668 new_lun_list_length
= get_unaligned_be32(
669 &((struct report_lun_header
*)lun_data
)->list_length
);
671 if (new_lun_list_length
> lun_list_length
) {
672 lun_list_length
= new_lun_list_length
;
678 kfree(report_lun_header
);
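/*
 * Note (added for clarity): the helper above issues the CISS report command
 * twice - first with a bare report_lun_header to learn the list length, then
 * with a buffer sized from that length - and if the list grew between the
 * two calls it takes the new length, since the device population changed
 * underneath us.
 */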
690 static inline int pqi_report_phys_luns(struct pqi_ctrl_info
*ctrl_info
,
693 return pqi_report_phys_logical_luns(ctrl_info
, CISS_REPORT_PHYS
,
697 static inline int pqi_report_logical_luns(struct pqi_ctrl_info
*ctrl_info
,
700 return pqi_report_phys_logical_luns(ctrl_info
, CISS_REPORT_LOG
, buffer
);
703 static int pqi_get_device_lists(struct pqi_ctrl_info
*ctrl_info
,
704 struct report_phys_lun_extended
**physdev_list
,
705 struct report_log_lun_extended
**logdev_list
)
708 size_t logdev_list_length
;
709 size_t logdev_data_length
;
710 struct report_log_lun_extended
*internal_logdev_list
;
711 struct report_log_lun_extended
*logdev_data
;
712 struct report_lun_header report_lun_header
;
714 rc
= pqi_report_phys_luns(ctrl_info
, (void **)physdev_list
);
716 dev_err(&ctrl_info
->pci_dev
->dev
,
717 "report physical LUNs failed\n");
719 rc
= pqi_report_logical_luns(ctrl_info
, (void **)logdev_list
);
721 dev_err(&ctrl_info
->pci_dev
->dev
,
722 "report logical LUNs failed\n");
725 * Tack the controller itself onto the end of the logical device list.
728 logdev_data
= *logdev_list
;
732 get_unaligned_be32(&logdev_data
->header
.list_length
);
734 memset(&report_lun_header
, 0, sizeof(report_lun_header
));
736 (struct report_log_lun_extended
*)&report_lun_header
;
737 logdev_list_length
= 0;
740 logdev_data_length
= sizeof(struct report_lun_header
) +
743 internal_logdev_list
= kmalloc(logdev_data_length
+
744 sizeof(struct report_log_lun_extended
), GFP_KERNEL
);
745 if (!internal_logdev_list
) {
751 memcpy(internal_logdev_list
, logdev_data
, logdev_data_length
);
752 memset((u8
*)internal_logdev_list
+ logdev_data_length
, 0,
753 sizeof(struct report_log_lun_extended_entry
));
754 put_unaligned_be32(logdev_list_length
+
755 sizeof(struct report_log_lun_extended_entry
),
756 &internal_logdev_list
->header
.list_length
);
759 *logdev_list
= internal_logdev_list
;
764 static inline void pqi_set_bus_target_lun(struct pqi_scsi_dev
*device
,
765 int bus
, int target
, int lun
)
768 device
->target
= target
;
772 static void pqi_assign_bus_target_lun(struct pqi_scsi_dev
*device
)
777 scsi3addr
= device
->scsi3addr
;
778 lunid
= get_unaligned_le32(scsi3addr
);
780 if (pqi_is_hba_lunid(scsi3addr
)) {
781 /* The specified device is the controller. */
782 pqi_set_bus_target_lun(device
, PQI_HBA_BUS
, 0, lunid
& 0x3fff);
783 device
->target_lun_valid
= true;
787 if (pqi_is_logical_device(device
)) {
788 pqi_set_bus_target_lun(device
, PQI_RAID_VOLUME_BUS
, 0,
790 device
->target_lun_valid
= true;
795 * Defer target and LUN assignment for non-controller physical devices
796 * because the SAS transport layer will make these assignments later.
798 pqi_set_bus_target_lun(device
, PQI_PHYSICAL_DEVICE_BUS
, 0, 0);
801 static void pqi_get_raid_level(struct pqi_ctrl_info
*ctrl_info
,
802 struct pqi_scsi_dev
*device
)
808 raid_level
= SA_RAID_UNKNOWN
;
810 buffer
= kmalloc(64, GFP_KERNEL
);
812 rc
= pqi_scsi_inquiry(ctrl_info
, device
->scsi3addr
,
813 VPD_PAGE
| CISS_VPD_LV_DEVICE_GEOMETRY
, buffer
, 64);
815 raid_level
= buffer
[8];
816 if (raid_level
> SA_RAID_MAX
)
817 raid_level
= SA_RAID_UNKNOWN
;
822 device
->raid_level
= raid_level
;
825 static int pqi_validate_raid_map(struct pqi_ctrl_info
*ctrl_info
,
826 struct pqi_scsi_dev
*device
, struct raid_map
*raid_map
)
830 u32 r5or6_blocks_per_row
;
831 unsigned int num_phys_disks
;
832 unsigned int num_raid_map_entries
;
834 raid_map_size
= get_unaligned_le32(&raid_map
->structure_size
);
836 if (raid_map_size
< offsetof(struct raid_map
, disk_data
)) {
837 err_msg
= "RAID map too small";
841 if (raid_map_size
> sizeof(*raid_map
)) {
842 err_msg
= "RAID map too large";
846 num_phys_disks
= get_unaligned_le16(&raid_map
->layout_map_count
) *
847 (get_unaligned_le16(&raid_map
->data_disks_per_row
) +
848 get_unaligned_le16(&raid_map
->metadata_disks_per_row
));
849 num_raid_map_entries
= num_phys_disks
*
850 get_unaligned_le16(&raid_map
->row_cnt
);
852 if (num_raid_map_entries
> RAID_MAP_MAX_ENTRIES
) {
853 err_msg
= "invalid number of map entries in RAID map";
857 if (device
->raid_level
== SA_RAID_1
) {
858 if (get_unaligned_le16(&raid_map
->layout_map_count
) != 2) {
859 err_msg
= "invalid RAID-1 map";
862 } else if (device
->raid_level
== SA_RAID_ADM
) {
863 if (get_unaligned_le16(&raid_map
->layout_map_count
) != 3) {
864 err_msg
= "invalid RAID-1(ADM) map";
867 } else if ((device
->raid_level
== SA_RAID_5
||
868 device
->raid_level
== SA_RAID_6
) &&
869 get_unaligned_le16(&raid_map
->layout_map_count
) > 1) {
871 r5or6_blocks_per_row
=
872 get_unaligned_le16(&raid_map
->strip_size
) *
873 get_unaligned_le16(&raid_map
->data_disks_per_row
);
874 if (r5or6_blocks_per_row
== 0) {
875 err_msg
= "invalid RAID-5 or RAID-6 map";
883 dev_warn(&ctrl_info
->pci_dev
->dev
, "%s\n", err_msg
);
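/*
 * Worked example of the entry-count check above (illustrative numbers): a
 * RAID-6 volume with layout_map_count = 1, data_disks_per_row = 4 and
 * metadata_disks_per_row = 2 yields num_phys_disks = 1 * (4 + 2) = 6, and
 * with row_cnt = 10 the map must carry 6 * 10 = 60 entries, which is only
 * accepted if it does not exceed RAID_MAP_MAX_ENTRIES.
 */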
888 static int pqi_get_raid_map(struct pqi_ctrl_info
*ctrl_info
,
889 struct pqi_scsi_dev
*device
)
893 struct pqi_raid_path_request request
;
894 struct raid_map
*raid_map
;
896 raid_map
= kmalloc(sizeof(*raid_map
), GFP_KERNEL
);
900 rc
= pqi_build_raid_path_request(ctrl_info
, &request
,
901 CISS_GET_RAID_MAP
, device
->scsi3addr
, raid_map
,
902 sizeof(*raid_map
), 0, &pci_direction
);
906 rc
= pqi_submit_raid_request_synchronous(ctrl_info
, &request
.header
, 0,
909 pqi_pci_unmap(ctrl_info
->pci_dev
, request
.sg_descriptors
, 1,
915 rc
= pqi_validate_raid_map(ctrl_info
, device
, raid_map
);
919 device
->raid_map
= raid_map
;
929 static void pqi_get_offload_status(struct pqi_ctrl_info
*ctrl_info
,
930 struct pqi_scsi_dev
*device
)
936 buffer
= kmalloc(64, GFP_KERNEL
);
940 rc
= pqi_scsi_inquiry(ctrl_info
, device
->scsi3addr
,
941 VPD_PAGE
| CISS_VPD_LV_OFFLOAD_STATUS
, buffer
, 64);
945 #define OFFLOAD_STATUS_BYTE 4
946 #define OFFLOAD_CONFIGURED_BIT 0x1
947 #define OFFLOAD_ENABLED_BIT 0x2
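/*
 * Example (added for clarity): an offload_status byte of 0x3 means the
 * volume is both configured (bit 0) and enabled (bit 1) for RAID bypass,
 * while 0x1 means configured but not currently enabled.
 */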
949 offload_status
= buffer
[OFFLOAD_STATUS_BYTE
];
950 device
->offload_configured
=
951 !!(offload_status
& OFFLOAD_CONFIGURED_BIT
);
952 if (device
->offload_configured
) {
953 device
->offload_enabled_pending
=
954 !!(offload_status
& OFFLOAD_ENABLED_BIT
);
955 if (pqi_get_raid_map(ctrl_info
, device
))
956 device
->offload_enabled_pending
= false;
964 * Use vendor-specific VPD to determine online/offline status of a volume.
967 static void pqi_get_volume_status(struct pqi_ctrl_info
*ctrl_info
,
968 struct pqi_scsi_dev
*device
)
972 u8 volume_status
= CISS_LV_STATUS_UNAVAILABLE
;
973 bool volume_offline
= true;
975 struct ciss_vpd_logical_volume_status
*vpd
;
977 vpd
= kmalloc(sizeof(*vpd
), GFP_KERNEL
);
981 rc
= pqi_scsi_inquiry(ctrl_info
, device
->scsi3addr
,
982 VPD_PAGE
| CISS_VPD_LV_STATUS
, vpd
, sizeof(*vpd
));
986 page_length
= offsetof(struct ciss_vpd_logical_volume_status
,
987 volume_status
) + vpd
->page_length
;
988 if (page_length
< sizeof(*vpd
))
991 volume_status
= vpd
->volume_status
;
992 volume_flags
= get_unaligned_be32(&vpd
->flags
);
993 volume_offline
= (volume_flags
& CISS_LV_FLAGS_NO_HOST_IO
) != 0;
998 device
->volume_status
= volume_status
;
999 device
->volume_offline
= volume_offline
;
1002 static int pqi_get_device_info(struct pqi_ctrl_info
*ctrl_info
,
1003 struct pqi_scsi_dev
*device
)
1008 buffer
= kmalloc(64, GFP_KERNEL
);
1012 /* Send an inquiry to the device to see what it is. */
1013 rc
= pqi_scsi_inquiry(ctrl_info
, device
->scsi3addr
, 0, buffer
, 64);
1017 scsi_sanitize_inquiry_string(&buffer
[8], 8);
1018 scsi_sanitize_inquiry_string(&buffer
[16], 16);
1020 device
->devtype
= buffer
[0] & 0x1f;
1021 memcpy(device
->vendor
, &buffer
[8],
1022 sizeof(device
->vendor
));
1023 memcpy(device
->model
, &buffer
[16],
1024 sizeof(device
->model
));
1026 if (pqi_is_logical_device(device
) && device
->devtype
== TYPE_DISK
) {
1027 pqi_get_raid_level(ctrl_info
, device
);
1028 pqi_get_offload_status(ctrl_info
, device
);
1029 pqi_get_volume_status(ctrl_info
, device
);
1038 static void pqi_get_physical_disk_info(struct pqi_ctrl_info
*ctrl_info
,
1039 struct pqi_scsi_dev
*device
,
1040 struct bmic_identify_physical_device
*id_phys
)
1044 memset(id_phys
, 0, sizeof(*id_phys
));
1046 rc
= pqi_identify_physical_device(ctrl_info
, device
,
1047 id_phys
, sizeof(*id_phys
));
1049 device
->queue_depth
= PQI_PHYSICAL_DISK_DEFAULT_MAX_QUEUE_DEPTH
;
1053 device
->queue_depth
=
1054 get_unaligned_le16(&id_phys
->current_queue_depth_limit
);
1055 device
->device_type
= id_phys
->device_type
;
1056 device
->active_path_index
= id_phys
->active_path_number
;
1057 device
->path_map
= id_phys
->redundant_path_present_map
;
1058 memcpy(&device
->box
,
1059 &id_phys
->alternate_paths_phys_box_on_port
,
1060 sizeof(device
->box
));
1061 memcpy(&device
->phys_connector
,
1062 &id_phys
->alternate_paths_phys_connector
,
1063 sizeof(device
->phys_connector
));
1064 device
->bay
= id_phys
->phys_bay_in_box
;
1067 static void pqi_show_volume_status(struct pqi_ctrl_info
*ctrl_info
,
1068 struct pqi_scsi_dev
*device
)
1071 static const char unknown_state_str
[] =
1072 "Volume is in an unknown state (%u)";
1073 char unknown_state_buffer
[sizeof(unknown_state_str
) + 10];
1075 switch (device
->volume_status
) {
1077 status
= "Volume online";
1079 case CISS_LV_FAILED
:
1080 status
= "Volume failed";
1082 case CISS_LV_NOT_CONFIGURED
:
1083 status
= "Volume not configured";
1085 case CISS_LV_DEGRADED
:
1086 status
= "Volume degraded";
1088 case CISS_LV_READY_FOR_RECOVERY
:
1089 status
= "Volume ready for recovery operation";
1091 case CISS_LV_UNDERGOING_RECOVERY
:
1092 status
= "Volume undergoing recovery";
1094 case CISS_LV_WRONG_PHYSICAL_DRIVE_REPLACED
:
1095 status
= "Wrong physical drive was replaced";
1097 case CISS_LV_PHYSICAL_DRIVE_CONNECTION_PROBLEM
:
1098 status
= "A physical drive not properly connected";
1100 case CISS_LV_HARDWARE_OVERHEATING
:
1101 status
= "Hardware is overheating";
1103 case CISS_LV_HARDWARE_HAS_OVERHEATED
:
1104 status
= "Hardware has overheated";
1106 case CISS_LV_UNDERGOING_EXPANSION
:
1107 status
= "Volume undergoing expansion";
1109 case CISS_LV_NOT_AVAILABLE
:
1110 status
= "Volume waiting for transforming volume";
1112 case CISS_LV_QUEUED_FOR_EXPANSION
:
1113 status
= "Volume queued for expansion";
1115 case CISS_LV_DISABLED_SCSI_ID_CONFLICT
:
1116 status
= "Volume disabled due to SCSI ID conflict";
1118 case CISS_LV_EJECTED
:
1119 status
= "Volume has been ejected";
1121 case CISS_LV_UNDERGOING_ERASE
:
1122 status
= "Volume undergoing background erase";
1124 case CISS_LV_READY_FOR_PREDICTIVE_SPARE_REBUILD
:
1125 status
= "Volume ready for predictive spare rebuild";
1127 case CISS_LV_UNDERGOING_RPI
:
1128 status
= "Volume undergoing rapid parity initialization";
1130 case CISS_LV_PENDING_RPI
:
1131 status
= "Volume queued for rapid parity initialization";
1133 case CISS_LV_ENCRYPTED_NO_KEY
:
1134 status
= "Encrypted volume inaccessible - key not present";
1136 case CISS_LV_UNDERGOING_ENCRYPTION
:
1137 status
= "Volume undergoing encryption process";
1139 case CISS_LV_UNDERGOING_ENCRYPTION_REKEYING
:
1140 status
= "Volume undergoing encryption re-keying process";
1142 case CISS_LV_ENCRYPTED_IN_NON_ENCRYPTED_CONTROLLER
:
1144 "Encrypted volume inaccessible - disabled on ctrl";
1146 case CISS_LV_PENDING_ENCRYPTION
:
1147 status
= "Volume pending migration to encrypted state";
1149 case CISS_LV_PENDING_ENCRYPTION_REKEYING
:
1150 status
= "Volume pending encryption rekeying";
1152 case CISS_LV_NOT_SUPPORTED
:
1153 status
= "Volume not supported on this controller";
1155 case CISS_LV_STATUS_UNAVAILABLE
:
1156 status
= "Volume status not available";
1159 snprintf(unknown_state_buffer
, sizeof(unknown_state_buffer
),
1160 unknown_state_str
, device
->volume_status
);
1161 status
= unknown_state_buffer
;
1165 dev_info(&ctrl_info
->pci_dev
->dev
,
1166 "scsi %d:%d:%d:%d %s\n",
1167 ctrl_info
->scsi_host
->host_no
,
1168 device
->bus
, device
->target
, device
->lun
, status
);
1171 static struct pqi_scsi_dev
*pqi_find_disk_by_aio_handle(
1172 struct pqi_ctrl_info
*ctrl_info
, u32 aio_handle
)
1174 struct pqi_scsi_dev
*device
;
1176 list_for_each_entry(device
, &ctrl_info
->scsi_device_list
,
1177 scsi_device_list_entry
) {
1178 if (device
->devtype
!= TYPE_DISK
&& device
->devtype
!= TYPE_ZBC
)
1180 if (pqi_is_logical_device(device
))
1182 if (device
->aio_handle
== aio_handle
)
1189 static void pqi_update_logical_drive_queue_depth(
1190 struct pqi_ctrl_info
*ctrl_info
, struct pqi_scsi_dev
*logical_drive
)
1193 struct raid_map
*raid_map
;
1194 struct raid_map_disk_data
*disk_data
;
1195 struct pqi_scsi_dev
*phys_disk
;
1196 unsigned int num_phys_disks
;
1197 unsigned int num_raid_map_entries
;
1198 unsigned int queue_depth
;
1200 logical_drive
->queue_depth
= PQI_LOGICAL_DRIVE_DEFAULT_MAX_QUEUE_DEPTH
;
1202 raid_map
= logical_drive
->raid_map
;
1206 disk_data
= raid_map
->disk_data
;
1207 num_phys_disks
= get_unaligned_le16(&raid_map
->layout_map_count
) *
1208 (get_unaligned_le16(&raid_map
->data_disks_per_row
) +
1209 get_unaligned_le16(&raid_map
->metadata_disks_per_row
));
1210 num_raid_map_entries
= num_phys_disks
*
1211 get_unaligned_le16(&raid_map
->row_cnt
);
1214 for (i
= 0; i
< num_raid_map_entries
; i
++) {
1215 phys_disk
= pqi_find_disk_by_aio_handle(ctrl_info
,
1216 disk_data
[i
].aio_handle
);
1219 dev_warn(&ctrl_info
->pci_dev
->dev
,
1220 "failed to find physical disk for logical drive %016llx\n",
1221 get_unaligned_be64(logical_drive
->scsi3addr
));
1222 logical_drive
->offload_enabled
= false;
1223 logical_drive
->offload_enabled_pending
= false;
1225 logical_drive
->raid_map
= NULL
;
1229 queue_depth
+= phys_disk
->queue_depth
;
1232 logical_drive
->queue_depth
= queue_depth
;
1235 static void pqi_update_all_logical_drive_queue_depths(
1236 struct pqi_ctrl_info
*ctrl_info
)
1238 struct pqi_scsi_dev
*device
;
1240 list_for_each_entry(device
, &ctrl_info
->scsi_device_list
,
1241 scsi_device_list_entry
) {
1242 if (device
->devtype
!= TYPE_DISK
&& device
->devtype
!= TYPE_ZBC
)
1244 if (!pqi_is_logical_device(device
))
1246 pqi_update_logical_drive_queue_depth(ctrl_info
, device
);
1250 static void pqi_rescan_worker(struct work_struct
*work
)
1252 struct pqi_ctrl_info
*ctrl_info
;
1254 ctrl_info
= container_of(to_delayed_work(work
), struct pqi_ctrl_info
,
1257 pqi_scan_scsi_devices(ctrl_info
);
1260 static int pqi_add_device(struct pqi_ctrl_info
*ctrl_info
,
1261 struct pqi_scsi_dev
*device
)
1265 if (pqi_is_logical_device(device
))
1266 rc
= scsi_add_device(ctrl_info
->scsi_host
, device
->bus
,
1267 device
->target
, device
->lun
);
1269 rc
= pqi_add_sas_device(ctrl_info
->sas_host
, device
);
1274 static inline void pqi_remove_device(struct pqi_ctrl_info
*ctrl_info
,
1275 struct pqi_scsi_dev
*device
)
1277 if (pqi_is_logical_device(device
))
1278 scsi_remove_device(device
->sdev
);
1280 pqi_remove_sas_device(device
);
1283 /* Assumes the SCSI device list lock is held. */
1285 static struct pqi_scsi_dev
*pqi_find_scsi_dev(struct pqi_ctrl_info
*ctrl_info
,
1286 int bus
, int target
, int lun
)
1288 struct pqi_scsi_dev
*device
;
1290 list_for_each_entry(device
, &ctrl_info
->scsi_device_list
,
1291 scsi_device_list_entry
)
1292 if (device
->bus
== bus
&& device
->target
== target
&&
1299 static inline bool pqi_device_equal(struct pqi_scsi_dev
*dev1
,
1300 struct pqi_scsi_dev
*dev2
)
1302 if (dev1
->is_physical_device
!= dev2
->is_physical_device
)
1305 if (dev1
->is_physical_device
)
1306 return dev1
->wwid
== dev2
->wwid
;
1308 return memcmp(dev1
->volume_id
, dev2
->volume_id
,
1309 sizeof(dev1
->volume_id
)) == 0;
1312 enum pqi_find_result
{
1318 static enum pqi_find_result
pqi_scsi_find_entry(struct pqi_ctrl_info
*ctrl_info
,
1319 struct pqi_scsi_dev
*device_to_find
,
1320 struct pqi_scsi_dev
**matching_device
)
1322 struct pqi_scsi_dev
*device
;
1324 list_for_each_entry(device
, &ctrl_info
->scsi_device_list
,
1325 scsi_device_list_entry
) {
1326 if (pqi_scsi3addr_equal(device_to_find
->scsi3addr
,
1327 device
->scsi3addr
)) {
1328 *matching_device
= device
;
1329 if (pqi_device_equal(device_to_find
, device
)) {
1330 if (device_to_find
->volume_offline
)
1331 return DEVICE_CHANGED
;
1334 return DEVICE_CHANGED
;
1338 return DEVICE_NOT_FOUND
;
1341 static void pqi_dev_info(struct pqi_ctrl_info
*ctrl_info
,
1342 char *action
, struct pqi_scsi_dev
*device
)
1344 dev_info(&ctrl_info
->pci_dev
->dev
,
1345 "%s scsi %d:%d:%d:%d: %s %.8s %.16s %-12s SSDSmartPathCap%c En%c Exp%c qd=%d\n",
1347 ctrl_info
->scsi_host
->host_no
,
1351 scsi_device_type(device
->devtype
),
1354 pqi_raid_level_to_string(device
->raid_level
),
1355 device
->offload_configured
? '+' : '-',
1356 device
->offload_enabled_pending
? '+' : '-',
1357 device
->expose_device
? '+' : '-',
1358 device
->queue_depth
);
1361 /* Assumes the SCSI device list lock is held. */
1363 static void pqi_scsi_update_device(struct pqi_scsi_dev
*existing_device
,
1364 struct pqi_scsi_dev
*new_device
)
1366 existing_device
->devtype
= new_device
->devtype
;
1367 existing_device
->device_type
= new_device
->device_type
;
1368 existing_device
->bus
= new_device
->bus
;
1369 if (new_device
->target_lun_valid
) {
1370 existing_device
->target
= new_device
->target
;
1371 existing_device
->lun
= new_device
->lun
;
1372 existing_device
->target_lun_valid
= true;
1375 /* By definition, the scsi3addr and wwid fields are already the same. */
1377 existing_device
->is_physical_device
= new_device
->is_physical_device
;
1378 existing_device
->expose_device
= new_device
->expose_device
;
1379 existing_device
->no_uld_attach
= new_device
->no_uld_attach
;
1380 existing_device
->aio_enabled
= new_device
->aio_enabled
;
1381 memcpy(existing_device
->vendor
, new_device
->vendor
,
1382 sizeof(existing_device
->vendor
));
1383 memcpy(existing_device
->model
, new_device
->model
,
1384 sizeof(existing_device
->model
));
1385 existing_device
->sas_address
= new_device
->sas_address
;
1386 existing_device
->raid_level
= new_device
->raid_level
;
1387 existing_device
->queue_depth
= new_device
->queue_depth
;
1388 existing_device
->aio_handle
= new_device
->aio_handle
;
1389 existing_device
->volume_status
= new_device
->volume_status
;
1390 existing_device
->active_path_index
= new_device
->active_path_index
;
1391 existing_device
->path_map
= new_device
->path_map
;
1392 existing_device
->bay
= new_device
->bay
;
1393 memcpy(existing_device
->box
, new_device
->box
,
1394 sizeof(existing_device
->box
));
1395 memcpy(existing_device
->phys_connector
, new_device
->phys_connector
,
1396 sizeof(existing_device
->phys_connector
));
1397 existing_device
->offload_configured
= new_device
->offload_configured
;
1398 existing_device
->offload_enabled
= false;
1399 existing_device
->offload_enabled_pending
=
1400 new_device
->offload_enabled_pending
;
1401 existing_device
->offload_to_mirror
= 0;
1402 kfree(existing_device
->raid_map
);
1403 existing_device
->raid_map
= new_device
->raid_map
;
1405 /* To prevent this from being freed later. */
1406 new_device
->raid_map
= NULL
;
1409 static inline void pqi_free_device(struct pqi_scsi_dev
*device
)
1412 kfree(device
->raid_map
);
1418 * Called when exposing a new device to the OS fails in order to re-adjust
1419 * our internal SCSI device list to match the SCSI ML's view.
1422 static inline void pqi_fixup_botched_add(struct pqi_ctrl_info
*ctrl_info
,
1423 struct pqi_scsi_dev
*device
)
1425 unsigned long flags
;
1427 spin_lock_irqsave(&ctrl_info
->scsi_device_list_lock
, flags
);
1428 list_del(&device
->scsi_device_list_entry
);
1429 spin_unlock_irqrestore(&ctrl_info
->scsi_device_list_lock
, flags
);
1431 /* Allow the device structure to be freed later. */
1432 device
->keep_device
= false;
1435 static void pqi_update_device_list(struct pqi_ctrl_info
*ctrl_info
,
1436 struct pqi_scsi_dev
*new_device_list
[], unsigned int num_new_devices
)
1440 unsigned long flags
;
1441 enum pqi_find_result find_result
;
1442 struct pqi_scsi_dev
*device
;
1443 struct pqi_scsi_dev
*next
;
1444 struct pqi_scsi_dev
*matching_device
;
1445 struct list_head add_list
;
1446 struct list_head delete_list
;
1448 INIT_LIST_HEAD(&add_list
);
1449 INIT_LIST_HEAD(&delete_list
);
	/*
	 * The idea here is to do as little work as possible while holding the
	 * spinlock.  That's why we go to great pains to defer anything other
	 * than updating the internal device list until after we release the
	 * spinlock.
	 */
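	/*
	 * Concretely (added for clarity): devices are first classified under
	 * the lock - matches are refreshed in place, newcomers are queued on
	 * add_list, and anything still flagged device_gone is moved to
	 * delete_list - and only after the lock is dropped are the SCSI
	 * midlayer add/remove calls made for those two lists.
	 */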
1458 spin_lock_irqsave(&ctrl_info
->scsi_device_list_lock
, flags
);
1460 /* Assume that all devices in the existing list have gone away. */
1461 list_for_each_entry(device
, &ctrl_info
->scsi_device_list
,
1462 scsi_device_list_entry
)
1463 device
->device_gone
= true;
1465 for (i
= 0; i
< num_new_devices
; i
++) {
1466 device
= new_device_list
[i
];
1468 find_result
= pqi_scsi_find_entry(ctrl_info
, device
,
1471 switch (find_result
) {
1474 * The newly found device is already in the existing
1477 device
->new_device
= false;
1478 matching_device
->device_gone
= false;
1479 pqi_scsi_update_device(matching_device
, device
);
1481 case DEVICE_NOT_FOUND
:
1483 * The newly found device is NOT in the existing device
1486 device
->new_device
= true;
1488 case DEVICE_CHANGED
:
1490 * The original device has gone away and we need to add
1493 device
->new_device
= true;
1496 WARN_ON(find_result
);
1501 /* Process all devices that have gone away. */
1502 list_for_each_entry_safe(device
, next
, &ctrl_info
->scsi_device_list
,
1503 scsi_device_list_entry
) {
1504 if (device
->device_gone
) {
1505 list_del(&device
->scsi_device_list_entry
);
1506 list_add_tail(&device
->delete_list_entry
, &delete_list
);
1510 /* Process all new devices. */
1511 for (i
= 0; i
< num_new_devices
; i
++) {
1512 device
= new_device_list
[i
];
1513 if (!device
->new_device
)
1515 if (device
->volume_offline
)
1517 list_add_tail(&device
->scsi_device_list_entry
,
1518 &ctrl_info
->scsi_device_list
);
1519 list_add_tail(&device
->add_list_entry
, &add_list
);
1520 /* To prevent this device structure from being freed later. */
1521 device
->keep_device
= true;
1524 pqi_update_all_logical_drive_queue_depths(ctrl_info
);
1526 list_for_each_entry(device
, &ctrl_info
->scsi_device_list
,
1527 scsi_device_list_entry
)
1528 device
->offload_enabled
=
1529 device
->offload_enabled_pending
;
1531 spin_unlock_irqrestore(&ctrl_info
->scsi_device_list_lock
, flags
);
1533 /* Remove all devices that have gone away. */
1534 list_for_each_entry_safe(device
, next
, &delete_list
,
1535 delete_list_entry
) {
1537 pqi_remove_device(ctrl_info
, device
);
1538 if (device
->volume_offline
) {
1539 pqi_dev_info(ctrl_info
, "offline", device
);
1540 pqi_show_volume_status(ctrl_info
, device
);
1542 pqi_dev_info(ctrl_info
, "removed", device
);
1544 list_del(&device
->delete_list_entry
);
1545 pqi_free_device(device
);
1549 * Notify the SCSI ML if the queue depth of any existing device has
1552 list_for_each_entry(device
, &ctrl_info
->scsi_device_list
,
1553 scsi_device_list_entry
) {
1554 if (device
->sdev
&& device
->queue_depth
!=
1555 device
->advertised_queue_depth
) {
1556 device
->advertised_queue_depth
= device
->queue_depth
;
1557 scsi_change_queue_depth(device
->sdev
,
1558 device
->advertised_queue_depth
);
1562 /* Expose any new devices. */
1563 list_for_each_entry_safe(device
, next
, &add_list
, add_list_entry
) {
1564 if (device
->expose_device
&& !device
->sdev
) {
1565 rc
= pqi_add_device(ctrl_info
, device
);
1567 dev_warn(&ctrl_info
->pci_dev
->dev
,
1568 "scsi %d:%d:%d:%d addition failed, device not added\n",
1569 ctrl_info
->scsi_host
->host_no
,
1570 device
->bus
, device
->target
,
1572 pqi_fixup_botched_add(ctrl_info
, device
);
1576 pqi_dev_info(ctrl_info
, "added", device
);
1580 static bool pqi_is_supported_device(struct pqi_scsi_dev
*device
)
1582 bool is_supported
= false;
1584 switch (device
->devtype
) {
1588 case TYPE_MEDIUM_CHANGER
:
1589 case TYPE_ENCLOSURE
:
1590 is_supported
= true;
1594 * Only support the HBA controller itself as a RAID
1595 * controller. If it's a RAID controller other than
1596 * the HBA itself (an external RAID controller, MSA500
1597 * or similar), we don't support it.
1599 if (pqi_is_hba_lunid(device
->scsi3addr
))
1600 is_supported
= true;
1604 return is_supported
;
1607 static inline bool pqi_skip_device(u8
*scsi3addr
,
1608 struct report_phys_lun_extended_entry
*phys_lun_ext_entry
)
1612 if (!MASKED_DEVICE(scsi3addr
))
1615 /* The device is masked. */
1617 device_flags
= phys_lun_ext_entry
->device_flags
;
1619 if (device_flags
& REPORT_PHYS_LUN_DEV_FLAG_NON_DISK
) {
1621 * It's a non-disk device. We ignore all devices of this type
1622 * when they're masked.
1630 static inline bool pqi_expose_device(struct pqi_scsi_dev
*device
)
1632 /* Expose all devices except for physical devices that are masked. */
1633 if (device
->is_physical_device
&& MASKED_DEVICE(device
->scsi3addr
))
1639 static int pqi_update_scsi_devices(struct pqi_ctrl_info
*ctrl_info
)
1643 struct list_head new_device_list_head
;
1644 struct report_phys_lun_extended
*physdev_list
= NULL
;
1645 struct report_log_lun_extended
*logdev_list
= NULL
;
1646 struct report_phys_lun_extended_entry
*phys_lun_ext_entry
;
1647 struct report_log_lun_extended_entry
*log_lun_ext_entry
;
1648 struct bmic_identify_physical_device
*id_phys
= NULL
;
1651 struct pqi_scsi_dev
**new_device_list
= NULL
;
1652 struct pqi_scsi_dev
*device
;
1653 struct pqi_scsi_dev
*next
;
1654 unsigned int num_new_devices
;
1655 unsigned int num_valid_devices
;
1656 bool is_physical_device
;
1658 static char *out_of_memory_msg
=
1659 "out of memory, device discovery stopped";
1661 INIT_LIST_HEAD(&new_device_list_head
);
1663 rc
= pqi_get_device_lists(ctrl_info
, &physdev_list
, &logdev_list
);
1669 get_unaligned_be32(&physdev_list
->header
.list_length
)
1670 / sizeof(physdev_list
->lun_entries
[0]);
1676 get_unaligned_be32(&logdev_list
->header
.list_length
)
1677 / sizeof(logdev_list
->lun_entries
[0]);
1681 if (num_physicals
) {
1683 * We need this buffer for calls to pqi_get_physical_disk_info()
1684 * below. We allocate it here instead of inside
1685 * pqi_get_physical_disk_info() because it's a fairly large
1688 id_phys
= kmalloc(sizeof(*id_phys
), GFP_KERNEL
);
1690 dev_warn(&ctrl_info
->pci_dev
->dev
, "%s\n",
1697 num_new_devices
= num_physicals
+ num_logicals
;
1699 new_device_list
= kmalloc(sizeof(*new_device_list
) *
1700 num_new_devices
, GFP_KERNEL
);
1701 if (!new_device_list
) {
1702 dev_warn(&ctrl_info
->pci_dev
->dev
, "%s\n", out_of_memory_msg
);
1707 for (i
= 0; i
< num_new_devices
; i
++) {
1708 device
= kzalloc(sizeof(*device
), GFP_KERNEL
);
1710 dev_warn(&ctrl_info
->pci_dev
->dev
, "%s\n",
1715 list_add_tail(&device
->new_device_list_entry
,
1716 &new_device_list_head
);
1720 num_valid_devices
= 0;
1722 for (i
= 0; i
< num_new_devices
; i
++) {
1724 if (i
< num_physicals
) {
1725 is_physical_device
= true;
1726 phys_lun_ext_entry
= &physdev_list
->lun_entries
[i
];
1727 log_lun_ext_entry
= NULL
;
1728 scsi3addr
= phys_lun_ext_entry
->lunid
;
1730 is_physical_device
= false;
1731 phys_lun_ext_entry
= NULL
;
1733 &logdev_list
->lun_entries
[i
- num_physicals
];
1734 scsi3addr
= log_lun_ext_entry
->lunid
;
1737 if (is_physical_device
&&
1738 pqi_skip_device(scsi3addr
, phys_lun_ext_entry
))
1742 device
= list_next_entry(device
, new_device_list_entry
);
1744 device
= list_first_entry(&new_device_list_head
,
1745 struct pqi_scsi_dev
, new_device_list_entry
);
1747 memcpy(device
->scsi3addr
, scsi3addr
, sizeof(device
->scsi3addr
));
1748 device
->is_physical_device
= is_physical_device
;
1749 device
->raid_level
= SA_RAID_UNKNOWN
;
1751 /* Gather information about the device. */
1752 rc
= pqi_get_device_info(ctrl_info
, device
);
1753 if (rc
== -ENOMEM
) {
1754 dev_warn(&ctrl_info
->pci_dev
->dev
, "%s\n",
1759 dev_warn(&ctrl_info
->pci_dev
->dev
,
1760 "obtaining device info failed, skipping device %016llx\n",
1761 get_unaligned_be64(device
->scsi3addr
));
1766 if (!pqi_is_supported_device(device
))
1769 pqi_assign_bus_target_lun(device
);
1771 device
->expose_device
= pqi_expose_device(device
);
1773 if (device
->is_physical_device
) {
1774 device
->wwid
= phys_lun_ext_entry
->wwid
;
1775 if ((phys_lun_ext_entry
->device_flags
&
1776 REPORT_PHYS_LUN_DEV_FLAG_AIO_ENABLED
) &&
1777 phys_lun_ext_entry
->aio_handle
)
1778 device
->aio_enabled
= true;
1780 memcpy(device
->volume_id
, log_lun_ext_entry
->volume_id
,
1781 sizeof(device
->volume_id
));
1784 switch (device
->devtype
) {
1787 case TYPE_ENCLOSURE
:
1788 if (device
->is_physical_device
) {
1789 device
->sas_address
=
1790 get_unaligned_be64(&device
->wwid
);
1791 if (device
->devtype
== TYPE_DISK
||
1792 device
->devtype
== TYPE_ZBC
) {
1793 device
->aio_handle
=
1794 phys_lun_ext_entry
->aio_handle
;
1795 pqi_get_physical_disk_info(ctrl_info
,
1802 new_device_list
[num_valid_devices
++] = device
;
1805 pqi_update_device_list(ctrl_info
, new_device_list
, num_valid_devices
);
1808 list_for_each_entry_safe(device
, next
, &new_device_list_head
,
1809 new_device_list_entry
) {
1810 if (device
->keep_device
)
1812 list_del(&device
->new_device_list_entry
);
1813 pqi_free_device(device
);
1816 kfree(new_device_list
);
1817 kfree(physdev_list
);
1824 static void pqi_remove_all_scsi_devices(struct pqi_ctrl_info
*ctrl_info
)
1826 unsigned long flags
;
1827 struct pqi_scsi_dev
*device
;
1828 struct pqi_scsi_dev
*next
;
1830 spin_lock_irqsave(&ctrl_info
->scsi_device_list_lock
, flags
);
1832 list_for_each_entry_safe(device
, next
, &ctrl_info
->scsi_device_list
,
1833 scsi_device_list_entry
) {
1835 pqi_remove_device(ctrl_info
, device
);
1836 list_del(&device
->scsi_device_list_entry
);
1837 pqi_free_device(device
);
1840 spin_unlock_irqrestore(&ctrl_info
->scsi_device_list_lock
, flags
);
1843 static int pqi_scan_scsi_devices(struct pqi_ctrl_info
*ctrl_info
)
1847 if (pqi_ctrl_offline(ctrl_info
))
1850 mutex_lock(&ctrl_info
->scan_mutex
);
1852 rc
= pqi_update_scsi_devices(ctrl_info
);
1854 pqi_schedule_rescan_worker(ctrl_info
);
1856 mutex_unlock(&ctrl_info
->scan_mutex
);
1861 static void pqi_scan_start(struct Scsi_Host
*shost
)
1863 pqi_scan_scsi_devices(shost_to_hba(shost
));
1866 /* Returns TRUE if scan is finished. */
1868 static int pqi_scan_finished(struct Scsi_Host
*shost
,
1869 unsigned long elapsed_time
)
1871 struct pqi_ctrl_info
*ctrl_info
;
1873 ctrl_info
= shost_priv(shost
);
1875 return !mutex_is_locked(&ctrl_info
->scan_mutex
);
static inline void pqi_set_encryption_info(
	struct pqi_encryption_info *encryption_info, struct raid_map *raid_map,
	u64 first_block)
{
	u32 volume_blk_size;

	/*
	 * Set the encryption tweak values based on logical block address.
	 * If the block size is 512, the tweak value is equal to the LBA.
	 * For other block sizes, tweak value is (LBA * block size) / 512.
	 */
	volume_blk_size = get_unaligned_le32(&raid_map->volume_blk_size);
	if (volume_blk_size != 512)
		first_block = (first_block * volume_blk_size) / 512;

	encryption_info->data_encryption_key_index =
		get_unaligned_le16(&raid_map->data_encryption_key_index);
	encryption_info->encrypt_tweak_lower = lower_32_bits(first_block);
	encryption_info->encrypt_tweak_upper = upper_32_bits(first_block);
}
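/*
 * Worked example of the tweak calculation above (illustrative values, not
 * taken from any particular controller): with a 4096-byte volume block size,
 * LBA 100 becomes a tweak of (100 * 4096) / 512 = 800; with a 512-byte block
 * size the tweak is simply the LBA itself.
 */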
1900 * Attempt to perform offload RAID mapping for a logical volume I/O.
1903 #define PQI_RAID_BYPASS_INELIGIBLE 1
1905 static int pqi_raid_bypass_submit_scsi_cmd(struct pqi_ctrl_info
*ctrl_info
,
1906 struct pqi_scsi_dev
*device
, struct scsi_cmnd
*scmd
,
1907 struct pqi_queue_group
*queue_group
)
1909 struct raid_map
*raid_map
;
1910 bool is_write
= false;
1918 u32 first_row_offset
;
1919 u32 last_row_offset
;
1924 u32 r5or6_blocks_per_row
;
1925 u64 r5or6_first_row
;
1927 u32 r5or6_first_row_offset
;
1928 u32 r5or6_last_row_offset
;
1929 u32 r5or6_first_column
;
1930 u32 r5or6_last_column
;
1931 u16 data_disks_per_row
;
1932 u32 total_disks_per_row
;
1933 u16 layout_map_count
;
1945 int offload_to_mirror
;
1946 struct pqi_encryption_info
*encryption_info_ptr
;
1947 struct pqi_encryption_info encryption_info
;
1948 #if BITS_PER_LONG == 32
1952 /* Check for valid opcode, get LBA and block count. */
1953 switch (scmd
->cmnd
[0]) {
1958 first_block
= (u64
)(((scmd
->cmnd
[1] & 0x1f) << 16) |
1959 (scmd
->cmnd
[2] << 8) | scmd
->cmnd
[3]);
1960 block_cnt
= (u32
)scmd
->cmnd
[4];
1968 first_block
= (u64
)get_unaligned_be32(&scmd
->cmnd
[2]);
1969 block_cnt
= (u32
)get_unaligned_be16(&scmd
->cmnd
[7]);
1975 first_block
= (u64
)get_unaligned_be32(&scmd
->cmnd
[2]);
1976 block_cnt
= get_unaligned_be32(&scmd
->cmnd
[6]);
1982 first_block
= get_unaligned_be64(&scmd
->cmnd
[2]);
1983 block_cnt
= get_unaligned_be32(&scmd
->cmnd
[10]);
1986 /* Process via normal I/O path. */
1987 return PQI_RAID_BYPASS_INELIGIBLE
;
1990 /* Check for write to non-RAID-0. */
1991 if (is_write
&& device
->raid_level
!= SA_RAID_0
)
1992 return PQI_RAID_BYPASS_INELIGIBLE
;
1994 if (unlikely(block_cnt
== 0))
1995 return PQI_RAID_BYPASS_INELIGIBLE
;
1997 last_block
= first_block
+ block_cnt
- 1;
1998 raid_map
= device
->raid_map
;
2000 /* Check for invalid block or wraparound. */
2001 if (last_block
>= get_unaligned_le64(&raid_map
->volume_blk_cnt
) ||
2002 last_block
< first_block
)
2003 return PQI_RAID_BYPASS_INELIGIBLE
;
2005 data_disks_per_row
= get_unaligned_le16(&raid_map
->data_disks_per_row
);
2006 strip_size
= get_unaligned_le16(&raid_map
->strip_size
);
2007 layout_map_count
= get_unaligned_le16(&raid_map
->layout_map_count
);
2009 /* Calculate stripe information for the request. */
2010 blocks_per_row
= data_disks_per_row
* strip_size
;
2011 #if BITS_PER_LONG == 32
2012 tmpdiv
= first_block
;
2013 do_div(tmpdiv
, blocks_per_row
);
2015 tmpdiv
= last_block
;
2016 do_div(tmpdiv
, blocks_per_row
);
2018 first_row_offset
= (u32
)(first_block
- (first_row
* blocks_per_row
));
2019 last_row_offset
= (u32
)(last_block
- (last_row
* blocks_per_row
));
2020 tmpdiv
= first_row_offset
;
2021 do_div(tmpdiv
, strip_size
);
2022 first_column
= tmpdiv
;
2023 tmpdiv
= last_row_offset
;
2024 do_div(tmpdiv
, strip_size
);
2025 last_column
= tmpdiv
;
2027 first_row
= first_block
/ blocks_per_row
;
2028 last_row
= last_block
/ blocks_per_row
;
2029 first_row_offset
= (u32
)(first_block
- (first_row
* blocks_per_row
));
2030 last_row_offset
= (u32
)(last_block
- (last_row
* blocks_per_row
));
2031 first_column
= first_row_offset
/ strip_size
;
2032 last_column
= last_row_offset
/ strip_size
;
2035 /* If this isn't a single row/column then give to the controller. */
2036 if (first_row
!= last_row
|| first_column
!= last_column
)
2037 return PQI_RAID_BYPASS_INELIGIBLE
;
2039 /* Proceeding with driver mapping. */
2040 total_disks_per_row
= data_disks_per_row
+
2041 get_unaligned_le16(&raid_map
->metadata_disks_per_row
);
2042 map_row
= ((u32
)(first_row
>> raid_map
->parity_rotation_shift
)) %
2043 get_unaligned_le16(&raid_map
->row_cnt
);
2044 map_index
= (map_row
* total_disks_per_row
) + first_column
;
2047 if (device
->raid_level
== SA_RAID_1
) {
2048 if (device
->offload_to_mirror
)
2049 map_index
+= data_disks_per_row
;
2050 device
->offload_to_mirror
= !device
->offload_to_mirror
;
2051 } else if (device
->raid_level
== SA_RAID_ADM
) {
2054 * Handles N-way mirrors (R1-ADM) and R10 with # of drives
2057 offload_to_mirror
= device
->offload_to_mirror
;
2058 if (offload_to_mirror
== 0) {
2059 /* use physical disk in the first mirrored group. */
2060 map_index
%= data_disks_per_row
;
2064 * Determine mirror group that map_index
2067 current_group
= map_index
/ data_disks_per_row
;
2069 if (offload_to_mirror
!= current_group
) {
2071 layout_map_count
- 1) {
2073 * Select raid index from
2076 map_index
+= data_disks_per_row
;
2080 * Select raid index from first
2083 map_index
%= data_disks_per_row
;
2087 } while (offload_to_mirror
!= current_group
);
2090 /* Set mirror group to use next time. */
2092 (offload_to_mirror
>= layout_map_count
- 1) ?
2093 0 : offload_to_mirror
+ 1;
2094 WARN_ON(offload_to_mirror
>= layout_map_count
);
2095 device
->offload_to_mirror
= offload_to_mirror
;
2097 * Avoid direct use of device->offload_to_mirror within this
2098 * function since multiple threads might simultaneously
2099 * increment it beyond the range of device->layout_map_count -1.
2101 } else if ((device
->raid_level
== SA_RAID_5
||
2102 device
->raid_level
== SA_RAID_6
) && layout_map_count
> 1) {
2104 /* Verify first and last block are in same RAID group */
2105 r5or6_blocks_per_row
= strip_size
* data_disks_per_row
;
2106 stripesize
= r5or6_blocks_per_row
* layout_map_count
;
2107 #if BITS_PER_LONG == 32
2108 tmpdiv
= first_block
;
2109 first_group
= do_div(tmpdiv
, stripesize
);
2110 tmpdiv
= first_group
;
2111 do_div(tmpdiv
, r5or6_blocks_per_row
);
2112 first_group
= tmpdiv
;
2113 tmpdiv
= last_block
;
2114 last_group
= do_div(tmpdiv
, stripesize
);
2115 tmpdiv
= last_group
;
2116 do_div(tmpdiv
, r5or6_blocks_per_row
);
2117 last_group
= tmpdiv
;
2119 first_group
= (first_block
% stripesize
) / r5or6_blocks_per_row
;
2120 last_group
= (last_block
% stripesize
) / r5or6_blocks_per_row
;
2122 if (first_group
!= last_group
)
2123 return PQI_RAID_BYPASS_INELIGIBLE
;
2125 /* Verify request is in a single row of RAID 5/6 */
2126 #if BITS_PER_LONG == 32
2127 tmpdiv
= first_block
;
2128 do_div(tmpdiv
, stripesize
);
2129 first_row
= r5or6_first_row
= r0_first_row
= tmpdiv
;
2130 tmpdiv
= last_block
;
2131 do_div(tmpdiv
, stripesize
);
2132 r5or6_last_row
= r0_last_row
= tmpdiv
;
2134 first_row
= r5or6_first_row
= r0_first_row
=
2135 first_block
/ stripesize
;
2136 r5or6_last_row
= r0_last_row
= last_block
/ stripesize
;
2138 if (r5or6_first_row
!= r5or6_last_row
)
2139 return PQI_RAID_BYPASS_INELIGIBLE
;
2141 /* Verify request is in a single column */
2142 #if BITS_PER_LONG == 32
2143 tmpdiv
= first_block
;
2144 first_row_offset
= do_div(tmpdiv
, stripesize
);
2145 tmpdiv
= first_row_offset
;
2146 first_row_offset
= (u32
)do_div(tmpdiv
, r5or6_blocks_per_row
);
2147 r5or6_first_row_offset
= first_row_offset
;
2148 tmpdiv
= last_block
;
2149 r5or6_last_row_offset
= do_div(tmpdiv
, stripesize
);
2150 tmpdiv
= r5or6_last_row_offset
;
2151 r5or6_last_row_offset
= do_div(tmpdiv
, r5or6_blocks_per_row
);
2152 tmpdiv
= r5or6_first_row_offset
;
2153 do_div(tmpdiv
, strip_size
);
2154 first_column
= r5or6_first_column
= tmpdiv
;
2155 tmpdiv
= r5or6_last_row_offset
;
2156 do_div(tmpdiv
, strip_size
);
2157 r5or6_last_column
= tmpdiv
;
2159 first_row_offset
= r5or6_first_row_offset
=
2160 (u32
)((first_block
% stripesize
) %
2161 r5or6_blocks_per_row
);
2163 r5or6_last_row_offset
=
2164 (u32
)((last_block
% stripesize
) %
2165 r5or6_blocks_per_row
);
2167 first_column
= r5or6_first_row_offset
/ strip_size
;
2168 r5or6_first_column
= first_column
;
2169 r5or6_last_column
= r5or6_last_row_offset
/ strip_size
;
2171 if (r5or6_first_column
!= r5or6_last_column
)
2172 return PQI_RAID_BYPASS_INELIGIBLE
;
2174 /* Request is eligible */
2176 ((u32
)(first_row
>> raid_map
->parity_rotation_shift
)) %
2177 get_unaligned_le16(&raid_map
->row_cnt
);
2179 map_index
= (first_group
*
2180 (get_unaligned_le16(&raid_map
->row_cnt
) *
2181 total_disks_per_row
)) +
2182 (map_row
* total_disks_per_row
) + first_column
;
2185 if (unlikely(map_index
>= RAID_MAP_MAX_ENTRIES
))
2186 return PQI_RAID_BYPASS_INELIGIBLE
;
2188 aio_handle
= raid_map
->disk_data
[map_index
].aio_handle
;
2189 disk_block
= get_unaligned_le64(&raid_map
->disk_starting_blk
) +
2190 first_row
* strip_size
+
2191 (first_row_offset
- first_column
* strip_size
);
2192 disk_block_cnt
= block_cnt
;
2194 /* Handle differing logical/physical block sizes. */
2195 if (raid_map
->phys_blk_shift
) {
2196 disk_block
<<= raid_map
->phys_blk_shift
;
2197 disk_block_cnt
<<= raid_map
->phys_blk_shift
;
2200 if (unlikely(disk_block_cnt
> 0xffff))
2201 return PQI_RAID_BYPASS_INELIGIBLE
;
2203 /* Build the new CDB for the physical disk I/O. */
2204 if (disk_block
> 0xffffffff) {
2205 cdb
[0] = is_write
? WRITE_16
: READ_16
;
2207 put_unaligned_be64(disk_block
, &cdb
[2]);
2208 put_unaligned_be32(disk_block_cnt
, &cdb
[10]);
2213 cdb
[0] = is_write
? WRITE_10
: READ_10
;
2215 put_unaligned_be32((u32
)disk_block
, &cdb
[2]);
2217 put_unaligned_be16((u16
)disk_block_cnt
, &cdb
[7]);
2222 if (get_unaligned_le16(&raid_map
->flags
) &
2223 RAID_MAP_ENCRYPTION_ENABLED
) {
2224 pqi_set_encryption_info(&encryption_info
, raid_map
,
2226 encryption_info_ptr
= &encryption_info
;
2228 encryption_info_ptr
= NULL
;
2231 return pqi_aio_submit_io(ctrl_info
, scmd
, aio_handle
,
2232 cdb
, cdb_length
, queue_group
, encryption_info_ptr
);
#define PQI_STATUS_IDLE		0x0

#define PQI_CREATE_ADMIN_QUEUE_PAIR	1
#define PQI_DELETE_ADMIN_QUEUE_PAIR	2

#define PQI_DEVICE_STATE_POWER_ON_AND_RESET		0x0
#define PQI_DEVICE_STATE_STATUS_AVAILABLE		0x1
#define PQI_DEVICE_STATE_ALL_REGISTERS_READY		0x2
#define PQI_DEVICE_STATE_ADMIN_QUEUE_PAIR_READY		0x3
#define PQI_DEVICE_STATE_ERROR				0x4

#define PQI_MODE_READY_TIMEOUT_SECS		30
#define PQI_MODE_READY_POLL_INTERVAL_MSECS	1

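/*
 * Poll the controller into PQI mode in three stages: wait for the PQI
 * signature, then for the IDLE function and status code, then for the
 * device status to reach ALL_REGISTERS_READY.  Each stage gives up after
 * PQI_MODE_READY_TIMEOUT_SECS seconds.
 */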
static int pqi_wait_for_pqi_mode_ready(struct pqi_ctrl_info *ctrl_info)
{
	struct pqi_device_registers __iomem *pqi_registers;
	unsigned long timeout;
	u64 signature;
	u8 status;

	pqi_registers = ctrl_info->pqi_registers;
	timeout = (PQI_MODE_READY_TIMEOUT_SECS * HZ) + jiffies;

	while (1) {
		signature = readq(&pqi_registers->signature);
		if (memcmp(&signature, PQI_DEVICE_SIGNATURE,
			sizeof(signature)) == 0)
			break;
		if (time_after(jiffies, timeout)) {
			dev_err(&ctrl_info->pci_dev->dev,
				"timed out waiting for PQI signature\n");
			return -ETIMEDOUT;
		}
		msleep(PQI_MODE_READY_POLL_INTERVAL_MSECS);
	}

	while (1) {
		status = readb(&pqi_registers->function_and_status_code);
		if (status == PQI_STATUS_IDLE)
			break;
		if (time_after(jiffies, timeout)) {
			dev_err(&ctrl_info->pci_dev->dev,
				"timed out waiting for PQI IDLE\n");
			return -ETIMEDOUT;
		}
		msleep(PQI_MODE_READY_POLL_INTERVAL_MSECS);
	}

	while (1) {
		if (readl(&pqi_registers->device_status) ==
			PQI_DEVICE_STATE_ALL_REGISTERS_READY)
			break;
		if (time_after(jiffies, timeout)) {
			dev_err(&ctrl_info->pci_dev->dev,
				"timed out waiting for PQI all registers ready\n");
			return -ETIMEDOUT;
		}
		msleep(PQI_MODE_READY_POLL_INTERVAL_MSECS);
	}

	return 0;
}

static inline void pqi_aio_path_disabled(struct pqi_io_request *io_request)
{
	struct pqi_scsi_dev *device;

	device = io_request->scmd->device->hostdata;
	device->offload_enabled = false;
}

static inline void pqi_take_device_offline(struct scsi_device *sdev)
{
	struct pqi_ctrl_info *ctrl_info;
	struct pqi_scsi_dev *device;

	if (scsi_device_online(sdev)) {
		scsi_device_set_state(sdev, SDEV_OFFLINE);
		ctrl_info = shost_to_hba(sdev->host);
		schedule_delayed_work(&ctrl_info->rescan_work, 0);
		device = sdev->hostdata;
		dev_err(&ctrl_info->pci_dev->dev, "offlined scsi %d:%d:%d:%d\n",
			ctrl_info->scsi_host->host_no, device->bus,
			device->target, device->lun);
	}
}

static void pqi_process_raid_io_error(struct pqi_io_request *io_request)
{
	u8 scsi_status;
	u8 host_byte;
	struct scsi_cmnd *scmd;
	struct pqi_raid_error_info *error_info;
	size_t sense_data_length;
	int residual_count;
	int xfer_count;
	struct scsi_sense_hdr sshdr;

	scmd = io_request->scmd;
	if (!scmd)
		return;

	error_info = io_request->error_info;
	scsi_status = error_info->status;
	host_byte = DID_OK;

	if (error_info->data_out_result == PQI_DATA_IN_OUT_UNDERFLOW) {
		xfer_count =
			get_unaligned_le32(&error_info->data_out_transferred);
		residual_count = scsi_bufflen(scmd) - xfer_count;
		scsi_set_resid(scmd, residual_count);
		if (xfer_count < scmd->underflow)
			host_byte = DID_SOFT_ERROR;
	}

	sense_data_length = get_unaligned_le16(&error_info->sense_data_length);
	if (sense_data_length == 0)
		sense_data_length =
			get_unaligned_le16(&error_info->response_data_length);
	if (sense_data_length) {
		if (sense_data_length > sizeof(error_info->data))
			sense_data_length = sizeof(error_info->data);

		if (scsi_status == SAM_STAT_CHECK_CONDITION &&
			scsi_normalize_sense(error_info->data,
				sense_data_length, &sshdr) &&
				sshdr.sense_key == HARDWARE_ERROR &&
				sshdr.asc == 0x3e &&
				sshdr.ascq == 0x1) {
			pqi_take_device_offline(scmd->device);
			host_byte = DID_NO_CONNECT;
		}

		if (sense_data_length > SCSI_SENSE_BUFFERSIZE)
			sense_data_length = SCSI_SENSE_BUFFERSIZE;
		memcpy(scmd->sense_buffer, error_info->data,
			sense_data_length);
	}

	scmd->result = scsi_status;
	set_host_byte(scmd, host_byte);
}

static void pqi_process_aio_io_error(struct pqi_io_request *io_request)
{
	u8 scsi_status;
	u8 host_byte;
	struct scsi_cmnd *scmd;
	struct pqi_aio_error_info *error_info;
	size_t sense_data_length;
	int residual_count;
	int xfer_count;
	bool device_offline;

	scmd = io_request->scmd;
	error_info = io_request->error_info;
	host_byte = DID_OK;
	sense_data_length = 0;
	device_offline = false;

	switch (error_info->service_response) {
	case PQI_AIO_SERV_RESPONSE_COMPLETE:
		scsi_status = error_info->status;
		break;
	case PQI_AIO_SERV_RESPONSE_FAILURE:
		switch (error_info->status) {
		case PQI_AIO_STATUS_IO_ABORTED:
			scsi_status = SAM_STAT_TASK_ABORTED;
			break;
		case PQI_AIO_STATUS_UNDERRUN:
			scsi_status = SAM_STAT_GOOD;
			residual_count = get_unaligned_le32(
						&error_info->residual_count);
			scsi_set_resid(scmd, residual_count);
			xfer_count = scsi_bufflen(scmd) - residual_count;
			if (xfer_count < scmd->underflow)
				host_byte = DID_SOFT_ERROR;
			break;
		case PQI_AIO_STATUS_OVERRUN:
			scsi_status = SAM_STAT_GOOD;
			break;
		case PQI_AIO_STATUS_AIO_PATH_DISABLED:
			pqi_aio_path_disabled(io_request);
			scsi_status = SAM_STAT_GOOD;
			io_request->status = -EAGAIN;
			break;
		case PQI_AIO_STATUS_NO_PATH_TO_DEVICE:
		case PQI_AIO_STATUS_INVALID_DEVICE:
			device_offline = true;
			pqi_take_device_offline(scmd->device);
			host_byte = DID_NO_CONNECT;
			scsi_status = SAM_STAT_CHECK_CONDITION;
			break;
		case PQI_AIO_STATUS_IO_ERROR:
		default:
			scsi_status = SAM_STAT_CHECK_CONDITION;
			break;
		}
		break;
	case PQI_AIO_SERV_RESPONSE_TMF_COMPLETE:
	case PQI_AIO_SERV_RESPONSE_TMF_SUCCEEDED:
		scsi_status = SAM_STAT_GOOD;
		break;
	case PQI_AIO_SERV_RESPONSE_TMF_REJECTED:
	case PQI_AIO_SERV_RESPONSE_TMF_INCORRECT_LUN:
	default:
		scsi_status = SAM_STAT_CHECK_CONDITION;
		break;
	}

	if (error_info->data_present) {
		sense_data_length =
			get_unaligned_le16(&error_info->data_length);
		if (sense_data_length) {
			if (sense_data_length > sizeof(error_info->data))
				sense_data_length = sizeof(error_info->data);
			if (sense_data_length > SCSI_SENSE_BUFFERSIZE)
				sense_data_length = SCSI_SENSE_BUFFERSIZE;
			memcpy(scmd->sense_buffer, error_info->data,
				sense_data_length);
		}
	}

	if (device_offline && sense_data_length == 0)
		scsi_build_sense_buffer(0, scmd->sense_buffer, HARDWARE_ERROR,
			0x3e, 0x1);

	scmd->result = scsi_status;
	set_host_byte(scmd, host_byte);
}

static void pqi_process_io_error(unsigned int iu_type,
	struct pqi_io_request *io_request)
{
	switch (iu_type) {
	case PQI_RESPONSE_IU_RAID_PATH_IO_ERROR:
		pqi_process_raid_io_error(io_request);
		break;
	case PQI_RESPONSE_IU_AIO_PATH_IO_ERROR:
		pqi_process_aio_io_error(io_request);
		break;
	}
}

static int pqi_interpret_task_management_response(
	struct pqi_task_management_response *response)
{
	int rc;

	switch (response->response_code) {
	case SOP_TMF_COMPLETE:
	case SOP_TMF_FUNCTION_SUCCEEDED:
		rc = 0;
		break;
	default:
		rc = -EIO;
		break;
	}

	return rc;
}

static unsigned int pqi_process_io_intr(struct pqi_ctrl_info *ctrl_info,
	struct pqi_queue_group *queue_group)
{
	unsigned int num_responses;
	pqi_index_t oq_pi;
	pqi_index_t oq_ci;
	struct pqi_io_request *io_request;
	struct pqi_io_response *response;
	u16 request_id;

	num_responses = 0;
	oq_ci = queue_group->oq_ci_copy;

	while (1) {
		oq_pi = *queue_group->oq_pi;
		if (oq_pi == oq_ci)
			break;

		num_responses++;
		response = queue_group->oq_element_array +
			(oq_ci * PQI_OPERATIONAL_OQ_ELEMENT_LENGTH);

		request_id = get_unaligned_le16(&response->request_id);
		WARN_ON(request_id >= ctrl_info->max_io_slots);

		io_request = &ctrl_info->io_request_pool[request_id];
		WARN_ON(atomic_read(&io_request->refcount) == 0);

		switch (response->header.iu_type) {
		case PQI_RESPONSE_IU_RAID_PATH_IO_SUCCESS:
		case PQI_RESPONSE_IU_AIO_PATH_IO_SUCCESS:
		case PQI_RESPONSE_IU_GENERAL_MANAGEMENT:
			break;
		case PQI_RESPONSE_IU_TASK_MANAGEMENT:
			io_request->status =
				pqi_interpret_task_management_response(
					(void *)response);
			break;
		case PQI_RESPONSE_IU_AIO_PATH_DISABLED:
			pqi_aio_path_disabled(io_request);
			io_request->status = -EAGAIN;
			break;
		case PQI_RESPONSE_IU_RAID_PATH_IO_ERROR:
		case PQI_RESPONSE_IU_AIO_PATH_IO_ERROR:
			io_request->error_info = ctrl_info->error_buffer +
				(get_unaligned_le16(&response->error_index) *
				PQI_ERROR_BUFFER_ELEMENT_LENGTH);
			pqi_process_io_error(response->header.iu_type,
				io_request);
			break;
		default:
			dev_err(&ctrl_info->pci_dev->dev,
				"unexpected IU type: 0x%x\n",
				response->header.iu_type);
			WARN_ON(response->header.iu_type);
			break;
		}

		io_request->io_complete_callback(io_request,
			io_request->context);

		/*
		 * Note that the I/O request structure CANNOT BE TOUCHED after
		 * returning from the I/O completion callback!
		 */

		oq_ci = (oq_ci + 1) % ctrl_info->num_elements_per_oq;
	}

	if (num_responses) {
		queue_group->oq_ci_copy = oq_ci;
		writel(oq_ci, queue_group->oq_ci);
	}

	return num_responses;
}

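/*
 * Number of free elements in a circular queue, given the producer and
 * consumer indices.  One element is always left unused so that a full
 * queue can be distinguished from an empty one.
 */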
static inline unsigned int pqi_num_elements_free(unsigned int pi,
	unsigned int ci, unsigned int elements_in_queue)
{
	unsigned int num_elements_used;

	if (pi >= ci)
		num_elements_used = pi - ci;
	else
		num_elements_used = elements_in_queue - ci + pi;

	return elements_in_queue - num_elements_used - 1;
}

#define PQI_EVENT_ACK_TIMEOUT	30

static void pqi_start_event_ack(struct pqi_ctrl_info *ctrl_info,
	struct pqi_event_acknowledge_request *iu, size_t iu_length)
{
	pqi_index_t iq_pi;
	pqi_index_t iq_ci;
	unsigned long flags;
	void *next_element;
	unsigned long timeout;
	struct pqi_queue_group *queue_group;

	queue_group = &ctrl_info->queue_groups[PQI_DEFAULT_QUEUE_GROUP];
	put_unaligned_le16(queue_group->oq_id, &iu->header.response_queue_id);

	timeout = (PQI_EVENT_ACK_TIMEOUT * HZ) + jiffies;

	while (1) {
		spin_lock_irqsave(&queue_group->submit_lock[RAID_PATH], flags);

		iq_pi = queue_group->iq_pi_copy[RAID_PATH];
		iq_ci = *queue_group->iq_ci[RAID_PATH];

		if (pqi_num_elements_free(iq_pi, iq_ci,
			ctrl_info->num_elements_per_iq))
			break;

		spin_unlock_irqrestore(
			&queue_group->submit_lock[RAID_PATH], flags);

		if (time_after(jiffies, timeout)) {
			dev_err(&ctrl_info->pci_dev->dev,
				"sending event acknowledge timed out\n");
			return;
		}
	}

	next_element = queue_group->iq_element_array[RAID_PATH] +
		(iq_pi * PQI_OPERATIONAL_IQ_ELEMENT_LENGTH);

	memcpy(next_element, iu, iu_length);

	iq_pi = (iq_pi + 1) % ctrl_info->num_elements_per_iq;

	queue_group->iq_pi_copy[RAID_PATH] = iq_pi;

	/*
	 * This write notifies the controller that an IU is available to be
	 * processed.
	 */
	writel(iq_pi, queue_group->iq_pi[RAID_PATH]);

	spin_unlock_irqrestore(&queue_group->submit_lock[RAID_PATH], flags);
}

static void pqi_acknowledge_event(struct pqi_ctrl_info *ctrl_info,
	struct pqi_event *event)
{
	struct pqi_event_acknowledge_request request;

	memset(&request, 0, sizeof(request));

	request.header.iu_type = PQI_REQUEST_IU_ACKNOWLEDGE_VENDOR_EVENT;
	put_unaligned_le16(sizeof(request) - PQI_REQUEST_HEADER_LENGTH,
		&request.header.iu_length);
	request.event_type = event->event_type;
	request.event_id = event->event_id;
	request.additional_event_id = event->additional_event_id;

	pqi_start_event_ack(ctrl_info, &request, sizeof(request));
}

static void pqi_event_worker(struct work_struct *work)
{
	unsigned int i;
	struct pqi_ctrl_info *ctrl_info;
	struct pqi_event *pending_event;
	bool got_non_heartbeat_event = false;

	ctrl_info = container_of(work, struct pqi_ctrl_info, event_work);

	pending_event = ctrl_info->pending_events;
	for (i = 0; i < PQI_NUM_SUPPORTED_EVENTS; i++) {
		if (pending_event->pending) {
			pending_event->pending = false;
			pqi_acknowledge_event(ctrl_info, pending_event);
			if (i != PQI_EVENT_HEARTBEAT)
				got_non_heartbeat_event = true;
		}
		pending_event++;
	}

	if (got_non_heartbeat_event)
		pqi_schedule_rescan_worker(ctrl_info);
}

static void pqi_take_ctrl_offline(struct pqi_ctrl_info *ctrl_info)
{
	unsigned int i;
	unsigned int path;
	struct pqi_queue_group *queue_group;
	unsigned long flags;
	struct pqi_io_request *io_request;
	struct pqi_io_request *next;
	struct scsi_cmnd *scmd;

	ctrl_info->controller_online = false;
	dev_err(&ctrl_info->pci_dev->dev, "controller offline\n");

	for (i = 0; i < ctrl_info->num_queue_groups; i++) {
		queue_group = &ctrl_info->queue_groups[i];

		for (path = 0; path < 2; path++) {
			spin_lock_irqsave(
				&queue_group->submit_lock[path], flags);

			list_for_each_entry_safe(io_request, next,
				&queue_group->request_list[path],
				request_list_entry) {

				scmd = io_request->scmd;
				if (scmd) {
					set_host_byte(scmd, DID_NO_CONNECT);
					pqi_scsi_done(scmd);
				}

				list_del(&io_request->request_list_entry);
			}

			spin_unlock_irqrestore(
				&queue_group->submit_lock[path], flags);
		}
	}
}

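/*
 * Heartbeat mechanism: on every PQI_HEARTBEAT_TIMER_INTERVAL tick the
 * handler compares the interrupt count against the value seen on the
 * previous tick.  If it has not changed, a heartbeat event is requested
 * from the controller; after more than PQI_MAX_HEARTBEAT_REQUESTS
 * consecutive silent intervals the controller is taken offline.
 */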
#define PQI_HEARTBEAT_TIMER_INTERVAL	(5 * HZ)
#define PQI_MAX_HEARTBEAT_REQUESTS	5

static void pqi_heartbeat_timer_handler(unsigned long data)
{
	int num_interrupts;
	struct pqi_ctrl_info *ctrl_info = (struct pqi_ctrl_info *)data;

	num_interrupts = atomic_read(&ctrl_info->num_interrupts);

	if (num_interrupts == ctrl_info->previous_num_interrupts) {
		ctrl_info->num_heartbeats_requested++;
		if (ctrl_info->num_heartbeats_requested >
			PQI_MAX_HEARTBEAT_REQUESTS) {
			pqi_take_ctrl_offline(ctrl_info);
			return;
		}
		ctrl_info->pending_events[PQI_EVENT_HEARTBEAT].pending = true;
		schedule_work(&ctrl_info->event_work);
	} else {
		ctrl_info->num_heartbeats_requested = 0;
	}

	ctrl_info->previous_num_interrupts = num_interrupts;
	mod_timer(&ctrl_info->heartbeat_timer,
		jiffies + PQI_HEARTBEAT_TIMER_INTERVAL);
}

static void pqi_start_heartbeat_timer(struct pqi_ctrl_info *ctrl_info)
{
	ctrl_info->previous_num_interrupts =
		atomic_read(&ctrl_info->num_interrupts);

	init_timer(&ctrl_info->heartbeat_timer);
	ctrl_info->heartbeat_timer.expires =
		jiffies + PQI_HEARTBEAT_TIMER_INTERVAL;
	ctrl_info->heartbeat_timer.data = (unsigned long)ctrl_info;
	ctrl_info->heartbeat_timer.function = pqi_heartbeat_timer_handler;
	add_timer(&ctrl_info->heartbeat_timer);
	ctrl_info->heartbeat_timer_started = true;
}

static inline void pqi_stop_heartbeat_timer(struct pqi_ctrl_info *ctrl_info)
{
	if (ctrl_info->heartbeat_timer_started)
		del_timer_sync(&ctrl_info->heartbeat_timer);
}

static int pqi_event_type_to_event_index(unsigned int event_type)
{
	int index;

	switch (event_type) {
	case PQI_EVENT_TYPE_HEARTBEAT:
		index = PQI_EVENT_HEARTBEAT;
		break;
	case PQI_EVENT_TYPE_HOTPLUG:
		index = PQI_EVENT_HOTPLUG;
		break;
	case PQI_EVENT_TYPE_HARDWARE:
		index = PQI_EVENT_HARDWARE;
		break;
	case PQI_EVENT_TYPE_PHYSICAL_DEVICE:
		index = PQI_EVENT_PHYSICAL_DEVICE;
		break;
	case PQI_EVENT_TYPE_LOGICAL_DEVICE:
		index = PQI_EVENT_LOGICAL_DEVICE;
		break;
	case PQI_EVENT_TYPE_AIO_STATE_CHANGE:
		index = PQI_EVENT_AIO_STATE_CHANGE;
		break;
	case PQI_EVENT_TYPE_AIO_CONFIG_CHANGE:
		index = PQI_EVENT_AIO_CONFIG_CHANGE;
		break;
	default:
		index = -1;
		break;
	}

	return index;
}

static unsigned int pqi_process_event_intr(struct pqi_ctrl_info *ctrl_info)
{
	unsigned int num_events;
	pqi_index_t oq_pi;
	pqi_index_t oq_ci;
	struct pqi_event_queue *event_queue;
	struct pqi_event_response *response;
	struct pqi_event *pending_event;
	bool need_delayed_work;
	int event_index;

	event_queue = &ctrl_info->event_queue;
	num_events = 0;
	need_delayed_work = false;
	oq_ci = event_queue->oq_ci_copy;

	while (1) {
		oq_pi = *event_queue->oq_pi;
		if (oq_pi == oq_ci)
			break;

		num_events++;
		response = event_queue->oq_element_array +
			(oq_ci * PQI_EVENT_OQ_ELEMENT_LENGTH);

		event_index =
			pqi_event_type_to_event_index(response->event_type);

		if (event_index >= 0) {
			if (response->request_acknowlege) {
				pending_event =
					&ctrl_info->pending_events[event_index];
				pending_event->event_type =
					response->event_type;
				pending_event->event_id = response->event_id;
				pending_event->additional_event_id =
					response->additional_event_id;
				if (event_index != PQI_EVENT_HEARTBEAT) {
					pending_event->pending = true;
					need_delayed_work = true;
				}
			}
		}

		oq_ci = (oq_ci + 1) % PQI_NUM_EVENT_QUEUE_ELEMENTS;
	}

	if (num_events) {
		event_queue->oq_ci_copy = oq_ci;
		writel(oq_ci, event_queue->oq_ci);

		if (need_delayed_work)
			schedule_work(&ctrl_info->event_work);
	}

	return num_events;
}

static irqreturn_t pqi_irq_handler(int irq, void *data)
{
	struct pqi_ctrl_info *ctrl_info;
	struct pqi_queue_group *queue_group;
	unsigned int num_responses_handled;

	queue_group = data;
	ctrl_info = queue_group->ctrl_info;

	if (!ctrl_info || !queue_group->oq_ci)
		return IRQ_NONE;

	num_responses_handled = pqi_process_io_intr(ctrl_info, queue_group);

	if (irq == ctrl_info->event_irq)
		num_responses_handled += pqi_process_event_intr(ctrl_info);

	if (num_responses_handled)
		atomic_inc(&ctrl_info->num_interrupts);

	pqi_start_io(ctrl_info, queue_group, RAID_PATH, NULL);
	pqi_start_io(ctrl_info, queue_group, AIO_PATH, NULL);

	return IRQ_HANDLED;
}

static int pqi_request_irqs(struct pqi_ctrl_info *ctrl_info)
{
	struct pci_dev *pdev = ctrl_info->pci_dev;
	int i;
	int rc;

	ctrl_info->event_irq = pci_irq_vector(pdev, 0);

	for (i = 0; i < ctrl_info->num_msix_vectors_enabled; i++) {
		rc = request_irq(pci_irq_vector(pdev, i), pqi_irq_handler, 0,
			DRIVER_NAME_SHORT, &ctrl_info->queue_groups[i]);
		if (rc) {
			dev_err(&pdev->dev,
				"irq %u init failed with error %d\n",
				pci_irq_vector(pdev, i), rc);
			return rc;
		}
		ctrl_info->num_msix_vectors_initialized++;
	}

	return 0;
}

static int pqi_enable_msix_interrupts(struct pqi_ctrl_info *ctrl_info)
{
	int ret;

	ret = pci_alloc_irq_vectors(ctrl_info->pci_dev,
			PQI_MIN_MSIX_VECTORS, ctrl_info->num_queue_groups,
			PCI_IRQ_MSIX | PCI_IRQ_AFFINITY);
	if (ret < 0) {
		dev_err(&ctrl_info->pci_dev->dev,
			"MSI-X init failed with error %d\n", ret);
		return ret;
	}

	ctrl_info->num_msix_vectors_enabled = ret;

	return 0;
}

static int pqi_alloc_operational_queues(struct pqi_ctrl_info *ctrl_info)
{
	unsigned int i;
	size_t alloc_length;
	size_t element_array_length_per_iq;
	size_t element_array_length_per_oq;
	void *element_array;
	void *next_queue_index;
	void *aligned_pointer;
	unsigned int num_inbound_queues;
	unsigned int num_outbound_queues;
	unsigned int num_queue_indexes;
	struct pqi_queue_group *queue_group;

	element_array_length_per_iq =
		PQI_OPERATIONAL_IQ_ELEMENT_LENGTH *
		ctrl_info->num_elements_per_iq;
	element_array_length_per_oq =
		PQI_OPERATIONAL_OQ_ELEMENT_LENGTH *
		ctrl_info->num_elements_per_oq;
	num_inbound_queues = ctrl_info->num_queue_groups * 2;
	num_outbound_queues = ctrl_info->num_queue_groups;
	num_queue_indexes = (ctrl_info->num_queue_groups * 3) + 1;

	aligned_pointer = NULL;

	for (i = 0; i < num_inbound_queues; i++) {
		aligned_pointer = PTR_ALIGN(aligned_pointer,
			PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT);
		aligned_pointer += element_array_length_per_iq;
	}

	for (i = 0; i < num_outbound_queues; i++) {
		aligned_pointer = PTR_ALIGN(aligned_pointer,
			PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT);
		aligned_pointer += element_array_length_per_oq;
	}

	aligned_pointer = PTR_ALIGN(aligned_pointer,
		PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT);
	aligned_pointer += PQI_NUM_EVENT_QUEUE_ELEMENTS *
		PQI_EVENT_OQ_ELEMENT_LENGTH;

	for (i = 0; i < num_queue_indexes; i++) {
		aligned_pointer = PTR_ALIGN(aligned_pointer,
			PQI_OPERATIONAL_INDEX_ALIGNMENT);
		aligned_pointer += sizeof(pqi_index_t);
	}

	alloc_length = (size_t)aligned_pointer +
		PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT;

	ctrl_info->queue_memory_base =
		dma_zalloc_coherent(&ctrl_info->pci_dev->dev,
			alloc_length,
			&ctrl_info->queue_memory_base_dma_handle, GFP_KERNEL);

	if (!ctrl_info->queue_memory_base) {
		dev_err(&ctrl_info->pci_dev->dev,
			"failed to allocate memory for PQI admin queues\n");
		return -ENOMEM;
	}

	ctrl_info->queue_memory_length = alloc_length;

	element_array = PTR_ALIGN(ctrl_info->queue_memory_base,
		PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT);

	for (i = 0; i < ctrl_info->num_queue_groups; i++) {
		queue_group = &ctrl_info->queue_groups[i];
		queue_group->iq_element_array[RAID_PATH] = element_array;
		queue_group->iq_element_array_bus_addr[RAID_PATH] =
			ctrl_info->queue_memory_base_dma_handle +
			(element_array - ctrl_info->queue_memory_base);
		element_array += element_array_length_per_iq;
		element_array = PTR_ALIGN(element_array,
			PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT);
		queue_group->iq_element_array[AIO_PATH] = element_array;
		queue_group->iq_element_array_bus_addr[AIO_PATH] =
			ctrl_info->queue_memory_base_dma_handle +
			(element_array - ctrl_info->queue_memory_base);
		element_array += element_array_length_per_iq;
		element_array = PTR_ALIGN(element_array,
			PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT);
	}

	for (i = 0; i < ctrl_info->num_queue_groups; i++) {
		queue_group = &ctrl_info->queue_groups[i];
		queue_group->oq_element_array = element_array;
		queue_group->oq_element_array_bus_addr =
			ctrl_info->queue_memory_base_dma_handle +
			(element_array - ctrl_info->queue_memory_base);
		element_array += element_array_length_per_oq;
		element_array = PTR_ALIGN(element_array,
			PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT);
	}

	ctrl_info->event_queue.oq_element_array = element_array;
	ctrl_info->event_queue.oq_element_array_bus_addr =
		ctrl_info->queue_memory_base_dma_handle +
		(element_array - ctrl_info->queue_memory_base);
	element_array += PQI_NUM_EVENT_QUEUE_ELEMENTS *
		PQI_EVENT_OQ_ELEMENT_LENGTH;

	next_queue_index = PTR_ALIGN(element_array,
		PQI_OPERATIONAL_INDEX_ALIGNMENT);

	for (i = 0; i < ctrl_info->num_queue_groups; i++) {
		queue_group = &ctrl_info->queue_groups[i];
		queue_group->iq_ci[RAID_PATH] = next_queue_index;
		queue_group->iq_ci_bus_addr[RAID_PATH] =
			ctrl_info->queue_memory_base_dma_handle +
			(next_queue_index - ctrl_info->queue_memory_base);
		next_queue_index += sizeof(pqi_index_t);
		next_queue_index = PTR_ALIGN(next_queue_index,
			PQI_OPERATIONAL_INDEX_ALIGNMENT);
		queue_group->iq_ci[AIO_PATH] = next_queue_index;
		queue_group->iq_ci_bus_addr[AIO_PATH] =
			ctrl_info->queue_memory_base_dma_handle +
			(next_queue_index - ctrl_info->queue_memory_base);
		next_queue_index += sizeof(pqi_index_t);
		next_queue_index = PTR_ALIGN(next_queue_index,
			PQI_OPERATIONAL_INDEX_ALIGNMENT);
		queue_group->oq_pi = next_queue_index;
		queue_group->oq_pi_bus_addr =
			ctrl_info->queue_memory_base_dma_handle +
			(next_queue_index - ctrl_info->queue_memory_base);
		next_queue_index += sizeof(pqi_index_t);
		next_queue_index = PTR_ALIGN(next_queue_index,
			PQI_OPERATIONAL_INDEX_ALIGNMENT);
	}

	ctrl_info->event_queue.oq_pi = next_queue_index;
	ctrl_info->event_queue.oq_pi_bus_addr =
		ctrl_info->queue_memory_base_dma_handle +
		(next_queue_index - ctrl_info->queue_memory_base);

	return 0;
}

static void pqi_init_operational_queues(struct pqi_ctrl_info *ctrl_info)
{
	unsigned int i;
	u16 next_iq_id = PQI_MIN_OPERATIONAL_QUEUE_ID;
	u16 next_oq_id = PQI_MIN_OPERATIONAL_QUEUE_ID;

	/*
	 * Initialize the backpointers to the controller structure in
	 * each operational queue group structure.
	 */
	for (i = 0; i < ctrl_info->num_queue_groups; i++)
		ctrl_info->queue_groups[i].ctrl_info = ctrl_info;

	/*
	 * Assign IDs to all operational queues.  Note that the IDs
	 * assigned to operational IQs are independent of the IDs
	 * assigned to operational OQs.
	 */
	ctrl_info->event_queue.oq_id = next_oq_id++;
	for (i = 0; i < ctrl_info->num_queue_groups; i++) {
		ctrl_info->queue_groups[i].iq_id[RAID_PATH] = next_iq_id++;
		ctrl_info->queue_groups[i].iq_id[AIO_PATH] = next_iq_id++;
		ctrl_info->queue_groups[i].oq_id = next_oq_id++;
	}

	/*
	 * Assign MSI-X table entry indexes to all queues.  Note that the
	 * interrupt for the event queue is shared with the first queue group.
	 */
	ctrl_info->event_queue.int_msg_num = 0;
	for (i = 0; i < ctrl_info->num_queue_groups; i++)
		ctrl_info->queue_groups[i].int_msg_num = i;

	for (i = 0; i < ctrl_info->num_queue_groups; i++) {
		spin_lock_init(&ctrl_info->queue_groups[i].submit_lock[0]);
		spin_lock_init(&ctrl_info->queue_groups[i].submit_lock[1]);
		INIT_LIST_HEAD(&ctrl_info->queue_groups[i].request_list[0]);
		INIT_LIST_HEAD(&ctrl_info->queue_groups[i].request_list[1]);
	}
}

static int pqi_alloc_admin_queues(struct pqi_ctrl_info *ctrl_info)
{
	size_t alloc_length;
	struct pqi_admin_queues_aligned *admin_queues_aligned;
	struct pqi_admin_queues *admin_queues;

	alloc_length = sizeof(struct pqi_admin_queues_aligned) +
		PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT;

	ctrl_info->admin_queue_memory_base =
		dma_zalloc_coherent(&ctrl_info->pci_dev->dev,
			alloc_length,
			&ctrl_info->admin_queue_memory_base_dma_handle,
			GFP_KERNEL);

	if (!ctrl_info->admin_queue_memory_base)
		return -ENOMEM;

	ctrl_info->admin_queue_memory_length = alloc_length;

	admin_queues = &ctrl_info->admin_queues;
	admin_queues_aligned = PTR_ALIGN(ctrl_info->admin_queue_memory_base,
		PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT);
	admin_queues->iq_element_array =
		&admin_queues_aligned->iq_element_array;
	admin_queues->oq_element_array =
		&admin_queues_aligned->oq_element_array;
	admin_queues->iq_ci = &admin_queues_aligned->iq_ci;
	admin_queues->oq_pi = &admin_queues_aligned->oq_pi;

	admin_queues->iq_element_array_bus_addr =
		ctrl_info->admin_queue_memory_base_dma_handle +
		(admin_queues->iq_element_array -
		ctrl_info->admin_queue_memory_base);
	admin_queues->oq_element_array_bus_addr =
		ctrl_info->admin_queue_memory_base_dma_handle +
		(admin_queues->oq_element_array -
		ctrl_info->admin_queue_memory_base);
	admin_queues->iq_ci_bus_addr =
		ctrl_info->admin_queue_memory_base_dma_handle +
		((void *)admin_queues->iq_ci -
		ctrl_info->admin_queue_memory_base);
	admin_queues->oq_pi_bus_addr =
		ctrl_info->admin_queue_memory_base_dma_handle +
		((void *)admin_queues->oq_pi -
		ctrl_info->admin_queue_memory_base);

	return 0;
}

#define PQI_ADMIN_QUEUE_CREATE_TIMEOUT_JIFFIES		HZ
#define PQI_ADMIN_QUEUE_CREATE_POLL_INTERVAL_MSECS	1

static int pqi_create_admin_queues(struct pqi_ctrl_info *ctrl_info)
{
	struct pqi_device_registers __iomem *pqi_registers;
	struct pqi_admin_queues *admin_queues;
	unsigned long timeout;
	u8 status;
	u32 reg;

	pqi_registers = ctrl_info->pqi_registers;
	admin_queues = &ctrl_info->admin_queues;

	writeq((u64)admin_queues->iq_element_array_bus_addr,
		&pqi_registers->admin_iq_element_array_addr);
	writeq((u64)admin_queues->oq_element_array_bus_addr,
		&pqi_registers->admin_oq_element_array_addr);
	writeq((u64)admin_queues->iq_ci_bus_addr,
		&pqi_registers->admin_iq_ci_addr);
	writeq((u64)admin_queues->oq_pi_bus_addr,
		&pqi_registers->admin_oq_pi_addr);

	reg = PQI_ADMIN_IQ_NUM_ELEMENTS |
		(PQI_ADMIN_OQ_NUM_ELEMENTS) << 8 |
		(admin_queues->int_msg_num << 16);
	writel(reg, &pqi_registers->admin_iq_num_elements);
	writel(PQI_CREATE_ADMIN_QUEUE_PAIR,
		&pqi_registers->function_and_status_code);

	timeout = PQI_ADMIN_QUEUE_CREATE_TIMEOUT_JIFFIES + jiffies;
	while (1) {
		status = readb(&pqi_registers->function_and_status_code);
		if (status == PQI_STATUS_IDLE)
			break;
		if (time_after(jiffies, timeout))
			return -ETIMEDOUT;
		msleep(PQI_ADMIN_QUEUE_CREATE_POLL_INTERVAL_MSECS);
	}

	/*
	 * The offset registers are not initialized to the correct
	 * offsets until *after* the create admin queue pair command
	 * completes successfully.
	 */
	admin_queues->iq_pi = ctrl_info->iomem_base +
		PQI_DEVICE_REGISTERS_OFFSET +
		readq(&pqi_registers->admin_iq_pi_offset);
	admin_queues->oq_ci = ctrl_info->iomem_base +
		PQI_DEVICE_REGISTERS_OFFSET +
		readq(&pqi_registers->admin_oq_ci_offset);

	return 0;
}

static void pqi_submit_admin_request(struct pqi_ctrl_info *ctrl_info,
	struct pqi_general_admin_request *request)
{
	struct pqi_admin_queues *admin_queues;
	void *next_element;
	pqi_index_t iq_pi;

	admin_queues = &ctrl_info->admin_queues;
	iq_pi = admin_queues->iq_pi_copy;

	next_element = admin_queues->iq_element_array +
		(iq_pi * PQI_ADMIN_IQ_ELEMENT_LENGTH);

	memcpy(next_element, request, sizeof(*request));

	iq_pi = (iq_pi + 1) % PQI_ADMIN_IQ_NUM_ELEMENTS;
	admin_queues->iq_pi_copy = iq_pi;

	/*
	 * This write notifies the controller that an IU is available to be
	 * processed.
	 */
	writel(iq_pi, admin_queues->iq_pi);
}

static int pqi_poll_for_admin_response(struct pqi_ctrl_info *ctrl_info,
	struct pqi_general_admin_response *response)
{
	struct pqi_admin_queues *admin_queues;
	pqi_index_t oq_pi;
	pqi_index_t oq_ci;
	unsigned long timeout;

	admin_queues = &ctrl_info->admin_queues;
	oq_ci = admin_queues->oq_ci_copy;

	timeout = (3 * HZ) + jiffies;

	while (1) {
		oq_pi = *admin_queues->oq_pi;
		if (oq_pi != oq_ci)
			break;
		if (time_after(jiffies, timeout)) {
			dev_err(&ctrl_info->pci_dev->dev,
				"timed out waiting for admin response\n");
			return -ETIMEDOUT;
		}
		usleep_range(1000, 2000);
	}

	memcpy(response, admin_queues->oq_element_array +
		(oq_ci * PQI_ADMIN_OQ_ELEMENT_LENGTH), sizeof(*response));

	oq_ci = (oq_ci + 1) % PQI_ADMIN_OQ_NUM_ELEMENTS;
	admin_queues->oq_ci_copy = oq_ci;
	writel(oq_ci, admin_queues->oq_ci);

	return 0;
}

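/*
 * Transfer as many queued IUs as will fit into the inbound queue for the
 * given path.  An IU that spans the end of the element array is copied in
 * two pieces (tail of the array, then the beginning), and the producer
 * index register is written once per invocation, after the batch.
 */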
static void pqi_start_io(struct pqi_ctrl_info *ctrl_info,
	struct pqi_queue_group *queue_group, enum pqi_io_path path,
	struct pqi_io_request *io_request)
{
	struct pqi_io_request *next;
	void *next_element;
	pqi_index_t iq_pi;
	pqi_index_t iq_ci;
	size_t iu_length;
	unsigned long flags;
	unsigned int num_elements_needed;
	unsigned int num_elements_to_end_of_queue;
	size_t copy_count;
	struct pqi_iu_header *request;

	spin_lock_irqsave(&queue_group->submit_lock[path], flags);

	if (io_request)
		list_add_tail(&io_request->request_list_entry,
			&queue_group->request_list[path]);

	iq_pi = queue_group->iq_pi_copy[path];

	list_for_each_entry_safe(io_request, next,
		&queue_group->request_list[path], request_list_entry) {

		request = io_request->iu;

		iu_length = get_unaligned_le16(&request->iu_length) +
			PQI_REQUEST_HEADER_LENGTH;
		num_elements_needed =
			DIV_ROUND_UP(iu_length,
				PQI_OPERATIONAL_IQ_ELEMENT_LENGTH);

		iq_ci = *queue_group->iq_ci[path];

		if (num_elements_needed > pqi_num_elements_free(iq_pi, iq_ci,
			ctrl_info->num_elements_per_iq))
			break;

		put_unaligned_le16(queue_group->oq_id,
			&request->response_queue_id);

		next_element = queue_group->iq_element_array[path] +
			(iq_pi * PQI_OPERATIONAL_IQ_ELEMENT_LENGTH);

		num_elements_to_end_of_queue =
			ctrl_info->num_elements_per_iq - iq_pi;

		if (num_elements_needed <= num_elements_to_end_of_queue) {
			memcpy(next_element, request, iu_length);
		} else {
			copy_count = num_elements_to_end_of_queue *
				PQI_OPERATIONAL_IQ_ELEMENT_LENGTH;
			memcpy(next_element, request, copy_count);
			memcpy(queue_group->iq_element_array[path],
				(u8 *)request + copy_count,
				iu_length - copy_count);
		}

		iq_pi = (iq_pi + num_elements_needed) %
			ctrl_info->num_elements_per_iq;

		list_del(&io_request->request_list_entry);
	}

	if (iq_pi != queue_group->iq_pi_copy[path]) {
		queue_group->iq_pi_copy[path] = iq_pi;

		/*
		 * This write notifies the controller that one or more IUs are
		 * available to be processed.
		 */
		writel(iq_pi, queue_group->iq_pi[path]);
	}

	spin_unlock_irqrestore(&queue_group->submit_lock[path], flags);
}

static void pqi_raid_synchronous_complete(struct pqi_io_request *io_request,
	void *context)
{
	struct completion *waiting = context;

	complete(waiting);
}

static int pqi_submit_raid_request_synchronous_with_io_request(
	struct pqi_ctrl_info *ctrl_info, struct pqi_io_request *io_request,
	unsigned long timeout_msecs)
{
	int rc = 0;
	DECLARE_COMPLETION_ONSTACK(wait);

	io_request->io_complete_callback = pqi_raid_synchronous_complete;
	io_request->context = &wait;

	pqi_start_io(ctrl_info,
		&ctrl_info->queue_groups[PQI_DEFAULT_QUEUE_GROUP], RAID_PATH,
		io_request);

	if (timeout_msecs == NO_TIMEOUT) {
		wait_for_completion_io(&wait);
	} else {
		if (!wait_for_completion_io_timeout(&wait,
			msecs_to_jiffies(timeout_msecs))) {
			dev_warn(&ctrl_info->pci_dev->dev,
				"command timed out\n");
			rc = -ETIMEDOUT;
		}
	}

	return rc;
}

static int pqi_submit_raid_request_synchronous(struct pqi_ctrl_info *ctrl_info,
	struct pqi_iu_header *request, unsigned int flags,
	struct pqi_raid_error_info *error_info, unsigned long timeout_msecs)
{
	int rc = 0;
	struct pqi_io_request *io_request;
	unsigned long start_jiffies;
	unsigned long msecs_blocked;
	size_t iu_length;

	/*
	 * Note that specifying PQI_SYNC_FLAGS_INTERRUPTABLE and a timeout value
	 * are mutually exclusive.
	 */

	if (flags & PQI_SYNC_FLAGS_INTERRUPTABLE) {
		if (down_interruptible(&ctrl_info->sync_request_sem))
			return -ERESTARTSYS;
	} else {
		if (timeout_msecs == NO_TIMEOUT) {
			down(&ctrl_info->sync_request_sem);
		} else {
			start_jiffies = jiffies;
			if (down_timeout(&ctrl_info->sync_request_sem,
				msecs_to_jiffies(timeout_msecs)))
				return -ETIMEDOUT;
			msecs_blocked =
				jiffies_to_msecs(jiffies - start_jiffies);
			if (msecs_blocked >= timeout_msecs)
				return -ETIMEDOUT;
			timeout_msecs -= msecs_blocked;
		}
	}

	io_request = pqi_alloc_io_request(ctrl_info);

	put_unaligned_le16(io_request->index,
		&(((struct pqi_raid_path_request *)request)->request_id));

	if (request->iu_type == PQI_REQUEST_IU_RAID_PATH_IO)
		((struct pqi_raid_path_request *)request)->error_index =
			((struct pqi_raid_path_request *)request)->request_id;

	iu_length = get_unaligned_le16(&request->iu_length) +
		PQI_REQUEST_HEADER_LENGTH;
	memcpy(io_request->iu, request, iu_length);

	rc = pqi_submit_raid_request_synchronous_with_io_request(ctrl_info,
		io_request, timeout_msecs);

	if (error_info) {
		if (io_request->error_info)
			memcpy(error_info, io_request->error_info,
				sizeof(*error_info));
		else
			memset(error_info, 0, sizeof(*error_info));
	} else if (rc == 0 && io_request->error_info) {
		u8 scsi_status;
		struct pqi_raid_error_info *raid_error_info;

		raid_error_info = io_request->error_info;
		scsi_status = raid_error_info->status;

		if (scsi_status == SAM_STAT_CHECK_CONDITION &&
			raid_error_info->data_out_result ==
			PQI_DATA_IN_OUT_UNDERFLOW)
			scsi_status = SAM_STAT_GOOD;

		if (scsi_status != SAM_STAT_GOOD)
			rc = -EIO;
	}

	pqi_free_io_request(io_request);

	up(&ctrl_info->sync_request_sem);

	return rc;
}

static int pqi_validate_admin_response(
	struct pqi_general_admin_response *response, u8 expected_function_code)
{
	if (response->header.iu_type != PQI_RESPONSE_IU_GENERAL_ADMIN)
		return -EINVAL;

	if (get_unaligned_le16(&response->header.iu_length) !=
		PQI_GENERAL_ADMIN_IU_LENGTH)
		return -EINVAL;

	if (response->function_code != expected_function_code)
		return -EINVAL;

	if (response->status != PQI_GENERAL_ADMIN_STATUS_SUCCESS)
		return -EINVAL;

	return 0;
}

static int pqi_submit_admin_request_synchronous(
	struct pqi_ctrl_info *ctrl_info,
	struct pqi_general_admin_request *request,
	struct pqi_general_admin_response *response)
{
	int rc;

	pqi_submit_admin_request(ctrl_info, request);

	rc = pqi_poll_for_admin_response(ctrl_info, response);

	if (rc == 0)
		rc = pqi_validate_admin_response(response,
			request->function_code);

	return rc;
}

static int pqi_report_device_capability(struct pqi_ctrl_info *ctrl_info)
{
	int rc;
	struct pqi_general_admin_request request;
	struct pqi_general_admin_response response;
	struct pqi_device_capability *capability;
	struct pqi_iu_layer_descriptor *sop_iu_layer_descriptor;

	capability = kmalloc(sizeof(*capability), GFP_KERNEL);
	if (!capability)
		return -ENOMEM;

	memset(&request, 0, sizeof(request));

	request.header.iu_type = PQI_REQUEST_IU_GENERAL_ADMIN;
	put_unaligned_le16(PQI_GENERAL_ADMIN_IU_LENGTH,
		&request.header.iu_length);
	request.function_code =
		PQI_GENERAL_ADMIN_FUNCTION_REPORT_DEVICE_CAPABILITY;
	put_unaligned_le32(sizeof(*capability),
		&request.data.report_device_capability.buffer_length);

	rc = pqi_map_single(ctrl_info->pci_dev,
		&request.data.report_device_capability.sg_descriptor,
		capability, sizeof(*capability),
		PCI_DMA_FROMDEVICE);
	if (rc)
		goto out;

	rc = pqi_submit_admin_request_synchronous(ctrl_info, &request,
		&response);

	pqi_pci_unmap(ctrl_info->pci_dev,
		&request.data.report_device_capability.sg_descriptor, 1,
		PCI_DMA_FROMDEVICE);

	if (rc)
		goto out;

	if (response.status != PQI_GENERAL_ADMIN_STATUS_SUCCESS) {
		rc = -EIO;
		goto out;
	}

	ctrl_info->max_inbound_queues =
		get_unaligned_le16(&capability->max_inbound_queues);
	ctrl_info->max_elements_per_iq =
		get_unaligned_le16(&capability->max_elements_per_iq);
	ctrl_info->max_iq_element_length =
		get_unaligned_le16(&capability->max_iq_element_length)
		* 16;
	ctrl_info->max_outbound_queues =
		get_unaligned_le16(&capability->max_outbound_queues);
	ctrl_info->max_elements_per_oq =
		get_unaligned_le16(&capability->max_elements_per_oq);
	ctrl_info->max_oq_element_length =
		get_unaligned_le16(&capability->max_oq_element_length)
		* 16;

	sop_iu_layer_descriptor =
		&capability->iu_layer_descriptors[PQI_PROTOCOL_SOP];

	ctrl_info->max_inbound_iu_length_per_firmware =
		get_unaligned_le16(
			&sop_iu_layer_descriptor->max_inbound_iu_length);
	ctrl_info->inbound_spanning_supported =
		sop_iu_layer_descriptor->inbound_spanning_supported;
	ctrl_info->outbound_spanning_supported =
		sop_iu_layer_descriptor->outbound_spanning_supported;

out:
	kfree(capability);

	return rc;
}

static int pqi_validate_device_capability(struct pqi_ctrl_info *ctrl_info)
{
	if (ctrl_info->max_iq_element_length <
		PQI_OPERATIONAL_IQ_ELEMENT_LENGTH) {
		dev_err(&ctrl_info->pci_dev->dev,
			"max. inbound queue element length of %d is less than the required length of %d\n",
			ctrl_info->max_iq_element_length,
			PQI_OPERATIONAL_IQ_ELEMENT_LENGTH);
		return -EINVAL;
	}

	if (ctrl_info->max_oq_element_length <
		PQI_OPERATIONAL_OQ_ELEMENT_LENGTH) {
		dev_err(&ctrl_info->pci_dev->dev,
			"max. outbound queue element length of %d is less than the required length of %d\n",
			ctrl_info->max_oq_element_length,
			PQI_OPERATIONAL_OQ_ELEMENT_LENGTH);
		return -EINVAL;
	}

	if (ctrl_info->max_inbound_iu_length_per_firmware <
		PQI_OPERATIONAL_IQ_ELEMENT_LENGTH) {
		dev_err(&ctrl_info->pci_dev->dev,
			"max. inbound IU length of %u is less than the min. required length of %d\n",
			ctrl_info->max_inbound_iu_length_per_firmware,
			PQI_OPERATIONAL_IQ_ELEMENT_LENGTH);
		return -EINVAL;
	}

	if (!ctrl_info->inbound_spanning_supported) {
		dev_err(&ctrl_info->pci_dev->dev,
			"the controller does not support inbound spanning\n");
		return -EINVAL;
	}

	if (ctrl_info->outbound_spanning_supported) {
		dev_err(&ctrl_info->pci_dev->dev,
			"the controller supports outbound spanning but this driver does not\n");
		return -EINVAL;
	}

	return 0;
}

static int pqi_delete_operational_queue(struct pqi_ctrl_info *ctrl_info,
	bool inbound_queue, u16 queue_id)
{
	struct pqi_general_admin_request request;
	struct pqi_general_admin_response response;

	memset(&request, 0, sizeof(request));
	request.header.iu_type = PQI_REQUEST_IU_GENERAL_ADMIN;
	put_unaligned_le16(PQI_GENERAL_ADMIN_IU_LENGTH,
		&request.header.iu_length);
	if (inbound_queue)
		request.function_code =
			PQI_GENERAL_ADMIN_FUNCTION_DELETE_IQ;
	else
		request.function_code =
			PQI_GENERAL_ADMIN_FUNCTION_DELETE_OQ;
	put_unaligned_le16(queue_id,
		&request.data.delete_operational_queue.queue_id);

	return pqi_submit_admin_request_synchronous(ctrl_info, &request,
		&response);
}

static int pqi_create_event_queue(struct pqi_ctrl_info *ctrl_info)
{
	int rc;
	struct pqi_event_queue *event_queue;
	struct pqi_general_admin_request request;
	struct pqi_general_admin_response response;

	event_queue = &ctrl_info->event_queue;

	/*
	 * Create OQ (Outbound Queue - device to host queue) to dedicate
	 * to events.
	 */
	memset(&request, 0, sizeof(request));
	request.header.iu_type = PQI_REQUEST_IU_GENERAL_ADMIN;
	put_unaligned_le16(PQI_GENERAL_ADMIN_IU_LENGTH,
		&request.header.iu_length);
	request.function_code = PQI_GENERAL_ADMIN_FUNCTION_CREATE_OQ;
	put_unaligned_le16(event_queue->oq_id,
		&request.data.create_operational_oq.queue_id);
	put_unaligned_le64((u64)event_queue->oq_element_array_bus_addr,
		&request.data.create_operational_oq.element_array_addr);
	put_unaligned_le64((u64)event_queue->oq_pi_bus_addr,
		&request.data.create_operational_oq.pi_addr);
	put_unaligned_le16(PQI_NUM_EVENT_QUEUE_ELEMENTS,
		&request.data.create_operational_oq.num_elements);
	put_unaligned_le16(PQI_EVENT_OQ_ELEMENT_LENGTH / 16,
		&request.data.create_operational_oq.element_length);
	request.data.create_operational_oq.queue_protocol = PQI_PROTOCOL_SOP;
	put_unaligned_le16(event_queue->int_msg_num,
		&request.data.create_operational_oq.int_msg_num);

	rc = pqi_submit_admin_request_synchronous(ctrl_info, &request,
		&response);
	if (rc)
		return rc;

	event_queue->oq_ci = ctrl_info->iomem_base +
		PQI_DEVICE_REGISTERS_OFFSET +
		get_unaligned_le64(
			&response.data.create_operational_oq.oq_ci_offset);

	return 0;
}

static int pqi_create_queue_group(struct pqi_ctrl_info *ctrl_info)
{
	unsigned int i;
	int rc;
	struct pqi_queue_group *queue_group;
	struct pqi_general_admin_request request;
	struct pqi_general_admin_response response;

	i = ctrl_info->num_active_queue_groups;
	queue_group = &ctrl_info->queue_groups[i];

	/*
	 * Create IQ (Inbound Queue - host to device queue) for
	 * RAID path.
	 */
	memset(&request, 0, sizeof(request));
	request.header.iu_type = PQI_REQUEST_IU_GENERAL_ADMIN;
	put_unaligned_le16(PQI_GENERAL_ADMIN_IU_LENGTH,
		&request.header.iu_length);
	request.function_code = PQI_GENERAL_ADMIN_FUNCTION_CREATE_IQ;
	put_unaligned_le16(queue_group->iq_id[RAID_PATH],
		&request.data.create_operational_iq.queue_id);
	put_unaligned_le64(
		(u64)queue_group->iq_element_array_bus_addr[RAID_PATH],
		&request.data.create_operational_iq.element_array_addr);
	put_unaligned_le64((u64)queue_group->iq_ci_bus_addr[RAID_PATH],
		&request.data.create_operational_iq.ci_addr);
	put_unaligned_le16(ctrl_info->num_elements_per_iq,
		&request.data.create_operational_iq.num_elements);
	put_unaligned_le16(PQI_OPERATIONAL_IQ_ELEMENT_LENGTH / 16,
		&request.data.create_operational_iq.element_length);
	request.data.create_operational_iq.queue_protocol = PQI_PROTOCOL_SOP;

	rc = pqi_submit_admin_request_synchronous(ctrl_info, &request,
		&response);
	if (rc) {
		dev_err(&ctrl_info->pci_dev->dev,
			"error creating inbound RAID queue\n");
		return rc;
	}

	queue_group->iq_pi[RAID_PATH] = ctrl_info->iomem_base +
		PQI_DEVICE_REGISTERS_OFFSET +
		get_unaligned_le64(
			&response.data.create_operational_iq.iq_pi_offset);

	/*
	 * Create IQ (Inbound Queue - host to device queue) for
	 * Advanced I/O (AIO) path.
	 */
	memset(&request, 0, sizeof(request));
	request.header.iu_type = PQI_REQUEST_IU_GENERAL_ADMIN;
	put_unaligned_le16(PQI_GENERAL_ADMIN_IU_LENGTH,
		&request.header.iu_length);
	request.function_code = PQI_GENERAL_ADMIN_FUNCTION_CREATE_IQ;
	put_unaligned_le16(queue_group->iq_id[AIO_PATH],
		&request.data.create_operational_iq.queue_id);
	put_unaligned_le64((u64)queue_group->
		iq_element_array_bus_addr[AIO_PATH],
		&request.data.create_operational_iq.element_array_addr);
	put_unaligned_le64((u64)queue_group->iq_ci_bus_addr[AIO_PATH],
		&request.data.create_operational_iq.ci_addr);
	put_unaligned_le16(ctrl_info->num_elements_per_iq,
		&request.data.create_operational_iq.num_elements);
	put_unaligned_le16(PQI_OPERATIONAL_IQ_ELEMENT_LENGTH / 16,
		&request.data.create_operational_iq.element_length);
	request.data.create_operational_iq.queue_protocol = PQI_PROTOCOL_SOP;

	rc = pqi_submit_admin_request_synchronous(ctrl_info, &request,
		&response);
	if (rc) {
		dev_err(&ctrl_info->pci_dev->dev,
			"error creating inbound AIO queue\n");
		goto delete_inbound_queue_raid;
	}

	queue_group->iq_pi[AIO_PATH] = ctrl_info->iomem_base +
		PQI_DEVICE_REGISTERS_OFFSET +
		get_unaligned_le64(
			&response.data.create_operational_iq.iq_pi_offset);

	/*
	 * Designate the 2nd IQ as the AIO path.  By default, all IQs are
	 * assumed to be for RAID path I/O unless we change the queue's
	 * property.
	 */
	memset(&request, 0, sizeof(request));
	request.header.iu_type = PQI_REQUEST_IU_GENERAL_ADMIN;
	put_unaligned_le16(PQI_GENERAL_ADMIN_IU_LENGTH,
		&request.header.iu_length);
	request.function_code = PQI_GENERAL_ADMIN_FUNCTION_CHANGE_IQ_PROPERTY;
	put_unaligned_le16(queue_group->iq_id[AIO_PATH],
		&request.data.change_operational_iq_properties.queue_id);
	put_unaligned_le32(PQI_IQ_PROPERTY_IS_AIO_QUEUE,
		&request.data.change_operational_iq_properties.vendor_specific);

	rc = pqi_submit_admin_request_synchronous(ctrl_info, &request,
		&response);
	if (rc) {
		dev_err(&ctrl_info->pci_dev->dev,
			"error changing queue property\n");
		goto delete_inbound_queue_aio;
	}

	/*
	 * Create OQ (Outbound Queue - device to host queue).
	 */
	memset(&request, 0, sizeof(request));
	request.header.iu_type = PQI_REQUEST_IU_GENERAL_ADMIN;
	put_unaligned_le16(PQI_GENERAL_ADMIN_IU_LENGTH,
		&request.header.iu_length);
	request.function_code = PQI_GENERAL_ADMIN_FUNCTION_CREATE_OQ;
	put_unaligned_le16(queue_group->oq_id,
		&request.data.create_operational_oq.queue_id);
	put_unaligned_le64((u64)queue_group->oq_element_array_bus_addr,
		&request.data.create_operational_oq.element_array_addr);
	put_unaligned_le64((u64)queue_group->oq_pi_bus_addr,
		&request.data.create_operational_oq.pi_addr);
	put_unaligned_le16(ctrl_info->num_elements_per_oq,
		&request.data.create_operational_oq.num_elements);
	put_unaligned_le16(PQI_OPERATIONAL_OQ_ELEMENT_LENGTH / 16,
		&request.data.create_operational_oq.element_length);
	request.data.create_operational_oq.queue_protocol = PQI_PROTOCOL_SOP;
	put_unaligned_le16(queue_group->int_msg_num,
		&request.data.create_operational_oq.int_msg_num);

	rc = pqi_submit_admin_request_synchronous(ctrl_info, &request,
		&response);
	if (rc) {
		dev_err(&ctrl_info->pci_dev->dev,
			"error creating outbound queue\n");
		goto delete_inbound_queue_aio;
	}

	queue_group->oq_ci = ctrl_info->iomem_base +
		PQI_DEVICE_REGISTERS_OFFSET +
		get_unaligned_le64(
			&response.data.create_operational_oq.oq_ci_offset);

	ctrl_info->num_active_queue_groups++;

	return 0;

delete_inbound_queue_aio:
	pqi_delete_operational_queue(ctrl_info, true,
		queue_group->iq_id[AIO_PATH]);

delete_inbound_queue_raid:
	pqi_delete_operational_queue(ctrl_info, true,
		queue_group->iq_id[RAID_PATH]);

	return rc;
}

static int pqi_create_queues(struct pqi_ctrl_info *ctrl_info)
{
	int rc;
	unsigned int i;

	rc = pqi_create_event_queue(ctrl_info);
	if (rc) {
		dev_err(&ctrl_info->pci_dev->dev,
			"error creating event queue\n");
		return rc;
	}

	for (i = 0; i < ctrl_info->num_queue_groups; i++) {
		rc = pqi_create_queue_group(ctrl_info);
		if (rc) {
			dev_err(&ctrl_info->pci_dev->dev,
				"error creating queue group number %u/%u\n",
				i, ctrl_info->num_queue_groups);
			return rc;
		}
	}

	return 0;
}

#define PQI_REPORT_EVENT_CONFIG_BUFFER_LENGTH	\
	(offsetof(struct pqi_event_config, descriptors) + \
	(PQI_MAX_EVENT_DESCRIPTORS * sizeof(struct pqi_event_descriptor)))

static int pqi_configure_events(struct pqi_ctrl_info *ctrl_info)
{
	int rc;
	unsigned int i;
	struct pqi_event_config *event_config;
	struct pqi_general_management_request request;

	event_config = kmalloc(PQI_REPORT_EVENT_CONFIG_BUFFER_LENGTH,
		GFP_KERNEL);
	if (!event_config)
		return -ENOMEM;

	memset(&request, 0, sizeof(request));

	request.header.iu_type = PQI_REQUEST_IU_REPORT_VENDOR_EVENT_CONFIG;
	put_unaligned_le16(offsetof(struct pqi_general_management_request,
		data.report_event_configuration.sg_descriptors[1]) -
		PQI_REQUEST_HEADER_LENGTH, &request.header.iu_length);
	put_unaligned_le32(PQI_REPORT_EVENT_CONFIG_BUFFER_LENGTH,
		&request.data.report_event_configuration.buffer_length);

	rc = pqi_map_single(ctrl_info->pci_dev,
		request.data.report_event_configuration.sg_descriptors,
		event_config, PQI_REPORT_EVENT_CONFIG_BUFFER_LENGTH,
		PCI_DMA_FROMDEVICE);
	if (rc)
		goto out;

	rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header,
		0, NULL, NO_TIMEOUT);

	pqi_pci_unmap(ctrl_info->pci_dev,
		request.data.report_event_configuration.sg_descriptors, 1,
		PCI_DMA_FROMDEVICE);

	if (rc)
		goto out;

	for (i = 0; i < event_config->num_event_descriptors; i++)
		put_unaligned_le16(ctrl_info->event_queue.oq_id,
			&event_config->descriptors[i].oq_id);

	memset(&request, 0, sizeof(request));

	request.header.iu_type = PQI_REQUEST_IU_SET_VENDOR_EVENT_CONFIG;
	put_unaligned_le16(offsetof(struct pqi_general_management_request,
		data.report_event_configuration.sg_descriptors[1]) -
		PQI_REQUEST_HEADER_LENGTH, &request.header.iu_length);
	put_unaligned_le32(PQI_REPORT_EVENT_CONFIG_BUFFER_LENGTH,
		&request.data.report_event_configuration.buffer_length);

	rc = pqi_map_single(ctrl_info->pci_dev,
		request.data.report_event_configuration.sg_descriptors,
		event_config, PQI_REPORT_EVENT_CONFIG_BUFFER_LENGTH,
		PCI_DMA_TODEVICE);
	if (rc)
		goto out;

	rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header, 0,
		NULL, NO_TIMEOUT);

	pqi_pci_unmap(ctrl_info->pci_dev,
		request.data.report_event_configuration.sg_descriptors, 1,
		PCI_DMA_TODEVICE);

out:
	kfree(event_config);

	return rc;
}

static void pqi_free_all_io_requests(struct pqi_ctrl_info *ctrl_info)
{
	unsigned int i;
	struct device *dev;
	size_t sg_chain_buffer_length;
	struct pqi_io_request *io_request;

	if (!ctrl_info->io_request_pool)
		return;

	dev = &ctrl_info->pci_dev->dev;
	sg_chain_buffer_length = ctrl_info->sg_chain_buffer_length;
	io_request = ctrl_info->io_request_pool;

	for (i = 0; i < ctrl_info->max_io_slots; i++) {
		kfree(io_request->iu);
		if (!io_request->sg_chain_buffer)
			break;
		dma_free_coherent(dev, sg_chain_buffer_length,
			io_request->sg_chain_buffer,
			io_request->sg_chain_buffer_dma_handle);
		io_request++;
	}

	kfree(ctrl_info->io_request_pool);
	ctrl_info->io_request_pool = NULL;
}

static inline int pqi_alloc_error_buffer(struct pqi_ctrl_info *ctrl_info)
{
	ctrl_info->error_buffer = dma_zalloc_coherent(&ctrl_info->pci_dev->dev,
		ctrl_info->error_buffer_length,
		&ctrl_info->error_buffer_dma_handle, GFP_KERNEL);

	if (!ctrl_info->error_buffer)
		return -ENOMEM;

	return 0;
}

static int pqi_alloc_io_resources(struct pqi_ctrl_info *ctrl_info)
{
	unsigned int i;
	void *sg_chain_buffer;
	size_t sg_chain_buffer_length;
	dma_addr_t sg_chain_buffer_dma_handle;
	struct device *dev;
	struct pqi_io_request *io_request;

	ctrl_info->io_request_pool = kzalloc(ctrl_info->max_io_slots *
		sizeof(ctrl_info->io_request_pool[0]), GFP_KERNEL);

	if (!ctrl_info->io_request_pool) {
		dev_err(&ctrl_info->pci_dev->dev,
			"failed to allocate I/O request pool\n");
		goto error;
	}

	dev = &ctrl_info->pci_dev->dev;
	sg_chain_buffer_length = ctrl_info->sg_chain_buffer_length;
	io_request = ctrl_info->io_request_pool;

	for (i = 0; i < ctrl_info->max_io_slots; i++) {
		io_request->iu =
			kmalloc(ctrl_info->max_inbound_iu_length, GFP_KERNEL);

		if (!io_request->iu) {
			dev_err(&ctrl_info->pci_dev->dev,
				"failed to allocate IU buffers\n");
			goto error;
		}

		sg_chain_buffer = dma_alloc_coherent(dev,
			sg_chain_buffer_length, &sg_chain_buffer_dma_handle,
			GFP_KERNEL);

		if (!sg_chain_buffer) {
			dev_err(&ctrl_info->pci_dev->dev,
				"failed to allocate PQI scatter-gather chain buffers\n");
			goto error;
		}

		io_request->index = i;
		io_request->sg_chain_buffer = sg_chain_buffer;
		io_request->sg_chain_buffer_dma_handle =
			sg_chain_buffer_dma_handle;
		io_request++;
	}

	return 0;

error:
	pqi_free_all_io_requests(ctrl_info);

	return -ENOMEM;
}

/*
 * Calculate required resources that are sized based on max. outstanding
 * requests and max. transfer size.
 */

static void pqi_calculate_io_resources(struct pqi_ctrl_info *ctrl_info)
{
	u32 max_transfer_size;
	u32 max_sg_entries;

	ctrl_info->scsi_ml_can_queue =
		ctrl_info->max_outstanding_requests - PQI_RESERVED_IO_SLOTS;
	ctrl_info->max_io_slots = ctrl_info->max_outstanding_requests;

	ctrl_info->error_buffer_length =
		ctrl_info->max_io_slots * PQI_ERROR_BUFFER_ELEMENT_LENGTH;

	max_transfer_size =
		min(ctrl_info->max_transfer_size, PQI_MAX_TRANSFER_SIZE);

	max_sg_entries = max_transfer_size / PAGE_SIZE;

	/* +1 to cover when the buffer is not page-aligned. */
	max_sg_entries++;

	max_sg_entries = min(ctrl_info->max_sg_entries, max_sg_entries);

	max_transfer_size = (max_sg_entries - 1) * PAGE_SIZE;

	ctrl_info->sg_chain_buffer_length =
		max_sg_entries * sizeof(struct pqi_sg_descriptor);
	ctrl_info->sg_tablesize = max_sg_entries;
	ctrl_info->max_sectors = max_transfer_size / 512;
}

static void pqi_calculate_queue_resources(struct pqi_ctrl_info *ctrl_info)
{
	int num_cpus;
	int max_queue_groups;
	int num_queue_groups;
	u16 num_elements_per_iq;
	u16 num_elements_per_oq;

	max_queue_groups = min(ctrl_info->max_inbound_queues / 2,
		ctrl_info->max_outbound_queues - 1);
	max_queue_groups = min(max_queue_groups, PQI_MAX_QUEUE_GROUPS);

	num_cpus = num_online_cpus();
	num_queue_groups = min(num_cpus, ctrl_info->max_msix_vectors);
	num_queue_groups = min(num_queue_groups, max_queue_groups);

	ctrl_info->num_queue_groups = num_queue_groups;

	/*
	 * Make sure that the max. inbound IU length is an even multiple
	 * of our inbound element length.
	 */
	ctrl_info->max_inbound_iu_length =
		(ctrl_info->max_inbound_iu_length_per_firmware /
		PQI_OPERATIONAL_IQ_ELEMENT_LENGTH) *
		PQI_OPERATIONAL_IQ_ELEMENT_LENGTH;

	num_elements_per_iq =
		(ctrl_info->max_inbound_iu_length /
		PQI_OPERATIONAL_IQ_ELEMENT_LENGTH);

	/* Add one because one element in each queue is unusable. */
	num_elements_per_iq++;

	num_elements_per_iq = min(num_elements_per_iq,
		ctrl_info->max_elements_per_iq);

	num_elements_per_oq = ((num_elements_per_iq - 1) * 2) + 1;
	num_elements_per_oq = min(num_elements_per_oq,
		ctrl_info->max_elements_per_oq);

	ctrl_info->num_elements_per_iq = num_elements_per_iq;
	ctrl_info->num_elements_per_oq = num_elements_per_oq;

	ctrl_info->max_sg_per_iu =
		((ctrl_info->max_inbound_iu_length -
		PQI_OPERATIONAL_IQ_ELEMENT_LENGTH) /
		sizeof(struct pqi_sg_descriptor)) +
		PQI_MAX_EMBEDDED_SG_DESCRIPTORS;
}

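/*
 * Scatter-gather lists: up to max_sg_per_iu - 1 descriptors are embedded
 * directly in the request IU; longer lists continue in the per-request
 * chain buffer via a CISS_SG_CHAIN descriptor, and the final descriptor
 * is flagged with CISS_SG_LAST (see the SG list builders below).
 */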
static inline void pqi_set_sg_descriptor(
	struct pqi_sg_descriptor *sg_descriptor, struct scatterlist *sg)
{
	u64 address = (u64)sg_dma_address(sg);
	unsigned int length = sg_dma_len(sg);

	put_unaligned_le64(address, &sg_descriptor->address);
	put_unaligned_le32(length, &sg_descriptor->length);
	put_unaligned_le32(0, &sg_descriptor->flags);
}

static int pqi_build_raid_sg_list(struct pqi_ctrl_info *ctrl_info,
	struct pqi_raid_path_request *request, struct scsi_cmnd *scmd,
	struct pqi_io_request *io_request)
{
	int i;
	u16 iu_length;
	int sg_count;
	bool chained;
	unsigned int num_sg_in_iu;
	unsigned int max_sg_per_iu;
	struct scatterlist *sg;
	struct pqi_sg_descriptor *sg_descriptor;

	sg_count = scsi_dma_map(scmd);
	if (sg_count < 0)
		return sg_count;

	iu_length = offsetof(struct pqi_raid_path_request, sg_descriptors) -
		PQI_REQUEST_HEADER_LENGTH;

	if (sg_count == 0)
		goto out;

	sg = scsi_sglist(scmd);
	sg_descriptor = request->sg_descriptors;
	max_sg_per_iu = ctrl_info->max_sg_per_iu - 1;
	chained = false;
	num_sg_in_iu = 0;
	i = 0;

	while (1) {
		pqi_set_sg_descriptor(sg_descriptor, sg);
		if (!chained)
			num_sg_in_iu++;
		i++;
		if (i == sg_count)
			break;
		sg_descriptor++;
		if (i == max_sg_per_iu) {
			put_unaligned_le64(
				(u64)io_request->sg_chain_buffer_dma_handle,
				&sg_descriptor->address);
			put_unaligned_le32((sg_count - num_sg_in_iu)
				* sizeof(*sg_descriptor),
				&sg_descriptor->length);
			put_unaligned_le32(CISS_SG_CHAIN,
				&sg_descriptor->flags);
			chained = true;
			num_sg_in_iu++;
			sg_descriptor = io_request->sg_chain_buffer;
		}
		sg = sg_next(sg);
	}

	put_unaligned_le32(CISS_SG_LAST, &sg_descriptor->flags);
	request->partial = chained;
	iu_length += num_sg_in_iu * sizeof(*sg_descriptor);

out:
	put_unaligned_le16(iu_length, &request->header.iu_length);

	return 0;
}

static int pqi_build_aio_sg_list(struct pqi_ctrl_info *ctrl_info,
	struct pqi_aio_path_request *request, struct scsi_cmnd *scmd,
	struct pqi_io_request *io_request)
{
	int i;
	u16 iu_length;
	int sg_count;
	bool chained;
	unsigned int num_sg_in_iu;
	unsigned int max_sg_per_iu;
	struct scatterlist *sg;
	struct pqi_sg_descriptor *sg_descriptor;

	sg_count = scsi_dma_map(scmd);
	if (sg_count < 0)
		return sg_count;

	iu_length = offsetof(struct pqi_aio_path_request, sg_descriptors) -
		PQI_REQUEST_HEADER_LENGTH;
	num_sg_in_iu = 0;

	if (sg_count == 0)
		goto out;

	sg = scsi_sglist(scmd);
	sg_descriptor = request->sg_descriptors;
	max_sg_per_iu = ctrl_info->max_sg_per_iu - 1;
	chained = false;
	i = 0;

	while (1) {
		pqi_set_sg_descriptor(sg_descriptor, sg);
		if (!chained)
			num_sg_in_iu++;
		i++;
		if (i == sg_count)
			break;
		sg_descriptor++;
		if (i == max_sg_per_iu) {
			put_unaligned_le64(
				(u64)io_request->sg_chain_buffer_dma_handle,
				&sg_descriptor->address);
			put_unaligned_le32((sg_count - num_sg_in_iu)
				* sizeof(*sg_descriptor),
				&sg_descriptor->length);
			put_unaligned_le32(CISS_SG_CHAIN,
				&sg_descriptor->flags);
			chained = true;
			num_sg_in_iu++;
			sg_descriptor = io_request->sg_chain_buffer;
		}
		sg = sg_next(sg);
	}

	put_unaligned_le32(CISS_SG_LAST, &sg_descriptor->flags);
	request->partial = chained;
	iu_length += num_sg_in_iu * sizeof(*sg_descriptor);

out:
	put_unaligned_le16(iu_length, &request->header.iu_length);
	request->num_sg_descriptors = num_sg_in_iu;

	return 0;
}

static void pqi_raid_io_complete(struct pqi_io_request *io_request,
	void *context)
{
	struct scsi_cmnd *scmd;

	scmd = io_request->scmd;
	pqi_free_io_request(io_request);
	scsi_dma_unmap(scmd);
	pqi_scsi_done(scmd);
}
static int pqi_raid_submit_scsi_cmd(struct pqi_ctrl_info *ctrl_info,
	struct pqi_scsi_dev *device, struct scsi_cmnd *scmd,
	struct pqi_queue_group *queue_group)
{
	int rc;
	size_t cdb_length;
	struct pqi_io_request *io_request;
	struct pqi_raid_path_request *request;

	io_request = pqi_alloc_io_request(ctrl_info);
	io_request->io_complete_callback = pqi_raid_io_complete;
	io_request->scmd = scmd;

	scmd->host_scribble = (unsigned char *)io_request;

	request = io_request->iu;
	memset(request, 0,
		offsetof(struct pqi_raid_path_request, sg_descriptors));

	request->header.iu_type = PQI_REQUEST_IU_RAID_PATH_IO;
	put_unaligned_le32(scsi_bufflen(scmd), &request->buffer_length);
	request->task_attribute = SOP_TASK_ATTRIBUTE_SIMPLE;
	put_unaligned_le16(io_request->index, &request->request_id);
	request->error_index = request->request_id;
	memcpy(request->lun_number, device->scsi3addr,
		sizeof(request->lun_number));

	cdb_length = min_t(size_t, scmd->cmd_len, sizeof(request->cdb));
	memcpy(request->cdb, scmd->cmnd, cdb_length);

	switch (cdb_length) {
	case 6:
	case 10:
	case 12:
	case 16:
		/* No bytes in the Additional CDB bytes field */
		request->additional_cdb_bytes_usage =
			SOP_ADDITIONAL_CDB_BYTES_0;
		break;
	case 20:
		/* 4 bytes in the Additional cdb field */
		request->additional_cdb_bytes_usage =
			SOP_ADDITIONAL_CDB_BYTES_4;
		break;
	case 24:
		/* 8 bytes in the Additional cdb field */
		request->additional_cdb_bytes_usage =
			SOP_ADDITIONAL_CDB_BYTES_8;
		break;
	case 28:
		/* 12 bytes in the Additional cdb field */
		request->additional_cdb_bytes_usage =
			SOP_ADDITIONAL_CDB_BYTES_12;
		break;
	case 32:
	default:
		/* 16 bytes in the Additional cdb field */
		request->additional_cdb_bytes_usage =
			SOP_ADDITIONAL_CDB_BYTES_16;
		break;
	}

	switch (scmd->sc_data_direction) {
	case DMA_TO_DEVICE:
		request->data_direction = SOP_READ_FLAG;
		break;
	case DMA_FROM_DEVICE:
		request->data_direction = SOP_WRITE_FLAG;
		break;
	case DMA_NONE:
		request->data_direction = SOP_NO_DIRECTION_FLAG;
		break;
	case DMA_BIDIRECTIONAL:
		request->data_direction = SOP_BIDIRECTIONAL;
		break;
	default:
		dev_err(&ctrl_info->pci_dev->dev,
			"unknown data direction: %d\n",
			scmd->sc_data_direction);
		WARN_ON(scmd->sc_data_direction);
		break;
	}

	rc = pqi_build_raid_sg_list(ctrl_info, request, scmd, io_request);
	if (rc) {
		pqi_free_io_request(io_request);
		return SCSI_MLQUEUE_HOST_BUSY;
	}

	pqi_start_io(ctrl_info, queue_group, RAID_PATH, io_request);

	return 0;
}
static void pqi_aio_io_complete(struct pqi_io_request *io_request,
	void *context)
{
	struct scsi_cmnd *scmd;

	scmd = io_request->scmd;
	scsi_dma_unmap(scmd);
	if (io_request->status == -EAGAIN)
		set_host_byte(scmd, DID_IMM_RETRY);
	pqi_free_io_request(io_request);
	pqi_scsi_done(scmd);
}
static inline int pqi_aio_submit_scsi_cmd(struct pqi_ctrl_info *ctrl_info,
	struct pqi_scsi_dev *device, struct scsi_cmnd *scmd,
	struct pqi_queue_group *queue_group)
{
	return pqi_aio_submit_io(ctrl_info, scmd, device->aio_handle,
		scmd->cmnd, scmd->cmd_len, queue_group, NULL);
}
static int pqi_aio_submit_io(struct pqi_ctrl_info *ctrl_info,
	struct scsi_cmnd *scmd, u32 aio_handle, u8 *cdb,
	unsigned int cdb_length, struct pqi_queue_group *queue_group,
	struct pqi_encryption_info *encryption_info)
{
	int rc;
	struct pqi_io_request *io_request;
	struct pqi_aio_path_request *request;

	io_request = pqi_alloc_io_request(ctrl_info);
	io_request->io_complete_callback = pqi_aio_io_complete;
	io_request->scmd = scmd;

	scmd->host_scribble = (unsigned char *)io_request;

	request = io_request->iu;
	memset(request, 0,
		offsetof(struct pqi_raid_path_request, sg_descriptors));

	request->header.iu_type = PQI_REQUEST_IU_AIO_PATH_IO;
	put_unaligned_le32(aio_handle, &request->nexus_id);
	put_unaligned_le32(scsi_bufflen(scmd), &request->buffer_length);
	request->task_attribute = SOP_TASK_ATTRIBUTE_SIMPLE;
	put_unaligned_le16(io_request->index, &request->request_id);
	request->error_index = request->request_id;
	if (cdb_length > sizeof(request->cdb))
		cdb_length = sizeof(request->cdb);
	request->cdb_length = cdb_length;
	memcpy(request->cdb, cdb, cdb_length);

	switch (scmd->sc_data_direction) {
	case DMA_TO_DEVICE:
		request->data_direction = SOP_READ_FLAG;
		break;
	case DMA_FROM_DEVICE:
		request->data_direction = SOP_WRITE_FLAG;
		break;
	case DMA_NONE:
		request->data_direction = SOP_NO_DIRECTION_FLAG;
		break;
	case DMA_BIDIRECTIONAL:
		request->data_direction = SOP_BIDIRECTIONAL;
		break;
	default:
		dev_err(&ctrl_info->pci_dev->dev,
			"unknown data direction: %d\n",
			scmd->sc_data_direction);
		WARN_ON(scmd->sc_data_direction);
		break;
	}

	if (encryption_info) {
		request->encryption_enable = true;
		put_unaligned_le16(encryption_info->data_encryption_key_index,
			&request->data_encryption_key_index);
		put_unaligned_le32(encryption_info->encrypt_tweak_lower,
			&request->encrypt_tweak_lower);
		put_unaligned_le32(encryption_info->encrypt_tweak_upper,
			&request->encrypt_tweak_upper);
	}

	rc = pqi_build_aio_sg_list(ctrl_info, request, scmd, io_request);
	if (rc) {
		pqi_free_io_request(io_request);
		return SCSI_MLQUEUE_HOST_BUSY;
	}

	pqi_start_io(ctrl_info, queue_group, AIO_PATH, io_request);

	return 0;
}
static int pqi_scsi_queue_command(struct Scsi_Host *shost,
	struct scsi_cmnd *scmd)
{
	int rc;
	struct pqi_ctrl_info *ctrl_info;
	struct pqi_scsi_dev *device;
	u16 hwq;
	struct pqi_queue_group *queue_group;
	bool raid_bypassed;

	device = scmd->device->hostdata;
	ctrl_info = shost_to_hba(shost);

	if (pqi_ctrl_offline(ctrl_info)) {
		set_host_byte(scmd, DID_NO_CONNECT);
		pqi_scsi_done(scmd);
		return 0;
	}

	/*
	 * This is necessary because the SML doesn't zero out this field during
	 * error recovery.
	 */
	scmd->result = 0;

	hwq = blk_mq_unique_tag_to_hwq(blk_mq_unique_tag(scmd->request));
	if (hwq >= ctrl_info->num_queue_groups)
		hwq = 0;

	queue_group = &ctrl_info->queue_groups[hwq];

	if (pqi_is_logical_device(device)) {
		raid_bypassed = false;
		if (device->offload_enabled &&
			!blk_rq_is_passthrough(scmd->request)) {
			rc = pqi_raid_bypass_submit_scsi_cmd(ctrl_info, device,
				scmd, queue_group);
			if (rc == 0 ||
				rc == SCSI_MLQUEUE_HOST_BUSY ||
				rc == SAM_STAT_CHECK_CONDITION ||
				rc == SAM_STAT_RESERVATION_CONFLICT)
				raid_bypassed = true;
		}
		if (!raid_bypassed)
			rc = pqi_raid_submit_scsi_cmd(ctrl_info, device, scmd,
				queue_group);
	} else {
		if (device->aio_enabled)
			rc = pqi_aio_submit_scsi_cmd(ctrl_info, device, scmd,
				queue_group);
		else
			rc = pqi_raid_submit_scsi_cmd(ctrl_info, device, scmd,
				queue_group);
	}

	return rc;
}
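/*
 * The hardware queue index used above comes from the block layer's unique
 * tag (blk_mq_unique_tag_to_hwq()), so each blk-mq hardware context maps
 * onto one PQI queue group; an out-of-range index falls back to queue
 * group 0.
 */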
static void pqi_lun_reset_complete(struct pqi_io_request *io_request,
	void *context)
{
	struct completion *waiting = context;

	complete(waiting);
}
#define PQI_LUN_RESET_TIMEOUT_SECS	10

static int pqi_wait_for_lun_reset_completion(struct pqi_ctrl_info *ctrl_info,
	struct pqi_scsi_dev *device, struct completion *wait)
{
	int rc;
	unsigned int wait_secs = 0;

	while (1) {
		if (wait_for_completion_io_timeout(wait,
			PQI_LUN_RESET_TIMEOUT_SECS * HZ)) {
			rc = 0;
			break;
		}

		pqi_check_ctrl_health(ctrl_info);
		if (pqi_ctrl_offline(ctrl_info)) {
			rc = -ETIMEDOUT;
			break;
		}

		wait_secs += PQI_LUN_RESET_TIMEOUT_SECS;

		dev_err(&ctrl_info->pci_dev->dev,
			"resetting scsi %d:%d:%d:%d - waiting %u seconds\n",
			ctrl_info->scsi_host->host_no, device->bus,
			device->target, device->lun, wait_secs);
	}

	return rc;
}
static int pqi_lun_reset(struct pqi_ctrl_info *ctrl_info,
	struct pqi_scsi_dev *device)
{
	int rc;
	struct pqi_io_request *io_request;
	DECLARE_COMPLETION_ONSTACK(wait);
	struct pqi_task_management_request *request;

	down(&ctrl_info->lun_reset_sem);

	io_request = pqi_alloc_io_request(ctrl_info);
	io_request->io_complete_callback = pqi_lun_reset_complete;
	io_request->context = &wait;

	request = io_request->iu;
	memset(request, 0, sizeof(*request));

	request->header.iu_type = PQI_REQUEST_IU_TASK_MANAGEMENT;
	put_unaligned_le16(sizeof(*request) - PQI_REQUEST_HEADER_LENGTH,
		&request->header.iu_length);
	put_unaligned_le16(io_request->index, &request->request_id);
	memcpy(request->lun_number, device->scsi3addr,
		sizeof(request->lun_number));
	request->task_management_function = SOP_TASK_MANAGEMENT_LUN_RESET;

	pqi_start_io(ctrl_info,
		&ctrl_info->queue_groups[PQI_DEFAULT_QUEUE_GROUP], RAID_PATH,
		io_request);

	rc = pqi_wait_for_lun_reset_completion(ctrl_info, device, &wait);
	if (rc == 0)
		rc = io_request->status;

	pqi_free_io_request(io_request);
	up(&ctrl_info->lun_reset_sem);

	return rc;
}
/* Performs a reset at the LUN level. */

static int pqi_device_reset(struct pqi_ctrl_info *ctrl_info,
	struct pqi_scsi_dev *device)
{
	int rc;

	pqi_check_ctrl_health(ctrl_info);
	if (pqi_ctrl_offline(ctrl_info))
		return FAILED;

	rc = pqi_lun_reset(ctrl_info, device);

	return rc == 0 ? SUCCESS : FAILED;
}
static int pqi_eh_device_reset_handler(struct scsi_cmnd *scmd)
{
	int rc;
	struct pqi_ctrl_info *ctrl_info;
	struct pqi_scsi_dev *device;

	ctrl_info = shost_to_hba(scmd->device->host);
	device = scmd->device->hostdata;

	dev_err(&ctrl_info->pci_dev->dev,
		"resetting scsi %d:%d:%d:%d\n",
		ctrl_info->scsi_host->host_no,
		device->bus, device->target, device->lun);

	rc = pqi_device_reset(ctrl_info, device);

	dev_err(&ctrl_info->pci_dev->dev,
		"reset of scsi %d:%d:%d:%d: %s\n",
		ctrl_info->scsi_host->host_no,
		device->bus, device->target, device->lun,
		rc == SUCCESS ? "SUCCESS" : "FAILED");

	return rc;
}
static int pqi_slave_alloc(struct scsi_device *sdev)
{
	struct pqi_scsi_dev *device;
	unsigned long flags;
	struct pqi_ctrl_info *ctrl_info;
	struct scsi_target *starget;
	struct sas_rphy *rphy;

	ctrl_info = shost_to_hba(sdev->host);

	spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);

	if (sdev_channel(sdev) == PQI_PHYSICAL_DEVICE_BUS) {
		starget = scsi_target(sdev);
		rphy = target_to_rphy(starget);
		device = pqi_find_device_by_sas_rphy(ctrl_info, rphy);
		if (device) {
			device->target = sdev_id(sdev);
			device->lun = sdev->lun;
			device->target_lun_valid = true;
		}
	} else {
		device = pqi_find_scsi_dev(ctrl_info, sdev_channel(sdev),
			sdev_id(sdev), sdev->lun);
	}

	if (device && device->expose_device) {
		sdev->hostdata = device;
		device->sdev = sdev;
		if (device->queue_depth) {
			device->advertised_queue_depth = device->queue_depth;
			scsi_change_queue_depth(sdev,
				device->advertised_queue_depth);
		}
	}

	spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);

	return 0;
}
static int pqi_slave_configure(struct scsi_device *sdev)
{
	struct pqi_scsi_dev *device;

	device = sdev->hostdata;
	if (!device->expose_device)
		sdev->no_uld_attach = true;

	return 0;
}
static int pqi_map_queues(struct Scsi_Host *shost)
{
	struct pqi_ctrl_info *ctrl_info = shost_to_hba(shost);

	return blk_mq_pci_map_queues(&shost->tag_set, ctrl_info->pci_dev);
}
static int pqi_getpciinfo_ioctl(struct pqi_ctrl_info *ctrl_info,
	void __user *arg)
{
	struct pci_dev *pci_dev;
	u32 subsystem_vendor;
	u32 subsystem_device;
	cciss_pci_info_struct pciinfo;

	if (!arg)
		return -EINVAL;

	pci_dev = ctrl_info->pci_dev;

	pciinfo.domain = pci_domain_nr(pci_dev->bus);
	pciinfo.bus = pci_dev->bus->number;
	pciinfo.dev_fn = pci_dev->devfn;
	subsystem_vendor = pci_dev->subsystem_vendor;
	subsystem_device = pci_dev->subsystem_device;
	pciinfo.board_id = ((subsystem_device << 16) & 0xffff0000) |
		subsystem_vendor;

	if (copy_to_user(arg, &pciinfo, sizeof(pciinfo)))
		return -EFAULT;

	return 0;
}
static int pqi_getdrivver_ioctl(void __user *arg)
{
	u32 version;

	if (!arg)
		return -EINVAL;

	version = (DRIVER_MAJOR << 28) | (DRIVER_MINOR << 24) |
		(DRIVER_RELEASE << 16) | DRIVER_REVISION;

	if (copy_to_user(arg, &version, sizeof(version)))
		return -EFAULT;

	return 0;
}
struct ciss_error_info {
	u8	scsi_status;
	int	command_status;
	size_t	sense_data_length;
};

static void pqi_error_info_to_ciss(struct pqi_raid_error_info *pqi_error_info,
	struct ciss_error_info *ciss_error_info)
{
	int ciss_cmd_status;
	size_t sense_data_length;

	switch (pqi_error_info->data_out_result) {
	case PQI_DATA_IN_OUT_GOOD:
		ciss_cmd_status = CISS_CMD_STATUS_SUCCESS;
		break;
	case PQI_DATA_IN_OUT_UNDERFLOW:
		ciss_cmd_status = CISS_CMD_STATUS_DATA_UNDERRUN;
		break;
	case PQI_DATA_IN_OUT_BUFFER_OVERFLOW:
		ciss_cmd_status = CISS_CMD_STATUS_DATA_OVERRUN;
		break;
	case PQI_DATA_IN_OUT_PROTOCOL_ERROR:
	case PQI_DATA_IN_OUT_BUFFER_ERROR:
	case PQI_DATA_IN_OUT_BUFFER_OVERFLOW_DESCRIPTOR_AREA:
	case PQI_DATA_IN_OUT_BUFFER_OVERFLOW_BRIDGE:
	case PQI_DATA_IN_OUT_ERROR:
		ciss_cmd_status = CISS_CMD_STATUS_PROTOCOL_ERROR;
		break;
	case PQI_DATA_IN_OUT_HARDWARE_ERROR:
	case PQI_DATA_IN_OUT_PCIE_FABRIC_ERROR:
	case PQI_DATA_IN_OUT_PCIE_COMPLETION_TIMEOUT:
	case PQI_DATA_IN_OUT_PCIE_COMPLETER_ABORT_RECEIVED:
	case PQI_DATA_IN_OUT_PCIE_UNSUPPORTED_REQUEST_RECEIVED:
	case PQI_DATA_IN_OUT_PCIE_ECRC_CHECK_FAILED:
	case PQI_DATA_IN_OUT_PCIE_UNSUPPORTED_REQUEST:
	case PQI_DATA_IN_OUT_PCIE_ACS_VIOLATION:
	case PQI_DATA_IN_OUT_PCIE_TLP_PREFIX_BLOCKED:
	case PQI_DATA_IN_OUT_PCIE_POISONED_MEMORY_READ:
		ciss_cmd_status = CISS_CMD_STATUS_HARDWARE_ERROR;
		break;
	case PQI_DATA_IN_OUT_UNSOLICITED_ABORT:
		ciss_cmd_status = CISS_CMD_STATUS_UNSOLICITED_ABORT;
		break;
	case PQI_DATA_IN_OUT_ABORTED:
		ciss_cmd_status = CISS_CMD_STATUS_ABORTED;
		break;
	case PQI_DATA_IN_OUT_TIMEOUT:
		ciss_cmd_status = CISS_CMD_STATUS_TIMEOUT;
		break;
	default:
		ciss_cmd_status = CISS_CMD_STATUS_TARGET_STATUS;
		break;
	}

	sense_data_length =
		get_unaligned_le16(&pqi_error_info->sense_data_length);
	if (sense_data_length == 0)
		sense_data_length =
			get_unaligned_le16(&pqi_error_info->response_data_length);
	if (sense_data_length)
		if (sense_data_length > sizeof(pqi_error_info->data))
			sense_data_length = sizeof(pqi_error_info->data);

	ciss_error_info->scsi_status = pqi_error_info->status;
	ciss_error_info->command_status = ciss_cmd_status;
	ciss_error_info->sense_data_length = sense_data_length;
}
static int pqi_passthru_ioctl(struct pqi_ctrl_info *ctrl_info, void __user *arg)
{
	int rc;
	char *kernel_buffer = NULL;
	u16 iu_length;
	size_t sense_data_length;
	IOCTL_Command_struct iocommand;
	struct pqi_raid_path_request request;
	struct pqi_raid_error_info pqi_error_info;
	struct ciss_error_info ciss_error_info;

	if (pqi_ctrl_offline(ctrl_info))
		return -ENXIO;
	if (!arg)
		return -EINVAL;
	if (!capable(CAP_SYS_RAWIO))
		return -EPERM;
	if (copy_from_user(&iocommand, arg, sizeof(iocommand)))
		return -EFAULT;
	if (iocommand.buf_size < 1 &&
		iocommand.Request.Type.Direction != XFER_NONE)
		return -EINVAL;
	if (iocommand.Request.CDBLen > sizeof(request.cdb))
		return -EINVAL;
	if (iocommand.Request.Type.Type != TYPE_CMD)
		return -EINVAL;

	switch (iocommand.Request.Type.Direction) {
	case XFER_NONE:
	case XFER_WRITE:
	case XFER_READ:
		break;
	default:
		return -EINVAL;
	}

	if (iocommand.buf_size > 0) {
		kernel_buffer = kmalloc(iocommand.buf_size, GFP_KERNEL);
		if (!kernel_buffer)
			return -ENOMEM;
		if (iocommand.Request.Type.Direction & XFER_WRITE) {
			if (copy_from_user(kernel_buffer, iocommand.buf,
				iocommand.buf_size)) {
				rc = -EFAULT;
				goto out;
			}
		} else {
			memset(kernel_buffer, 0, iocommand.buf_size);
		}
	}

	memset(&request, 0, sizeof(request));

	request.header.iu_type = PQI_REQUEST_IU_RAID_PATH_IO;
	iu_length = offsetof(struct pqi_raid_path_request, sg_descriptors) -
		PQI_REQUEST_HEADER_LENGTH;
	memcpy(request.lun_number, iocommand.LUN_info.LunAddrBytes,
		sizeof(request.lun_number));
	memcpy(request.cdb, iocommand.Request.CDB, iocommand.Request.CDBLen);
	request.additional_cdb_bytes_usage = SOP_ADDITIONAL_CDB_BYTES_0;

	switch (iocommand.Request.Type.Direction) {
	case XFER_NONE:
		request.data_direction = SOP_NO_DIRECTION_FLAG;
		break;
	case XFER_WRITE:
		request.data_direction = SOP_WRITE_FLAG;
		break;
	case XFER_READ:
		request.data_direction = SOP_READ_FLAG;
		break;
	}

	request.task_attribute = SOP_TASK_ATTRIBUTE_SIMPLE;

	if (iocommand.buf_size > 0) {
		put_unaligned_le32(iocommand.buf_size, &request.buffer_length);

		rc = pqi_map_single(ctrl_info->pci_dev,
			&request.sg_descriptors[0], kernel_buffer,
			iocommand.buf_size, PCI_DMA_BIDIRECTIONAL);
		if (rc)
			goto out;

		iu_length += sizeof(request.sg_descriptors[0]);
	}

	put_unaligned_le16(iu_length, &request.header.iu_length);

	rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header,
		PQI_SYNC_FLAGS_INTERRUPTABLE, &pqi_error_info, NO_TIMEOUT);

	if (iocommand.buf_size > 0)
		pqi_pci_unmap(ctrl_info->pci_dev, request.sg_descriptors, 1,
			PCI_DMA_BIDIRECTIONAL);

	memset(&iocommand.error_info, 0, sizeof(iocommand.error_info));

	if (rc == 0) {
		pqi_error_info_to_ciss(&pqi_error_info, &ciss_error_info);
		iocommand.error_info.ScsiStatus = ciss_error_info.scsi_status;
		iocommand.error_info.CommandStatus =
			ciss_error_info.command_status;
		sense_data_length = ciss_error_info.sense_data_length;
		if (sense_data_length) {
			if (sense_data_length >
				sizeof(iocommand.error_info.SenseInfo))
				sense_data_length =
					sizeof(iocommand.error_info.SenseInfo);
			memcpy(iocommand.error_info.SenseInfo,
				pqi_error_info.data, sense_data_length);
			iocommand.error_info.SenseLen = sense_data_length;
		}
	}

	if (copy_to_user(arg, &iocommand, sizeof(iocommand))) {
		rc = -EFAULT;
		goto out;
	}

	if (rc == 0 && iocommand.buf_size > 0 &&
		(iocommand.Request.Type.Direction & XFER_READ)) {
		if (copy_to_user(iocommand.buf, kernel_buffer,
			iocommand.buf_size)) {
			rc = -EFAULT;
		}
	}

out:
	kfree(kernel_buffer);

	return rc;
}
static int pqi_ioctl(struct scsi_device *sdev, int cmd, void __user *arg)
{
	int rc;
	struct pqi_ctrl_info *ctrl_info;

	ctrl_info = shost_to_hba(sdev->host);

	switch (cmd) {
	case CCISS_DEREGDISK:
	case CCISS_REGNEWDISK:
	case CCISS_REGNEWD:
		rc = pqi_scan_scsi_devices(ctrl_info);
		break;
	case CCISS_GETPCIINFO:
		rc = pqi_getpciinfo_ioctl(ctrl_info, arg);
		break;
	case CCISS_GETDRIVVER:
		rc = pqi_getdrivver_ioctl(arg);
		break;
	case CCISS_PASSTHRU:
		rc = pqi_passthru_ioctl(ctrl_info, arg);
		break;
	default:
		rc = -EINVAL;
		break;
	}

	return rc;
}
static ssize_t pqi_version_show(struct device *dev,
	struct device_attribute *attr, char *buffer)
{
	ssize_t count = 0;
	struct Scsi_Host *shost;
	struct pqi_ctrl_info *ctrl_info;

	shost = class_to_shost(dev);
	ctrl_info = shost_to_hba(shost);

	count += snprintf(buffer + count, PAGE_SIZE - count,
		"  driver: %s\n", DRIVER_VERSION BUILD_TIMESTAMP);

	count += snprintf(buffer + count, PAGE_SIZE - count,
		"firmware: %s\n", ctrl_info->firmware_version);

	return count;
}

static ssize_t pqi_host_rescan_store(struct device *dev,
	struct device_attribute *attr, const char *buffer, size_t count)
{
	struct Scsi_Host *shost = class_to_shost(dev);

	pqi_scan_start(shost);

	return count;
}

static DEVICE_ATTR(version, S_IRUGO, pqi_version_show, NULL);
static DEVICE_ATTR(rescan, S_IWUSR, NULL, pqi_host_rescan_store);
static struct device_attribute *pqi_shost_attrs[] = {
	&dev_attr_version,
	&dev_attr_rescan,
	NULL
};

static ssize_t pqi_sas_address_show(struct device *dev,
	struct device_attribute *attr, char *buffer)
{
	struct pqi_ctrl_info *ctrl_info;
	struct scsi_device *sdev;
	struct pqi_scsi_dev *device;
	unsigned long flags;
	u64 sas_address;

	sdev = to_scsi_device(dev);
	ctrl_info = shost_to_hba(sdev->host);

	spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);

	device = sdev->hostdata;
	if (pqi_is_logical_device(device)) {
		spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock,
			flags);
		return -ENODEV;
	}
	sas_address = device->sas_address;

	spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);

	return snprintf(buffer, PAGE_SIZE, "0x%016llx\n", sas_address);
}

static ssize_t pqi_ssd_smart_path_enabled_show(struct device *dev,
	struct device_attribute *attr, char *buffer)
{
	struct pqi_ctrl_info *ctrl_info;
	struct scsi_device *sdev;
	struct pqi_scsi_dev *device;
	unsigned long flags;

	sdev = to_scsi_device(dev);
	ctrl_info = shost_to_hba(sdev->host);

	spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);

	device = sdev->hostdata;
	buffer[0] = device->offload_enabled ? '1' : '0';
	buffer[1] = '\n';
	buffer[2] = '\0';

	spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);

	return 2;
}

static DEVICE_ATTR(sas_address, S_IRUGO, pqi_sas_address_show, NULL);
static DEVICE_ATTR(ssd_smart_path_enabled, S_IRUGO,
	pqi_ssd_smart_path_enabled_show, NULL);

static struct device_attribute *pqi_sdev_attrs[] = {
	&dev_attr_sas_address,
	&dev_attr_ssd_smart_path_enabled,
	NULL
};
static struct scsi_host_template pqi_driver_template = {
	.module = THIS_MODULE,
	.name = DRIVER_NAME_SHORT,
	.proc_name = DRIVER_NAME_SHORT,
	.queuecommand = pqi_scsi_queue_command,
	.scan_start = pqi_scan_start,
	.scan_finished = pqi_scan_finished,
	.this_id = -1,
	.use_clustering = ENABLE_CLUSTERING,
	.eh_device_reset_handler = pqi_eh_device_reset_handler,
	.ioctl = pqi_ioctl,
	.slave_alloc = pqi_slave_alloc,
	.slave_configure = pqi_slave_configure,
	.map_queues = pqi_map_queues,
	.sdev_attrs = pqi_sdev_attrs,
	.shost_attrs = pqi_shost_attrs,
};
static int pqi_register_scsi(struct pqi_ctrl_info *ctrl_info)
{
	int rc;
	struct Scsi_Host *shost;

	shost = scsi_host_alloc(&pqi_driver_template, sizeof(ctrl_info));
	if (!shost) {
		dev_err(&ctrl_info->pci_dev->dev,
			"scsi_host_alloc failed for controller %u\n",
			ctrl_info->ctrl_id);
		return -ENOMEM;
	}

	shost->io_port = 0;
	shost->n_io_port = 0;
	shost->this_id = -1;
	shost->max_channel = PQI_MAX_BUS;
	shost->max_cmd_len = MAX_COMMAND_SIZE;
	shost->max_lun = ~0;
	shost->max_id = ~0;
	shost->max_sectors = ctrl_info->max_sectors;
	shost->can_queue = ctrl_info->scsi_ml_can_queue;
	shost->cmd_per_lun = shost->can_queue;
	shost->sg_tablesize = ctrl_info->sg_tablesize;
	shost->transportt = pqi_sas_transport_template;
	shost->irq = pci_irq_vector(ctrl_info->pci_dev, 0);
	shost->unique_id = shost->irq;
	shost->nr_hw_queues = ctrl_info->num_queue_groups;
	shost->hostdata[0] = (unsigned long)ctrl_info;

	rc = scsi_add_host(shost, &ctrl_info->pci_dev->dev);
	if (rc) {
		dev_err(&ctrl_info->pci_dev->dev,
			"scsi_add_host failed for controller %u\n",
			ctrl_info->ctrl_id);
		goto free_host;
	}

	rc = pqi_add_sas_host(shost, ctrl_info);
	if (rc) {
		dev_err(&ctrl_info->pci_dev->dev,
			"add SAS host failed for controller %u\n",
			ctrl_info->ctrl_id);
		goto remove_host;
	}

	ctrl_info->scsi_host = shost;

	return 0;

remove_host:
	scsi_remove_host(shost);
free_host:
	scsi_host_put(shost);

	return rc;
}

static void pqi_unregister_scsi(struct pqi_ctrl_info *ctrl_info)
{
	struct Scsi_Host *shost;

	pqi_delete_sas_host(ctrl_info);

	shost = ctrl_info->scsi_host;
	if (!shost)
		return;

	scsi_remove_host(shost);
	scsi_host_put(shost);
}
#define PQI_RESET_ACTION_RESET		0x1

#define PQI_RESET_TYPE_NO_RESET		0x0
#define PQI_RESET_TYPE_SOFT_RESET	0x1
#define PQI_RESET_TYPE_FIRM_RESET	0x2
#define PQI_RESET_TYPE_HARD_RESET	0x3

static int pqi_reset(struct pqi_ctrl_info *ctrl_info)
{
	int rc;
	u32 reset_params;

	reset_params = (PQI_RESET_ACTION_RESET << 5) |
		PQI_RESET_TYPE_HARD_RESET;

	writel(reset_params,
		&ctrl_info->pqi_registers->device_reset);

	rc = pqi_wait_for_pqi_mode_ready(ctrl_info);
	if (rc)
		dev_err(&ctrl_info->pci_dev->dev,
			"PQI reset failed\n");

	return rc;
}
static int pqi_get_ctrl_firmware_version(struct pqi_ctrl_info *ctrl_info)
{
	int rc;
	struct bmic_identify_controller *identify;

	identify = kmalloc(sizeof(*identify), GFP_KERNEL);
	if (!identify)
		return -ENOMEM;

	rc = pqi_identify_controller(ctrl_info, identify);
	if (rc)
		goto out;

	memcpy(ctrl_info->firmware_version, identify->firmware_version,
		sizeof(identify->firmware_version));
	ctrl_info->firmware_version[sizeof(identify->firmware_version)] = '\0';
	snprintf(ctrl_info->firmware_version +
		strlen(ctrl_info->firmware_version),
		sizeof(ctrl_info->firmware_version),
		"-%u", get_unaligned_le16(&identify->firmware_build_number));

out:
	kfree(identify);

	return rc;
}
static int pqi_kdump_init(struct pqi_ctrl_info *ctrl_info)
{
	if (!sis_is_firmware_running(ctrl_info))
		return -ENXIO;

	if (pqi_get_ctrl_mode(ctrl_info) == PQI_MODE) {
		sis_disable_msix(ctrl_info);
		if (pqi_reset(ctrl_info) == 0)
			sis_reenable_sis_mode(ctrl_info);
	}

	return 0;
}
static int pqi_ctrl_init(struct pqi_ctrl_info *ctrl_info)
{
	int rc;

	if (reset_devices) {
		rc = pqi_kdump_init(ctrl_info);
		if (rc)
			return rc;
	}

	/*
	 * When the controller comes out of reset, it is always running
	 * in legacy SIS mode.  This is so that it can be compatible
	 * with legacy drivers shipped with OSes.  So we have to talk
	 * to it using SIS commands at first.  Once we are satisfied
	 * that the controller supports PQI, we transition it into PQI
	 * mode.
	 */

	/*
	 * Wait until the controller is ready to start accepting SIS
	 * commands.
	 */
	rc = sis_wait_for_ctrl_ready(ctrl_info);
	if (rc) {
		dev_err(&ctrl_info->pci_dev->dev,
			"error initializing SIS interface\n");
		return rc;
	}

	/*
	 * Get the controller properties.  This allows us to determine
	 * whether or not it supports PQI mode.
	 */
	rc = sis_get_ctrl_properties(ctrl_info);
	if (rc) {
		dev_err(&ctrl_info->pci_dev->dev,
			"error obtaining controller properties\n");
		return rc;
	}

	rc = sis_get_pqi_capabilities(ctrl_info);
	if (rc) {
		dev_err(&ctrl_info->pci_dev->dev,
			"error obtaining controller capabilities\n");
		return rc;
	}

	if (ctrl_info->max_outstanding_requests > PQI_MAX_OUTSTANDING_REQUESTS)
		ctrl_info->max_outstanding_requests =
			PQI_MAX_OUTSTANDING_REQUESTS;

	pqi_calculate_io_resources(ctrl_info);

	rc = pqi_alloc_error_buffer(ctrl_info);
	if (rc) {
		dev_err(&ctrl_info->pci_dev->dev,
			"failed to allocate PQI error buffer\n");
		return rc;
	}

	/*
	 * If the function we are about to call succeeds, the
	 * controller will transition from legacy SIS mode
	 * into PQI mode.
	 */
	rc = sis_init_base_struct_addr(ctrl_info);
	if (rc) {
		dev_err(&ctrl_info->pci_dev->dev,
			"error initializing PQI mode\n");
		return rc;
	}

	/* Wait for the controller to complete the SIS -> PQI transition. */
	rc = pqi_wait_for_pqi_mode_ready(ctrl_info);
	if (rc) {
		dev_err(&ctrl_info->pci_dev->dev,
			"transition to PQI mode failed\n");
		return rc;
	}

	/* From here on, we are running in PQI mode. */
	ctrl_info->pqi_mode_enabled = true;
	pqi_save_ctrl_mode(ctrl_info, PQI_MODE);

	rc = pqi_alloc_admin_queues(ctrl_info);
	if (rc) {
		dev_err(&ctrl_info->pci_dev->dev,
			"error allocating admin queues\n");
		return rc;
	}

	rc = pqi_create_admin_queues(ctrl_info);
	if (rc) {
		dev_err(&ctrl_info->pci_dev->dev,
			"error creating admin queues\n");
		return rc;
	}

	rc = pqi_report_device_capability(ctrl_info);
	if (rc) {
		dev_err(&ctrl_info->pci_dev->dev,
			"obtaining device capability failed\n");
		return rc;
	}

	rc = pqi_validate_device_capability(ctrl_info);
	if (rc)
		return rc;

	pqi_calculate_queue_resources(ctrl_info);

	rc = pqi_enable_msix_interrupts(ctrl_info);
	if (rc)
		return rc;

	if (ctrl_info->num_msix_vectors_enabled < ctrl_info->num_queue_groups) {
		ctrl_info->max_msix_vectors =
			ctrl_info->num_msix_vectors_enabled;
		pqi_calculate_queue_resources(ctrl_info);
	}

	rc = pqi_alloc_io_resources(ctrl_info);
	if (rc)
		return rc;

	rc = pqi_alloc_operational_queues(ctrl_info);
	if (rc)
		return rc;

	pqi_init_operational_queues(ctrl_info);

	rc = pqi_request_irqs(ctrl_info);
	if (rc)
		return rc;

	rc = pqi_create_queues(ctrl_info);
	if (rc)
		return rc;

	sis_enable_msix(ctrl_info);

	rc = pqi_configure_events(ctrl_info);
	if (rc) {
		dev_err(&ctrl_info->pci_dev->dev,
			"error configuring events\n");
		return rc;
	}

	pqi_start_heartbeat_timer(ctrl_info);

	ctrl_info->controller_online = true;

	/* Register with the SCSI subsystem. */
	rc = pqi_register_scsi(ctrl_info);
	if (rc)
		return rc;

	rc = pqi_get_ctrl_firmware_version(ctrl_info);
	if (rc) {
		dev_err(&ctrl_info->pci_dev->dev,
			"error obtaining firmware version\n");
		return rc;
	}

	rc = pqi_write_driver_version_to_host_wellness(ctrl_info);
	if (rc) {
		dev_err(&ctrl_info->pci_dev->dev,
			"error updating host wellness\n");
		return rc;
	}

	pqi_schedule_update_time_worker(ctrl_info);

	pqi_scan_scsi_devices(ctrl_info);

	return 0;
}
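/*
 * Summary of the bring-up sequence implemented above: wait for SIS, query
 * controller properties and PQI capabilities over SIS, size the I/O
 * resources, switch the controller from SIS into PQI mode, create the
 * admin and operational queues, enable MSI-X, configure events, and only
 * then register with the SCSI midlayer and kick off the first device scan.
 */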
static int pqi_pci_init(struct pqi_ctrl_info *ctrl_info)
{
	int rc;
	u64 mask;

	rc = pci_enable_device(ctrl_info->pci_dev);
	if (rc) {
		dev_err(&ctrl_info->pci_dev->dev,
			"failed to enable PCI device\n");
		return rc;
	}

	if (sizeof(dma_addr_t) > 4)
		mask = DMA_BIT_MASK(64);
	else
		mask = DMA_BIT_MASK(32);

	rc = dma_set_mask(&ctrl_info->pci_dev->dev, mask);
	if (rc) {
		dev_err(&ctrl_info->pci_dev->dev, "failed to set DMA mask\n");
		goto disable_device;
	}

	rc = pci_request_regions(ctrl_info->pci_dev, DRIVER_NAME_SHORT);
	if (rc) {
		dev_err(&ctrl_info->pci_dev->dev,
			"failed to obtain PCI resources\n");
		goto disable_device;
	}

	ctrl_info->iomem_base = ioremap_nocache(pci_resource_start(
		ctrl_info->pci_dev, 0),
		sizeof(struct pqi_ctrl_registers));
	if (!ctrl_info->iomem_base) {
		dev_err(&ctrl_info->pci_dev->dev,
			"failed to map memory for controller registers\n");
		rc = -ENOMEM;
		goto release_regions;
	}

	ctrl_info->registers = ctrl_info->iomem_base;
	ctrl_info->pqi_registers = &ctrl_info->registers->pqi_registers;

	/* Enable bus mastering. */
	pci_set_master(ctrl_info->pci_dev);

	pci_set_drvdata(ctrl_info->pci_dev, ctrl_info);

	return 0;

release_regions:
	pci_release_regions(ctrl_info->pci_dev);
disable_device:
	pci_disable_device(ctrl_info->pci_dev);

	return rc;
}
static void pqi_cleanup_pci_init(struct pqi_ctrl_info *ctrl_info)
{
	iounmap(ctrl_info->iomem_base);
	pci_release_regions(ctrl_info->pci_dev);
	pci_disable_device(ctrl_info->pci_dev);
	pci_set_drvdata(ctrl_info->pci_dev, NULL);
}
static struct pqi_ctrl_info *pqi_alloc_ctrl_info(int numa_node)
{
	struct pqi_ctrl_info *ctrl_info;

	ctrl_info = kzalloc_node(sizeof(struct pqi_ctrl_info),
			GFP_KERNEL, numa_node);
	if (!ctrl_info)
		return NULL;

	mutex_init(&ctrl_info->scan_mutex);

	INIT_LIST_HEAD(&ctrl_info->scsi_device_list);
	spin_lock_init(&ctrl_info->scsi_device_list_lock);

	INIT_WORK(&ctrl_info->event_work, pqi_event_worker);
	atomic_set(&ctrl_info->num_interrupts, 0);

	INIT_DELAYED_WORK(&ctrl_info->rescan_work, pqi_rescan_worker);
	INIT_DELAYED_WORK(&ctrl_info->update_time_work, pqi_update_time_worker);

	sema_init(&ctrl_info->sync_request_sem,
		PQI_RESERVED_IO_SLOTS_SYNCHRONOUS_REQUESTS);
	sema_init(&ctrl_info->lun_reset_sem, PQI_RESERVED_IO_SLOTS_LUN_RESET);

	ctrl_info->ctrl_id = atomic_inc_return(&pqi_controller_count) - 1;
	ctrl_info->max_msix_vectors = PQI_MAX_MSIX_VECTORS;

	return ctrl_info;
}
static inline void pqi_free_ctrl_info(struct pqi_ctrl_info *ctrl_info)
{
	kfree(ctrl_info);
}

static void pqi_free_interrupts(struct pqi_ctrl_info *ctrl_info)
{
	int i;

	for (i = 0; i < ctrl_info->num_msix_vectors_initialized; i++) {
		free_irq(pci_irq_vector(ctrl_info->pci_dev, i),
			&ctrl_info->queue_groups[i]);
	}

	pci_free_irq_vectors(ctrl_info->pci_dev);
}
static void pqi_free_ctrl_resources(struct pqi_ctrl_info *ctrl_info)
{
	pqi_stop_heartbeat_timer(ctrl_info);
	pqi_free_interrupts(ctrl_info);
	if (ctrl_info->queue_memory_base)
		dma_free_coherent(&ctrl_info->pci_dev->dev,
			ctrl_info->queue_memory_length,
			ctrl_info->queue_memory_base,
			ctrl_info->queue_memory_base_dma_handle);
	if (ctrl_info->admin_queue_memory_base)
		dma_free_coherent(&ctrl_info->pci_dev->dev,
			ctrl_info->admin_queue_memory_length,
			ctrl_info->admin_queue_memory_base,
			ctrl_info->admin_queue_memory_base_dma_handle);
	pqi_free_all_io_requests(ctrl_info);
	if (ctrl_info->error_buffer)
		dma_free_coherent(&ctrl_info->pci_dev->dev,
			ctrl_info->error_buffer_length,
			ctrl_info->error_buffer,
			ctrl_info->error_buffer_dma_handle);
	if (ctrl_info->iomem_base)
		pqi_cleanup_pci_init(ctrl_info);
	pqi_free_ctrl_info(ctrl_info);
}
static void pqi_remove_ctrl(struct pqi_ctrl_info *ctrl_info)
{
	cancel_delayed_work_sync(&ctrl_info->rescan_work);
	cancel_delayed_work_sync(&ctrl_info->update_time_work);
	pqi_remove_all_scsi_devices(ctrl_info);
	pqi_unregister_scsi(ctrl_info);

	if (ctrl_info->pqi_mode_enabled) {
		sis_disable_msix(ctrl_info);
		if (pqi_reset(ctrl_info) == 0)
			sis_reenable_sis_mode(ctrl_info);
	}
	pqi_free_ctrl_resources(ctrl_info);
}
static void pqi_print_ctrl_info(struct pci_dev *pdev,
	const struct pci_device_id *id)
{
	char *ctrl_description;

	if (id->driver_data) {
		ctrl_description = (char *)id->driver_data;
	} else {
		switch (id->subvendor) {
		case PCI_VENDOR_ID_HP:
			ctrl_description = hpe_branded_controller;
			break;
		case PCI_VENDOR_ID_ADAPTEC2:
		default:
			ctrl_description = microsemi_branded_controller;
			break;
		}
	}

	dev_info(&pdev->dev, "%s found\n", ctrl_description);
}
static int pqi_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	int rc;
	int node;
	struct pqi_ctrl_info *ctrl_info;

	pqi_print_ctrl_info(pdev, id);

	if (pqi_disable_device_id_wildcards &&
		id->subvendor == PCI_ANY_ID &&
		id->subdevice == PCI_ANY_ID) {
		dev_warn(&pdev->dev,
			"controller not probed because device ID wildcards are disabled\n");
		return -ENODEV;
	}

	if (id->subvendor == PCI_ANY_ID || id->subdevice == PCI_ANY_ID)
		dev_warn(&pdev->dev,
			"controller device ID matched using wildcards\n");

	node = dev_to_node(&pdev->dev);
	if (node == NUMA_NO_NODE)
		set_dev_node(&pdev->dev, 0);

	ctrl_info = pqi_alloc_ctrl_info(node);
	if (!ctrl_info) {
		dev_err(&pdev->dev,
			"failed to allocate controller info block\n");
		return -ENOMEM;
	}

	ctrl_info->pci_dev = pdev;

	rc = pqi_pci_init(ctrl_info);
	if (rc)
		goto error;

	rc = pqi_ctrl_init(ctrl_info);
	if (rc)
		goto error;

	return 0;

error:
	pqi_remove_ctrl(ctrl_info);

	return rc;
}
static void pqi_pci_remove(struct pci_dev *pdev)
{
	struct pqi_ctrl_info *ctrl_info;

	ctrl_info = pci_get_drvdata(pdev);
	if (!ctrl_info)
		return;

	pqi_remove_ctrl(ctrl_info);
}

static void pqi_shutdown(struct pci_dev *pdev)
{
	int rc;
	struct pqi_ctrl_info *ctrl_info;

	ctrl_info = pci_get_drvdata(pdev);
	if (!ctrl_info)
		goto error;

	/*
	 * Write all data in the controller's battery-backed cache to
	 * storage.
	 */
	rc = pqi_flush_cache(ctrl_info);
	if (rc == 0)
		return;

error:
	dev_warn(&pdev->dev,
		"unable to flush controller cache\n");
}
/* Define the PCI IDs for the controllers that we support. */
static const struct pci_device_id pqi_pci_id_table[] = {
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, PCI_VENDOR_ID_ADAPTEC2, 0x0110) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, PCI_VENDOR_ID_HP, 0x0600) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, PCI_VENDOR_ID_HP, 0x0601) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, PCI_VENDOR_ID_HP, 0x0602) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, PCI_VENDOR_ID_HP, 0x0603) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, PCI_VENDOR_ID_HP, 0x0650) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, PCI_VENDOR_ID_HP, 0x0651) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, PCI_VENDOR_ID_HP, 0x0652) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, PCI_VENDOR_ID_HP, 0x0653) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, PCI_VENDOR_ID_HP, 0x0654) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, PCI_VENDOR_ID_HP, 0x0655) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, PCI_VENDOR_ID_HP, 0x0700) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, PCI_VENDOR_ID_HP, 0x0701) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, PCI_VENDOR_ID_ADAPTEC2, 0x0800) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, PCI_VENDOR_ID_ADAPTEC2, 0x0801) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, PCI_VENDOR_ID_ADAPTEC2, 0x0802) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, PCI_VENDOR_ID_ADAPTEC2, 0x0803) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, PCI_VENDOR_ID_ADAPTEC2, 0x0804) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, PCI_VENDOR_ID_ADAPTEC2, 0x0805) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, PCI_VENDOR_ID_ADAPTEC2, 0x0900) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, PCI_VENDOR_ID_ADAPTEC2, 0x0901) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, PCI_VENDOR_ID_ADAPTEC2, 0x0902) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, PCI_VENDOR_ID_ADAPTEC2, 0x0903) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, PCI_VENDOR_ID_ADAPTEC2, 0x0904) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, PCI_VENDOR_ID_ADAPTEC2, 0x0905) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, PCI_VENDOR_ID_ADAPTEC2, 0x0906) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, PCI_VENDOR_ID_HP, 0x1001) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, PCI_VENDOR_ID_HP, 0x1100) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, PCI_VENDOR_ID_HP, 0x1101) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, PCI_VENDOR_ID_HP, 0x1102) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, PCI_VENDOR_ID_HP, 0x1150) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, PCI_ANY_ID, PCI_ANY_ID) },
	{ 0 }
};

MODULE_DEVICE_TABLE(pci, pqi_pci_id_table);
static struct pci_driver pqi_pci_driver = {
	.name = DRIVER_NAME_SHORT,
	.id_table = pqi_pci_id_table,
	.probe = pqi_pci_probe,
	.remove = pqi_pci_remove,
	.shutdown = pqi_shutdown,
};
static int __init pqi_init(void)
{
	int rc;

	pr_info(DRIVER_NAME "\n");

	pqi_sas_transport_template =
		sas_attach_transport(&pqi_sas_transport_functions);
	if (!pqi_sas_transport_template)
		return -ENODEV;

	rc = pci_register_driver(&pqi_pci_driver);
	if (rc)
		sas_release_transport(pqi_sas_transport_template);

	return rc;
}

static void __exit pqi_cleanup(void)
{
	pci_unregister_driver(&pqi_pci_driver);
	sas_release_transport(pqi_sas_transport_template);
}

module_init(pqi_init);
module_exit(pqi_cleanup);
static void __attribute__((unused)) verify_structures(void)
{
	BUILD_BUG_ON(offsetof(struct pqi_ctrl_registers, sis_host_to_ctrl_doorbell) != 0x20);
	BUILD_BUG_ON(offsetof(struct pqi_ctrl_registers, sis_interrupt_mask) != 0x34);
	BUILD_BUG_ON(offsetof(struct pqi_ctrl_registers, sis_ctrl_to_host_doorbell) != 0x9c);
	BUILD_BUG_ON(offsetof(struct pqi_ctrl_registers, sis_ctrl_to_host_doorbell_clear) != 0xa0);
	BUILD_BUG_ON(offsetof(struct pqi_ctrl_registers, sis_driver_scratch) != 0xb0);
	BUILD_BUG_ON(offsetof(struct pqi_ctrl_registers, sis_firmware_status) != 0xbc);
	BUILD_BUG_ON(offsetof(struct pqi_ctrl_registers, sis_mailbox) != 0x1000);
	BUILD_BUG_ON(offsetof(struct pqi_ctrl_registers, pqi_registers) != 0x4000);

	BUILD_BUG_ON(offsetof(struct pqi_iu_header, iu_type) != 0x0);
	BUILD_BUG_ON(offsetof(struct pqi_iu_header, iu_length) != 0x2);
	BUILD_BUG_ON(offsetof(struct pqi_iu_header, response_queue_id) != 0x4);
	BUILD_BUG_ON(offsetof(struct pqi_iu_header, work_area) != 0x6);
	BUILD_BUG_ON(sizeof(struct pqi_iu_header) != 0x8);

	BUILD_BUG_ON(offsetof(struct pqi_aio_error_info, status) != 0x0);
	BUILD_BUG_ON(offsetof(struct pqi_aio_error_info, service_response) != 0x1);
	BUILD_BUG_ON(offsetof(struct pqi_aio_error_info, data_present) != 0x2);
	BUILD_BUG_ON(offsetof(struct pqi_aio_error_info, reserved) != 0x3);
	BUILD_BUG_ON(offsetof(struct pqi_aio_error_info, residual_count) != 0x4);
	BUILD_BUG_ON(offsetof(struct pqi_aio_error_info, data_length) != 0x8);
	BUILD_BUG_ON(offsetof(struct pqi_aio_error_info, reserved1) != 0xa);
	BUILD_BUG_ON(offsetof(struct pqi_aio_error_info, data) != 0xc);
	BUILD_BUG_ON(sizeof(struct pqi_aio_error_info) != 0x10c);

	BUILD_BUG_ON(offsetof(struct pqi_raid_error_info, data_in_result) != 0x0);
	BUILD_BUG_ON(offsetof(struct pqi_raid_error_info, data_out_result) != 0x1);
	BUILD_BUG_ON(offsetof(struct pqi_raid_error_info, reserved) != 0x2);
	BUILD_BUG_ON(offsetof(struct pqi_raid_error_info, status) != 0x5);
	BUILD_BUG_ON(offsetof(struct pqi_raid_error_info, status_qualifier) != 0x6);
	BUILD_BUG_ON(offsetof(struct pqi_raid_error_info, sense_data_length) != 0x8);
	BUILD_BUG_ON(offsetof(struct pqi_raid_error_info, response_data_length) != 0xa);
	BUILD_BUG_ON(offsetof(struct pqi_raid_error_info, data_in_transferred) != 0xc);
	BUILD_BUG_ON(offsetof(struct pqi_raid_error_info, data_out_transferred) != 0x10);
	BUILD_BUG_ON(offsetof(struct pqi_raid_error_info, data) != 0x14);
	BUILD_BUG_ON(sizeof(struct pqi_raid_error_info) != 0x114);

	BUILD_BUG_ON(offsetof(struct pqi_device_registers, signature) != 0x0);
	BUILD_BUG_ON(offsetof(struct pqi_device_registers, function_and_status_code) != 0x8);
	BUILD_BUG_ON(offsetof(struct pqi_device_registers, max_admin_iq_elements) != 0x10);
	BUILD_BUG_ON(offsetof(struct pqi_device_registers, max_admin_oq_elements) != 0x11);
	BUILD_BUG_ON(offsetof(struct pqi_device_registers, admin_iq_element_length) != 0x12);
	BUILD_BUG_ON(offsetof(struct pqi_device_registers, admin_oq_element_length) != 0x13);
	BUILD_BUG_ON(offsetof(struct pqi_device_registers, max_reset_timeout) != 0x14);
	BUILD_BUG_ON(offsetof(struct pqi_device_registers, legacy_intx_status) != 0x18);
	BUILD_BUG_ON(offsetof(struct pqi_device_registers, legacy_intx_mask_set) != 0x1c);
	BUILD_BUG_ON(offsetof(struct pqi_device_registers, legacy_intx_mask_clear) != 0x20);
	BUILD_BUG_ON(offsetof(struct pqi_device_registers, device_status) != 0x40);
	BUILD_BUG_ON(offsetof(struct pqi_device_registers, admin_iq_pi_offset) != 0x48);
	BUILD_BUG_ON(offsetof(struct pqi_device_registers, admin_oq_ci_offset) != 0x50);
	BUILD_BUG_ON(offsetof(struct pqi_device_registers, admin_iq_element_array_addr) != 0x58);
	BUILD_BUG_ON(offsetof(struct pqi_device_registers, admin_oq_element_array_addr) != 0x60);
	BUILD_BUG_ON(offsetof(struct pqi_device_registers, admin_iq_ci_addr) != 0x68);
	BUILD_BUG_ON(offsetof(struct pqi_device_registers, admin_oq_pi_addr) != 0x70);
	BUILD_BUG_ON(offsetof(struct pqi_device_registers, admin_iq_num_elements) != 0x78);
	BUILD_BUG_ON(offsetof(struct pqi_device_registers, admin_oq_num_elements) != 0x79);
	BUILD_BUG_ON(offsetof(struct pqi_device_registers, admin_queue_int_msg_num) != 0x7a);
	BUILD_BUG_ON(offsetof(struct pqi_device_registers, device_error) != 0x80);
	BUILD_BUG_ON(offsetof(struct pqi_device_registers, error_details) != 0x88);
	BUILD_BUG_ON(offsetof(struct pqi_device_registers, device_reset) != 0x90);
	BUILD_BUG_ON(offsetof(struct pqi_device_registers, power_action) != 0x94);
	BUILD_BUG_ON(sizeof(struct pqi_device_registers) != 0x100);

	BUILD_BUG_ON(offsetof(struct pqi_general_admin_request, header.iu_type) != 0);
	BUILD_BUG_ON(offsetof(struct pqi_general_admin_request, header.iu_length) != 2);
	BUILD_BUG_ON(offsetof(struct pqi_general_admin_request, header.work_area) != 6);
	BUILD_BUG_ON(offsetof(struct pqi_general_admin_request, request_id) != 8);
	BUILD_BUG_ON(offsetof(struct pqi_general_admin_request, function_code) != 10);
	BUILD_BUG_ON(offsetof(struct pqi_general_admin_request, data.report_device_capability.buffer_length) != 44);
	BUILD_BUG_ON(offsetof(struct pqi_general_admin_request, data.report_device_capability.sg_descriptor) != 48);
	BUILD_BUG_ON(offsetof(struct pqi_general_admin_request, data.create_operational_iq.queue_id) != 12);
	BUILD_BUG_ON(offsetof(struct pqi_general_admin_request, data.create_operational_iq.element_array_addr) != 16);
	BUILD_BUG_ON(offsetof(struct pqi_general_admin_request, data.create_operational_iq.ci_addr) != 24);
	BUILD_BUG_ON(offsetof(struct pqi_general_admin_request, data.create_operational_iq.num_elements) != 32);
	BUILD_BUG_ON(offsetof(struct pqi_general_admin_request, data.create_operational_iq.element_length) != 34);
	BUILD_BUG_ON(offsetof(struct pqi_general_admin_request, data.create_operational_iq.queue_protocol) != 36);
	BUILD_BUG_ON(offsetof(struct pqi_general_admin_request, data.create_operational_oq.queue_id) != 12);
	BUILD_BUG_ON(offsetof(struct pqi_general_admin_request, data.create_operational_oq.element_array_addr) != 16);
	BUILD_BUG_ON(offsetof(struct pqi_general_admin_request, data.create_operational_oq.pi_addr) != 24);
	BUILD_BUG_ON(offsetof(struct pqi_general_admin_request, data.create_operational_oq.num_elements) != 32);
	BUILD_BUG_ON(offsetof(struct pqi_general_admin_request, data.create_operational_oq.element_length) != 34);
	BUILD_BUG_ON(offsetof(struct pqi_general_admin_request, data.create_operational_oq.queue_protocol) != 36);
	BUILD_BUG_ON(offsetof(struct pqi_general_admin_request, data.create_operational_oq.int_msg_num) != 40);
	BUILD_BUG_ON(offsetof(struct pqi_general_admin_request, data.create_operational_oq.coalescing_count) != 42);
	BUILD_BUG_ON(offsetof(struct pqi_general_admin_request, data.create_operational_oq.min_coalescing_time) != 44);
	BUILD_BUG_ON(offsetof(struct pqi_general_admin_request, data.create_operational_oq.max_coalescing_time) != 48);
	BUILD_BUG_ON(offsetof(struct pqi_general_admin_request, data.delete_operational_queue.queue_id) != 12);
	BUILD_BUG_ON(sizeof(struct pqi_general_admin_request) != 64);
	BUILD_BUG_ON(FIELD_SIZEOF(struct pqi_general_admin_request, data.create_operational_iq) != 64 - 11);
	BUILD_BUG_ON(FIELD_SIZEOF(struct pqi_general_admin_request, data.create_operational_oq) != 64 - 11);
	BUILD_BUG_ON(FIELD_SIZEOF(struct pqi_general_admin_request, data.delete_operational_queue) != 64 - 11);

	BUILD_BUG_ON(offsetof(struct pqi_general_admin_response, header.iu_type) != 0);
	BUILD_BUG_ON(offsetof(struct pqi_general_admin_response, header.iu_length) != 2);
	BUILD_BUG_ON(offsetof(struct pqi_general_admin_response, header.work_area) != 6);
	BUILD_BUG_ON(offsetof(struct pqi_general_admin_response, request_id) != 8);
	BUILD_BUG_ON(offsetof(struct pqi_general_admin_response, function_code) != 10);
	BUILD_BUG_ON(offsetof(struct pqi_general_admin_response, status) != 11);
	BUILD_BUG_ON(offsetof(struct pqi_general_admin_response, data.create_operational_iq.status_descriptor) != 12);
	BUILD_BUG_ON(offsetof(struct pqi_general_admin_response, data.create_operational_iq.iq_pi_offset) != 16);
	BUILD_BUG_ON(offsetof(struct pqi_general_admin_response, data.create_operational_oq.status_descriptor) != 12);
	BUILD_BUG_ON(offsetof(struct pqi_general_admin_response, data.create_operational_oq.oq_ci_offset) != 16);
	BUILD_BUG_ON(sizeof(struct pqi_general_admin_response) != 64);

	BUILD_BUG_ON(offsetof(struct pqi_raid_path_request, header.iu_type) != 0);
	BUILD_BUG_ON(offsetof(struct pqi_raid_path_request, header.iu_length) != 2);
	BUILD_BUG_ON(offsetof(struct pqi_raid_path_request, header.response_queue_id) != 4);
	BUILD_BUG_ON(offsetof(struct pqi_raid_path_request, header.work_area) != 6);
	BUILD_BUG_ON(offsetof(struct pqi_raid_path_request, request_id) != 8);
	BUILD_BUG_ON(offsetof(struct pqi_raid_path_request, nexus_id) != 10);
	BUILD_BUG_ON(offsetof(struct pqi_raid_path_request, buffer_length) != 12);
	BUILD_BUG_ON(offsetof(struct pqi_raid_path_request, lun_number) != 16);
	BUILD_BUG_ON(offsetof(struct pqi_raid_path_request, protocol_specific) != 24);
	BUILD_BUG_ON(offsetof(struct pqi_raid_path_request, error_index) != 27);
	BUILD_BUG_ON(offsetof(struct pqi_raid_path_request, cdb) != 32);
	BUILD_BUG_ON(offsetof(struct pqi_raid_path_request, sg_descriptors) != 64);
	BUILD_BUG_ON(sizeof(struct pqi_raid_path_request) !=
		PQI_OPERATIONAL_IQ_ELEMENT_LENGTH);

	BUILD_BUG_ON(offsetof(struct pqi_aio_path_request, header.iu_type) != 0);
	BUILD_BUG_ON(offsetof(struct pqi_aio_path_request, header.iu_length) != 2);
	BUILD_BUG_ON(offsetof(struct pqi_aio_path_request, header.response_queue_id) != 4);
	BUILD_BUG_ON(offsetof(struct pqi_aio_path_request, header.work_area) != 6);
	BUILD_BUG_ON(offsetof(struct pqi_aio_path_request, request_id) != 8);
	BUILD_BUG_ON(offsetof(struct pqi_aio_path_request, nexus_id) != 12);
	BUILD_BUG_ON(offsetof(struct pqi_aio_path_request, buffer_length) != 16);
	BUILD_BUG_ON(offsetof(struct pqi_aio_path_request, data_encryption_key_index) != 22);
	BUILD_BUG_ON(offsetof(struct pqi_aio_path_request, encrypt_tweak_lower) != 24);
	BUILD_BUG_ON(offsetof(struct pqi_aio_path_request, encrypt_tweak_upper) != 28);
	BUILD_BUG_ON(offsetof(struct pqi_aio_path_request, cdb) != 32);
	BUILD_BUG_ON(offsetof(struct pqi_aio_path_request, error_index) != 48);
	BUILD_BUG_ON(offsetof(struct pqi_aio_path_request, num_sg_descriptors) != 50);
	BUILD_BUG_ON(offsetof(struct pqi_aio_path_request, cdb_length) != 51);
	BUILD_BUG_ON(offsetof(struct pqi_aio_path_request, lun_number) != 52);
	BUILD_BUG_ON(offsetof(struct pqi_aio_path_request, sg_descriptors) != 64);
	BUILD_BUG_ON(sizeof(struct pqi_aio_path_request) !=
		PQI_OPERATIONAL_IQ_ELEMENT_LENGTH);

	BUILD_BUG_ON(offsetof(struct pqi_io_response, header.iu_type) != 0);
	BUILD_BUG_ON(offsetof(struct pqi_io_response, header.iu_length) != 2);
	BUILD_BUG_ON(offsetof(struct pqi_io_response, request_id) != 8);
	BUILD_BUG_ON(offsetof(struct pqi_io_response, error_index) != 10);

	BUILD_BUG_ON(offsetof(struct pqi_general_management_request, header.iu_type) != 0);
	BUILD_BUG_ON(offsetof(struct pqi_general_management_request, header.iu_length) != 2);
	BUILD_BUG_ON(offsetof(struct pqi_general_management_request, header.response_queue_id) != 4);
	BUILD_BUG_ON(offsetof(struct pqi_general_management_request, request_id) != 8);
	BUILD_BUG_ON(offsetof(struct pqi_general_management_request, data.report_event_configuration.buffer_length) != 12);
	BUILD_BUG_ON(offsetof(struct pqi_general_management_request, data.report_event_configuration.sg_descriptors) != 16);
	BUILD_BUG_ON(offsetof(struct pqi_general_management_request, data.set_event_configuration.global_event_oq_id) != 10);
	BUILD_BUG_ON(offsetof(struct pqi_general_management_request, data.set_event_configuration.buffer_length) != 12);
	BUILD_BUG_ON(offsetof(struct pqi_general_management_request, data.set_event_configuration.sg_descriptors) != 16);

	BUILD_BUG_ON(offsetof(struct pqi_iu_layer_descriptor, max_inbound_iu_length) != 6);
	BUILD_BUG_ON(offsetof(struct pqi_iu_layer_descriptor, max_outbound_iu_length) != 14);
	BUILD_BUG_ON(sizeof(struct pqi_iu_layer_descriptor) != 16);

	BUILD_BUG_ON(offsetof(struct pqi_device_capability, data_length) != 0);
	BUILD_BUG_ON(offsetof(struct pqi_device_capability, iq_arbitration_priority_support_bitmask) != 8);
	BUILD_BUG_ON(offsetof(struct pqi_device_capability, maximum_aw_a) != 9);
	BUILD_BUG_ON(offsetof(struct pqi_device_capability, maximum_aw_b) != 10);
	BUILD_BUG_ON(offsetof(struct pqi_device_capability, maximum_aw_c) != 11);
	BUILD_BUG_ON(offsetof(struct pqi_device_capability, max_inbound_queues) != 16);
	BUILD_BUG_ON(offsetof(struct pqi_device_capability, max_elements_per_iq) != 18);
	BUILD_BUG_ON(offsetof(struct pqi_device_capability, max_iq_element_length) != 24);
	BUILD_BUG_ON(offsetof(struct pqi_device_capability, min_iq_element_length) != 26);
	BUILD_BUG_ON(offsetof(struct pqi_device_capability, max_outbound_queues) != 30);
	BUILD_BUG_ON(offsetof(struct pqi_device_capability, max_elements_per_oq) != 32);
	BUILD_BUG_ON(offsetof(struct pqi_device_capability, intr_coalescing_time_granularity) != 34);
	BUILD_BUG_ON(offsetof(struct pqi_device_capability, max_oq_element_length) != 36);
	BUILD_BUG_ON(offsetof(struct pqi_device_capability, min_oq_element_length) != 38);
	BUILD_BUG_ON(offsetof(struct pqi_device_capability, iu_layer_descriptors) != 64);
	BUILD_BUG_ON(sizeof(struct pqi_device_capability) != 576);

	BUILD_BUG_ON(offsetof(struct pqi_event_descriptor, event_type) != 0);
	BUILD_BUG_ON(offsetof(struct pqi_event_descriptor, oq_id) != 2);
	BUILD_BUG_ON(sizeof(struct pqi_event_descriptor) != 4);

	BUILD_BUG_ON(offsetof(struct pqi_event_config, num_event_descriptors) != 2);
	BUILD_BUG_ON(offsetof(struct pqi_event_config, descriptors) != 4);

	BUILD_BUG_ON(offsetof(struct pqi_event_response, header.iu_type) != 0);
	BUILD_BUG_ON(offsetof(struct pqi_event_response, header.iu_length) != 2);
	BUILD_BUG_ON(offsetof(struct pqi_event_response, event_type) != 8);
	BUILD_BUG_ON(offsetof(struct pqi_event_response, event_id) != 10);
	BUILD_BUG_ON(offsetof(struct pqi_event_response, additional_event_id) != 12);
	BUILD_BUG_ON(offsetof(struct pqi_event_response, data) != 16);
	BUILD_BUG_ON(sizeof(struct pqi_event_response) != 32);

	BUILD_BUG_ON(offsetof(struct pqi_event_acknowledge_request, header.iu_type) != 0);
	BUILD_BUG_ON(offsetof(struct pqi_event_acknowledge_request, header.iu_length) != 2);
	BUILD_BUG_ON(offsetof(struct pqi_event_acknowledge_request, event_type) != 8);
	BUILD_BUG_ON(offsetof(struct pqi_event_acknowledge_request, event_id) != 10);
	BUILD_BUG_ON(offsetof(struct pqi_event_acknowledge_request, additional_event_id) != 12);
	BUILD_BUG_ON(sizeof(struct pqi_event_acknowledge_request) != 16);

	BUILD_BUG_ON(offsetof(struct pqi_task_management_request, header.iu_type) != 0);
	BUILD_BUG_ON(offsetof(struct pqi_task_management_request, header.iu_length) != 2);
	BUILD_BUG_ON(offsetof(struct pqi_task_management_request, request_id) != 8);
	BUILD_BUG_ON(offsetof(struct pqi_task_management_request, nexus_id) != 10);
	BUILD_BUG_ON(offsetof(struct pqi_task_management_request, lun_number) != 16);
	BUILD_BUG_ON(offsetof(struct pqi_task_management_request, protocol_specific) != 24);
	BUILD_BUG_ON(offsetof(struct pqi_task_management_request, outbound_queue_id_to_manage) != 26);
	BUILD_BUG_ON(offsetof(struct pqi_task_management_request, request_id_to_manage) != 28);
	BUILD_BUG_ON(offsetof(struct pqi_task_management_request, task_management_function) != 30);
	BUILD_BUG_ON(sizeof(struct pqi_task_management_request) != 32);

	BUILD_BUG_ON(offsetof(struct pqi_task_management_response, header.iu_type) != 0);
	BUILD_BUG_ON(offsetof(struct pqi_task_management_response, header.iu_length) != 2);
	BUILD_BUG_ON(offsetof(struct pqi_task_management_response, request_id) != 8);
	BUILD_BUG_ON(offsetof(struct pqi_task_management_response, nexus_id) != 10);
	BUILD_BUG_ON(offsetof(struct pqi_task_management_response, additional_response_info) != 12);
	BUILD_BUG_ON(offsetof(struct pqi_task_management_response, response_code) != 15);
	BUILD_BUG_ON(sizeof(struct pqi_task_management_response) != 16);

	BUILD_BUG_ON(offsetof(struct bmic_identify_controller, configured_logical_drive_count) != 0);
	BUILD_BUG_ON(offsetof(struct bmic_identify_controller, configuration_signature) != 1);
	BUILD_BUG_ON(offsetof(struct bmic_identify_controller, firmware_version) != 5);
	BUILD_BUG_ON(offsetof(struct bmic_identify_controller, extended_logical_unit_count) != 154);
	BUILD_BUG_ON(offsetof(struct bmic_identify_controller, firmware_build_number) != 190);
	BUILD_BUG_ON(offsetof(struct bmic_identify_controller, controller_mode) != 292);

	BUILD_BUG_ON(PQI_ADMIN_IQ_NUM_ELEMENTS > 255);
	BUILD_BUG_ON(PQI_ADMIN_OQ_NUM_ELEMENTS > 255);
	BUILD_BUG_ON(PQI_ADMIN_IQ_ELEMENT_LENGTH %
		PQI_QUEUE_ELEMENT_LENGTH_ALIGNMENT != 0);
	BUILD_BUG_ON(PQI_ADMIN_OQ_ELEMENT_LENGTH %
		PQI_QUEUE_ELEMENT_LENGTH_ALIGNMENT != 0);
	BUILD_BUG_ON(PQI_OPERATIONAL_IQ_ELEMENT_LENGTH > 1048560);
	BUILD_BUG_ON(PQI_OPERATIONAL_IQ_ELEMENT_LENGTH %
		PQI_QUEUE_ELEMENT_LENGTH_ALIGNMENT != 0);
	BUILD_BUG_ON(PQI_OPERATIONAL_OQ_ELEMENT_LENGTH > 1048560);
	BUILD_BUG_ON(PQI_OPERATIONAL_OQ_ELEMENT_LENGTH %
		PQI_QUEUE_ELEMENT_LENGTH_ALIGNMENT != 0);

	BUILD_BUG_ON(PQI_RESERVED_IO_SLOTS >= PQI_MAX_OUTSTANDING_REQUESTS);
}