drivers/scsi/smartpqi/smartpqi_init.c
1/*
2 * driver for Microsemi PQI-based storage controllers
3 * Copyright (c) 2016 Microsemi Corporation
4 * Copyright (c) 2016 PMC-Sierra, Inc.
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; version 2 of the License.
9 *
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
13 * NON INFRINGEMENT. See the GNU General Public License for more details.
14 *
15 * Questions/Comments/Bugfixes to esc.storagedev@microsemi.com
16 *
17 */
18
19#include <linux/module.h>
20#include <linux/kernel.h>
21#include <linux/pci.h>
22#include <linux/delay.h>
23#include <linux/interrupt.h>
24#include <linux/sched.h>
25#include <linux/rtc.h>
26#include <linux/bcd.h>
27#include <linux/cciss_ioctl.h>
28#include <scsi/scsi_host.h>
29#include <scsi/scsi_cmnd.h>
30#include <scsi/scsi_device.h>
31#include <scsi/scsi_eh.h>
32#include <scsi/scsi_transport_sas.h>
33#include <asm/unaligned.h>
34#include "smartpqi.h"
35#include "smartpqi_sis.h"
36
37#if !defined(BUILD_TIMESTAMP)
38#define BUILD_TIMESTAMP
39#endif
40
41#define DRIVER_VERSION "0.9.9-100"
42#define DRIVER_MAJOR 0
43#define DRIVER_MINOR 9
44#define DRIVER_RELEASE 9
45#define DRIVER_REVISION 100
46
47#define DRIVER_NAME "Microsemi PQI Driver (v" DRIVER_VERSION ")"
48#define DRIVER_NAME_SHORT "smartpqi"
49
50MODULE_AUTHOR("Microsemi");
51MODULE_DESCRIPTION("Driver for Microsemi Smart Family Controller version "
52 DRIVER_VERSION);
53MODULE_SUPPORTED_DEVICE("Microsemi Smart Family Controllers");
54MODULE_VERSION(DRIVER_VERSION);
55MODULE_LICENSE("GPL");
56
57#define PQI_ENABLE_MULTI_QUEUE_SUPPORT 0
58
59static char *hpe_branded_controller = "HPE Smart Array Controller";
60static char *microsemi_branded_controller = "Microsemi Smart Family Controller";
61
62static void pqi_take_ctrl_offline(struct pqi_ctrl_info *ctrl_info);
63static int pqi_scan_scsi_devices(struct pqi_ctrl_info *ctrl_info);
64static void pqi_scan_start(struct Scsi_Host *shost);
65static void pqi_start_io(struct pqi_ctrl_info *ctrl_info,
66 struct pqi_queue_group *queue_group, enum pqi_io_path path,
67 struct pqi_io_request *io_request);
68static int pqi_submit_raid_request_synchronous(struct pqi_ctrl_info *ctrl_info,
69 struct pqi_iu_header *request, unsigned int flags,
70 struct pqi_raid_error_info *error_info, unsigned long timeout_msecs);
71static int pqi_aio_submit_io(struct pqi_ctrl_info *ctrl_info,
72 struct scsi_cmnd *scmd, u32 aio_handle, u8 *cdb,
73 unsigned int cdb_length, struct pqi_queue_group *queue_group,
74 struct pqi_encryption_info *encryption_info);
75
76/* for flags argument to pqi_submit_raid_request_synchronous() */
77#define PQI_SYNC_FLAGS_INTERRUPTABLE 0x1
78
79static struct scsi_transport_template *pqi_sas_transport_template;
80
81static atomic_t pqi_controller_count = ATOMIC_INIT(0);
82
83static int pqi_disable_device_id_wildcards;
84module_param_named(disable_device_id_wildcards,
85 pqi_disable_device_id_wildcards, int, S_IRUGO | S_IWUSR);
86MODULE_PARM_DESC(disable_device_id_wildcards,
87 "Disable device ID wildcards.");
88
89static char *raid_levels[] = {
90 "RAID-0",
91 "RAID-4",
92 "RAID-1(1+0)",
93 "RAID-5",
94 "RAID-5+1",
95 "RAID-ADG",
96 "RAID-1(ADM)",
97};
98
99static char *pqi_raid_level_to_string(u8 raid_level)
100{
101 if (raid_level < ARRAY_SIZE(raid_levels))
102 return raid_levels[raid_level];
103
104 return "";
105}
106
107#define SA_RAID_0 0
108#define SA_RAID_4 1
109#define SA_RAID_1 2 /* also used for RAID 10 */
110#define SA_RAID_5 3 /* also used for RAID 50 */
111#define SA_RAID_51 4
112#define SA_RAID_6 5 /* also used for RAID 60 */
113#define SA_RAID_ADM 6 /* also used for RAID 1+0 ADM */
114#define SA_RAID_MAX SA_RAID_ADM
115#define SA_RAID_UNKNOWN 0xff
116
117static inline void pqi_scsi_done(struct scsi_cmnd *scmd)
118{
119 scmd->scsi_done(scmd);
120}
121
122static inline bool pqi_scsi3addr_equal(u8 *scsi3addr1, u8 *scsi3addr2)
123{
124 return memcmp(scsi3addr1, scsi3addr2, 8) == 0;
125}
126
127static inline struct pqi_ctrl_info *shost_to_hba(struct Scsi_Host *shost)
128{
129 void *hostdata = shost_priv(shost);
130
131 return *((struct pqi_ctrl_info **)hostdata);
132}
133
134static inline bool pqi_is_logical_device(struct pqi_scsi_dev *device)
135{
136 return !device->is_physical_device;
137}
138
139static inline bool pqi_ctrl_offline(struct pqi_ctrl_info *ctrl_info)
140{
141 return !ctrl_info->controller_online;
142}
143
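/*
 * pqi_check_ctrl_health() asks the SIS support code whether the controller
 * firmware is still running; if it is not while the driver still considers
 * the controller online, the controller is taken offline, presumably so
 * that outstanding and future requests fail fast instead of hanging.
 */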
144static inline void pqi_check_ctrl_health(struct pqi_ctrl_info *ctrl_info)
145{
146 if (ctrl_info->controller_online)
147 if (!sis_is_firmware_running(ctrl_info))
148 pqi_take_ctrl_offline(ctrl_info);
149}
150
151static inline bool pqi_is_hba_lunid(u8 *scsi3addr)
152{
153 return pqi_scsi3addr_equal(scsi3addr, RAID_CTLR_LUNID);
154}
155
156#define PQI_RESCAN_WORK_INTERVAL (10 * HZ)
157
158static inline void pqi_schedule_rescan_worker(struct pqi_ctrl_info *ctrl_info)
159{
160 schedule_delayed_work(&ctrl_info->rescan_work,
161 PQI_RESCAN_WORK_INTERVAL);
162}
163
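/*
 * pqi_map_single() DMA-maps one kernel buffer (via the legacy
 * pci_map_single() API) and fills in a single PQI scatter-gather
 * descriptor with the little-endian bus address, length, and the
 * CISS_SG_LAST flag.  A NULL buffer, zero length, or PCI_DMA_NONE
 * direction is treated as "nothing to map" and succeeds.  Callers undo
 * the mapping with pqi_pci_unmap() below.
 */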
164static int pqi_map_single(struct pci_dev *pci_dev,
165 struct pqi_sg_descriptor *sg_descriptor, void *buffer,
166 size_t buffer_length, int data_direction)
167{
168 dma_addr_t bus_address;
169
170 if (!buffer || buffer_length == 0 || data_direction == PCI_DMA_NONE)
171 return 0;
172
173 bus_address = pci_map_single(pci_dev, buffer, buffer_length,
174 data_direction);
175 if (pci_dma_mapping_error(pci_dev, bus_address))
176 return -ENOMEM;
177
178 put_unaligned_le64((u64)bus_address, &sg_descriptor->address);
179 put_unaligned_le32(buffer_length, &sg_descriptor->length);
180 put_unaligned_le32(CISS_SG_LAST, &sg_descriptor->flags);
181
182 return 0;
183}
184
185static void pqi_pci_unmap(struct pci_dev *pci_dev,
186 struct pqi_sg_descriptor *descriptors, int num_descriptors,
187 int data_direction)
188{
189 int i;
190
191 if (data_direction == PCI_DMA_NONE)
192 return;
193
194 for (i = 0; i < num_descriptors; i++)
195 pci_unmap_single(pci_dev,
196 (dma_addr_t)get_unaligned_le64(&descriptors[i].address),
197 get_unaligned_le32(&descriptors[i].length),
198 data_direction);
199}
200
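/*
 * pqi_build_raid_path_request() prepares a RAID-path IU for the internal
 * CISS/BMIC commands used by this driver (INQUIRY, CISS_REPORT_LOG/PHYS,
 * CISS_GET_RAID_MAP, SA_CACHE_FLUSH, BMIC identify/write-wellness),
 * derives the PCI DMA direction from the SOP data-direction flag, and
 * maps the data buffer into sg_descriptors[0].  The usual call sequence
 * in this file is (illustrative sketch, mirroring
 * pqi_identify_controller() below):
 *
 *	rc = pqi_build_raid_path_request(ctrl_info, &request, cmd,
 *		RAID_CTLR_LUNID, buffer, buffer_length, 0, &pci_direction);
 *	if (rc)
 *		return rc;
 *	rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header,
 *		0, NULL, NO_TIMEOUT);
 *	pqi_pci_unmap(ctrl_info->pci_dev, request.sg_descriptors, 1,
 *		pci_direction);
 */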
201static int pqi_build_raid_path_request(struct pqi_ctrl_info *ctrl_info,
202 struct pqi_raid_path_request *request, u8 cmd,
203 u8 *scsi3addr, void *buffer, size_t buffer_length,
204 u16 vpd_page, int *pci_direction)
205{
206 u8 *cdb;
207 int pci_dir;
208
209 memset(request, 0, sizeof(*request));
210
211 request->header.iu_type = PQI_REQUEST_IU_RAID_PATH_IO;
212 put_unaligned_le16(offsetof(struct pqi_raid_path_request,
213 sg_descriptors[1]) - PQI_REQUEST_HEADER_LENGTH,
214 &request->header.iu_length);
215 put_unaligned_le32(buffer_length, &request->buffer_length);
216 memcpy(request->lun_number, scsi3addr, sizeof(request->lun_number));
217 request->task_attribute = SOP_TASK_ATTRIBUTE_SIMPLE;
218 request->additional_cdb_bytes_usage = SOP_ADDITIONAL_CDB_BYTES_0;
219
220 cdb = request->cdb;
221
222 switch (cmd) {
223 case INQUIRY:
224 request->data_direction = SOP_READ_FLAG;
225 cdb[0] = INQUIRY;
226 if (vpd_page & VPD_PAGE) {
227 cdb[1] = 0x1;
228 cdb[2] = (u8)vpd_page;
229 }
230 cdb[4] = (u8)buffer_length;
231 break;
232 case CISS_REPORT_LOG:
233 case CISS_REPORT_PHYS:
234 request->data_direction = SOP_READ_FLAG;
235 cdb[0] = cmd;
236 if (cmd == CISS_REPORT_PHYS)
237 cdb[1] = CISS_REPORT_PHYS_EXTENDED;
238 else
239 cdb[1] = CISS_REPORT_LOG_EXTENDED;
240 put_unaligned_be32(buffer_length, &cdb[6]);
241 break;
242 case CISS_GET_RAID_MAP:
243 request->data_direction = SOP_READ_FLAG;
244 cdb[0] = CISS_READ;
245 cdb[1] = CISS_GET_RAID_MAP;
246 put_unaligned_be32(buffer_length, &cdb[6]);
247 break;
248 case SA_CACHE_FLUSH:
249 request->data_direction = SOP_WRITE_FLAG;
250 cdb[0] = BMIC_WRITE;
251 cdb[6] = BMIC_CACHE_FLUSH;
252 put_unaligned_be16(buffer_length, &cdb[7]);
253 break;
254 case BMIC_IDENTIFY_CONTROLLER:
255 case BMIC_IDENTIFY_PHYSICAL_DEVICE:
256 request->data_direction = SOP_READ_FLAG;
257 cdb[0] = BMIC_READ;
258 cdb[6] = cmd;
259 put_unaligned_be16(buffer_length, &cdb[7]);
260 break;
261 case BMIC_WRITE_HOST_WELLNESS:
262 request->data_direction = SOP_WRITE_FLAG;
263 cdb[0] = BMIC_WRITE;
264 cdb[6] = cmd;
265 put_unaligned_be16(buffer_length, &cdb[7]);
266 break;
267 default:
 268 dev_err(&ctrl_info->pci_dev->dev, "unknown command 0x%x\n",
 269 cmd);
270 WARN_ON(cmd);
271 break;
272 }
273
274 switch (request->data_direction) {
275 case SOP_READ_FLAG:
276 pci_dir = PCI_DMA_FROMDEVICE;
277 break;
278 case SOP_WRITE_FLAG:
279 pci_dir = PCI_DMA_TODEVICE;
280 break;
281 case SOP_NO_DIRECTION_FLAG:
282 pci_dir = PCI_DMA_NONE;
283 break;
284 default:
285 pci_dir = PCI_DMA_BIDIRECTIONAL;
286 break;
287 }
288
289 *pci_direction = pci_dir;
290
291 return pqi_map_single(ctrl_info->pci_dev, &request->sg_descriptors[0],
292 buffer, buffer_length, pci_dir);
293}
294
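/*
 * pqi_alloc_io_request() claims a free slot from the pre-allocated
 * io_request_pool without taking a lock: the atomic_inc_return() that
 * moves a slot's refcount from 0 to 1 owns that slot; any other result
 * means the slot is busy and the increment is undone.
 * next_io_request_slot is only a starting hint, so races on it are
 * harmless ("benignly racy").  pqi_free_io_request() releases the slot by
 * dropping the refcount.
 */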
295static struct pqi_io_request *pqi_alloc_io_request(
296 struct pqi_ctrl_info *ctrl_info)
297{
298 struct pqi_io_request *io_request;
299 u16 i = ctrl_info->next_io_request_slot; /* benignly racy */
300
301 while (1) {
302 io_request = &ctrl_info->io_request_pool[i];
303 if (atomic_inc_return(&io_request->refcount) == 1)
304 break;
305 atomic_dec(&io_request->refcount);
306 i = (i + 1) % ctrl_info->max_io_slots;
307 }
308
309 /* benignly racy */
310 ctrl_info->next_io_request_slot = (i + 1) % ctrl_info->max_io_slots;
311
312 io_request->scmd = NULL;
313 io_request->status = 0;
314 io_request->error_info = NULL;
315
316 return io_request;
317}
318
319static void pqi_free_io_request(struct pqi_io_request *io_request)
320{
321 atomic_dec(&io_request->refcount);
322}
323
324static int pqi_identify_controller(struct pqi_ctrl_info *ctrl_info,
325 struct bmic_identify_controller *buffer)
326{
327 int rc;
328 int pci_direction;
329 struct pqi_raid_path_request request;
330
331 rc = pqi_build_raid_path_request(ctrl_info, &request,
332 BMIC_IDENTIFY_CONTROLLER, RAID_CTLR_LUNID, buffer,
333 sizeof(*buffer), 0, &pci_direction);
334 if (rc)
335 return rc;
336
337 rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header, 0,
338 NULL, NO_TIMEOUT);
339
340 pqi_pci_unmap(ctrl_info->pci_dev, request.sg_descriptors, 1,
341 pci_direction);
342
343 return rc;
344}
345
346static int pqi_scsi_inquiry(struct pqi_ctrl_info *ctrl_info,
347 u8 *scsi3addr, u16 vpd_page, void *buffer, size_t buffer_length)
348{
349 int rc;
350 int pci_direction;
351 struct pqi_raid_path_request request;
352
353 rc = pqi_build_raid_path_request(ctrl_info, &request,
354 INQUIRY, scsi3addr, buffer, buffer_length, vpd_page,
355 &pci_direction);
356 if (rc)
357 return rc;
358
359 rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header, 0,
360 NULL, NO_TIMEOUT);
361
362 pqi_pci_unmap(ctrl_info->pci_dev, request.sg_descriptors, 1,
363 pci_direction);
364
365 return rc;
366}
367
368static int pqi_identify_physical_device(struct pqi_ctrl_info *ctrl_info,
369 struct pqi_scsi_dev *device,
370 struct bmic_identify_physical_device *buffer,
371 size_t buffer_length)
372{
373 int rc;
374 int pci_direction;
375 u16 bmic_device_index;
376 struct pqi_raid_path_request request;
377
378 rc = pqi_build_raid_path_request(ctrl_info, &request,
379 BMIC_IDENTIFY_PHYSICAL_DEVICE, RAID_CTLR_LUNID, buffer,
380 buffer_length, 0, &pci_direction);
381 if (rc)
382 return rc;
383
384 bmic_device_index = CISS_GET_DRIVE_NUMBER(device->scsi3addr);
385 request.cdb[2] = (u8)bmic_device_index;
386 request.cdb[9] = (u8)(bmic_device_index >> 8);
387
388 rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header,
389 0, NULL, NO_TIMEOUT);
390
391 pqi_pci_unmap(ctrl_info->pci_dev, request.sg_descriptors, 1,
392 pci_direction);
393
394 return rc;
395}
396
397#define SA_CACHE_FLUSH_BUFFER_LENGTH 4
398#define PQI_FLUSH_CACHE_TIMEOUT (30 * 1000)
399
400static int pqi_flush_cache(struct pqi_ctrl_info *ctrl_info)
401{
402 int rc;
403 struct pqi_raid_path_request request;
404 int pci_direction;
405 u8 *buffer;
406
407 /*
408 * Don't bother trying to flush the cache if the controller is
409 * locked up.
410 */
411 if (pqi_ctrl_offline(ctrl_info))
412 return -ENXIO;
413
414 buffer = kzalloc(SA_CACHE_FLUSH_BUFFER_LENGTH, GFP_KERNEL);
415 if (!buffer)
416 return -ENOMEM;
417
418 rc = pqi_build_raid_path_request(ctrl_info, &request,
419 SA_CACHE_FLUSH, RAID_CTLR_LUNID, buffer,
420 SA_CACHE_FLUSH_BUFFER_LENGTH, 0, &pci_direction);
421 if (rc)
422 goto out;
423
424 rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header,
425 0, NULL, PQI_FLUSH_CACHE_TIMEOUT);
426
427 pqi_pci_unmap(ctrl_info->pci_dev, request.sg_descriptors, 1,
428 pci_direction);
429
430out:
431 kfree(buffer);
432
433 return rc;
434}
435
436static int pqi_write_host_wellness(struct pqi_ctrl_info *ctrl_info,
437 void *buffer, size_t buffer_length)
438{
439 int rc;
440 struct pqi_raid_path_request request;
441 int pci_direction;
442
443 rc = pqi_build_raid_path_request(ctrl_info, &request,
444 BMIC_WRITE_HOST_WELLNESS, RAID_CTLR_LUNID, buffer,
445 buffer_length, 0, &pci_direction);
446 if (rc)
447 return rc;
448
449 rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header,
450 0, NULL, NO_TIMEOUT);
451
452 pqi_pci_unmap(ctrl_info->pci_dev, request.sg_descriptors, 1,
453 pci_direction);
454
455 return rc;
456}
457
458#pragma pack(1)
459
460struct bmic_host_wellness_driver_version {
461 u8 start_tag[4];
462 u8 driver_version_tag[2];
463 __le16 driver_version_length;
464 char driver_version[32];
465 u8 end_tag[2];
466};
467
468#pragma pack()
469
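/*
 * The BMIC "host wellness" buffers are framed with ASCII tags that the
 * controller firmware evidently parses: a "<HW>" start tag, a two-byte
 * field tag ("DV" for the driver version here, "TD" for the time/date
 * below), a little-endian field length, the field payload, and a "ZZ"
 * end tag.
 */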
470static int pqi_write_driver_version_to_host_wellness(
471 struct pqi_ctrl_info *ctrl_info)
472{
473 int rc;
474 struct bmic_host_wellness_driver_version *buffer;
475 size_t buffer_length;
476
477 buffer_length = sizeof(*buffer);
478
479 buffer = kmalloc(buffer_length, GFP_KERNEL);
480 if (!buffer)
481 return -ENOMEM;
482
483 buffer->start_tag[0] = '<';
484 buffer->start_tag[1] = 'H';
485 buffer->start_tag[2] = 'W';
486 buffer->start_tag[3] = '>';
487 buffer->driver_version_tag[0] = 'D';
488 buffer->driver_version_tag[1] = 'V';
489 put_unaligned_le16(sizeof(buffer->driver_version),
490 &buffer->driver_version_length);
491 strncpy(buffer->driver_version, DRIVER_VERSION,
492 sizeof(buffer->driver_version) - 1);
493 buffer->driver_version[sizeof(buffer->driver_version) - 1] = '\0';
494 buffer->end_tag[0] = 'Z';
495 buffer->end_tag[1] = 'Z';
496
497 rc = pqi_write_host_wellness(ctrl_info, buffer, buffer_length);
498
499 kfree(buffer);
500
501 return rc;
502}
503
504#pragma pack(1)
505
506struct bmic_host_wellness_time {
507 u8 start_tag[4];
508 u8 time_tag[2];
509 __le16 time_length;
510 u8 time[8];
511 u8 dont_write_tag[2];
512 u8 end_tag[2];
513};
514
515#pragma pack()
516
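/*
 * The 8-byte time payload written below is BCD encoded as:
 * time[0] = hour, time[1] = minute, time[2] = second, time[3] = reserved,
 * time[4] = month, time[5] = day, time[6] = century, time[7] = year
 * within the century.  The wall-clock time is first shifted to local time
 * using sys_tz.
 */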
517static int pqi_write_current_time_to_host_wellness(
518 struct pqi_ctrl_info *ctrl_info)
519{
520 int rc;
521 struct bmic_host_wellness_time *buffer;
522 size_t buffer_length;
523 time64_t local_time;
524 unsigned int year;
525 struct timeval time;
526 struct rtc_time tm;
527
528 buffer_length = sizeof(*buffer);
529
530 buffer = kmalloc(buffer_length, GFP_KERNEL);
531 if (!buffer)
532 return -ENOMEM;
533
534 buffer->start_tag[0] = '<';
535 buffer->start_tag[1] = 'H';
536 buffer->start_tag[2] = 'W';
537 buffer->start_tag[3] = '>';
538 buffer->time_tag[0] = 'T';
539 buffer->time_tag[1] = 'D';
540 put_unaligned_le16(sizeof(buffer->time),
541 &buffer->time_length);
542
543 do_gettimeofday(&time);
544 local_time = time.tv_sec - (sys_tz.tz_minuteswest * 60);
545 rtc_time64_to_tm(local_time, &tm);
546 year = tm.tm_year + 1900;
547
548 buffer->time[0] = bin2bcd(tm.tm_hour);
549 buffer->time[1] = bin2bcd(tm.tm_min);
550 buffer->time[2] = bin2bcd(tm.tm_sec);
551 buffer->time[3] = 0;
552 buffer->time[4] = bin2bcd(tm.tm_mon + 1);
553 buffer->time[5] = bin2bcd(tm.tm_mday);
554 buffer->time[6] = bin2bcd(year / 100);
555 buffer->time[7] = bin2bcd(year % 100);
556
557 buffer->dont_write_tag[0] = 'D';
558 buffer->dont_write_tag[1] = 'W';
559 buffer->end_tag[0] = 'Z';
560 buffer->end_tag[1] = 'Z';
561
562 rc = pqi_write_host_wellness(ctrl_info, buffer, buffer_length);
563
564 kfree(buffer);
565
566 return rc;
567}
568
569#define PQI_UPDATE_TIME_WORK_INTERVAL (24UL * 60 * 60 * HZ)
570
571static void pqi_update_time_worker(struct work_struct *work)
572{
573 int rc;
574 struct pqi_ctrl_info *ctrl_info;
575
576 ctrl_info = container_of(to_delayed_work(work), struct pqi_ctrl_info,
577 update_time_work);
578
579 if (!ctrl_info) {
580 printk("%s: NULL controller pointer.\n", __func__);
581 return;
582 }
583 rc = pqi_write_current_time_to_host_wellness(ctrl_info);
584 if (rc)
585 dev_warn(&ctrl_info->pci_dev->dev,
586 "error updating time on controller\n");
587
588 schedule_delayed_work(&ctrl_info->update_time_work,
589 PQI_UPDATE_TIME_WORK_INTERVAL);
590}
591
592static inline void pqi_schedule_update_time_worker(
593 struct pqi_ctrl_info *ctrl_info)
594{
595 schedule_delayed_work(&ctrl_info->update_time_work, 120);
596}
597
598static int pqi_report_luns(struct pqi_ctrl_info *ctrl_info, u8 cmd,
599 void *buffer, size_t buffer_length)
600{
601 int rc;
602 int pci_direction;
603 struct pqi_raid_path_request request;
604
605 rc = pqi_build_raid_path_request(ctrl_info, &request,
606 cmd, RAID_CTLR_LUNID, buffer, buffer_length, 0, &pci_direction);
607 if (rc)
608 return rc;
609
610 rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header, 0,
611 NULL, NO_TIMEOUT);
612
613 pqi_pci_unmap(ctrl_info->pci_dev, request.sg_descriptors, 1,
614 pci_direction);
615
616 return rc;
617}
618
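/*
 * pqi_report_phys_logical_luns() issues the extended CISS REPORT LUNS
 * command in two passes: first only the report_lun_header is read to
 * learn the list length, then a full-size buffer is allocated and the
 * command is reissued.  If the returned list grew between the two passes
 * (devices were added), the buffer is reallocated and the full read is
 * retried.
 */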
619static int pqi_report_phys_logical_luns(struct pqi_ctrl_info *ctrl_info, u8 cmd,
620 void **buffer)
621{
622 int rc;
623 size_t lun_list_length;
624 size_t lun_data_length;
625 size_t new_lun_list_length;
626 void *lun_data = NULL;
627 struct report_lun_header *report_lun_header;
628
629 report_lun_header = kmalloc(sizeof(*report_lun_header), GFP_KERNEL);
630 if (!report_lun_header) {
631 rc = -ENOMEM;
632 goto out;
633 }
634
635 rc = pqi_report_luns(ctrl_info, cmd, report_lun_header,
636 sizeof(*report_lun_header));
637 if (rc)
638 goto out;
639
640 lun_list_length = get_unaligned_be32(&report_lun_header->list_length);
641
642again:
643 lun_data_length = sizeof(struct report_lun_header) + lun_list_length;
644
645 lun_data = kmalloc(lun_data_length, GFP_KERNEL);
646 if (!lun_data) {
647 rc = -ENOMEM;
648 goto out;
649 }
650
651 if (lun_list_length == 0) {
652 memcpy(lun_data, report_lun_header, sizeof(*report_lun_header));
653 goto out;
654 }
655
656 rc = pqi_report_luns(ctrl_info, cmd, lun_data, lun_data_length);
657 if (rc)
658 goto out;
659
660 new_lun_list_length = get_unaligned_be32(
661 &((struct report_lun_header *)lun_data)->list_length);
662
663 if (new_lun_list_length > lun_list_length) {
664 lun_list_length = new_lun_list_length;
665 kfree(lun_data);
666 goto again;
667 }
668
669out:
670 kfree(report_lun_header);
671
672 if (rc) {
673 kfree(lun_data);
674 lun_data = NULL;
675 }
676
677 *buffer = lun_data;
678
679 return rc;
680}
681
682static inline int pqi_report_phys_luns(struct pqi_ctrl_info *ctrl_info,
683 void **buffer)
684{
685 return pqi_report_phys_logical_luns(ctrl_info, CISS_REPORT_PHYS,
686 buffer);
687}
688
689static inline int pqi_report_logical_luns(struct pqi_ctrl_info *ctrl_info,
690 void **buffer)
691{
692 return pqi_report_phys_logical_luns(ctrl_info, CISS_REPORT_LOG, buffer);
693}
694
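/*
 * pqi_get_device_lists() fetches both the physical and logical LUN lists
 * and then appends one zeroed report_log_lun_extended_entry to the
 * logical list so that the controller itself always shows up as a logical
 * device during the scan (see the "Tack the controller itself" comment in
 * the body).
 */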
695static int pqi_get_device_lists(struct pqi_ctrl_info *ctrl_info,
696 struct report_phys_lun_extended **physdev_list,
697 struct report_log_lun_extended **logdev_list)
698{
699 int rc;
700 size_t logdev_list_length;
701 size_t logdev_data_length;
702 struct report_log_lun_extended *internal_logdev_list;
703 struct report_log_lun_extended *logdev_data;
704 struct report_lun_header report_lun_header;
705
706 rc = pqi_report_phys_luns(ctrl_info, (void **)physdev_list);
707 if (rc)
708 dev_err(&ctrl_info->pci_dev->dev,
709 "report physical LUNs failed\n");
710
711 rc = pqi_report_logical_luns(ctrl_info, (void **)logdev_list);
712 if (rc)
713 dev_err(&ctrl_info->pci_dev->dev,
714 "report logical LUNs failed\n");
715
716 /*
717 * Tack the controller itself onto the end of the logical device list.
718 */
719
720 logdev_data = *logdev_list;
721
722 if (logdev_data) {
723 logdev_list_length =
724 get_unaligned_be32(&logdev_data->header.list_length);
725 } else {
726 memset(&report_lun_header, 0, sizeof(report_lun_header));
727 logdev_data =
728 (struct report_log_lun_extended *)&report_lun_header;
729 logdev_list_length = 0;
730 }
731
732 logdev_data_length = sizeof(struct report_lun_header) +
733 logdev_list_length;
734
735 internal_logdev_list = kmalloc(logdev_data_length +
736 sizeof(struct report_log_lun_extended), GFP_KERNEL);
737 if (!internal_logdev_list) {
738 kfree(*logdev_list);
739 *logdev_list = NULL;
740 return -ENOMEM;
741 }
742
743 memcpy(internal_logdev_list, logdev_data, logdev_data_length);
744 memset((u8 *)internal_logdev_list + logdev_data_length, 0,
745 sizeof(struct report_log_lun_extended_entry));
746 put_unaligned_be32(logdev_list_length +
747 sizeof(struct report_log_lun_extended_entry),
748 &internal_logdev_list->header.list_length);
749
750 kfree(*logdev_list);
751 *logdev_list = internal_logdev_list;
752
753 return 0;
754}
755
756static inline void pqi_set_bus_target_lun(struct pqi_scsi_dev *device,
757 int bus, int target, int lun)
758{
759 device->bus = bus;
760 device->target = target;
761 device->lun = lun;
762}
763
764static void pqi_assign_bus_target_lun(struct pqi_scsi_dev *device)
765{
766 u8 *scsi3addr;
767 u32 lunid;
768
769 scsi3addr = device->scsi3addr;
770 lunid = get_unaligned_le32(scsi3addr);
771
772 if (pqi_is_hba_lunid(scsi3addr)) {
773 /* The specified device is the controller. */
774 pqi_set_bus_target_lun(device, PQI_HBA_BUS, 0, lunid & 0x3fff);
775 device->target_lun_valid = true;
776 return;
777 }
778
779 if (pqi_is_logical_device(device)) {
780 pqi_set_bus_target_lun(device, PQI_RAID_VOLUME_BUS, 0,
781 lunid & 0x3fff);
782 device->target_lun_valid = true;
783 return;
784 }
785
786 /*
787 * Defer target and LUN assignment for non-controller physical devices
788 * because the SAS transport layer will make these assignments later.
789 */
790 pqi_set_bus_target_lun(device, PQI_PHYSICAL_DEVICE_BUS, 0, 0);
791}
792
793static void pqi_get_raid_level(struct pqi_ctrl_info *ctrl_info,
794 struct pqi_scsi_dev *device)
795{
796 int rc;
797 u8 raid_level;
798 u8 *buffer;
799
800 raid_level = SA_RAID_UNKNOWN;
801
802 buffer = kmalloc(64, GFP_KERNEL);
803 if (buffer) {
804 rc = pqi_scsi_inquiry(ctrl_info, device->scsi3addr,
805 VPD_PAGE | CISS_VPD_LV_DEVICE_GEOMETRY, buffer, 64);
806 if (rc == 0) {
807 raid_level = buffer[8];
808 if (raid_level > SA_RAID_MAX)
809 raid_level = SA_RAID_UNKNOWN;
810 }
811 kfree(buffer);
812 }
813
814 device->raid_level = raid_level;
815}
816
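/*
 * pqi_validate_raid_map() sanity-checks a RAID map before it is used for
 * offload (bypass) mapping: the structure size must cover the fixed
 * header and fit the allocated buffer, the total entry count must not
 * exceed RAID_MAP_MAX_ENTRIES, RAID-1 maps must carry exactly 2 layout
 * maps and RAID-1(ADM) exactly 3, and RAID-50/60 maps must describe a
 * non-zero number of blocks per row.
 */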
817static int pqi_validate_raid_map(struct pqi_ctrl_info *ctrl_info,
818 struct pqi_scsi_dev *device, struct raid_map *raid_map)
819{
820 char *err_msg;
821 u32 raid_map_size;
822 u32 r5or6_blocks_per_row;
823 unsigned int num_phys_disks;
824 unsigned int num_raid_map_entries;
825
826 raid_map_size = get_unaligned_le32(&raid_map->structure_size);
827
828 if (raid_map_size < offsetof(struct raid_map, disk_data)) {
829 err_msg = "RAID map too small";
830 goto bad_raid_map;
831 }
832
833 if (raid_map_size > sizeof(*raid_map)) {
834 err_msg = "RAID map too large";
835 goto bad_raid_map;
836 }
837
838 num_phys_disks = get_unaligned_le16(&raid_map->layout_map_count) *
839 (get_unaligned_le16(&raid_map->data_disks_per_row) +
840 get_unaligned_le16(&raid_map->metadata_disks_per_row));
841 num_raid_map_entries = num_phys_disks *
842 get_unaligned_le16(&raid_map->row_cnt);
843
844 if (num_raid_map_entries > RAID_MAP_MAX_ENTRIES) {
845 err_msg = "invalid number of map entries in RAID map";
846 goto bad_raid_map;
847 }
848
849 if (device->raid_level == SA_RAID_1) {
850 if (get_unaligned_le16(&raid_map->layout_map_count) != 2) {
851 err_msg = "invalid RAID-1 map";
852 goto bad_raid_map;
853 }
854 } else if (device->raid_level == SA_RAID_ADM) {
855 if (get_unaligned_le16(&raid_map->layout_map_count) != 3) {
856 err_msg = "invalid RAID-1(ADM) map";
857 goto bad_raid_map;
858 }
859 } else if ((device->raid_level == SA_RAID_5 ||
860 device->raid_level == SA_RAID_6) &&
861 get_unaligned_le16(&raid_map->layout_map_count) > 1) {
862 /* RAID 50/60 */
863 r5or6_blocks_per_row =
864 get_unaligned_le16(&raid_map->strip_size) *
865 get_unaligned_le16(&raid_map->data_disks_per_row);
866 if (r5or6_blocks_per_row == 0) {
867 err_msg = "invalid RAID-5 or RAID-6 map";
868 goto bad_raid_map;
869 }
870 }
871
872 return 0;
873
874bad_raid_map:
875 dev_warn(&ctrl_info->pci_dev->dev, "%s\n", err_msg);
876
877 return -EINVAL;
878}
879
880static int pqi_get_raid_map(struct pqi_ctrl_info *ctrl_info,
881 struct pqi_scsi_dev *device)
882{
883 int rc;
884 int pci_direction;
885 struct pqi_raid_path_request request;
886 struct raid_map *raid_map;
887
888 raid_map = kmalloc(sizeof(*raid_map), GFP_KERNEL);
889 if (!raid_map)
890 return -ENOMEM;
891
892 rc = pqi_build_raid_path_request(ctrl_info, &request,
893 CISS_GET_RAID_MAP, device->scsi3addr, raid_map,
894 sizeof(*raid_map), 0, &pci_direction);
895 if (rc)
896 goto error;
897
898 rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header, 0,
899 NULL, NO_TIMEOUT);
900
901 pqi_pci_unmap(ctrl_info->pci_dev, request.sg_descriptors, 1,
902 pci_direction);
903
904 if (rc)
905 goto error;
906
907 rc = pqi_validate_raid_map(ctrl_info, device, raid_map);
908 if (rc)
909 goto error;
910
911 device->raid_map = raid_map;
912
913 return 0;
914
915error:
916 kfree(raid_map);
917
918 return rc;
919}
920
921static void pqi_get_offload_status(struct pqi_ctrl_info *ctrl_info,
922 struct pqi_scsi_dev *device)
923{
924 int rc;
925 u8 *buffer;
926 u8 offload_status;
927
928 buffer = kmalloc(64, GFP_KERNEL);
929 if (!buffer)
930 return;
931
932 rc = pqi_scsi_inquiry(ctrl_info, device->scsi3addr,
933 VPD_PAGE | CISS_VPD_LV_OFFLOAD_STATUS, buffer, 64);
934 if (rc)
935 goto out;
936
937#define OFFLOAD_STATUS_BYTE 4
938#define OFFLOAD_CONFIGURED_BIT 0x1
939#define OFFLOAD_ENABLED_BIT 0x2
940
941 offload_status = buffer[OFFLOAD_STATUS_BYTE];
942 device->offload_configured =
943 !!(offload_status & OFFLOAD_CONFIGURED_BIT);
944 if (device->offload_configured) {
945 device->offload_enabled_pending =
946 !!(offload_status & OFFLOAD_ENABLED_BIT);
947 if (pqi_get_raid_map(ctrl_info, device))
948 device->offload_enabled_pending = false;
949 }
950
951out:
952 kfree(buffer);
953}
954
955/*
956 * Use vendor-specific VPD to determine online/offline status of a volume.
957 */
958
959static void pqi_get_volume_status(struct pqi_ctrl_info *ctrl_info,
960 struct pqi_scsi_dev *device)
961{
962 int rc;
963 size_t page_length;
964 u8 volume_status = CISS_LV_STATUS_UNAVAILABLE;
965 bool volume_offline = true;
966 u32 volume_flags;
967 struct ciss_vpd_logical_volume_status *vpd;
968
969 vpd = kmalloc(sizeof(*vpd), GFP_KERNEL);
970 if (!vpd)
971 goto no_buffer;
972
973 rc = pqi_scsi_inquiry(ctrl_info, device->scsi3addr,
974 VPD_PAGE | CISS_VPD_LV_STATUS, vpd, sizeof(*vpd));
975 if (rc)
976 goto out;
977
978 page_length = offsetof(struct ciss_vpd_logical_volume_status,
979 volume_status) + vpd->page_length;
980 if (page_length < sizeof(*vpd))
981 goto out;
982
983 volume_status = vpd->volume_status;
984 volume_flags = get_unaligned_be32(&vpd->flags);
985 volume_offline = (volume_flags & CISS_LV_FLAGS_NO_HOST_IO) != 0;
986
987out:
988 kfree(vpd);
989no_buffer:
990 device->volume_status = volume_status;
991 device->volume_offline = volume_offline;
992}
993
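/*
 * pqi_get_device_info() issues a standard INQUIRY and decodes it using
 * the usual SCSI layout: byte 0 bits 4:0 are the peripheral device type,
 * bytes 8-15 the vendor ID, and bytes 16-31 the product ID.  For logical
 * disks it additionally pulls the RAID level, offload status, and volume
 * status from vendor-specific VPD pages.
 */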
994static int pqi_get_device_info(struct pqi_ctrl_info *ctrl_info,
995 struct pqi_scsi_dev *device)
996{
997 int rc;
998 u8 *buffer;
999
1000 buffer = kmalloc(64, GFP_KERNEL);
1001 if (!buffer)
1002 return -ENOMEM;
1003
1004 /* Send an inquiry to the device to see what it is. */
1005 rc = pqi_scsi_inquiry(ctrl_info, device->scsi3addr, 0, buffer, 64);
1006 if (rc)
1007 goto out;
1008
1009 scsi_sanitize_inquiry_string(&buffer[8], 8);
1010 scsi_sanitize_inquiry_string(&buffer[16], 16);
1011
1012 device->devtype = buffer[0] & 0x1f;
1013 memcpy(device->vendor, &buffer[8],
1014 sizeof(device->vendor));
1015 memcpy(device->model, &buffer[16],
1016 sizeof(device->model));
1017
1018 if (pqi_is_logical_device(device) && device->devtype == TYPE_DISK) {
1019 pqi_get_raid_level(ctrl_info, device);
1020 pqi_get_offload_status(ctrl_info, device);
1021 pqi_get_volume_status(ctrl_info, device);
1022 }
1023
1024out:
1025 kfree(buffer);
1026
1027 return rc;
1028}
1029
1030static void pqi_get_physical_disk_info(struct pqi_ctrl_info *ctrl_info,
1031 struct pqi_scsi_dev *device,
1032 struct bmic_identify_physical_device *id_phys)
1033{
1034 int rc;
1035
1036 memset(id_phys, 0, sizeof(*id_phys));
1037
1038 rc = pqi_identify_physical_device(ctrl_info, device,
1039 id_phys, sizeof(*id_phys));
1040 if (rc) {
1041 device->queue_depth = PQI_PHYSICAL_DISK_DEFAULT_MAX_QUEUE_DEPTH;
1042 return;
1043 }
1044
1045 device->queue_depth =
1046 get_unaligned_le16(&id_phys->current_queue_depth_limit);
1047 device->device_type = id_phys->device_type;
1048 device->active_path_index = id_phys->active_path_number;
1049 device->path_map = id_phys->redundant_path_present_map;
1050 memcpy(&device->box,
1051 &id_phys->alternate_paths_phys_box_on_port,
1052 sizeof(device->box));
1053 memcpy(&device->phys_connector,
1054 &id_phys->alternate_paths_phys_connector,
1055 sizeof(device->phys_connector));
1056 device->bay = id_phys->phys_bay_in_box;
1057}
1058
1059static void pqi_show_volume_status(struct pqi_ctrl_info *ctrl_info,
1060 struct pqi_scsi_dev *device)
1061{
1062 char *status;
1063 static const char unknown_state_str[] =
1064 "Volume is in an unknown state (%u)";
1065 char unknown_state_buffer[sizeof(unknown_state_str) + 10];
1066
1067 switch (device->volume_status) {
1068 case CISS_LV_OK:
1069 status = "Volume online";
1070 break;
1071 case CISS_LV_FAILED:
1072 status = "Volume failed";
1073 break;
1074 case CISS_LV_NOT_CONFIGURED:
1075 status = "Volume not configured";
1076 break;
1077 case CISS_LV_DEGRADED:
1078 status = "Volume degraded";
1079 break;
1080 case CISS_LV_READY_FOR_RECOVERY:
1081 status = "Volume ready for recovery operation";
1082 break;
1083 case CISS_LV_UNDERGOING_RECOVERY:
1084 status = "Volume undergoing recovery";
1085 break;
1086 case CISS_LV_WRONG_PHYSICAL_DRIVE_REPLACED:
1087 status = "Wrong physical drive was replaced";
1088 break;
1089 case CISS_LV_PHYSICAL_DRIVE_CONNECTION_PROBLEM:
 1090 status = "A physical drive is not properly connected";
1091 break;
1092 case CISS_LV_HARDWARE_OVERHEATING:
1093 status = "Hardware is overheating";
1094 break;
1095 case CISS_LV_HARDWARE_HAS_OVERHEATED:
1096 status = "Hardware has overheated";
1097 break;
1098 case CISS_LV_UNDERGOING_EXPANSION:
1099 status = "Volume undergoing expansion";
1100 break;
1101 case CISS_LV_NOT_AVAILABLE:
1102 status = "Volume waiting for transforming volume";
1103 break;
1104 case CISS_LV_QUEUED_FOR_EXPANSION:
1105 status = "Volume queued for expansion";
1106 break;
1107 case CISS_LV_DISABLED_SCSI_ID_CONFLICT:
1108 status = "Volume disabled due to SCSI ID conflict";
1109 break;
1110 case CISS_LV_EJECTED:
1111 status = "Volume has been ejected";
1112 break;
1113 case CISS_LV_UNDERGOING_ERASE:
1114 status = "Volume undergoing background erase";
1115 break;
1116 case CISS_LV_READY_FOR_PREDICTIVE_SPARE_REBUILD:
1117 status = "Volume ready for predictive spare rebuild";
1118 break;
1119 case CISS_LV_UNDERGOING_RPI:
1120 status = "Volume undergoing rapid parity initialization";
1121 break;
1122 case CISS_LV_PENDING_RPI:
1123 status = "Volume queued for rapid parity initialization";
1124 break;
1125 case CISS_LV_ENCRYPTED_NO_KEY:
1126 status = "Encrypted volume inaccessible - key not present";
1127 break;
1128 case CISS_LV_UNDERGOING_ENCRYPTION:
1129 status = "Volume undergoing encryption process";
1130 break;
1131 case CISS_LV_UNDERGOING_ENCRYPTION_REKEYING:
1132 status = "Volume undergoing encryption re-keying process";
1133 break;
1134 case CISS_LV_ENCRYPTED_IN_NON_ENCRYPTED_CONTROLLER:
1135 status =
1136 "Encrypted volume inaccessible - disabled on ctrl";
1137 break;
1138 case CISS_LV_PENDING_ENCRYPTION:
1139 status = "Volume pending migration to encrypted state";
1140 break;
1141 case CISS_LV_PENDING_ENCRYPTION_REKEYING:
1142 status = "Volume pending encryption rekeying";
1143 break;
1144 case CISS_LV_NOT_SUPPORTED:
1145 status = "Volume not supported on this controller";
1146 break;
1147 case CISS_LV_STATUS_UNAVAILABLE:
1148 status = "Volume status not available";
1149 break;
1150 default:
1151 snprintf(unknown_state_buffer, sizeof(unknown_state_buffer),
1152 unknown_state_str, device->volume_status);
1153 status = unknown_state_buffer;
1154 break;
1155 }
1156
1157 dev_info(&ctrl_info->pci_dev->dev,
1158 "scsi %d:%d:%d:%d %s\n",
1159 ctrl_info->scsi_host->host_no,
1160 device->bus, device->target, device->lun, status);
1161}
1162
1163static struct pqi_scsi_dev *pqi_find_disk_by_aio_handle(
1164 struct pqi_ctrl_info *ctrl_info, u32 aio_handle)
1165{
1166 struct pqi_scsi_dev *device;
1167
1168 list_for_each_entry(device, &ctrl_info->scsi_device_list,
1169 scsi_device_list_entry) {
1170 if (device->devtype != TYPE_DISK && device->devtype != TYPE_ZBC)
1171 continue;
1172 if (pqi_is_logical_device(device))
1173 continue;
1174 if (device->aio_handle == aio_handle)
1175 return device;
1176 }
1177
1178 return NULL;
1179}
1180
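/*
 * pqi_update_logical_drive_queue_depth() derives a logical drive's queue
 * depth by summing the queue depths of every physical disk referenced by
 * its RAID map.  If any member disk cannot be matched by AIO handle, the
 * map is discarded and offload is disabled for that drive, leaving the
 * default PQI_LOGICAL_DRIVE_DEFAULT_MAX_QUEUE_DEPTH in effect.
 */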
1181static void pqi_update_logical_drive_queue_depth(
1182 struct pqi_ctrl_info *ctrl_info, struct pqi_scsi_dev *logical_drive)
1183{
1184 unsigned int i;
1185 struct raid_map *raid_map;
1186 struct raid_map_disk_data *disk_data;
1187 struct pqi_scsi_dev *phys_disk;
1188 unsigned int num_phys_disks;
1189 unsigned int num_raid_map_entries;
1190 unsigned int queue_depth;
1191
1192 logical_drive->queue_depth = PQI_LOGICAL_DRIVE_DEFAULT_MAX_QUEUE_DEPTH;
1193
1194 raid_map = logical_drive->raid_map;
1195 if (!raid_map)
1196 return;
1197
1198 disk_data = raid_map->disk_data;
1199 num_phys_disks = get_unaligned_le16(&raid_map->layout_map_count) *
1200 (get_unaligned_le16(&raid_map->data_disks_per_row) +
1201 get_unaligned_le16(&raid_map->metadata_disks_per_row));
1202 num_raid_map_entries = num_phys_disks *
1203 get_unaligned_le16(&raid_map->row_cnt);
1204
1205 queue_depth = 0;
1206 for (i = 0; i < num_raid_map_entries; i++) {
1207 phys_disk = pqi_find_disk_by_aio_handle(ctrl_info,
1208 disk_data[i].aio_handle);
1209
1210 if (!phys_disk) {
1211 dev_warn(&ctrl_info->pci_dev->dev,
1212 "failed to find physical disk for logical drive %016llx\n",
1213 get_unaligned_be64(logical_drive->scsi3addr));
1214 logical_drive->offload_enabled = false;
1215 logical_drive->offload_enabled_pending = false;
1216 kfree(raid_map);
1217 logical_drive->raid_map = NULL;
1218 return;
1219 }
1220
1221 queue_depth += phys_disk->queue_depth;
1222 }
1223
1224 logical_drive->queue_depth = queue_depth;
1225}
1226
1227static void pqi_update_all_logical_drive_queue_depths(
1228 struct pqi_ctrl_info *ctrl_info)
1229{
1230 struct pqi_scsi_dev *device;
1231
1232 list_for_each_entry(device, &ctrl_info->scsi_device_list,
1233 scsi_device_list_entry) {
1234 if (device->devtype != TYPE_DISK && device->devtype != TYPE_ZBC)
1235 continue;
1236 if (!pqi_is_logical_device(device))
1237 continue;
1238 pqi_update_logical_drive_queue_depth(ctrl_info, device);
1239 }
1240}
1241
1242static void pqi_rescan_worker(struct work_struct *work)
1243{
1244 struct pqi_ctrl_info *ctrl_info;
1245
1246 ctrl_info = container_of(to_delayed_work(work), struct pqi_ctrl_info,
1247 rescan_work);
1248
1249 pqi_scan_scsi_devices(ctrl_info);
1250}
1251
1252static int pqi_add_device(struct pqi_ctrl_info *ctrl_info,
1253 struct pqi_scsi_dev *device)
1254{
1255 int rc;
1256
1257 if (pqi_is_logical_device(device))
1258 rc = scsi_add_device(ctrl_info->scsi_host, device->bus,
1259 device->target, device->lun);
1260 else
1261 rc = pqi_add_sas_device(ctrl_info->sas_host, device);
1262
1263 return rc;
1264}
1265
1266static inline void pqi_remove_device(struct pqi_ctrl_info *ctrl_info,
1267 struct pqi_scsi_dev *device)
1268{
1269 if (pqi_is_logical_device(device))
1270 scsi_remove_device(device->sdev);
1271 else
1272 pqi_remove_sas_device(device);
1273}
1274
1275/* Assumes the SCSI device list lock is held. */
1276
1277static struct pqi_scsi_dev *pqi_find_scsi_dev(struct pqi_ctrl_info *ctrl_info,
1278 int bus, int target, int lun)
1279{
1280 struct pqi_scsi_dev *device;
1281
1282 list_for_each_entry(device, &ctrl_info->scsi_device_list,
1283 scsi_device_list_entry)
1284 if (device->bus == bus && device->target == target &&
1285 device->lun == lun)
1286 return device;
1287
1288 return NULL;
1289}
1290
1291static inline bool pqi_device_equal(struct pqi_scsi_dev *dev1,
1292 struct pqi_scsi_dev *dev2)
1293{
1294 if (dev1->is_physical_device != dev2->is_physical_device)
1295 return false;
1296
1297 if (dev1->is_physical_device)
1298 return dev1->wwid == dev2->wwid;
1299
1300 return memcmp(dev1->volume_id, dev2->volume_id,
1301 sizeof(dev1->volume_id)) == 0;
1302}
1303
1304enum pqi_find_result {
1305 DEVICE_NOT_FOUND,
1306 DEVICE_CHANGED,
1307 DEVICE_SAME,
1308};
1309
1310static enum pqi_find_result pqi_scsi_find_entry(struct pqi_ctrl_info *ctrl_info,
1311 struct pqi_scsi_dev *device_to_find,
1312 struct pqi_scsi_dev **matching_device)
1313{
1314 struct pqi_scsi_dev *device;
1315
1316 list_for_each_entry(device, &ctrl_info->scsi_device_list,
1317 scsi_device_list_entry) {
1318 if (pqi_scsi3addr_equal(device_to_find->scsi3addr,
1319 device->scsi3addr)) {
1320 *matching_device = device;
1321 if (pqi_device_equal(device_to_find, device)) {
1322 if (device_to_find->volume_offline)
1323 return DEVICE_CHANGED;
1324 return DEVICE_SAME;
1325 }
1326 return DEVICE_CHANGED;
1327 }
1328 }
1329
1330 return DEVICE_NOT_FOUND;
1331}
1332
1333static void pqi_dev_info(struct pqi_ctrl_info *ctrl_info,
1334 char *action, struct pqi_scsi_dev *device)
1335{
1336 dev_info(&ctrl_info->pci_dev->dev,
1337 "%s scsi %d:%d:%d:%d: %s %.8s %.16s %-12s SSDSmartPathCap%c En%c Exp%c qd=%d\n",
1338 action,
1339 ctrl_info->scsi_host->host_no,
1340 device->bus,
1341 device->target,
1342 device->lun,
1343 scsi_device_type(device->devtype),
1344 device->vendor,
1345 device->model,
1346 pqi_raid_level_to_string(device->raid_level),
1347 device->offload_configured ? '+' : '-',
1348 device->offload_enabled_pending ? '+' : '-',
1349 device->expose_device ? '+' : '-',
1350 device->queue_depth);
1351}
1352
1353/* Assumes the SCSI device list lock is held. */
1354
1355static void pqi_scsi_update_device(struct pqi_scsi_dev *existing_device,
1356 struct pqi_scsi_dev *new_device)
1357{
1358 existing_device->devtype = new_device->devtype;
1359 existing_device->device_type = new_device->device_type;
1360 existing_device->bus = new_device->bus;
1361 if (new_device->target_lun_valid) {
1362 existing_device->target = new_device->target;
1363 existing_device->lun = new_device->lun;
1364 existing_device->target_lun_valid = true;
1365 }
1366
1367 /* By definition, the scsi3addr and wwid fields are already the same. */
1368
1369 existing_device->is_physical_device = new_device->is_physical_device;
1370 existing_device->expose_device = new_device->expose_device;
1371 existing_device->no_uld_attach = new_device->no_uld_attach;
1372 existing_device->aio_enabled = new_device->aio_enabled;
1373 memcpy(existing_device->vendor, new_device->vendor,
1374 sizeof(existing_device->vendor));
1375 memcpy(existing_device->model, new_device->model,
1376 sizeof(existing_device->model));
1377 existing_device->sas_address = new_device->sas_address;
1378 existing_device->raid_level = new_device->raid_level;
1379 existing_device->queue_depth = new_device->queue_depth;
1380 existing_device->aio_handle = new_device->aio_handle;
1381 existing_device->volume_status = new_device->volume_status;
1382 existing_device->active_path_index = new_device->active_path_index;
1383 existing_device->path_map = new_device->path_map;
1384 existing_device->bay = new_device->bay;
1385 memcpy(existing_device->box, new_device->box,
1386 sizeof(existing_device->box));
1387 memcpy(existing_device->phys_connector, new_device->phys_connector,
1388 sizeof(existing_device->phys_connector));
1389 existing_device->offload_configured = new_device->offload_configured;
1390 existing_device->offload_enabled = false;
1391 existing_device->offload_enabled_pending =
1392 new_device->offload_enabled_pending;
1393 existing_device->offload_to_mirror = 0;
1394 kfree(existing_device->raid_map);
1395 existing_device->raid_map = new_device->raid_map;
1396
1397 /* To prevent this from being freed later. */
1398 new_device->raid_map = NULL;
1399}
1400
1401static inline void pqi_free_device(struct pqi_scsi_dev *device)
1402{
1403 if (device) {
1404 kfree(device->raid_map);
1405 kfree(device);
1406 }
1407}
1408
1409/*
1410 * Called when exposing a new device to the OS fails in order to re-adjust
1411 * our internal SCSI device list to match the SCSI ML's view.
1412 */
1413
1414static inline void pqi_fixup_botched_add(struct pqi_ctrl_info *ctrl_info,
1415 struct pqi_scsi_dev *device)
1416{
1417 unsigned long flags;
1418
1419 spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);
1420 list_del(&device->scsi_device_list_entry);
1421 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
1422
1423 /* Allow the device structure to be freed later. */
1424 device->keep_device = false;
1425}
1426
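/*
 * pqi_update_device_list() reconciles the freshly scanned device list
 * with the driver's internal list while holding the device-list spinlock:
 * every existing device is first presumed gone, devices found again are
 * updated in place, missing ones are moved to a delete list, and
 * genuinely new ones are moved to an add list.  Only after the spinlock
 * is dropped are devices removed from / exposed to the SCSI midlayer and
 * queue-depth changes advertised, keeping the locked section short.
 */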
1427static void pqi_update_device_list(struct pqi_ctrl_info *ctrl_info,
1428 struct pqi_scsi_dev *new_device_list[], unsigned int num_new_devices)
1429{
1430 int rc;
1431 unsigned int i;
1432 unsigned long flags;
1433 enum pqi_find_result find_result;
1434 struct pqi_scsi_dev *device;
1435 struct pqi_scsi_dev *next;
1436 struct pqi_scsi_dev *matching_device;
1437 struct list_head add_list;
1438 struct list_head delete_list;
1439
1440 INIT_LIST_HEAD(&add_list);
1441 INIT_LIST_HEAD(&delete_list);
1442
1443 /*
1444 * The idea here is to do as little work as possible while holding the
1445 * spinlock. That's why we go to great pains to defer anything other
1446 * than updating the internal device list until after we release the
1447 * spinlock.
1448 */
1449
1450 spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);
1451
1452 /* Assume that all devices in the existing list have gone away. */
1453 list_for_each_entry(device, &ctrl_info->scsi_device_list,
1454 scsi_device_list_entry)
1455 device->device_gone = true;
1456
1457 for (i = 0; i < num_new_devices; i++) {
1458 device = new_device_list[i];
1459
1460 find_result = pqi_scsi_find_entry(ctrl_info, device,
1461 &matching_device);
1462
1463 switch (find_result) {
1464 case DEVICE_SAME:
1465 /*
1466 * The newly found device is already in the existing
1467 * device list.
1468 */
1469 device->new_device = false;
1470 matching_device->device_gone = false;
1471 pqi_scsi_update_device(matching_device, device);
1472 break;
1473 case DEVICE_NOT_FOUND:
1474 /*
1475 * The newly found device is NOT in the existing device
1476 * list.
1477 */
1478 device->new_device = true;
1479 break;
1480 case DEVICE_CHANGED:
1481 /*
1482 * The original device has gone away and we need to add
1483 * the new device.
1484 */
1485 device->new_device = true;
1486 break;
1487 default:
1488 WARN_ON(find_result);
1489 break;
1490 }
1491 }
1492
1493 /* Process all devices that have gone away. */
1494 list_for_each_entry_safe(device, next, &ctrl_info->scsi_device_list,
1495 scsi_device_list_entry) {
1496 if (device->device_gone) {
1497 list_del(&device->scsi_device_list_entry);
1498 list_add_tail(&device->delete_list_entry, &delete_list);
1499 }
1500 }
1501
1502 /* Process all new devices. */
1503 for (i = 0; i < num_new_devices; i++) {
1504 device = new_device_list[i];
1505 if (!device->new_device)
1506 continue;
1507 if (device->volume_offline)
1508 continue;
1509 list_add_tail(&device->scsi_device_list_entry,
1510 &ctrl_info->scsi_device_list);
1511 list_add_tail(&device->add_list_entry, &add_list);
1512 /* To prevent this device structure from being freed later. */
1513 device->keep_device = true;
1514 }
1515
1516 pqi_update_all_logical_drive_queue_depths(ctrl_info);
1517
1518 list_for_each_entry(device, &ctrl_info->scsi_device_list,
1519 scsi_device_list_entry)
1520 device->offload_enabled =
1521 device->offload_enabled_pending;
1522
1523 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
1524
1525 /* Remove all devices that have gone away. */
1526 list_for_each_entry_safe(device, next, &delete_list,
1527 delete_list_entry) {
1528 if (device->sdev)
1529 pqi_remove_device(ctrl_info, device);
1530 if (device->volume_offline) {
1531 pqi_dev_info(ctrl_info, "offline", device);
1532 pqi_show_volume_status(ctrl_info, device);
1533 } else {
1534 pqi_dev_info(ctrl_info, "removed", device);
1535 }
1536 list_del(&device->delete_list_entry);
1537 pqi_free_device(device);
1538 }
1539
1540 /*
1541 * Notify the SCSI ML if the queue depth of any existing device has
1542 * changed.
1543 */
1544 list_for_each_entry(device, &ctrl_info->scsi_device_list,
1545 scsi_device_list_entry) {
1546 if (device->sdev && device->queue_depth !=
1547 device->advertised_queue_depth) {
1548 device->advertised_queue_depth = device->queue_depth;
1549 scsi_change_queue_depth(device->sdev,
1550 device->advertised_queue_depth);
1551 }
1552 }
1553
1554 /* Expose any new devices. */
1555 list_for_each_entry_safe(device, next, &add_list, add_list_entry) {
1556 if (device->expose_device && !device->sdev) {
1557 rc = pqi_add_device(ctrl_info, device);
1558 if (rc) {
1559 dev_warn(&ctrl_info->pci_dev->dev,
1560 "scsi %d:%d:%d:%d addition failed, device not added\n",
1561 ctrl_info->scsi_host->host_no,
1562 device->bus, device->target,
1563 device->lun);
1564 pqi_fixup_botched_add(ctrl_info, device);
1565 continue;
1566 }
1567 }
1568 pqi_dev_info(ctrl_info, "added", device);
1569 }
1570}
1571
1572static bool pqi_is_supported_device(struct pqi_scsi_dev *device)
1573{
1574 bool is_supported = false;
1575
1576 switch (device->devtype) {
1577 case TYPE_DISK:
1578 case TYPE_ZBC:
1579 case TYPE_TAPE:
1580 case TYPE_MEDIUM_CHANGER:
1581 case TYPE_ENCLOSURE:
1582 is_supported = true;
1583 break;
1584 case TYPE_RAID:
1585 /*
1586 * Only support the HBA controller itself as a RAID
1587 * controller. If it's a RAID controller other than
1588 * the HBA itself (an external RAID controller, MSA500
1589 * or similar), we don't support it.
1590 */
1591 if (pqi_is_hba_lunid(device->scsi3addr))
1592 is_supported = true;
1593 break;
1594 }
1595
1596 return is_supported;
1597}
1598
1599static inline bool pqi_skip_device(u8 *scsi3addr,
1600 struct report_phys_lun_extended_entry *phys_lun_ext_entry)
1601{
1602 u8 device_flags;
1603
1604 if (!MASKED_DEVICE(scsi3addr))
1605 return false;
1606
1607 /* The device is masked. */
1608
1609 device_flags = phys_lun_ext_entry->device_flags;
1610
1611 if (device_flags & REPORT_PHYS_LUN_DEV_FLAG_NON_DISK) {
1612 /*
1613 * It's a non-disk device. We ignore all devices of this type
1614 * when they're masked.
1615 */
1616 return true;
1617 }
1618
1619 return false;
1620}
1621
1622static inline bool pqi_expose_device(struct pqi_scsi_dev *device)
1623{
1624 /* Expose all devices except for physical devices that are masked. */
1625 if (device->is_physical_device && MASKED_DEVICE(device->scsi3addr))
1626 return false;
1627
1628 return true;
1629}
1630
1631static int pqi_update_scsi_devices(struct pqi_ctrl_info *ctrl_info)
1632{
1633 int i;
1634 int rc;
1635 struct list_head new_device_list_head;
1636 struct report_phys_lun_extended *physdev_list = NULL;
1637 struct report_log_lun_extended *logdev_list = NULL;
1638 struct report_phys_lun_extended_entry *phys_lun_ext_entry;
1639 struct report_log_lun_extended_entry *log_lun_ext_entry;
1640 struct bmic_identify_physical_device *id_phys = NULL;
1641 u32 num_physicals;
1642 u32 num_logicals;
1643 struct pqi_scsi_dev **new_device_list = NULL;
1644 struct pqi_scsi_dev *device;
1645 struct pqi_scsi_dev *next;
1646 unsigned int num_new_devices;
1647 unsigned int num_valid_devices;
1648 bool is_physical_device;
1649 u8 *scsi3addr;
1650 static char *out_of_memory_msg =
1651 "out of memory, device discovery stopped";
1652
1653 INIT_LIST_HEAD(&new_device_list_head);
1654
1655 rc = pqi_get_device_lists(ctrl_info, &physdev_list, &logdev_list);
1656 if (rc)
1657 goto out;
1658
1659 if (physdev_list)
1660 num_physicals =
1661 get_unaligned_be32(&physdev_list->header.list_length)
1662 / sizeof(physdev_list->lun_entries[0]);
1663 else
1664 num_physicals = 0;
1665
1666 if (logdev_list)
1667 num_logicals =
1668 get_unaligned_be32(&logdev_list->header.list_length)
1669 / sizeof(logdev_list->lun_entries[0]);
1670 else
1671 num_logicals = 0;
1672
1673 if (num_physicals) {
1674 /*
1675 * We need this buffer for calls to pqi_get_physical_disk_info()
1676 * below. We allocate it here instead of inside
1677 * pqi_get_physical_disk_info() because it's a fairly large
1678 * buffer.
1679 */
1680 id_phys = kmalloc(sizeof(*id_phys), GFP_KERNEL);
1681 if (!id_phys) {
1682 dev_warn(&ctrl_info->pci_dev->dev, "%s\n",
1683 out_of_memory_msg);
1684 rc = -ENOMEM;
1685 goto out;
1686 }
1687 }
1688
1689 num_new_devices = num_physicals + num_logicals;
1690
1691 new_device_list = kmalloc(sizeof(*new_device_list) *
1692 num_new_devices, GFP_KERNEL);
1693 if (!new_device_list) {
1694 dev_warn(&ctrl_info->pci_dev->dev, "%s\n", out_of_memory_msg);
1695 rc = -ENOMEM;
1696 goto out;
1697 }
1698
1699 for (i = 0; i < num_new_devices; i++) {
1700 device = kzalloc(sizeof(*device), GFP_KERNEL);
1701 if (!device) {
1702 dev_warn(&ctrl_info->pci_dev->dev, "%s\n",
1703 out_of_memory_msg);
1704 rc = -ENOMEM;
1705 goto out;
1706 }
1707 list_add_tail(&device->new_device_list_entry,
1708 &new_device_list_head);
1709 }
1710
1711 device = NULL;
1712 num_valid_devices = 0;
1713
1714 for (i = 0; i < num_new_devices; i++) {
1715
1716 if (i < num_physicals) {
1717 is_physical_device = true;
1718 phys_lun_ext_entry = &physdev_list->lun_entries[i];
1719 log_lun_ext_entry = NULL;
1720 scsi3addr = phys_lun_ext_entry->lunid;
1721 } else {
1722 is_physical_device = false;
1723 phys_lun_ext_entry = NULL;
1724 log_lun_ext_entry =
1725 &logdev_list->lun_entries[i - num_physicals];
1726 scsi3addr = log_lun_ext_entry->lunid;
1727 }
1728
1729 if (is_physical_device &&
1730 pqi_skip_device(scsi3addr, phys_lun_ext_entry))
1731 continue;
1732
1733 if (device)
1734 device = list_next_entry(device, new_device_list_entry);
1735 else
1736 device = list_first_entry(&new_device_list_head,
1737 struct pqi_scsi_dev, new_device_list_entry);
1738
1739 memcpy(device->scsi3addr, scsi3addr, sizeof(device->scsi3addr));
1740 device->is_physical_device = is_physical_device;
1741 device->raid_level = SA_RAID_UNKNOWN;
1742
1743 /* Gather information about the device. */
1744 rc = pqi_get_device_info(ctrl_info, device);
1745 if (rc == -ENOMEM) {
1746 dev_warn(&ctrl_info->pci_dev->dev, "%s\n",
1747 out_of_memory_msg);
1748 goto out;
1749 }
1750 if (rc) {
1751 dev_warn(&ctrl_info->pci_dev->dev,
1752 "obtaining device info failed, skipping device %016llx\n",
1753 get_unaligned_be64(device->scsi3addr));
1754 rc = 0;
1755 continue;
1756 }
1757
1758 if (!pqi_is_supported_device(device))
1759 continue;
1760
1761 pqi_assign_bus_target_lun(device);
1762
1763 device->expose_device = pqi_expose_device(device);
1764
1765 if (device->is_physical_device) {
1766 device->wwid = phys_lun_ext_entry->wwid;
1767 if ((phys_lun_ext_entry->device_flags &
1768 REPORT_PHYS_LUN_DEV_FLAG_AIO_ENABLED) &&
1769 phys_lun_ext_entry->aio_handle)
1770 device->aio_enabled = true;
1771 } else {
1772 memcpy(device->volume_id, log_lun_ext_entry->volume_id,
1773 sizeof(device->volume_id));
1774 }
1775
1776 switch (device->devtype) {
1777 case TYPE_DISK:
1778 case TYPE_ZBC:
1779 case TYPE_ENCLOSURE:
1780 if (device->is_physical_device) {
1781 device->sas_address =
1782 get_unaligned_be64(&device->wwid);
1783 if (device->devtype == TYPE_DISK ||
1784 device->devtype == TYPE_ZBC) {
1785 device->aio_handle =
1786 phys_lun_ext_entry->aio_handle;
1787 pqi_get_physical_disk_info(ctrl_info,
1788 device, id_phys);
1789 }
1790 }
1791 break;
1792 }
1793
1794 new_device_list[num_valid_devices++] = device;
1795 }
1796
1797 pqi_update_device_list(ctrl_info, new_device_list, num_valid_devices);
1798
1799out:
1800 list_for_each_entry_safe(device, next, &new_device_list_head,
1801 new_device_list_entry) {
1802 if (device->keep_device)
1803 continue;
1804 list_del(&device->new_device_list_entry);
1805 pqi_free_device(device);
1806 }
1807
1808 kfree(new_device_list);
1809 kfree(physdev_list);
1810 kfree(logdev_list);
1811 kfree(id_phys);
1812
1813 return rc;
1814}
1815
1816static void pqi_remove_all_scsi_devices(struct pqi_ctrl_info *ctrl_info)
1817{
1818 unsigned long flags;
1819 struct pqi_scsi_dev *device;
1820 struct pqi_scsi_dev *next;
1821
1822 spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);
1823
1824 list_for_each_entry_safe(device, next, &ctrl_info->scsi_device_list,
1825 scsi_device_list_entry) {
1826 if (device->sdev)
1827 pqi_remove_device(ctrl_info, device);
1828 list_del(&device->scsi_device_list_entry);
1829 pqi_free_device(device);
1830 }
1831
1832 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
1833}
1834
1835static int pqi_scan_scsi_devices(struct pqi_ctrl_info *ctrl_info)
1836{
1837 int rc;
1838
1839 if (pqi_ctrl_offline(ctrl_info))
1840 return -ENXIO;
1841
1842 mutex_lock(&ctrl_info->scan_mutex);
1843
1844 rc = pqi_update_scsi_devices(ctrl_info);
1845 if (rc)
1846 pqi_schedule_rescan_worker(ctrl_info);
1847
1848 mutex_unlock(&ctrl_info->scan_mutex);
1849
1850 return rc;
1851}
1852
1853static void pqi_scan_start(struct Scsi_Host *shost)
1854{
1855 pqi_scan_scsi_devices(shost_to_hba(shost));
1856}
1857
1858/* Returns TRUE if scan is finished. */
1859
1860static int pqi_scan_finished(struct Scsi_Host *shost,
1861 unsigned long elapsed_time)
1862{
1863 struct pqi_ctrl_info *ctrl_info;
1864
1865 ctrl_info = shost_priv(shost);
1866
1867 return !mutex_is_locked(&ctrl_info->scan_mutex);
1868}
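/*
 * Note that scan_mutex doubles as the "scan in progress" indicator: the
 * midlayer keeps polling pqi_scan_finished() until the mutex taken in
 * pqi_scan_scsi_devices() has been released.
 */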
1869
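/*
 * Worked example for the tweak calculation done below (illustrative
 * only): for a volume with a 4096-byte block size, an I/O starting at
 * LBA 100 uses an encryption tweak of (100 * 4096) / 512 = 800; for a
 * 512-byte block size the tweak is simply the LBA itself.
 */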
1870static inline void pqi_set_encryption_info(
1871 struct pqi_encryption_info *encryption_info, struct raid_map *raid_map,
1872 u64 first_block)
1873{
1874 u32 volume_blk_size;
1875
1876 /*
1877 * Set the encryption tweak values based on logical block address.
1878 * If the block size is 512, the tweak value is equal to the LBA.
1879 * For other block sizes, tweak value is (LBA * block size) / 512.
1880 */
1881 volume_blk_size = get_unaligned_le32(&raid_map->volume_blk_size);
1882 if (volume_blk_size != 512)
1883 first_block = (first_block * volume_blk_size) / 512;
1884
1885 encryption_info->data_encryption_key_index =
1886 get_unaligned_le16(&raid_map->data_encryption_key_index);
1887 encryption_info->encrypt_tweak_lower = lower_32_bits(first_block);
1888 encryption_info->encrypt_tweak_upper = upper_32_bits(first_block);
1889}
1890
1891/*
1892 * Attempt to perform offload RAID mapping for a logical volume I/O.
1893 */
1894
1895#define PQI_RAID_BYPASS_INELIGIBLE 1
1896
1897static int pqi_raid_bypass_submit_scsi_cmd(struct pqi_ctrl_info *ctrl_info,
1898 struct pqi_scsi_dev *device, struct scsi_cmnd *scmd,
1899 struct pqi_queue_group *queue_group)
1900{
1901 struct raid_map *raid_map;
1902 bool is_write = false;
1903 u32 map_index;
1904 u64 first_block;
1905 u64 last_block;
1906 u32 block_cnt;
1907 u32 blocks_per_row;
1908 u64 first_row;
1909 u64 last_row;
1910 u32 first_row_offset;
1911 u32 last_row_offset;
1912 u32 first_column;
1913 u32 last_column;
1914 u64 r0_first_row;
1915 u64 r0_last_row;
1916 u32 r5or6_blocks_per_row;
1917 u64 r5or6_first_row;
1918 u64 r5or6_last_row;
1919 u32 r5or6_first_row_offset;
1920 u32 r5or6_last_row_offset;
1921 u32 r5or6_first_column;
1922 u32 r5or6_last_column;
1923 u16 data_disks_per_row;
1924 u32 total_disks_per_row;
1925 u16 layout_map_count;
1926 u32 stripesize;
1927 u16 strip_size;
1928 u32 first_group;
1929 u32 last_group;
1930 u32 current_group;
1931 u32 map_row;
1932 u32 aio_handle;
1933 u64 disk_block;
1934 u32 disk_block_cnt;
1935 u8 cdb[16];
1936 u8 cdb_length;
1937 int offload_to_mirror;
1938 struct pqi_encryption_info *encryption_info_ptr;
1939 struct pqi_encryption_info encryption_info;
1940#if BITS_PER_LONG == 32
1941 u64 tmpdiv;
1942#endif
1943
1944 /* Check for valid opcode, get LBA and block count. */
1945 switch (scmd->cmnd[0]) {
1946 case WRITE_6:
1947 is_write = true;
1948 /* fall through */
1949 case READ_6:
1950 first_block = (u64)get_unaligned_be16(&scmd->cmnd[2]);
1951 block_cnt = (u32)scmd->cmnd[4];
1952 if (block_cnt == 0)
1953 block_cnt = 256;
1954 break;
1955 case WRITE_10:
1956 is_write = true;
1957 /* fall through */
1958 case READ_10:
1959 first_block = (u64)get_unaligned_be32(&scmd->cmnd[2]);
1960 block_cnt = (u32)get_unaligned_be16(&scmd->cmnd[7]);
1961 break;
1962 case WRITE_12:
1963 is_write = true;
1964 /* fall through */
1965 case READ_12:
1966 first_block = (u64)get_unaligned_be32(&scmd->cmnd[2]);
1967 block_cnt = get_unaligned_be32(&scmd->cmnd[6]);
1968 break;
1969 case WRITE_16:
1970 is_write = true;
1971 /* fall through */
1972 case READ_16:
1973 first_block = get_unaligned_be64(&scmd->cmnd[2]);
1974 block_cnt = get_unaligned_be32(&scmd->cmnd[10]);
1975 break;
1976 default:
1977 /* Process via normal I/O path. */
1978 return PQI_RAID_BYPASS_INELIGIBLE;
1979 }
1980
1981 /* Check for write to non-RAID-0. */
1982 if (is_write && device->raid_level != SA_RAID_0)
1983 return PQI_RAID_BYPASS_INELIGIBLE;
1984
1985 if (unlikely(block_cnt == 0))
1986 return PQI_RAID_BYPASS_INELIGIBLE;
1987
1988 last_block = first_block + block_cnt - 1;
1989 raid_map = device->raid_map;
1990
1991 /* Check for invalid block or wraparound. */
1992 if (last_block >= get_unaligned_le64(&raid_map->volume_blk_cnt) ||
1993 last_block < first_block)
1994 return PQI_RAID_BYPASS_INELIGIBLE;
1995
1996 data_disks_per_row = get_unaligned_le16(&raid_map->data_disks_per_row);
1997 strip_size = get_unaligned_le16(&raid_map->strip_size);
1998 layout_map_count = get_unaligned_le16(&raid_map->layout_map_count);
1999
2000 /* Calculate stripe information for the request. */
2001 blocks_per_row = data_disks_per_row * strip_size;
2002#if BITS_PER_LONG == 32
2003 tmpdiv = first_block;
2004 do_div(tmpdiv, blocks_per_row);
2005 first_row = tmpdiv;
2006 tmpdiv = last_block;
2007 do_div(tmpdiv, blocks_per_row);
2008 last_row = tmpdiv;
2009 first_row_offset = (u32)(first_block - (first_row * blocks_per_row));
2010 last_row_offset = (u32)(last_block - (last_row * blocks_per_row));
2011 tmpdiv = first_row_offset;
2012 do_div(tmpdiv, strip_size);
2013 first_column = tmpdiv;
2014 tmpdiv = last_row_offset;
2015 do_div(tmpdiv, strip_size);
2016 last_column = tmpdiv;
2017#else
2018 first_row = first_block / blocks_per_row;
2019 last_row = last_block / blocks_per_row;
2020 first_row_offset = (u32)(first_block - (first_row * blocks_per_row));
2021 last_row_offset = (u32)(last_block - (last_row * blocks_per_row));
2022 first_column = first_row_offset / strip_size;
2023 last_column = last_row_offset / strip_size;
2024#endif
2025
2026 /* If this isn't a single row/column then give to the controller. */
2027 if (first_row != last_row || first_column != last_column)
2028 return PQI_RAID_BYPASS_INELIGIBLE;
2029
2030 /* Proceeding with driver mapping. */
2031 total_disks_per_row = data_disks_per_row +
2032 get_unaligned_le16(&raid_map->metadata_disks_per_row);
2033 map_row = ((u32)(first_row >> raid_map->parity_rotation_shift)) %
2034 get_unaligned_le16(&raid_map->row_cnt);
2035 map_index = (map_row * total_disks_per_row) + first_column;
2036
2037 /* RAID 1 */
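	/*
	 * Alternate between the primary drive and its mirror on successive
	 * bypass requests so that reads are spread across both copies.
	 */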
2038 if (device->raid_level == SA_RAID_1) {
2039 if (device->offload_to_mirror)
2040 map_index += data_disks_per_row;
2041 device->offload_to_mirror = !device->offload_to_mirror;
2042 } else if (device->raid_level == SA_RAID_ADM) {
2043 /* RAID ADM */
2044 /*
2045 * Handles N-way mirrors (R1-ADM) and R10 with # of drives
2046 * divisible by 3.
2047 */
2048 offload_to_mirror = device->offload_to_mirror;
2049 if (offload_to_mirror == 0) {
2050 /* use physical disk in the first mirrored group. */
2051 map_index %= data_disks_per_row;
2052 } else {
2053 do {
2054 /*
2055 * Determine mirror group that map_index
2056 * indicates.
2057 */
2058 current_group = map_index / data_disks_per_row;
2059
2060 if (offload_to_mirror != current_group) {
2061 if (current_group <
2062 layout_map_count - 1) {
2063 /*
2064 * Select raid index from
2065 * next group.
2066 */
2067 map_index += data_disks_per_row;
2068 current_group++;
2069 } else {
2070 /*
2071 * Select raid index from first
2072 * group.
2073 */
2074 map_index %= data_disks_per_row;
2075 current_group = 0;
2076 }
2077 }
2078 } while (offload_to_mirror != current_group);
2079 }
2080
2081 /* Set mirror group to use next time. */
2082 offload_to_mirror =
2083 (offload_to_mirror >= layout_map_count - 1) ?
2084 0 : offload_to_mirror + 1;
2085 WARN_ON(offload_to_mirror >= layout_map_count);
2086 device->offload_to_mirror = offload_to_mirror;
2087 /*
2088 * Avoid direct use of device->offload_to_mirror within this
2089 * function since multiple threads might simultaneously
 2090	 * increment it beyond the valid range of 0 to layout_map_count - 1.
2091 */
2092 } else if ((device->raid_level == SA_RAID_5 ||
2093 device->raid_level == SA_RAID_6) && layout_map_count > 1) {
2094 /* RAID 50/60 */
2095 /* Verify first and last block are in same RAID group */
2096 r5or6_blocks_per_row = strip_size * data_disks_per_row;
2097 stripesize = r5or6_blocks_per_row * layout_map_count;
2098#if BITS_PER_LONG == 32
2099 tmpdiv = first_block;
2100 first_group = do_div(tmpdiv, stripesize);
2101 tmpdiv = first_group;
2102 do_div(tmpdiv, r5or6_blocks_per_row);
2103 first_group = tmpdiv;
2104 tmpdiv = last_block;
2105 last_group = do_div(tmpdiv, stripesize);
2106 tmpdiv = last_group;
2107 do_div(tmpdiv, r5or6_blocks_per_row);
2108 last_group = tmpdiv;
2109#else
2110 first_group = (first_block % stripesize) / r5or6_blocks_per_row;
2111 last_group = (last_block % stripesize) / r5or6_blocks_per_row;
2112#endif
2113 if (first_group != last_group)
2114 return PQI_RAID_BYPASS_INELIGIBLE;
2115
2116 /* Verify request is in a single row of RAID 5/6 */
2117#if BITS_PER_LONG == 32
2118 tmpdiv = first_block;
2119 do_div(tmpdiv, stripesize);
2120 first_row = r5or6_first_row = r0_first_row = tmpdiv;
2121 tmpdiv = last_block;
2122 do_div(tmpdiv, stripesize);
2123 r5or6_last_row = r0_last_row = tmpdiv;
2124#else
2125 first_row = r5or6_first_row = r0_first_row =
2126 first_block / stripesize;
2127 r5or6_last_row = r0_last_row = last_block / stripesize;
2128#endif
2129 if (r5or6_first_row != r5or6_last_row)
2130 return PQI_RAID_BYPASS_INELIGIBLE;
2131
2132 /* Verify request is in a single column */
2133#if BITS_PER_LONG == 32
2134 tmpdiv = first_block;
2135 first_row_offset = do_div(tmpdiv, stripesize);
2136 tmpdiv = first_row_offset;
2137 first_row_offset = (u32)do_div(tmpdiv, r5or6_blocks_per_row);
2138 r5or6_first_row_offset = first_row_offset;
2139 tmpdiv = last_block;
2140 r5or6_last_row_offset = do_div(tmpdiv, stripesize);
2141 tmpdiv = r5or6_last_row_offset;
2142 r5or6_last_row_offset = do_div(tmpdiv, r5or6_blocks_per_row);
2143 tmpdiv = r5or6_first_row_offset;
2144 do_div(tmpdiv, strip_size);
2145 first_column = r5or6_first_column = tmpdiv;
2146 tmpdiv = r5or6_last_row_offset;
2147 do_div(tmpdiv, strip_size);
2148 r5or6_last_column = tmpdiv;
2149#else
2150 first_row_offset = r5or6_first_row_offset =
2151 (u32)((first_block % stripesize) %
2152 r5or6_blocks_per_row);
2153
2154 r5or6_last_row_offset =
2155 (u32)((last_block % stripesize) %
2156 r5or6_blocks_per_row);
2157
2158 first_column = r5or6_first_row_offset / strip_size;
2159 r5or6_first_column = first_column;
2160 r5or6_last_column = r5or6_last_row_offset / strip_size;
2161#endif
2162 if (r5or6_first_column != r5or6_last_column)
2163 return PQI_RAID_BYPASS_INELIGIBLE;
2164
2165 /* Request is eligible */
2166 map_row =
2167 ((u32)(first_row >> raid_map->parity_rotation_shift)) %
2168 get_unaligned_le16(&raid_map->row_cnt);
2169
2170 map_index = (first_group *
2171 (get_unaligned_le16(&raid_map->row_cnt) *
2172 total_disks_per_row)) +
2173 (map_row * total_disks_per_row) + first_column;
2174 }
2175
2176 if (unlikely(map_index >= RAID_MAP_MAX_ENTRIES))
2177 return PQI_RAID_BYPASS_INELIGIBLE;
2178
2179 aio_handle = raid_map->disk_data[map_index].aio_handle;
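	/*
	 * Translate the volume-relative LBA into an LBA on the selected
	 * physical disk: the member disk's starting block, plus one strip
	 * per row already consumed, plus the offset within the strip.
	 */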
2180 disk_block = get_unaligned_le64(&raid_map->disk_starting_blk) +
2181 first_row * strip_size +
2182 (first_row_offset - first_column * strip_size);
2183 disk_block_cnt = block_cnt;
2184
2185 /* Handle differing logical/physical block sizes. */
2186 if (raid_map->phys_blk_shift) {
2187 disk_block <<= raid_map->phys_blk_shift;
2188 disk_block_cnt <<= raid_map->phys_blk_shift;
2189 }
2190
2191 if (unlikely(disk_block_cnt > 0xffff))
2192 return PQI_RAID_BYPASS_INELIGIBLE;
2193
2194 /* Build the new CDB for the physical disk I/O. */
2195 if (disk_block > 0xffffffff) {
2196 cdb[0] = is_write ? WRITE_16 : READ_16;
2197 cdb[1] = 0;
2198 put_unaligned_be64(disk_block, &cdb[2]);
2199 put_unaligned_be32(disk_block_cnt, &cdb[10]);
2200 cdb[14] = 0;
2201 cdb[15] = 0;
2202 cdb_length = 16;
2203 } else {
2204 cdb[0] = is_write ? WRITE_10 : READ_10;
2205 cdb[1] = 0;
2206 put_unaligned_be32((u32)disk_block, &cdb[2]);
2207 cdb[6] = 0;
2208 put_unaligned_be16((u16)disk_block_cnt, &cdb[7]);
2209 cdb[9] = 0;
2210 cdb_length = 10;
2211 }
2212
2213 if (get_unaligned_le16(&raid_map->flags) &
2214 RAID_MAP_ENCRYPTION_ENABLED) {
2215 pqi_set_encryption_info(&encryption_info, raid_map,
2216 first_block);
2217 encryption_info_ptr = &encryption_info;
2218 } else {
2219 encryption_info_ptr = NULL;
2220 }
2221
2222 return pqi_aio_submit_io(ctrl_info, scmd, aio_handle,
2223 cdb, cdb_length, queue_group, encryption_info_ptr);
2224}
2225
2226#define PQI_STATUS_IDLE 0x0
2227
2228#define PQI_CREATE_ADMIN_QUEUE_PAIR 1
2229#define PQI_DELETE_ADMIN_QUEUE_PAIR 2
2230
2231#define PQI_DEVICE_STATE_POWER_ON_AND_RESET 0x0
2232#define PQI_DEVICE_STATE_STATUS_AVAILABLE 0x1
2233#define PQI_DEVICE_STATE_ALL_REGISTERS_READY 0x2
2234#define PQI_DEVICE_STATE_ADMIN_QUEUE_PAIR_READY 0x3
2235#define PQI_DEVICE_STATE_ERROR 0x4
2236
2237#define PQI_MODE_READY_TIMEOUT_SECS 30
2238#define PQI_MODE_READY_POLL_INTERVAL_MSECS 1
2239
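/*
 * Poll until the controller reports PQI mode: first the PQI device signature
 * must appear, then the function and status code must report idle, and
 * finally the device status must indicate that all registers are ready.
 */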
2240static int pqi_wait_for_pqi_mode_ready(struct pqi_ctrl_info *ctrl_info)
2241{
2242 struct pqi_device_registers __iomem *pqi_registers;
2243 unsigned long timeout;
2244 u64 signature;
2245 u8 status;
2246
2247 pqi_registers = ctrl_info->pqi_registers;
2248 timeout = (PQI_MODE_READY_TIMEOUT_SECS * HZ) + jiffies;
2249
2250 while (1) {
2251 signature = readq(&pqi_registers->signature);
2252 if (memcmp(&signature, PQI_DEVICE_SIGNATURE,
2253 sizeof(signature)) == 0)
2254 break;
2255 if (time_after(jiffies, timeout)) {
2256 dev_err(&ctrl_info->pci_dev->dev,
2257 "timed out waiting for PQI signature\n");
2258 return -ETIMEDOUT;
2259 }
2260 msleep(PQI_MODE_READY_POLL_INTERVAL_MSECS);
2261 }
2262
2263 while (1) {
2264 status = readb(&pqi_registers->function_and_status_code);
2265 if (status == PQI_STATUS_IDLE)
2266 break;
2267 if (time_after(jiffies, timeout)) {
2268 dev_err(&ctrl_info->pci_dev->dev,
2269 "timed out waiting for PQI IDLE\n");
2270 return -ETIMEDOUT;
2271 }
2272 msleep(PQI_MODE_READY_POLL_INTERVAL_MSECS);
2273 }
2274
2275 while (1) {
2276 if (readl(&pqi_registers->device_status) ==
2277 PQI_DEVICE_STATE_ALL_REGISTERS_READY)
2278 break;
2279 if (time_after(jiffies, timeout)) {
2280 dev_err(&ctrl_info->pci_dev->dev,
2281 "timed out waiting for PQI all registers ready\n");
2282 return -ETIMEDOUT;
2283 }
2284 msleep(PQI_MODE_READY_POLL_INTERVAL_MSECS);
2285 }
2286
2287 return 0;
2288}
2289
2290static inline void pqi_aio_path_disabled(struct pqi_io_request *io_request)
2291{
2292 struct pqi_scsi_dev *device;
2293
2294 device = io_request->scmd->device->hostdata;
2295 device->offload_enabled = false;
2296}
2297
2298static inline void pqi_take_device_offline(struct scsi_device *sdev)
2299{
2300 struct pqi_ctrl_info *ctrl_info;
2301
2302 if (scsi_device_online(sdev)) {
2303 scsi_device_set_state(sdev, SDEV_OFFLINE);
2304 ctrl_info = shost_to_hba(sdev->host);
2305 schedule_delayed_work(&ctrl_info->rescan_work, 0);
2306 }
2307}
2308
2309static void pqi_process_raid_io_error(struct pqi_io_request *io_request)
2310{
2311 u8 scsi_status;
2312 u8 host_byte;
2313 struct scsi_cmnd *scmd;
2314 struct pqi_raid_error_info *error_info;
2315 size_t sense_data_length;
2316 int residual_count;
2317 int xfer_count;
2318 struct scsi_sense_hdr sshdr;
2319
2320 scmd = io_request->scmd;
2321 if (!scmd)
2322 return;
2323
2324 error_info = io_request->error_info;
2325 scsi_status = error_info->status;
2326 host_byte = DID_OK;
2327
2328 if (error_info->data_out_result == PQI_DATA_IN_OUT_UNDERFLOW) {
2329 xfer_count =
2330 get_unaligned_le32(&error_info->data_out_transferred);
2331 residual_count = scsi_bufflen(scmd) - xfer_count;
2332 scsi_set_resid(scmd, residual_count);
2333 if (xfer_count < scmd->underflow)
2334 host_byte = DID_SOFT_ERROR;
2335 }
2336
2337 sense_data_length = get_unaligned_le16(&error_info->sense_data_length);
2338 if (sense_data_length == 0)
2339 sense_data_length =
2340 get_unaligned_le16(&error_info->response_data_length);
2341 if (sense_data_length) {
2342 if (sense_data_length > sizeof(error_info->data))
2343 sense_data_length = sizeof(error_info->data);
2344
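		/*
		 * A check condition with sense key HARDWARE ERROR and
		 * ASC/ASCQ 0x3e/0x01 (logical unit failure) means the device
		 * itself has failed, so take it offline.
		 */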
2345 if (scsi_status == SAM_STAT_CHECK_CONDITION &&
2346 scsi_normalize_sense(error_info->data,
2347 sense_data_length, &sshdr) &&
2348 sshdr.sense_key == HARDWARE_ERROR &&
2349 sshdr.asc == 0x3e &&
2350 sshdr.ascq == 0x1) {
2351 pqi_take_device_offline(scmd->device);
2352 host_byte = DID_NO_CONNECT;
2353 }
2354
2355 if (sense_data_length > SCSI_SENSE_BUFFERSIZE)
2356 sense_data_length = SCSI_SENSE_BUFFERSIZE;
2357 memcpy(scmd->sense_buffer, error_info->data,
2358 sense_data_length);
2359 }
2360
2361 scmd->result = scsi_status;
2362 set_host_byte(scmd, host_byte);
2363}
2364
2365static void pqi_process_aio_io_error(struct pqi_io_request *io_request)
2366{
2367 u8 scsi_status;
2368 u8 host_byte;
2369 struct scsi_cmnd *scmd;
2370 struct pqi_aio_error_info *error_info;
2371 size_t sense_data_length;
2372 int residual_count;
2373 int xfer_count;
2374 bool device_offline;
2375
2376 scmd = io_request->scmd;
2377 error_info = io_request->error_info;
2378 host_byte = DID_OK;
2379 sense_data_length = 0;
2380 device_offline = false;
2381
2382 switch (error_info->service_response) {
2383 case PQI_AIO_SERV_RESPONSE_COMPLETE:
2384 scsi_status = error_info->status;
2385 break;
2386 case PQI_AIO_SERV_RESPONSE_FAILURE:
2387 switch (error_info->status) {
2388 case PQI_AIO_STATUS_IO_ABORTED:
2389 scsi_status = SAM_STAT_TASK_ABORTED;
2390 break;
2391 case PQI_AIO_STATUS_UNDERRUN:
2392 scsi_status = SAM_STAT_GOOD;
2393 residual_count = get_unaligned_le32(
2394 &error_info->residual_count);
2395 scsi_set_resid(scmd, residual_count);
2396 xfer_count = scsi_bufflen(scmd) - residual_count;
2397 if (xfer_count < scmd->underflow)
2398 host_byte = DID_SOFT_ERROR;
2399 break;
2400 case PQI_AIO_STATUS_OVERRUN:
2401 scsi_status = SAM_STAT_GOOD;
2402 break;
2403 case PQI_AIO_STATUS_AIO_PATH_DISABLED:
2404 pqi_aio_path_disabled(io_request);
2405 scsi_status = SAM_STAT_GOOD;
2406 io_request->status = -EAGAIN;
2407 break;
2408 case PQI_AIO_STATUS_NO_PATH_TO_DEVICE:
2409 case PQI_AIO_STATUS_INVALID_DEVICE:
2410 device_offline = true;
2411 pqi_take_device_offline(scmd->device);
2412 host_byte = DID_NO_CONNECT;
2413 scsi_status = SAM_STAT_CHECK_CONDITION;
2414 break;
2415 case PQI_AIO_STATUS_IO_ERROR:
2416 default:
2417 scsi_status = SAM_STAT_CHECK_CONDITION;
2418 break;
2419 }
2420 break;
2421 case PQI_AIO_SERV_RESPONSE_TMF_COMPLETE:
2422 case PQI_AIO_SERV_RESPONSE_TMF_SUCCEEDED:
2423 scsi_status = SAM_STAT_GOOD;
2424 break;
2425 case PQI_AIO_SERV_RESPONSE_TMF_REJECTED:
2426 case PQI_AIO_SERV_RESPONSE_TMF_INCORRECT_LUN:
2427 default:
2428 scsi_status = SAM_STAT_CHECK_CONDITION;
2429 break;
2430 }
2431
2432 if (error_info->data_present) {
2433 sense_data_length =
2434 get_unaligned_le16(&error_info->data_length);
2435 if (sense_data_length) {
2436 if (sense_data_length > sizeof(error_info->data))
2437 sense_data_length = sizeof(error_info->data);
2438 if (sense_data_length > SCSI_SENSE_BUFFERSIZE)
2439 sense_data_length = SCSI_SENSE_BUFFERSIZE;
2440 memcpy(scmd->sense_buffer, error_info->data,
2441 sense_data_length);
2442 }
2443 }
2444
2445 if (device_offline && sense_data_length == 0)
2446 scsi_build_sense_buffer(0, scmd->sense_buffer, HARDWARE_ERROR,
2447 0x3e, 0x1);
2448
2449 scmd->result = scsi_status;
2450 set_host_byte(scmd, host_byte);
2451}
2452
2453static void pqi_process_io_error(unsigned int iu_type,
2454 struct pqi_io_request *io_request)
2455{
2456 switch (iu_type) {
2457 case PQI_RESPONSE_IU_RAID_PATH_IO_ERROR:
2458 pqi_process_raid_io_error(io_request);
2459 break;
2460 case PQI_RESPONSE_IU_AIO_PATH_IO_ERROR:
2461 pqi_process_aio_io_error(io_request);
2462 break;
2463 }
2464}
2465
2466static int pqi_interpret_task_management_response(
2467 struct pqi_task_management_response *response)
2468{
2469 int rc;
2470
2471 switch (response->response_code) {
2472 case SOP_TASK_MANAGEMENT_FUNCTION_COMPLETE:
2473 case SOP_TASK_MANAGEMENT_FUNCTION_SUCCEEDED:
2474 rc = 0;
2475 break;
2476 default:
2477 rc = -EIO;
2478 break;
2479 }
2480
2481 return rc;
2482}
2483
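/*
 * Drain completed responses from a queue group's outbound queue, match each
 * response to its originating I/O request via the request ID, record any
 * error information, and invoke the request's completion callback.
 */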
2484static unsigned int pqi_process_io_intr(struct pqi_ctrl_info *ctrl_info,
2485 struct pqi_queue_group *queue_group)
2486{
2487 unsigned int num_responses;
2488 pqi_index_t oq_pi;
2489 pqi_index_t oq_ci;
2490 struct pqi_io_request *io_request;
2491 struct pqi_io_response *response;
2492 u16 request_id;
2493
2494 num_responses = 0;
2495 oq_ci = queue_group->oq_ci_copy;
2496
2497 while (1) {
2498 oq_pi = *queue_group->oq_pi;
2499 if (oq_pi == oq_ci)
2500 break;
2501
2502 num_responses++;
2503 response = queue_group->oq_element_array +
2504 (oq_ci * PQI_OPERATIONAL_OQ_ELEMENT_LENGTH);
2505
2506 request_id = get_unaligned_le16(&response->request_id);
2507 WARN_ON(request_id >= ctrl_info->max_io_slots);
2508
2509 io_request = &ctrl_info->io_request_pool[request_id];
2510 WARN_ON(atomic_read(&io_request->refcount) == 0);
2511
2512 switch (response->header.iu_type) {
2513 case PQI_RESPONSE_IU_RAID_PATH_IO_SUCCESS:
2514 case PQI_RESPONSE_IU_AIO_PATH_IO_SUCCESS:
2515 case PQI_RESPONSE_IU_GENERAL_MANAGEMENT:
2516 break;
2517 case PQI_RESPONSE_IU_TASK_MANAGEMENT:
2518 io_request->status =
2519 pqi_interpret_task_management_response(
2520 (void *)response);
2521 break;
2522 case PQI_RESPONSE_IU_AIO_PATH_DISABLED:
2523 pqi_aio_path_disabled(io_request);
2524 io_request->status = -EAGAIN;
2525 break;
2526 case PQI_RESPONSE_IU_RAID_PATH_IO_ERROR:
2527 case PQI_RESPONSE_IU_AIO_PATH_IO_ERROR:
2528 io_request->error_info = ctrl_info->error_buffer +
2529 (get_unaligned_le16(&response->error_index) *
2530 PQI_ERROR_BUFFER_ELEMENT_LENGTH);
2531 pqi_process_io_error(response->header.iu_type,
2532 io_request);
2533 break;
2534 default:
2535 dev_err(&ctrl_info->pci_dev->dev,
2536 "unexpected IU type: 0x%x\n",
2537 response->header.iu_type);
2538 WARN_ON(response->header.iu_type);
2539 break;
2540 }
2541
2542 io_request->io_complete_callback(io_request,
2543 io_request->context);
2544
2545 /*
2546 * Note that the I/O request structure CANNOT BE TOUCHED after
2547 * returning from the I/O completion callback!
2548 */
2549
2550 oq_ci = (oq_ci + 1) % ctrl_info->num_elements_per_oq;
2551 }
2552
2553 if (num_responses) {
2554 queue_group->oq_ci_copy = oq_ci;
2555 writel(oq_ci, queue_group->oq_ci);
2556 }
2557
2558 return num_responses;
2559}
2560
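/*
 * Standard circular-queue accounting: one element is always left unused so
 * that a full queue (pi one behind ci) can be distinguished from an empty
 * queue (pi == ci).
 */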
2561static inline unsigned int pqi_num_elements_free(unsigned int pi,
2562 unsigned int ci,
2563 unsigned int elements_in_queue)
2564{
2565 unsigned int num_elements_used;
2566
2567 if (pi >= ci)
2568 num_elements_used = pi - ci;
2569 else
2570 num_elements_used = elements_in_queue - ci + pi;
2571
2572 return elements_in_queue - num_elements_used - 1;
2573}
2574
2575#define PQI_EVENT_ACK_TIMEOUT 30
2576
2577static void pqi_start_event_ack(struct pqi_ctrl_info *ctrl_info,
2578 struct pqi_event_acknowledge_request *iu, size_t iu_length)
2579{
2580 pqi_index_t iq_pi;
2581 pqi_index_t iq_ci;
2582 unsigned long flags;
2583 void *next_element;
2584 unsigned long timeout;
2585 struct pqi_queue_group *queue_group;
2586
2587 queue_group = &ctrl_info->queue_groups[PQI_DEFAULT_QUEUE_GROUP];
2588 put_unaligned_le16(queue_group->oq_id, &iu->header.response_queue_id);
2589
2590 timeout = (PQI_EVENT_ACK_TIMEOUT * HZ) + jiffies;
2591
2592 while (1) {
2593 spin_lock_irqsave(&queue_group->submit_lock[RAID_PATH], flags);
2594
2595 iq_pi = queue_group->iq_pi_copy[RAID_PATH];
2596 iq_ci = *queue_group->iq_ci[RAID_PATH];
2597
2598 if (pqi_num_elements_free(iq_pi, iq_ci,
2599 ctrl_info->num_elements_per_iq))
2600 break;
2601
2602 spin_unlock_irqrestore(
2603 &queue_group->submit_lock[RAID_PATH], flags);
2604
2605 if (time_after(jiffies, timeout)) {
2606 dev_err(&ctrl_info->pci_dev->dev,
2607 "sending event acknowledge timed out\n");
2608 return;
2609 }
2610 }
2611
2612 next_element = queue_group->iq_element_array[RAID_PATH] +
2613 (iq_pi * PQI_OPERATIONAL_IQ_ELEMENT_LENGTH);
2614
2615 memcpy(next_element, iu, iu_length);
2616
2617 iq_pi = (iq_pi + 1) % ctrl_info->num_elements_per_iq;
2618
2619 queue_group->iq_pi_copy[RAID_PATH] = iq_pi;
2620
2621 /*
2622 * This write notifies the controller that an IU is available to be
2623 * processed.
2624 */
2625 writel(iq_pi, queue_group->iq_pi[RAID_PATH]);
2626
2627 spin_unlock_irqrestore(&queue_group->submit_lock[RAID_PATH], flags);
2628
2629 /*
2630 * We have to special-case this type of request because the firmware
2631 * does not generate an interrupt when this type of request completes.
2632 * Therefore, we have to poll until we see that the firmware has
2633 * consumed the request before we move on.
2634 */
2635
2636 timeout = (PQI_EVENT_ACK_TIMEOUT * HZ) + jiffies;
2637
2638 while (1) {
2639 if (*queue_group->iq_ci[RAID_PATH] == iq_pi)
2640 break;
2641 if (time_after(jiffies, timeout)) {
2642 dev_err(&ctrl_info->pci_dev->dev,
2643 "completing event acknowledge timed out\n");
2644 break;
2645 }
2646 usleep_range(1000, 2000);
2647 }
2648}
2649
2650static void pqi_acknowledge_event(struct pqi_ctrl_info *ctrl_info,
2651 struct pqi_event *event)
2652{
2653 struct pqi_event_acknowledge_request request;
2654
2655 memset(&request, 0, sizeof(request));
2656
2657 request.header.iu_type = PQI_REQUEST_IU_ACKNOWLEDGE_VENDOR_EVENT;
2658 put_unaligned_le16(sizeof(request) - PQI_REQUEST_HEADER_LENGTH,
2659 &request.header.iu_length);
2660 request.event_type = event->event_type;
2661 request.event_id = event->event_id;
2662 request.additional_event_id = event->additional_event_id;
2663
2664 pqi_start_event_ack(ctrl_info, &request, sizeof(request));
2665}
2666
2667static void pqi_event_worker(struct work_struct *work)
2668{
2669 unsigned int i;
2670 struct pqi_ctrl_info *ctrl_info;
2671 struct pqi_event *pending_event;
2672 bool got_non_heartbeat_event = false;
2673
2674 ctrl_info = container_of(work, struct pqi_ctrl_info, event_work);
2675
2676 pending_event = ctrl_info->pending_events;
2677 for (i = 0; i < PQI_NUM_SUPPORTED_EVENTS; i++) {
2678 if (pending_event->pending) {
2679 pending_event->pending = false;
2680 pqi_acknowledge_event(ctrl_info, pending_event);
2681 if (i != PQI_EVENT_HEARTBEAT)
2682 got_non_heartbeat_event = true;
2683 }
2684 pending_event++;
2685 }
2686
2687 if (got_non_heartbeat_event)
2688 pqi_schedule_rescan_worker(ctrl_info);
2689}
2690
2691static void pqi_take_ctrl_offline(struct pqi_ctrl_info *ctrl_info)
2692{
2693 unsigned int i;
2694 unsigned int path;
2695 struct pqi_queue_group *queue_group;
2696 unsigned long flags;
2697 struct pqi_io_request *io_request;
2698 struct pqi_io_request *next;
2699 struct scsi_cmnd *scmd;
2700
2701 ctrl_info->controller_online = false;
2702 dev_err(&ctrl_info->pci_dev->dev, "controller offline\n");
2703
2704 for (i = 0; i < ctrl_info->num_queue_groups; i++) {
2705 queue_group = &ctrl_info->queue_groups[i];
2706
2707 for (path = 0; path < 2; path++) {
2708 spin_lock_irqsave(
2709 &queue_group->submit_lock[path], flags);
2710
2711 list_for_each_entry_safe(io_request, next,
2712 &queue_group->request_list[path],
2713 request_list_entry) {
2714
2715 scmd = io_request->scmd;
2716 if (scmd) {
2717 set_host_byte(scmd, DID_NO_CONNECT);
2718 pqi_scsi_done(scmd);
2719 }
2720
2721 list_del(&io_request->request_list_entry);
2722 }
2723
2724 spin_unlock_irqrestore(
2725 &queue_group->submit_lock[path], flags);
2726 }
2727 }
2728}
2729
2730#define PQI_HEARTBEAT_TIMER_INTERVAL (5 * HZ)
2731#define PQI_MAX_HEARTBEAT_REQUESTS 5
2732
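/*
 * Runs every PQI_HEARTBEAT_TIMER_INTERVAL. If no interrupts have arrived
 * since the previous run, flag the heartbeat event for acknowledgement so the
 * controller posts a fresh one; after PQI_MAX_HEARTBEAT_REQUESTS consecutive
 * silent intervals, assume the controller is hung and take it offline.
 */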
2733static void pqi_heartbeat_timer_handler(unsigned long data)
2734{
2735 int num_interrupts;
2736 struct pqi_ctrl_info *ctrl_info = (struct pqi_ctrl_info *)data;
2737
2738 num_interrupts = atomic_read(&ctrl_info->num_interrupts);
2739
2740 if (num_interrupts == ctrl_info->previous_num_interrupts) {
2741 ctrl_info->num_heartbeats_requested++;
2742 if (ctrl_info->num_heartbeats_requested >
2743 PQI_MAX_HEARTBEAT_REQUESTS) {
2744 pqi_take_ctrl_offline(ctrl_info);
2745 return;
2746 }
2747 ctrl_info->pending_events[PQI_EVENT_HEARTBEAT].pending = true;
2748 schedule_work(&ctrl_info->event_work);
2749 } else {
2750 ctrl_info->num_heartbeats_requested = 0;
2751 }
2752
2753 ctrl_info->previous_num_interrupts = num_interrupts;
2754 mod_timer(&ctrl_info->heartbeat_timer,
2755 jiffies + PQI_HEARTBEAT_TIMER_INTERVAL);
2756}
2757
2758static void pqi_start_heartbeat_timer(struct pqi_ctrl_info *ctrl_info)
2759{
2760 ctrl_info->previous_num_interrupts =
2761 atomic_read(&ctrl_info->num_interrupts);
2762
2763 init_timer(&ctrl_info->heartbeat_timer);
2764 ctrl_info->heartbeat_timer.expires =
2765 jiffies + PQI_HEARTBEAT_TIMER_INTERVAL;
2766 ctrl_info->heartbeat_timer.data = (unsigned long)ctrl_info;
2767 ctrl_info->heartbeat_timer.function = pqi_heartbeat_timer_handler;
2768 add_timer(&ctrl_info->heartbeat_timer);
2769 ctrl_info->heartbeat_timer_started = true;
2770}
2771
2772static inline void pqi_stop_heartbeat_timer(struct pqi_ctrl_info *ctrl_info)
2773{
2774 if (ctrl_info->heartbeat_timer_started)
2775 del_timer_sync(&ctrl_info->heartbeat_timer);
2776}
2777
2778static int pqi_event_type_to_event_index(unsigned int event_type)
2779{
2780 int index;
2781
2782 switch (event_type) {
2783 case PQI_EVENT_TYPE_HEARTBEAT:
2784 index = PQI_EVENT_HEARTBEAT;
2785 break;
2786 case PQI_EVENT_TYPE_HOTPLUG:
2787 index = PQI_EVENT_HOTPLUG;
2788 break;
2789 case PQI_EVENT_TYPE_HARDWARE:
2790 index = PQI_EVENT_HARDWARE;
2791 break;
2792 case PQI_EVENT_TYPE_PHYSICAL_DEVICE:
2793 index = PQI_EVENT_PHYSICAL_DEVICE;
2794 break;
2795 case PQI_EVENT_TYPE_LOGICAL_DEVICE:
2796 index = PQI_EVENT_LOGICAL_DEVICE;
2797 break;
2798 case PQI_EVENT_TYPE_AIO_STATE_CHANGE:
2799 index = PQI_EVENT_AIO_STATE_CHANGE;
2800 break;
2801 case PQI_EVENT_TYPE_AIO_CONFIG_CHANGE:
2802 index = PQI_EVENT_AIO_CONFIG_CHANGE;
2803 break;
2804 default:
2805 index = -1;
2806 break;
2807 }
2808
2809 return index;
2810}
2811
2812static unsigned int pqi_process_event_intr(struct pqi_ctrl_info *ctrl_info)
2813{
2814 unsigned int num_events;
2815 pqi_index_t oq_pi;
2816 pqi_index_t oq_ci;
2817 struct pqi_event_queue *event_queue;
2818 struct pqi_event_response *response;
2819 struct pqi_event *pending_event;
2820 bool need_delayed_work;
2821 int event_index;
2822
2823 event_queue = &ctrl_info->event_queue;
2824 num_events = 0;
2825 need_delayed_work = false;
2826 oq_ci = event_queue->oq_ci_copy;
2827
2828 while (1) {
2829 oq_pi = *event_queue->oq_pi;
2830 if (oq_pi == oq_ci)
2831 break;
2832
2833 num_events++;
2834 response = event_queue->oq_element_array +
2835 (oq_ci * PQI_EVENT_OQ_ELEMENT_LENGTH);
2836
2837 event_index =
2838 pqi_event_type_to_event_index(response->event_type);
2839
2840 if (event_index >= 0) {
2841 if (response->request_acknowlege) {
2842 pending_event =
2843 &ctrl_info->pending_events[event_index];
2844 pending_event->event_type =
2845 response->event_type;
2846 pending_event->event_id = response->event_id;
2847 pending_event->additional_event_id =
2848 response->additional_event_id;
2849 if (event_index != PQI_EVENT_HEARTBEAT) {
2850 pending_event->pending = true;
2851 need_delayed_work = true;
2852 }
2853 }
2854 }
2855
2856 oq_ci = (oq_ci + 1) % PQI_NUM_EVENT_QUEUE_ELEMENTS;
2857 }
2858
2859 if (num_events) {
2860 event_queue->oq_ci_copy = oq_ci;
2861 writel(oq_ci, event_queue->oq_ci);
2862
2863 if (need_delayed_work)
2864 schedule_work(&ctrl_info->event_work);
2865 }
2866
2867 return num_events;
2868}
2869
2870static irqreturn_t pqi_irq_handler(int irq, void *data)
2871{
2872 struct pqi_ctrl_info *ctrl_info;
2873 struct pqi_queue_group *queue_group;
2874 unsigned int num_responses_handled;
2875
2876 queue_group = data;
2877 ctrl_info = queue_group->ctrl_info;
2878
2879 if (!ctrl_info || !queue_group->oq_ci)
2880 return IRQ_NONE;
2881
2882 num_responses_handled = pqi_process_io_intr(ctrl_info, queue_group);
2883
2884 if (irq == ctrl_info->event_irq)
2885 num_responses_handled += pqi_process_event_intr(ctrl_info);
2886
2887 if (num_responses_handled)
2888 atomic_inc(&ctrl_info->num_interrupts);
2889
2890 pqi_start_io(ctrl_info, queue_group, RAID_PATH, NULL);
2891 pqi_start_io(ctrl_info, queue_group, AIO_PATH, NULL);
2892
2893 return IRQ_HANDLED;
2894}
2895
2896static int pqi_request_irqs(struct pqi_ctrl_info *ctrl_info)
2897{
2898 int i;
2899 int rc;
2900
2901 ctrl_info->event_irq = ctrl_info->msix_vectors[0];
2902
2903 for (i = 0; i < ctrl_info->num_msix_vectors_enabled; i++) {
2904 rc = request_irq(ctrl_info->msix_vectors[i],
2905 pqi_irq_handler, 0,
2906 DRIVER_NAME_SHORT, ctrl_info->intr_data[i]);
2907 if (rc) {
2908 dev_err(&ctrl_info->pci_dev->dev,
2909 "irq %u init failed with error %d\n",
2910 ctrl_info->msix_vectors[i], rc);
2911 return rc;
2912 }
2913 ctrl_info->num_msix_vectors_initialized++;
2914 }
2915
2916 return 0;
2917}
2918
2919static void pqi_free_irqs(struct pqi_ctrl_info *ctrl_info)
2920{
2921 int i;
2922
2923 for (i = 0; i < ctrl_info->num_msix_vectors_initialized; i++)
2924 free_irq(ctrl_info->msix_vectors[i],
2925 ctrl_info->intr_data[i]);
2926}
2927
2928static int pqi_enable_msix_interrupts(struct pqi_ctrl_info *ctrl_info)
2929{
2930 unsigned int i;
2931 int max_vectors;
2932 int num_vectors_enabled;
2933 struct msix_entry msix_entries[PQI_MAX_MSIX_VECTORS];
2934
2935 max_vectors = ctrl_info->num_queue_groups;
2936
2937 for (i = 0; i < max_vectors; i++)
2938 msix_entries[i].entry = i;
2939
2940 num_vectors_enabled = pci_enable_msix_range(ctrl_info->pci_dev,
2941 msix_entries, PQI_MIN_MSIX_VECTORS, max_vectors);
2942
2943 if (num_vectors_enabled < 0) {
2944 dev_err(&ctrl_info->pci_dev->dev,
2945 "MSI-X init failed with error %d\n",
2946 num_vectors_enabled);
2947 return num_vectors_enabled;
2948 }
2949
2950 ctrl_info->num_msix_vectors_enabled = num_vectors_enabled;
2951 for (i = 0; i < num_vectors_enabled; i++) {
2952 ctrl_info->msix_vectors[i] = msix_entries[i].vector;
2953 ctrl_info->intr_data[i] = &ctrl_info->queue_groups[i];
2954 }
2955
2956 return 0;
2957}
2958
2959static void pqi_irq_set_affinity_hint(struct pqi_ctrl_info *ctrl_info)
2960{
2961 int i;
2962 int rc;
2963 int cpu;
2964
2965 cpu = cpumask_first(cpu_online_mask);
2966 for (i = 0; i < ctrl_info->num_msix_vectors_initialized; i++) {
2967 rc = irq_set_affinity_hint(ctrl_info->msix_vectors[i],
2968 get_cpu_mask(cpu));
2969 if (rc)
2970 dev_err(&ctrl_info->pci_dev->dev,
2971 "error %d setting affinity hint for irq vector %u\n",
2972 rc, ctrl_info->msix_vectors[i]);
2973 cpu = cpumask_next(cpu, cpu_online_mask);
2974 }
2975}
2976
2977static void pqi_irq_unset_affinity_hint(struct pqi_ctrl_info *ctrl_info)
2978{
2979 int i;
2980
2981 for (i = 0; i < ctrl_info->num_msix_vectors_initialized; i++)
2982 irq_set_affinity_hint(ctrl_info->msix_vectors[i], NULL);
2983}
2984
2985static int pqi_alloc_operational_queues(struct pqi_ctrl_info *ctrl_info)
2986{
2987 unsigned int i;
2988 size_t alloc_length;
2989 size_t element_array_length_per_iq;
2990 size_t element_array_length_per_oq;
2991 void *element_array;
2992 void *next_queue_index;
2993 void *aligned_pointer;
2994 unsigned int num_inbound_queues;
2995 unsigned int num_outbound_queues;
2996 unsigned int num_queue_indexes;
2997 struct pqi_queue_group *queue_group;
2998
2999 element_array_length_per_iq =
3000 PQI_OPERATIONAL_IQ_ELEMENT_LENGTH *
3001 ctrl_info->num_elements_per_iq;
3002 element_array_length_per_oq =
3003 PQI_OPERATIONAL_OQ_ELEMENT_LENGTH *
3004 ctrl_info->num_elements_per_oq;
3005 num_inbound_queues = ctrl_info->num_queue_groups * 2;
3006 num_outbound_queues = ctrl_info->num_queue_groups;
3007 num_queue_indexes = (ctrl_info->num_queue_groups * 3) + 1;
3008
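	/*
	 * First pass: walk the queue layout starting from a NULL pointer to
	 * compute the total allocation size, including the padding needed to
	 * align each element array and queue index.
	 */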
3009 aligned_pointer = NULL;
3010
3011 for (i = 0; i < num_inbound_queues; i++) {
3012 aligned_pointer = PTR_ALIGN(aligned_pointer,
3013 PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT);
3014 aligned_pointer += element_array_length_per_iq;
3015 }
3016
3017 for (i = 0; i < num_outbound_queues; i++) {
3018 aligned_pointer = PTR_ALIGN(aligned_pointer,
3019 PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT);
3020 aligned_pointer += element_array_length_per_oq;
3021 }
3022
3023 aligned_pointer = PTR_ALIGN(aligned_pointer,
3024 PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT);
3025 aligned_pointer += PQI_NUM_EVENT_QUEUE_ELEMENTS *
3026 PQI_EVENT_OQ_ELEMENT_LENGTH;
3027
3028 for (i = 0; i < num_queue_indexes; i++) {
3029 aligned_pointer = PTR_ALIGN(aligned_pointer,
3030 PQI_OPERATIONAL_INDEX_ALIGNMENT);
3031 aligned_pointer += sizeof(pqi_index_t);
3032 }
3033
3034 alloc_length = (size_t)aligned_pointer +
3035 PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT;
3036
3037 ctrl_info->queue_memory_base =
3038 dma_zalloc_coherent(&ctrl_info->pci_dev->dev,
3039 alloc_length,
3040 &ctrl_info->queue_memory_base_dma_handle, GFP_KERNEL);
3041
3042 if (!ctrl_info->queue_memory_base) {
3043 dev_err(&ctrl_info->pci_dev->dev,
 3044			"failed to allocate memory for PQI operational queues\n");
3045 return -ENOMEM;
3046 }
3047
3048 ctrl_info->queue_memory_length = alloc_length;
3049
3050 element_array = PTR_ALIGN(ctrl_info->queue_memory_base,
3051 PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT);
3052
3053 for (i = 0; i < ctrl_info->num_queue_groups; i++) {
3054 queue_group = &ctrl_info->queue_groups[i];
3055 queue_group->iq_element_array[RAID_PATH] = element_array;
3056 queue_group->iq_element_array_bus_addr[RAID_PATH] =
3057 ctrl_info->queue_memory_base_dma_handle +
3058 (element_array - ctrl_info->queue_memory_base);
3059 element_array += element_array_length_per_iq;
3060 element_array = PTR_ALIGN(element_array,
3061 PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT);
3062 queue_group->iq_element_array[AIO_PATH] = element_array;
3063 queue_group->iq_element_array_bus_addr[AIO_PATH] =
3064 ctrl_info->queue_memory_base_dma_handle +
3065 (element_array - ctrl_info->queue_memory_base);
3066 element_array += element_array_length_per_iq;
3067 element_array = PTR_ALIGN(element_array,
3068 PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT);
3069 }
3070
3071 for (i = 0; i < ctrl_info->num_queue_groups; i++) {
3072 queue_group = &ctrl_info->queue_groups[i];
3073 queue_group->oq_element_array = element_array;
3074 queue_group->oq_element_array_bus_addr =
3075 ctrl_info->queue_memory_base_dma_handle +
3076 (element_array - ctrl_info->queue_memory_base);
3077 element_array += element_array_length_per_oq;
3078 element_array = PTR_ALIGN(element_array,
3079 PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT);
3080 }
3081
3082 ctrl_info->event_queue.oq_element_array = element_array;
3083 ctrl_info->event_queue.oq_element_array_bus_addr =
3084 ctrl_info->queue_memory_base_dma_handle +
3085 (element_array - ctrl_info->queue_memory_base);
3086 element_array += PQI_NUM_EVENT_QUEUE_ELEMENTS *
3087 PQI_EVENT_OQ_ELEMENT_LENGTH;
3088
3089 next_queue_index = PTR_ALIGN(element_array,
3090 PQI_OPERATIONAL_INDEX_ALIGNMENT);
3091
3092 for (i = 0; i < ctrl_info->num_queue_groups; i++) {
3093 queue_group = &ctrl_info->queue_groups[i];
3094 queue_group->iq_ci[RAID_PATH] = next_queue_index;
3095 queue_group->iq_ci_bus_addr[RAID_PATH] =
3096 ctrl_info->queue_memory_base_dma_handle +
3097 (next_queue_index - ctrl_info->queue_memory_base);
3098 next_queue_index += sizeof(pqi_index_t);
3099 next_queue_index = PTR_ALIGN(next_queue_index,
3100 PQI_OPERATIONAL_INDEX_ALIGNMENT);
3101 queue_group->iq_ci[AIO_PATH] = next_queue_index;
3102 queue_group->iq_ci_bus_addr[AIO_PATH] =
3103 ctrl_info->queue_memory_base_dma_handle +
3104 (next_queue_index - ctrl_info->queue_memory_base);
3105 next_queue_index += sizeof(pqi_index_t);
3106 next_queue_index = PTR_ALIGN(next_queue_index,
3107 PQI_OPERATIONAL_INDEX_ALIGNMENT);
3108 queue_group->oq_pi = next_queue_index;
3109 queue_group->oq_pi_bus_addr =
3110 ctrl_info->queue_memory_base_dma_handle +
3111 (next_queue_index - ctrl_info->queue_memory_base);
3112 next_queue_index += sizeof(pqi_index_t);
3113 next_queue_index = PTR_ALIGN(next_queue_index,
3114 PQI_OPERATIONAL_INDEX_ALIGNMENT);
3115 }
3116
3117 ctrl_info->event_queue.oq_pi = next_queue_index;
3118 ctrl_info->event_queue.oq_pi_bus_addr =
3119 ctrl_info->queue_memory_base_dma_handle +
3120 (next_queue_index - ctrl_info->queue_memory_base);
3121
3122 return 0;
3123}
3124
3125static void pqi_init_operational_queues(struct pqi_ctrl_info *ctrl_info)
3126{
3127 unsigned int i;
3128 u16 next_iq_id = PQI_MIN_OPERATIONAL_QUEUE_ID;
3129 u16 next_oq_id = PQI_MIN_OPERATIONAL_QUEUE_ID;
3130
3131 /*
3132 * Initialize the backpointers to the controller structure in
3133 * each operational queue group structure.
3134 */
3135 for (i = 0; i < ctrl_info->num_queue_groups; i++)
3136 ctrl_info->queue_groups[i].ctrl_info = ctrl_info;
3137
3138 /*
3139 * Assign IDs to all operational queues. Note that the IDs
3140 * assigned to operational IQs are independent of the IDs
3141 * assigned to operational OQs.
3142 */
3143 ctrl_info->event_queue.oq_id = next_oq_id++;
3144 for (i = 0; i < ctrl_info->num_queue_groups; i++) {
3145 ctrl_info->queue_groups[i].iq_id[RAID_PATH] = next_iq_id++;
3146 ctrl_info->queue_groups[i].iq_id[AIO_PATH] = next_iq_id++;
3147 ctrl_info->queue_groups[i].oq_id = next_oq_id++;
3148 }
3149
3150 /*
3151 * Assign MSI-X table entry indexes to all queues. Note that the
3152 * interrupt for the event queue is shared with the first queue group.
3153 */
3154 ctrl_info->event_queue.int_msg_num = 0;
3155 for (i = 0; i < ctrl_info->num_queue_groups; i++)
3156 ctrl_info->queue_groups[i].int_msg_num = i;
3157
3158 for (i = 0; i < ctrl_info->num_queue_groups; i++) {
3159 spin_lock_init(&ctrl_info->queue_groups[i].submit_lock[0]);
3160 spin_lock_init(&ctrl_info->queue_groups[i].submit_lock[1]);
3161 INIT_LIST_HEAD(&ctrl_info->queue_groups[i].request_list[0]);
3162 INIT_LIST_HEAD(&ctrl_info->queue_groups[i].request_list[1]);
3163 }
3164}
3165
3166static int pqi_alloc_admin_queues(struct pqi_ctrl_info *ctrl_info)
3167{
3168 size_t alloc_length;
3169 struct pqi_admin_queues_aligned *admin_queues_aligned;
3170 struct pqi_admin_queues *admin_queues;
3171
3172 alloc_length = sizeof(struct pqi_admin_queues_aligned) +
3173 PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT;
3174
3175 ctrl_info->admin_queue_memory_base =
3176 dma_zalloc_coherent(&ctrl_info->pci_dev->dev,
3177 alloc_length,
3178 &ctrl_info->admin_queue_memory_base_dma_handle,
3179 GFP_KERNEL);
3180
3181 if (!ctrl_info->admin_queue_memory_base)
3182 return -ENOMEM;
3183
3184 ctrl_info->admin_queue_memory_length = alloc_length;
3185
3186 admin_queues = &ctrl_info->admin_queues;
3187 admin_queues_aligned = PTR_ALIGN(ctrl_info->admin_queue_memory_base,
3188 PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT);
3189 admin_queues->iq_element_array =
3190 &admin_queues_aligned->iq_element_array;
3191 admin_queues->oq_element_array =
3192 &admin_queues_aligned->oq_element_array;
3193 admin_queues->iq_ci = &admin_queues_aligned->iq_ci;
3194 admin_queues->oq_pi = &admin_queues_aligned->oq_pi;
3195
3196 admin_queues->iq_element_array_bus_addr =
3197 ctrl_info->admin_queue_memory_base_dma_handle +
3198 (admin_queues->iq_element_array -
3199 ctrl_info->admin_queue_memory_base);
3200 admin_queues->oq_element_array_bus_addr =
3201 ctrl_info->admin_queue_memory_base_dma_handle +
3202 (admin_queues->oq_element_array -
3203 ctrl_info->admin_queue_memory_base);
3204 admin_queues->iq_ci_bus_addr =
3205 ctrl_info->admin_queue_memory_base_dma_handle +
3206 ((void *)admin_queues->iq_ci -
3207 ctrl_info->admin_queue_memory_base);
3208 admin_queues->oq_pi_bus_addr =
3209 ctrl_info->admin_queue_memory_base_dma_handle +
3210 ((void *)admin_queues->oq_pi -
3211 ctrl_info->admin_queue_memory_base);
3212
3213 return 0;
3214}
3215
3216#define PQI_ADMIN_QUEUE_CREATE_TIMEOUT_JIFFIES HZ
3217#define PQI_ADMIN_QUEUE_CREATE_POLL_INTERVAL_MSECS 1
3218
3219static int pqi_create_admin_queues(struct pqi_ctrl_info *ctrl_info)
3220{
3221 struct pqi_device_registers __iomem *pqi_registers;
3222 struct pqi_admin_queues *admin_queues;
3223 unsigned long timeout;
3224 u8 status;
3225 u32 reg;
3226
3227 pqi_registers = ctrl_info->pqi_registers;
3228 admin_queues = &ctrl_info->admin_queues;
3229
3230 writeq((u64)admin_queues->iq_element_array_bus_addr,
3231 &pqi_registers->admin_iq_element_array_addr);
3232 writeq((u64)admin_queues->oq_element_array_bus_addr,
3233 &pqi_registers->admin_oq_element_array_addr);
3234 writeq((u64)admin_queues->iq_ci_bus_addr,
3235 &pqi_registers->admin_iq_ci_addr);
3236 writeq((u64)admin_queues->oq_pi_bus_addr,
3237 &pqi_registers->admin_oq_pi_addr);
3238
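	/*
	 * A single 32-bit write encodes the admin IQ element count in the low
	 * byte, the admin OQ element count in the next byte, and the MSI-X
	 * message number starting at bit 16.
	 */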
3239 reg = PQI_ADMIN_IQ_NUM_ELEMENTS |
3240 (PQI_ADMIN_OQ_NUM_ELEMENTS) << 8 |
3241 (admin_queues->int_msg_num << 16);
3242 writel(reg, &pqi_registers->admin_iq_num_elements);
3243 writel(PQI_CREATE_ADMIN_QUEUE_PAIR,
3244 &pqi_registers->function_and_status_code);
3245
3246 timeout = PQI_ADMIN_QUEUE_CREATE_TIMEOUT_JIFFIES + jiffies;
3247 while (1) {
3248 status = readb(&pqi_registers->function_and_status_code);
3249 if (status == PQI_STATUS_IDLE)
3250 break;
3251 if (time_after(jiffies, timeout))
3252 return -ETIMEDOUT;
3253 msleep(PQI_ADMIN_QUEUE_CREATE_POLL_INTERVAL_MSECS);
3254 }
3255
3256 /*
3257 * The offset registers are not initialized to the correct
3258 * offsets until *after* the create admin queue pair command
3259 * completes successfully.
3260 */
3261 admin_queues->iq_pi = ctrl_info->iomem_base +
3262 PQI_DEVICE_REGISTERS_OFFSET +
3263 readq(&pqi_registers->admin_iq_pi_offset);
3264 admin_queues->oq_ci = ctrl_info->iomem_base +
3265 PQI_DEVICE_REGISTERS_OFFSET +
3266 readq(&pqi_registers->admin_oq_ci_offset);
3267
3268 return 0;
3269}
3270
3271static void pqi_submit_admin_request(struct pqi_ctrl_info *ctrl_info,
3272 struct pqi_general_admin_request *request)
3273{
3274 struct pqi_admin_queues *admin_queues;
3275 void *next_element;
3276 pqi_index_t iq_pi;
3277
3278 admin_queues = &ctrl_info->admin_queues;
3279 iq_pi = admin_queues->iq_pi_copy;
3280
3281 next_element = admin_queues->iq_element_array +
3282 (iq_pi * PQI_ADMIN_IQ_ELEMENT_LENGTH);
3283
3284 memcpy(next_element, request, sizeof(*request));
3285
3286 iq_pi = (iq_pi + 1) % PQI_ADMIN_IQ_NUM_ELEMENTS;
3287 admin_queues->iq_pi_copy = iq_pi;
3288
3289 /*
3290 * This write notifies the controller that an IU is available to be
3291 * processed.
3292 */
3293 writel(iq_pi, admin_queues->iq_pi);
3294}
3295
3296static int pqi_poll_for_admin_response(struct pqi_ctrl_info *ctrl_info,
3297 struct pqi_general_admin_response *response)
3298{
3299 struct pqi_admin_queues *admin_queues;
3300 pqi_index_t oq_pi;
3301 pqi_index_t oq_ci;
3302 unsigned long timeout;
3303
3304 admin_queues = &ctrl_info->admin_queues;
3305 oq_ci = admin_queues->oq_ci_copy;
3306
3307 timeout = (3 * HZ) + jiffies;
3308
3309 while (1) {
3310 oq_pi = *admin_queues->oq_pi;
3311 if (oq_pi != oq_ci)
3312 break;
3313 if (time_after(jiffies, timeout)) {
3314 dev_err(&ctrl_info->pci_dev->dev,
3315 "timed out waiting for admin response\n");
3316 return -ETIMEDOUT;
3317 }
3318 usleep_range(1000, 2000);
3319 }
3320
3321 memcpy(response, admin_queues->oq_element_array +
3322 (oq_ci * PQI_ADMIN_OQ_ELEMENT_LENGTH), sizeof(*response));
3323
3324 oq_ci = (oq_ci + 1) % PQI_ADMIN_OQ_NUM_ELEMENTS;
3325 admin_queues->oq_ci_copy = oq_ci;
3326 writel(oq_ci, admin_queues->oq_ci);
3327
3328 return 0;
3329}
3330
3331static void pqi_start_io(struct pqi_ctrl_info *ctrl_info,
3332 struct pqi_queue_group *queue_group, enum pqi_io_path path,
3333 struct pqi_io_request *io_request)
3334{
3335 struct pqi_io_request *next;
3336 void *next_element;
3337 pqi_index_t iq_pi;
3338 pqi_index_t iq_ci;
3339 size_t iu_length;
3340 unsigned long flags;
3341 unsigned int num_elements_needed;
3342 unsigned int num_elements_to_end_of_queue;
3343 size_t copy_count;
3344 struct pqi_iu_header *request;
3345
3346 spin_lock_irqsave(&queue_group->submit_lock[path], flags);
3347
3348 if (io_request)
3349 list_add_tail(&io_request->request_list_entry,
3350 &queue_group->request_list[path]);
3351
3352 iq_pi = queue_group->iq_pi_copy[path];
3353
3354 list_for_each_entry_safe(io_request, next,
3355 &queue_group->request_list[path], request_list_entry) {
3356
3357 request = io_request->iu;
3358
3359 iu_length = get_unaligned_le16(&request->iu_length) +
3360 PQI_REQUEST_HEADER_LENGTH;
3361 num_elements_needed =
3362 DIV_ROUND_UP(iu_length,
3363 PQI_OPERATIONAL_IQ_ELEMENT_LENGTH);
3364
3365 iq_ci = *queue_group->iq_ci[path];
3366
3367 if (num_elements_needed > pqi_num_elements_free(iq_pi, iq_ci,
3368 ctrl_info->num_elements_per_iq))
3369 break;
3370
3371 put_unaligned_le16(queue_group->oq_id,
3372 &request->response_queue_id);
3373
3374 next_element = queue_group->iq_element_array[path] +
3375 (iq_pi * PQI_OPERATIONAL_IQ_ELEMENT_LENGTH);
3376
3377 num_elements_to_end_of_queue =
3378 ctrl_info->num_elements_per_iq - iq_pi;
3379
3380 if (num_elements_needed <= num_elements_to_end_of_queue) {
3381 memcpy(next_element, request, iu_length);
3382 } else {
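			/*
			 * The IU wraps past the end of the element array:
			 * copy the first part to the tail of the queue and
			 * the remainder to the head.
			 */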
3383 copy_count = num_elements_to_end_of_queue *
3384 PQI_OPERATIONAL_IQ_ELEMENT_LENGTH;
3385 memcpy(next_element, request, copy_count);
3386 memcpy(queue_group->iq_element_array[path],
3387 (u8 *)request + copy_count,
3388 iu_length - copy_count);
3389 }
3390
3391 iq_pi = (iq_pi + num_elements_needed) %
3392 ctrl_info->num_elements_per_iq;
3393
3394 list_del(&io_request->request_list_entry);
3395 }
3396
3397 if (iq_pi != queue_group->iq_pi_copy[path]) {
3398 queue_group->iq_pi_copy[path] = iq_pi;
3399 /*
3400 * This write notifies the controller that one or more IUs are
3401 * available to be processed.
3402 */
3403 writel(iq_pi, queue_group->iq_pi[path]);
3404 }
3405
3406 spin_unlock_irqrestore(&queue_group->submit_lock[path], flags);
3407}
3408
3409static void pqi_raid_synchronous_complete(struct pqi_io_request *io_request,
3410 void *context)
3411{
3412 struct completion *waiting = context;
3413
3414 complete(waiting);
3415}
3416
3417static int pqi_submit_raid_request_synchronous_with_io_request(
3418 struct pqi_ctrl_info *ctrl_info, struct pqi_io_request *io_request,
3419 unsigned long timeout_msecs)
3420{
3421 int rc = 0;
3422 DECLARE_COMPLETION_ONSTACK(wait);
3423
3424 io_request->io_complete_callback = pqi_raid_synchronous_complete;
3425 io_request->context = &wait;
3426
3427 pqi_start_io(ctrl_info,
3428 &ctrl_info->queue_groups[PQI_DEFAULT_QUEUE_GROUP], RAID_PATH,
3429 io_request);
3430
3431 if (timeout_msecs == NO_TIMEOUT) {
3432 wait_for_completion_io(&wait);
3433 } else {
3434 if (!wait_for_completion_io_timeout(&wait,
3435 msecs_to_jiffies(timeout_msecs))) {
3436 dev_warn(&ctrl_info->pci_dev->dev,
3437 "command timed out\n");
3438 rc = -ETIMEDOUT;
3439 }
3440 }
3441
3442 return rc;
3443}
3444
3445static int pqi_submit_raid_request_synchronous(struct pqi_ctrl_info *ctrl_info,
3446 struct pqi_iu_header *request, unsigned int flags,
3447 struct pqi_raid_error_info *error_info, unsigned long timeout_msecs)
3448{
3449 int rc;
3450 struct pqi_io_request *io_request;
3451 unsigned long start_jiffies;
3452 unsigned long msecs_blocked;
3453 size_t iu_length;
3454
3455 /*
3456 * Note that specifying PQI_SYNC_FLAGS_INTERRUPTABLE and a timeout value
3457 * are mutually exclusive.
3458 */
3459
3460 if (flags & PQI_SYNC_FLAGS_INTERRUPTABLE) {
3461 if (down_interruptible(&ctrl_info->sync_request_sem))
3462 return -ERESTARTSYS;
3463 } else {
3464 if (timeout_msecs == NO_TIMEOUT) {
3465 down(&ctrl_info->sync_request_sem);
3466 } else {
3467 start_jiffies = jiffies;
3468 if (down_timeout(&ctrl_info->sync_request_sem,
3469 msecs_to_jiffies(timeout_msecs)))
3470 return -ETIMEDOUT;
3471 msecs_blocked =
3472 jiffies_to_msecs(jiffies - start_jiffies);
3473 if (msecs_blocked >= timeout_msecs)
3474 return -ETIMEDOUT;
3475 timeout_msecs -= msecs_blocked;
3476 }
3477 }
3478
3479 io_request = pqi_alloc_io_request(ctrl_info);
3480
3481 put_unaligned_le16(io_request->index,
3482 &(((struct pqi_raid_path_request *)request)->request_id));
3483
3484 if (request->iu_type == PQI_REQUEST_IU_RAID_PATH_IO)
3485 ((struct pqi_raid_path_request *)request)->error_index =
3486 ((struct pqi_raid_path_request *)request)->request_id;
3487
3488 iu_length = get_unaligned_le16(&request->iu_length) +
3489 PQI_REQUEST_HEADER_LENGTH;
3490 memcpy(io_request->iu, request, iu_length);
3491
3492 rc = pqi_submit_raid_request_synchronous_with_io_request(ctrl_info,
3493 io_request, timeout_msecs);
3494
3495 if (error_info) {
3496 if (io_request->error_info)
3497 memcpy(error_info, io_request->error_info,
3498 sizeof(*error_info));
3499 else
3500 memset(error_info, 0, sizeof(*error_info));
3501 } else if (rc == 0 && io_request->error_info) {
3502 u8 scsi_status;
3503 struct pqi_raid_error_info *raid_error_info;
3504
3505 raid_error_info = io_request->error_info;
3506 scsi_status = raid_error_info->status;
3507
3508 if (scsi_status == SAM_STAT_CHECK_CONDITION &&
3509 raid_error_info->data_out_result ==
3510 PQI_DATA_IN_OUT_UNDERFLOW)
3511 scsi_status = SAM_STAT_GOOD;
3512
3513 if (scsi_status != SAM_STAT_GOOD)
3514 rc = -EIO;
3515 }
3516
3517 pqi_free_io_request(io_request);
3518
3519 up(&ctrl_info->sync_request_sem);
3520
3521 return rc;
3522}
3523
3524static int pqi_validate_admin_response(
3525 struct pqi_general_admin_response *response, u8 expected_function_code)
3526{
3527 if (response->header.iu_type != PQI_RESPONSE_IU_GENERAL_ADMIN)
3528 return -EINVAL;
3529
3530 if (get_unaligned_le16(&response->header.iu_length) !=
3531 PQI_GENERAL_ADMIN_IU_LENGTH)
3532 return -EINVAL;
3533
3534 if (response->function_code != expected_function_code)
3535 return -EINVAL;
3536
3537 if (response->status != PQI_GENERAL_ADMIN_STATUS_SUCCESS)
3538 return -EINVAL;
3539
3540 return 0;
3541}
3542
3543static int pqi_submit_admin_request_synchronous(
3544 struct pqi_ctrl_info *ctrl_info,
3545 struct pqi_general_admin_request *request,
3546 struct pqi_general_admin_response *response)
3547{
3548 int rc;
3549
3550 pqi_submit_admin_request(ctrl_info, request);
3551
3552 rc = pqi_poll_for_admin_response(ctrl_info, response);
3553
3554 if (rc == 0)
3555 rc = pqi_validate_admin_response(response,
3556 request->function_code);
3557
3558 return rc;
3559}
3560
3561static int pqi_report_device_capability(struct pqi_ctrl_info *ctrl_info)
3562{
3563 int rc;
3564 struct pqi_general_admin_request request;
3565 struct pqi_general_admin_response response;
3566 struct pqi_device_capability *capability;
3567 struct pqi_iu_layer_descriptor *sop_iu_layer_descriptor;
3568
3569 capability = kmalloc(sizeof(*capability), GFP_KERNEL);
3570 if (!capability)
3571 return -ENOMEM;
3572
3573 memset(&request, 0, sizeof(request));
3574
3575 request.header.iu_type = PQI_REQUEST_IU_GENERAL_ADMIN;
3576 put_unaligned_le16(PQI_GENERAL_ADMIN_IU_LENGTH,
3577 &request.header.iu_length);
3578 request.function_code =
3579 PQI_GENERAL_ADMIN_FUNCTION_REPORT_DEVICE_CAPABILITY;
3580 put_unaligned_le32(sizeof(*capability),
3581 &request.data.report_device_capability.buffer_length);
3582
3583 rc = pqi_map_single(ctrl_info->pci_dev,
3584 &request.data.report_device_capability.sg_descriptor,
3585 capability, sizeof(*capability),
3586 PCI_DMA_FROMDEVICE);
3587 if (rc)
3588 goto out;
3589
3590 rc = pqi_submit_admin_request_synchronous(ctrl_info, &request,
3591 &response);
3592
3593 pqi_pci_unmap(ctrl_info->pci_dev,
3594 &request.data.report_device_capability.sg_descriptor, 1,
3595 PCI_DMA_FROMDEVICE);
3596
3597 if (rc)
3598 goto out;
3599
3600 if (response.status != PQI_GENERAL_ADMIN_STATUS_SUCCESS) {
3601 rc = -EIO;
3602 goto out;
3603 }
3604
3605 ctrl_info->max_inbound_queues =
3606 get_unaligned_le16(&capability->max_inbound_queues);
3607 ctrl_info->max_elements_per_iq =
3608 get_unaligned_le16(&capability->max_elements_per_iq);
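	/* The firmware reports queue element lengths in units of 16 bytes. */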
3609 ctrl_info->max_iq_element_length =
3610 get_unaligned_le16(&capability->max_iq_element_length)
3611 * 16;
3612 ctrl_info->max_outbound_queues =
3613 get_unaligned_le16(&capability->max_outbound_queues);
3614 ctrl_info->max_elements_per_oq =
3615 get_unaligned_le16(&capability->max_elements_per_oq);
3616 ctrl_info->max_oq_element_length =
3617 get_unaligned_le16(&capability->max_oq_element_length)
3618 * 16;
3619
3620 sop_iu_layer_descriptor =
3621 &capability->iu_layer_descriptors[PQI_PROTOCOL_SOP];
3622
3623 ctrl_info->max_inbound_iu_length_per_firmware =
3624 get_unaligned_le16(
3625 &sop_iu_layer_descriptor->max_inbound_iu_length);
3626 ctrl_info->inbound_spanning_supported =
3627 sop_iu_layer_descriptor->inbound_spanning_supported;
3628 ctrl_info->outbound_spanning_supported =
3629 sop_iu_layer_descriptor->outbound_spanning_supported;
3630
3631out:
3632 kfree(capability);
3633
3634 return rc;
3635}
3636
3637static int pqi_validate_device_capability(struct pqi_ctrl_info *ctrl_info)
3638{
3639 if (ctrl_info->max_iq_element_length <
3640 PQI_OPERATIONAL_IQ_ELEMENT_LENGTH) {
3641 dev_err(&ctrl_info->pci_dev->dev,
3642 "max. inbound queue element length of %d is less than the required length of %d\n",
3643 ctrl_info->max_iq_element_length,
3644 PQI_OPERATIONAL_IQ_ELEMENT_LENGTH);
3645 return -EINVAL;
3646 }
3647
3648 if (ctrl_info->max_oq_element_length <
3649 PQI_OPERATIONAL_OQ_ELEMENT_LENGTH) {
3650 dev_err(&ctrl_info->pci_dev->dev,
3651 "max. outbound queue element length of %d is less than the required length of %d\n",
3652 ctrl_info->max_oq_element_length,
3653 PQI_OPERATIONAL_OQ_ELEMENT_LENGTH);
3654 return -EINVAL;
3655 }
3656
3657 if (ctrl_info->max_inbound_iu_length_per_firmware <
3658 PQI_OPERATIONAL_IQ_ELEMENT_LENGTH) {
3659 dev_err(&ctrl_info->pci_dev->dev,
3660 "max. inbound IU length of %u is less than the min. required length of %d\n",
3661 ctrl_info->max_inbound_iu_length_per_firmware,
3662 PQI_OPERATIONAL_IQ_ELEMENT_LENGTH);
3663 return -EINVAL;
3664 }
3665
3666 return 0;
3667}
3668
3669static int pqi_delete_operational_queue(struct pqi_ctrl_info *ctrl_info,
3670 bool inbound_queue, u16 queue_id)
3671{
3672 struct pqi_general_admin_request request;
3673 struct pqi_general_admin_response response;
3674
3675 memset(&request, 0, sizeof(request));
3676 request.header.iu_type = PQI_REQUEST_IU_GENERAL_ADMIN;
3677 put_unaligned_le16(PQI_GENERAL_ADMIN_IU_LENGTH,
3678 &request.header.iu_length);
3679 if (inbound_queue)
3680 request.function_code =
3681 PQI_GENERAL_ADMIN_FUNCTION_DELETE_IQ;
3682 else
3683 request.function_code =
3684 PQI_GENERAL_ADMIN_FUNCTION_DELETE_OQ;
3685 put_unaligned_le16(queue_id,
3686 &request.data.delete_operational_queue.queue_id);
3687
3688 return pqi_submit_admin_request_synchronous(ctrl_info, &request,
3689 &response);
3690}
3691
3692static int pqi_create_event_queue(struct pqi_ctrl_info *ctrl_info)
3693{
3694 int rc;
3695 struct pqi_event_queue *event_queue;
3696 struct pqi_general_admin_request request;
3697 struct pqi_general_admin_response response;
3698
3699 event_queue = &ctrl_info->event_queue;
3700
3701 /*
 3702	 * Create an OQ (Outbound Queue - device to host queue) dedicated
 3703	 * to event notifications.
3704 */
3705 memset(&request, 0, sizeof(request));
3706 request.header.iu_type = PQI_REQUEST_IU_GENERAL_ADMIN;
3707 put_unaligned_le16(PQI_GENERAL_ADMIN_IU_LENGTH,
3708 &request.header.iu_length);
3709 request.function_code = PQI_GENERAL_ADMIN_FUNCTION_CREATE_OQ;
3710 put_unaligned_le16(event_queue->oq_id,
3711 &request.data.create_operational_oq.queue_id);
3712 put_unaligned_le64((u64)event_queue->oq_element_array_bus_addr,
3713 &request.data.create_operational_oq.element_array_addr);
3714 put_unaligned_le64((u64)event_queue->oq_pi_bus_addr,
3715 &request.data.create_operational_oq.pi_addr);
3716 put_unaligned_le16(PQI_NUM_EVENT_QUEUE_ELEMENTS,
3717 &request.data.create_operational_oq.num_elements);
3718 put_unaligned_le16(PQI_EVENT_OQ_ELEMENT_LENGTH / 16,
3719 &request.data.create_operational_oq.element_length);
3720 request.data.create_operational_oq.queue_protocol = PQI_PROTOCOL_SOP;
3721 put_unaligned_le16(event_queue->int_msg_num,
3722 &request.data.create_operational_oq.int_msg_num);
3723
3724 rc = pqi_submit_admin_request_synchronous(ctrl_info, &request,
3725 &response);
3726 if (rc)
3727 return rc;
3728
3729 event_queue->oq_ci = ctrl_info->iomem_base +
3730 PQI_DEVICE_REGISTERS_OFFSET +
3731 get_unaligned_le64(
3732 &response.data.create_operational_oq.oq_ci_offset);
3733
3734 return 0;
3735}
3736
3737static int pqi_create_queue_group(struct pqi_ctrl_info *ctrl_info)
3738{
3739 unsigned int i;
3740 int rc;
3741 struct pqi_queue_group *queue_group;
3742 struct pqi_general_admin_request request;
3743 struct pqi_general_admin_response response;
3744
3745 i = ctrl_info->num_active_queue_groups;
3746 queue_group = &ctrl_info->queue_groups[i];
3747
3748 /*
3749 * Create IQ (Inbound Queue - host to device queue) for
3750 * RAID path.
3751 */
3752 memset(&request, 0, sizeof(request));
3753 request.header.iu_type = PQI_REQUEST_IU_GENERAL_ADMIN;
3754 put_unaligned_le16(PQI_GENERAL_ADMIN_IU_LENGTH,
3755 &request.header.iu_length);
3756 request.function_code = PQI_GENERAL_ADMIN_FUNCTION_CREATE_IQ;
3757 put_unaligned_le16(queue_group->iq_id[RAID_PATH],
3758 &request.data.create_operational_iq.queue_id);
3759 put_unaligned_le64(
3760 (u64)queue_group->iq_element_array_bus_addr[RAID_PATH],
3761 &request.data.create_operational_iq.element_array_addr);
3762 put_unaligned_le64((u64)queue_group->iq_ci_bus_addr[RAID_PATH],
3763 &request.data.create_operational_iq.ci_addr);
3764 put_unaligned_le16(ctrl_info->num_elements_per_iq,
3765 &request.data.create_operational_iq.num_elements);
3766 put_unaligned_le16(PQI_OPERATIONAL_IQ_ELEMENT_LENGTH / 16,
3767 &request.data.create_operational_iq.element_length);
3768 request.data.create_operational_iq.queue_protocol = PQI_PROTOCOL_SOP;
3769
3770 rc = pqi_submit_admin_request_synchronous(ctrl_info, &request,
3771 &response);
3772 if (rc) {
3773 dev_err(&ctrl_info->pci_dev->dev,
3774 "error creating inbound RAID queue\n");
3775 return rc;
3776 }
3777
3778 queue_group->iq_pi[RAID_PATH] = ctrl_info->iomem_base +
3779 PQI_DEVICE_REGISTERS_OFFSET +
3780 get_unaligned_le64(
3781 &response.data.create_operational_iq.iq_pi_offset);
3782
3783 /*
3784 * Create IQ (Inbound Queue - host to device queue) for
3785 * Advanced I/O (AIO) path.
3786 */
3787 memset(&request, 0, sizeof(request));
3788 request.header.iu_type = PQI_REQUEST_IU_GENERAL_ADMIN;
3789 put_unaligned_le16(PQI_GENERAL_ADMIN_IU_LENGTH,
3790 &request.header.iu_length);
3791 request.function_code = PQI_GENERAL_ADMIN_FUNCTION_CREATE_IQ;
3792 put_unaligned_le16(queue_group->iq_id[AIO_PATH],
3793 &request.data.create_operational_iq.queue_id);
3794	put_unaligned_le64(
3795		(u64)queue_group->iq_element_array_bus_addr[AIO_PATH],
3796		&request.data.create_operational_iq.element_array_addr);
3797 put_unaligned_le64((u64)queue_group->iq_ci_bus_addr[AIO_PATH],
3798 &request.data.create_operational_iq.ci_addr);
3799 put_unaligned_le16(ctrl_info->num_elements_per_iq,
3800 &request.data.create_operational_iq.num_elements);
3801 put_unaligned_le16(PQI_OPERATIONAL_IQ_ELEMENT_LENGTH / 16,
3802 &request.data.create_operational_iq.element_length);
3803 request.data.create_operational_iq.queue_protocol = PQI_PROTOCOL_SOP;
3804
3805 rc = pqi_submit_admin_request_synchronous(ctrl_info, &request,
3806 &response);
3807 if (rc) {
3808 dev_err(&ctrl_info->pci_dev->dev,
3809 "error creating inbound AIO queue\n");
3810 goto delete_inbound_queue_raid;
3811 }
3812
3813 queue_group->iq_pi[AIO_PATH] = ctrl_info->iomem_base +
3814 PQI_DEVICE_REGISTERS_OFFSET +
3815 get_unaligned_le64(
3816 &response.data.create_operational_iq.iq_pi_offset);
3817
3818 /*
3819 * Designate the 2nd IQ as the AIO path. By default, all IQs are
3820 * assumed to be for RAID path I/O unless we change the queue's
3821 * property.
3822 */
3823 memset(&request, 0, sizeof(request));
3824 request.header.iu_type = PQI_REQUEST_IU_GENERAL_ADMIN;
3825 put_unaligned_le16(PQI_GENERAL_ADMIN_IU_LENGTH,
3826 &request.header.iu_length);
3827 request.function_code = PQI_GENERAL_ADMIN_FUNCTION_CHANGE_IQ_PROPERTY;
3828 put_unaligned_le16(queue_group->iq_id[AIO_PATH],
3829 &request.data.change_operational_iq_properties.queue_id);
3830 put_unaligned_le32(PQI_IQ_PROPERTY_IS_AIO_QUEUE,
3831 &request.data.change_operational_iq_properties.vendor_specific);
3832
3833 rc = pqi_submit_admin_request_synchronous(ctrl_info, &request,
3834 &response);
3835 if (rc) {
3836 dev_err(&ctrl_info->pci_dev->dev,
3837 "error changing queue property\n");
3838 goto delete_inbound_queue_aio;
3839 }
3840
3841 /*
3842 * Create OQ (Outbound Queue - device to host queue).
3843 */
3844 memset(&request, 0, sizeof(request));
3845 request.header.iu_type = PQI_REQUEST_IU_GENERAL_ADMIN;
3846 put_unaligned_le16(PQI_GENERAL_ADMIN_IU_LENGTH,
3847 &request.header.iu_length);
3848 request.function_code = PQI_GENERAL_ADMIN_FUNCTION_CREATE_OQ;
3849 put_unaligned_le16(queue_group->oq_id,
3850 &request.data.create_operational_oq.queue_id);
3851 put_unaligned_le64((u64)queue_group->oq_element_array_bus_addr,
3852 &request.data.create_operational_oq.element_array_addr);
3853 put_unaligned_le64((u64)queue_group->oq_pi_bus_addr,
3854 &request.data.create_operational_oq.pi_addr);
3855 put_unaligned_le16(ctrl_info->num_elements_per_oq,
3856 &request.data.create_operational_oq.num_elements);
3857 put_unaligned_le16(PQI_OPERATIONAL_OQ_ELEMENT_LENGTH / 16,
3858 &request.data.create_operational_oq.element_length);
3859 request.data.create_operational_oq.queue_protocol = PQI_PROTOCOL_SOP;
3860 put_unaligned_le16(queue_group->int_msg_num,
3861 &request.data.create_operational_oq.int_msg_num);
3862
3863 rc = pqi_submit_admin_request_synchronous(ctrl_info, &request,
3864 &response);
3865 if (rc) {
3866 dev_err(&ctrl_info->pci_dev->dev,
3867 "error creating outbound queue\n");
3868 goto delete_inbound_queue_aio;
3869 }
3870
3871 queue_group->oq_ci = ctrl_info->iomem_base +
3872 PQI_DEVICE_REGISTERS_OFFSET +
3873 get_unaligned_le64(
3874 &response.data.create_operational_oq.oq_ci_offset);
3875
3876 ctrl_info->num_active_queue_groups++;
3877
3878 return 0;
3879
3880delete_inbound_queue_aio:
3881 pqi_delete_operational_queue(ctrl_info, true,
3882 queue_group->iq_id[AIO_PATH]);
3883
3884delete_inbound_queue_raid:
3885 pqi_delete_operational_queue(ctrl_info, true,
3886 queue_group->iq_id[RAID_PATH]);
3887
3888 return rc;
3889}
3890
3891static int pqi_create_queues(struct pqi_ctrl_info *ctrl_info)
3892{
3893 int rc;
3894 unsigned int i;
3895
3896 rc = pqi_create_event_queue(ctrl_info);
3897 if (rc) {
3898 dev_err(&ctrl_info->pci_dev->dev,
3899 "error creating event queue\n");
3900 return rc;
3901 }
3902
3903 for (i = 0; i < ctrl_info->num_queue_groups; i++) {
3904 rc = pqi_create_queue_group(ctrl_info);
3905 if (rc) {
3906 dev_err(&ctrl_info->pci_dev->dev,
3907 "error creating queue group number %u/%u\n",
3908 i, ctrl_info->num_queue_groups);
3909 return rc;
3910 }
3911 }
3912
3913 return 0;
3914}
3915
3916#define PQI_REPORT_EVENT_CONFIG_BUFFER_LENGTH \
3917 (offsetof(struct pqi_event_config, descriptors) + \
3918 (PQI_MAX_EVENT_DESCRIPTORS * sizeof(struct pqi_event_descriptor)))
3919
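/*
 * Read the controller's current event configuration, point every event
 * descriptor at the dedicated event OQ, and write the updated
 * configuration back to the controller.
 */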
3920static int pqi_configure_events(struct pqi_ctrl_info *ctrl_info)
3921{
3922 int rc;
3923 unsigned int i;
3924 struct pqi_event_config *event_config;
3925 struct pqi_general_management_request request;
3926
3927 event_config = kmalloc(PQI_REPORT_EVENT_CONFIG_BUFFER_LENGTH,
3928 GFP_KERNEL);
3929 if (!event_config)
3930 return -ENOMEM;
3931
3932 memset(&request, 0, sizeof(request));
3933
3934 request.header.iu_type = PQI_REQUEST_IU_REPORT_VENDOR_EVENT_CONFIG;
3935 put_unaligned_le16(offsetof(struct pqi_general_management_request,
3936 data.report_event_configuration.sg_descriptors[1]) -
3937 PQI_REQUEST_HEADER_LENGTH, &request.header.iu_length);
3938 put_unaligned_le32(PQI_REPORT_EVENT_CONFIG_BUFFER_LENGTH,
3939 &request.data.report_event_configuration.buffer_length);
3940
3941 rc = pqi_map_single(ctrl_info->pci_dev,
3942 request.data.report_event_configuration.sg_descriptors,
3943 event_config, PQI_REPORT_EVENT_CONFIG_BUFFER_LENGTH,
3944 PCI_DMA_FROMDEVICE);
3945 if (rc)
3946 goto out;
3947
3948 rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header,
3949 0, NULL, NO_TIMEOUT);
3950
3951 pqi_pci_unmap(ctrl_info->pci_dev,
3952 request.data.report_event_configuration.sg_descriptors, 1,
3953 PCI_DMA_FROMDEVICE);
3954
3955 if (rc)
3956 goto out;
3957
3958 for (i = 0; i < event_config->num_event_descriptors; i++)
3959 put_unaligned_le16(ctrl_info->event_queue.oq_id,
3960 &event_config->descriptors[i].oq_id);
3961
3962 memset(&request, 0, sizeof(request));
3963
3964 request.header.iu_type = PQI_REQUEST_IU_SET_VENDOR_EVENT_CONFIG;
3965 put_unaligned_le16(offsetof(struct pqi_general_management_request,
3966 data.report_event_configuration.sg_descriptors[1]) -
3967 PQI_REQUEST_HEADER_LENGTH, &request.header.iu_length);
3968 put_unaligned_le32(PQI_REPORT_EVENT_CONFIG_BUFFER_LENGTH,
3969 &request.data.report_event_configuration.buffer_length);
3970
3971 rc = pqi_map_single(ctrl_info->pci_dev,
3972 request.data.report_event_configuration.sg_descriptors,
3973 event_config, PQI_REPORT_EVENT_CONFIG_BUFFER_LENGTH,
3974 PCI_DMA_TODEVICE);
3975 if (rc)
3976 goto out;
3977
3978 rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header, 0,
3979 NULL, NO_TIMEOUT);
3980
3981 pqi_pci_unmap(ctrl_info->pci_dev,
3982 request.data.report_event_configuration.sg_descriptors, 1,
3983 PCI_DMA_TODEVICE);
3984
3985out:
3986 kfree(event_config);
3987
3988 return rc;
3989}
3990
3991static void pqi_free_all_io_requests(struct pqi_ctrl_info *ctrl_info)
3992{
3993 unsigned int i;
3994 struct device *dev;
3995 size_t sg_chain_buffer_length;
3996 struct pqi_io_request *io_request;
3997
3998 if (!ctrl_info->io_request_pool)
3999 return;
4000
4001 dev = &ctrl_info->pci_dev->dev;
4002 sg_chain_buffer_length = ctrl_info->sg_chain_buffer_length;
4003 io_request = ctrl_info->io_request_pool;
4004
4005 for (i = 0; i < ctrl_info->max_io_slots; i++) {
4006 kfree(io_request->iu);
4007 if (!io_request->sg_chain_buffer)
4008 break;
4009 dma_free_coherent(dev, sg_chain_buffer_length,
4010 io_request->sg_chain_buffer,
4011 io_request->sg_chain_buffer_dma_handle);
4012 io_request++;
4013 }
4014
4015 kfree(ctrl_info->io_request_pool);
4016 ctrl_info->io_request_pool = NULL;
4017}
4018
4019static inline int pqi_alloc_error_buffer(struct pqi_ctrl_info *ctrl_info)
4020{
4021 ctrl_info->error_buffer = dma_zalloc_coherent(&ctrl_info->pci_dev->dev,
4022 ctrl_info->error_buffer_length,
4023 &ctrl_info->error_buffer_dma_handle, GFP_KERNEL);
4024
4025 if (!ctrl_info->error_buffer)
4026 return -ENOMEM;
4027
4028 return 0;
4029}
4030
4031static int pqi_alloc_io_resources(struct pqi_ctrl_info *ctrl_info)
4032{
4033 unsigned int i;
4034 void *sg_chain_buffer;
4035 size_t sg_chain_buffer_length;
4036 dma_addr_t sg_chain_buffer_dma_handle;
4037 struct device *dev;
4038 struct pqi_io_request *io_request;
4039
4040 ctrl_info->io_request_pool = kzalloc(ctrl_info->max_io_slots *
4041 sizeof(ctrl_info->io_request_pool[0]), GFP_KERNEL);
4042
4043 if (!ctrl_info->io_request_pool) {
4044 dev_err(&ctrl_info->pci_dev->dev,
4045 "failed to allocate I/O request pool\n");
4046 goto error;
4047 }
4048
4049 dev = &ctrl_info->pci_dev->dev;
4050 sg_chain_buffer_length = ctrl_info->sg_chain_buffer_length;
4051 io_request = ctrl_info->io_request_pool;
4052
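	/*
	 * Give every I/O slot its own IU buffer and a DMA-coherent
	 * scatter-gather chain buffer so that no allocations are needed
	 * in the I/O submission path.
	 */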
4053 for (i = 0; i < ctrl_info->max_io_slots; i++) {
4054 io_request->iu =
4055 kmalloc(ctrl_info->max_inbound_iu_length, GFP_KERNEL);
4056
4057 if (!io_request->iu) {
4058 dev_err(&ctrl_info->pci_dev->dev,
4059 "failed to allocate IU buffers\n");
4060 goto error;
4061 }
4062
4063 sg_chain_buffer = dma_alloc_coherent(dev,
4064 sg_chain_buffer_length, &sg_chain_buffer_dma_handle,
4065 GFP_KERNEL);
4066
4067 if (!sg_chain_buffer) {
4068 dev_err(&ctrl_info->pci_dev->dev,
4069 "failed to allocate PQI scatter-gather chain buffers\n");
4070 goto error;
4071 }
4072
4073 io_request->index = i;
4074 io_request->sg_chain_buffer = sg_chain_buffer;
4075 io_request->sg_chain_buffer_dma_handle =
4076 sg_chain_buffer_dma_handle;
4077 io_request++;
4078 }
4079
4080 return 0;
4081
4082error:
4083 pqi_free_all_io_requests(ctrl_info);
4084
4085 return -ENOMEM;
4086}
4087
4088/*
4089 * Calculate required resources that are sized based on max. outstanding
4090 * requests and max. transfer size.
4091 */
4092
4093static void pqi_calculate_io_resources(struct pqi_ctrl_info *ctrl_info)
4094{
4095 u32 max_transfer_size;
4096 u32 max_sg_entries;
4097
4098 ctrl_info->scsi_ml_can_queue =
4099 ctrl_info->max_outstanding_requests - PQI_RESERVED_IO_SLOTS;
4100 ctrl_info->max_io_slots = ctrl_info->max_outstanding_requests;
4101
4102 ctrl_info->error_buffer_length =
4103 ctrl_info->max_io_slots * PQI_ERROR_BUFFER_ELEMENT_LENGTH;
4104
4105 max_transfer_size =
4106 min(ctrl_info->max_transfer_size, PQI_MAX_TRANSFER_SIZE);
4107
4108 max_sg_entries = max_transfer_size / PAGE_SIZE;
4109
4110 /* +1 to cover when the buffer is not page-aligned. */
4111 max_sg_entries++;
4112
4113 max_sg_entries = min(ctrl_info->max_sg_entries, max_sg_entries);
4114
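	/* Recompute the transfer size limit from the clamped SG count. */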
4115 max_transfer_size = (max_sg_entries - 1) * PAGE_SIZE;
4116
4117 ctrl_info->sg_chain_buffer_length =
4118 max_sg_entries * sizeof(struct pqi_sg_descriptor);
4119 ctrl_info->sg_tablesize = max_sg_entries;
4120 ctrl_info->max_sectors = max_transfer_size / 512;
4121}
4122
4123static void pqi_calculate_queue_resources(struct pqi_ctrl_info *ctrl_info)
4124{
4125 int num_cpus;
4126 int max_queue_groups;
4127 int num_queue_groups;
4128 u16 num_elements_per_iq;
4129 u16 num_elements_per_oq;
4130
4131 max_queue_groups = min(ctrl_info->max_inbound_queues / 2,
4132 ctrl_info->max_outbound_queues - 1);
4133 max_queue_groups = min(max_queue_groups, PQI_MAX_QUEUE_GROUPS);
4134
4135 num_cpus = num_online_cpus();
4136 num_queue_groups = min(num_cpus, ctrl_info->max_msix_vectors);
4137 num_queue_groups = min(num_queue_groups, max_queue_groups);
4138
4139 ctrl_info->num_queue_groups = num_queue_groups;
4140
4141 if (ctrl_info->max_inbound_iu_length_per_firmware == 256 &&
4142 ctrl_info->outbound_spanning_supported) {
4143 /*
4144 * TEMPHACK
4145 * This is older f/w that doesn't actually support spanning.
4146 */
4147 ctrl_info->max_inbound_iu_length =
4148 PQI_OPERATIONAL_IQ_ELEMENT_LENGTH;
4149 } else {
4150 /*
4151 * Make sure that the max. inbound IU length is an even multiple
4152 * of our inbound element length.
4153 */
4154 ctrl_info->max_inbound_iu_length =
4155 (ctrl_info->max_inbound_iu_length_per_firmware /
4156 PQI_OPERATIONAL_IQ_ELEMENT_LENGTH) *
4157 PQI_OPERATIONAL_IQ_ELEMENT_LENGTH;
4158 }
4159
4160 num_elements_per_iq =
4161 (ctrl_info->max_inbound_iu_length /
4162 PQI_OPERATIONAL_IQ_ELEMENT_LENGTH);
4163
4164 /* Add one because one element in each queue is unusable. */
4165 num_elements_per_iq++;
4166
4167 num_elements_per_iq = min(num_elements_per_iq,
4168 ctrl_info->max_elements_per_iq);
4169
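	/*
	 * Each queue group has two inbound queues (RAID and AIO) completing
	 * to a single outbound queue, so size the OQ to hold the maximum
	 * number of responses from both (one element per queue is unusable).
	 */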
4170 num_elements_per_oq = ((num_elements_per_iq - 1) * 2) + 1;
4171 num_elements_per_oq = min(num_elements_per_oq,
4172 ctrl_info->max_elements_per_oq);
4173
4174 ctrl_info->num_elements_per_iq = num_elements_per_iq;
4175 ctrl_info->num_elements_per_oq = num_elements_per_oq;
4176
4177 ctrl_info->max_sg_per_iu =
4178 ((ctrl_info->max_inbound_iu_length -
4179 PQI_OPERATIONAL_IQ_ELEMENT_LENGTH) /
4180 sizeof(struct pqi_sg_descriptor)) +
4181 PQI_MAX_EMBEDDED_SG_DESCRIPTORS;
4182}
4183
4184static inline void pqi_set_sg_descriptor(
4185 struct pqi_sg_descriptor *sg_descriptor, struct scatterlist *sg)
4186{
4187 u64 address = (u64)sg_dma_address(sg);
4188 unsigned int length = sg_dma_len(sg);
4189
4190 put_unaligned_le64(address, &sg_descriptor->address);
4191 put_unaligned_le32(length, &sg_descriptor->length);
4192 put_unaligned_le32(0, &sg_descriptor->flags);
4193}
4194
4195static int pqi_build_raid_sg_list(struct pqi_ctrl_info *ctrl_info,
4196 struct pqi_raid_path_request *request, struct scsi_cmnd *scmd,
4197 struct pqi_io_request *io_request)
4198{
4199 int i;
4200 u16 iu_length;
4201 int sg_count;
4202 bool chained;
4203 unsigned int num_sg_in_iu;
4204 unsigned int max_sg_per_iu;
4205 struct scatterlist *sg;
4206 struct pqi_sg_descriptor *sg_descriptor;
4207
4208 sg_count = scsi_dma_map(scmd);
4209 if (sg_count < 0)
4210 return sg_count;
4211
4212 iu_length = offsetof(struct pqi_raid_path_request, sg_descriptors) -
4213 PQI_REQUEST_HEADER_LENGTH;
4214
4215 if (sg_count == 0)
4216 goto out;
4217
4218 sg = scsi_sglist(scmd);
4219 sg_descriptor = request->sg_descriptors;
4220 max_sg_per_iu = ctrl_info->max_sg_per_iu - 1;
4221 chained = false;
4222 num_sg_in_iu = 0;
4223 i = 0;
4224
4225 while (1) {
4226 pqi_set_sg_descriptor(sg_descriptor, sg);
4227 if (!chained)
4228 num_sg_in_iu++;
4229 i++;
4230 if (i == sg_count)
4231 break;
4232 sg_descriptor++;
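		/*
		 * If we have run out of embedded descriptors, turn this slot
		 * into a chain descriptor that points at the preallocated
		 * chain buffer and continue filling entries there.
		 */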
4233 if (i == max_sg_per_iu) {
4234 put_unaligned_le64(
4235 (u64)io_request->sg_chain_buffer_dma_handle,
4236 &sg_descriptor->address);
4237 put_unaligned_le32((sg_count - num_sg_in_iu)
4238 * sizeof(*sg_descriptor),
4239 &sg_descriptor->length);
4240 put_unaligned_le32(CISS_SG_CHAIN,
4241 &sg_descriptor->flags);
4242 chained = true;
4243 num_sg_in_iu++;
4244 sg_descriptor = io_request->sg_chain_buffer;
4245 }
4246 sg = sg_next(sg);
4247 }
4248
4249 put_unaligned_le32(CISS_SG_LAST, &sg_descriptor->flags);
4250 request->partial = chained;
4251 iu_length += num_sg_in_iu * sizeof(*sg_descriptor);
4252
4253out:
4254 put_unaligned_le16(iu_length, &request->header.iu_length);
4255
4256 return 0;
4257}
4258
4259static int pqi_build_aio_sg_list(struct pqi_ctrl_info *ctrl_info,
4260 struct pqi_aio_path_request *request, struct scsi_cmnd *scmd,
4261 struct pqi_io_request *io_request)
4262{
4263 int i;
4264 u16 iu_length;
4265 int sg_count;
4266 unsigned int num_sg_in_iu = 0;
4267 struct scatterlist *sg;
4268 struct pqi_sg_descriptor *sg_descriptor;
4269
4270 sg_count = scsi_dma_map(scmd);
4271 if (sg_count < 0)
4272 return sg_count;
4273 if (sg_count == 0)
4274 goto out;
4275
4276 if (sg_count <= ctrl_info->max_sg_per_iu) {
4277 sg_descriptor = &request->sg_descriptors[0];
4278 scsi_for_each_sg(scmd, sg, sg_count, i) {
4279 pqi_set_sg_descriptor(sg_descriptor, sg);
4280 sg_descriptor++;
4281 }
4282 put_unaligned_le32(CISS_SG_LAST,
4283 &request->sg_descriptors[sg_count - 1].flags);
4284 num_sg_in_iu = sg_count;
4285 } else {
4286 sg_descriptor = &request->sg_descriptors[0];
4287 put_unaligned_le64((u64)io_request->sg_chain_buffer_dma_handle,
4288 &sg_descriptor->address);
4289 put_unaligned_le32(sg_count * sizeof(*sg_descriptor),
4290 &sg_descriptor->length);
4291 put_unaligned_le32(CISS_SG_CHAIN, &sg_descriptor->flags);
4292
4293 sg_descriptor = io_request->sg_chain_buffer;
4294 scsi_for_each_sg(scmd, sg, sg_count, i) {
4295 pqi_set_sg_descriptor(sg_descriptor, sg);
4296 sg_descriptor++;
4297 }
4298 put_unaligned_le32(CISS_SG_LAST,
4299 &io_request->sg_chain_buffer[sg_count - 1].flags);
4300 num_sg_in_iu = 1;
4301 request->partial = 1;
4302 }
4303
4304out:
4305 iu_length = offsetof(struct pqi_aio_path_request, sg_descriptors) -
4306 PQI_REQUEST_HEADER_LENGTH;
4307 iu_length += num_sg_in_iu * sizeof(*sg_descriptor);
4308 put_unaligned_le16(iu_length, &request->header.iu_length);
4309 request->num_sg_descriptors = num_sg_in_iu;
4310
4311 return 0;
4312}
4313
4314static void pqi_raid_io_complete(struct pqi_io_request *io_request,
4315 void *context)
4316{
4317 struct scsi_cmnd *scmd;
4318
4319 scmd = io_request->scmd;
4320 pqi_free_io_request(io_request);
4321 scsi_dma_unmap(scmd);
4322 pqi_scsi_done(scmd);
4323}
4324
4325static int pqi_raid_submit_scsi_cmd(struct pqi_ctrl_info *ctrl_info,
4326 struct pqi_scsi_dev *device, struct scsi_cmnd *scmd,
4327 struct pqi_queue_group *queue_group)
4328{
4329 int rc;
4330 size_t cdb_length;
4331 struct pqi_io_request *io_request;
4332 struct pqi_raid_path_request *request;
4333
4334 io_request = pqi_alloc_io_request(ctrl_info);
4335 io_request->io_complete_callback = pqi_raid_io_complete;
4336 io_request->scmd = scmd;
4337
4338 scmd->host_scribble = (unsigned char *)io_request;
4339
4340 request = io_request->iu;
4341 memset(request, 0,
4342 offsetof(struct pqi_raid_path_request, sg_descriptors));
4343
4344 request->header.iu_type = PQI_REQUEST_IU_RAID_PATH_IO;
4345 put_unaligned_le32(scsi_bufflen(scmd), &request->buffer_length);
4346 request->task_attribute = SOP_TASK_ATTRIBUTE_SIMPLE;
4347 put_unaligned_le16(io_request->index, &request->request_id);
4348 request->error_index = request->request_id;
4349 memcpy(request->lun_number, device->scsi3addr,
4350 sizeof(request->lun_number));
4351
4352 cdb_length = min_t(size_t, scmd->cmd_len, sizeof(request->cdb));
4353 memcpy(request->cdb, scmd->cmnd, cdb_length);
4354
4355 switch (cdb_length) {
4356 case 6:
4357 case 10:
4358 case 12:
4359 case 16:
4360 /* No bytes in the Additional CDB bytes field */
4361 request->additional_cdb_bytes_usage =
4362 SOP_ADDITIONAL_CDB_BYTES_0;
4363 break;
4364 case 20:
4365		/* 4 bytes in the Additional CDB bytes field */
4366 request->additional_cdb_bytes_usage =
4367 SOP_ADDITIONAL_CDB_BYTES_4;
4368 break;
4369 case 24:
4370		/* 8 bytes in the Additional CDB bytes field */
4371 request->additional_cdb_bytes_usage =
4372 SOP_ADDITIONAL_CDB_BYTES_8;
4373 break;
4374 case 28:
4375		/* 12 bytes in the Additional CDB bytes field */
4376 request->additional_cdb_bytes_usage =
4377 SOP_ADDITIONAL_CDB_BYTES_12;
4378 break;
4379 case 32:
4380 default:
4381		/* 16 bytes in the Additional CDB bytes field */
4382 request->additional_cdb_bytes_usage =
4383 SOP_ADDITIONAL_CDB_BYTES_16;
4384 break;
4385 }
4386
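	/*
	 * The SOP data-direction flags describe the transfer from the
	 * controller's point of view, so a host-to-device transfer
	 * (DMA_TO_DEVICE) is a controller read and vice versa.
	 */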
4387 switch (scmd->sc_data_direction) {
4388 case DMA_TO_DEVICE:
4389 request->data_direction = SOP_READ_FLAG;
4390 break;
4391 case DMA_FROM_DEVICE:
4392 request->data_direction = SOP_WRITE_FLAG;
4393 break;
4394 case DMA_NONE:
4395 request->data_direction = SOP_NO_DIRECTION_FLAG;
4396 break;
4397 case DMA_BIDIRECTIONAL:
4398 request->data_direction = SOP_BIDIRECTIONAL;
4399 break;
4400 default:
4401 dev_err(&ctrl_info->pci_dev->dev,
4402 "unknown data direction: %d\n",
4403 scmd->sc_data_direction);
4404 WARN_ON(scmd->sc_data_direction);
4405 break;
4406 }
4407
4408 rc = pqi_build_raid_sg_list(ctrl_info, request, scmd, io_request);
4409 if (rc) {
4410 pqi_free_io_request(io_request);
4411 return SCSI_MLQUEUE_HOST_BUSY;
4412 }
4413
4414 pqi_start_io(ctrl_info, queue_group, RAID_PATH, io_request);
4415
4416 return 0;
4417}
4418
4419static void pqi_aio_io_complete(struct pqi_io_request *io_request,
4420 void *context)
4421{
4422 struct scsi_cmnd *scmd;
4423
4424 scmd = io_request->scmd;
4425 scsi_dma_unmap(scmd);
4426 if (io_request->status == -EAGAIN)
4427 set_host_byte(scmd, DID_IMM_RETRY);
4428 pqi_free_io_request(io_request);
4429 pqi_scsi_done(scmd);
4430}
4431
4432static inline int pqi_aio_submit_scsi_cmd(struct pqi_ctrl_info *ctrl_info,
4433 struct pqi_scsi_dev *device, struct scsi_cmnd *scmd,
4434 struct pqi_queue_group *queue_group)
4435{
4436 return pqi_aio_submit_io(ctrl_info, scmd, device->aio_handle,
4437 scmd->cmnd, scmd->cmd_len, queue_group, NULL);
4438}
4439
4440static int pqi_aio_submit_io(struct pqi_ctrl_info *ctrl_info,
4441 struct scsi_cmnd *scmd, u32 aio_handle, u8 *cdb,
4442 unsigned int cdb_length, struct pqi_queue_group *queue_group,
4443 struct pqi_encryption_info *encryption_info)
4444{
4445 int rc;
4446 struct pqi_io_request *io_request;
4447 struct pqi_aio_path_request *request;
4448
4449 io_request = pqi_alloc_io_request(ctrl_info);
4450 io_request->io_complete_callback = pqi_aio_io_complete;
4451 io_request->scmd = scmd;
4452
4453 scmd->host_scribble = (unsigned char *)io_request;
4454
4455 request = io_request->iu;
4456 memset(request, 0,
4457 offsetof(struct pqi_raid_path_request, sg_descriptors));
4458
4459 request->header.iu_type = PQI_REQUEST_IU_AIO_PATH_IO;
4460 put_unaligned_le32(aio_handle, &request->nexus_id);
4461 put_unaligned_le32(scsi_bufflen(scmd), &request->buffer_length);
4462 request->task_attribute = SOP_TASK_ATTRIBUTE_SIMPLE;
4463 put_unaligned_le16(io_request->index, &request->request_id);
4464 request->error_index = request->request_id;
4465 if (cdb_length > sizeof(request->cdb))
4466 cdb_length = sizeof(request->cdb);
4467 request->cdb_length = cdb_length;
4468 memcpy(request->cdb, cdb, cdb_length);
4469
4470 switch (scmd->sc_data_direction) {
4471 case DMA_TO_DEVICE:
4472 request->data_direction = SOP_READ_FLAG;
4473 break;
4474 case DMA_FROM_DEVICE:
4475 request->data_direction = SOP_WRITE_FLAG;
4476 break;
4477 case DMA_NONE:
4478 request->data_direction = SOP_NO_DIRECTION_FLAG;
4479 break;
4480 case DMA_BIDIRECTIONAL:
4481 request->data_direction = SOP_BIDIRECTIONAL;
4482 break;
4483 default:
4484 dev_err(&ctrl_info->pci_dev->dev,
4485 "unknown data direction: %d\n",
4486 scmd->sc_data_direction);
4487 WARN_ON(scmd->sc_data_direction);
4488 break;
4489 }
4490
4491 if (encryption_info) {
4492 request->encryption_enable = true;
4493 put_unaligned_le16(encryption_info->data_encryption_key_index,
4494 &request->data_encryption_key_index);
4495 put_unaligned_le32(encryption_info->encrypt_tweak_lower,
4496 &request->encrypt_tweak_lower);
4497 put_unaligned_le32(encryption_info->encrypt_tweak_upper,
4498 &request->encrypt_tweak_upper);
4499 }
4500
4501 rc = pqi_build_aio_sg_list(ctrl_info, request, scmd, io_request);
4502 if (rc) {
4503 pqi_free_io_request(io_request);
4504 return SCSI_MLQUEUE_HOST_BUSY;
4505 }
4506
4507 pqi_start_io(ctrl_info, queue_group, AIO_PATH, io_request);
4508
4509 return 0;
4510}
4511
4512static int pqi_scsi_queue_command(struct Scsi_Host *shost,
4513 struct scsi_cmnd *scmd)
4514{
4515 int rc;
4516 struct pqi_ctrl_info *ctrl_info;
4517 struct pqi_scsi_dev *device;
4518 u16 hwq;
4519 struct pqi_queue_group *queue_group;
4520 bool raid_bypassed;
4521
4522 device = scmd->device->hostdata;
4523
4524 if (device->reset_in_progress) {
4525 set_host_byte(scmd, DID_RESET);
4526 pqi_scsi_done(scmd);
4527 return 0;
4528 }
4529
4530 ctrl_info = shost_to_hba(shost);
4531
4532 if (pqi_ctrl_offline(ctrl_info)) {
4533 set_host_byte(scmd, DID_NO_CONNECT);
4534 pqi_scsi_done(scmd);
4535 return 0;
4536 }
4537
4538 hwq = blk_mq_unique_tag_to_hwq(blk_mq_unique_tag(scmd->request));
4539 if (hwq >= ctrl_info->num_queue_groups)
4540 hwq = 0;
4541
4542 queue_group = &ctrl_info->queue_groups[hwq];
4543
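	/*
	 * Logical volumes normally use the RAID path, but filesystem
	 * requests may be sent straight to the underlying device (RAID
	 * bypass) when offload is enabled; physical devices use the AIO
	 * path whenever it is enabled.
	 */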
4544 if (pqi_is_logical_device(device)) {
4545 raid_bypassed = false;
4546 if (device->offload_enabled &&
4547 scmd->request->cmd_type == REQ_TYPE_FS) {
4548 rc = pqi_raid_bypass_submit_scsi_cmd(ctrl_info, device,
4549 scmd, queue_group);
4550 if (rc == 0 ||
4551 rc == SCSI_MLQUEUE_HOST_BUSY ||
4552 rc == SAM_STAT_CHECK_CONDITION ||
4553 rc == SAM_STAT_RESERVATION_CONFLICT)
4554 raid_bypassed = true;
4555 }
4556 if (!raid_bypassed)
4557 rc = pqi_raid_submit_scsi_cmd(ctrl_info, device, scmd,
4558 queue_group);
4559 } else {
4560 if (device->aio_enabled)
4561 rc = pqi_aio_submit_scsi_cmd(ctrl_info, device, scmd,
4562 queue_group);
4563 else
4564 rc = pqi_raid_submit_scsi_cmd(ctrl_info, device, scmd,
4565 queue_group);
4566 }
4567
4568 return rc;
4569}
4570
4571static inline void pqi_complete_queued_requests_queue_group(
4572 struct pqi_queue_group *queue_group,
4573 struct pqi_scsi_dev *device_in_reset)
4574{
4575 unsigned int path;
4576 unsigned long flags;
4577 struct pqi_io_request *io_request;
4578 struct pqi_io_request *next;
4579 struct scsi_cmnd *scmd;
4580 struct pqi_scsi_dev *device;
4581
4582 for (path = 0; path < 2; path++) {
4583 spin_lock_irqsave(&queue_group->submit_lock[path], flags);
4584
4585 list_for_each_entry_safe(io_request, next,
4586 &queue_group->request_list[path],
4587 request_list_entry) {
4588 scmd = io_request->scmd;
4589 if (!scmd)
4590 continue;
4591 device = scmd->device->hostdata;
4592 if (device == device_in_reset) {
4593 set_host_byte(scmd, DID_RESET);
4594 pqi_scsi_done(scmd);
4595				list_del(&io_request->request_list_entry);
4597 }
4598 }
4599
4600 spin_unlock_irqrestore(&queue_group->submit_lock[path], flags);
4601 }
4602}
4603
4604static void pqi_complete_queued_requests(struct pqi_ctrl_info *ctrl_info,
4605 struct pqi_scsi_dev *device_in_reset)
4606{
4607 unsigned int i;
4608 struct pqi_queue_group *queue_group;
4609
4610 for (i = 0; i < ctrl_info->num_queue_groups; i++) {
4611 queue_group = &ctrl_info->queue_groups[i];
4612 pqi_complete_queued_requests_queue_group(queue_group,
4613 device_in_reset);
4614 }
4615}
4616
4617static void pqi_reset_lun_complete(struct pqi_io_request *io_request,
4618 void *context)
4619{
4620 struct completion *waiting = context;
4621
4622 complete(waiting);
4623}
4624
4625static int pqi_reset_lun(struct pqi_ctrl_info *ctrl_info,
4626 struct pqi_scsi_dev *device)
4627{
4628 int rc;
4629 struct pqi_io_request *io_request;
4630 DECLARE_COMPLETION_ONSTACK(wait);
4631 struct pqi_task_management_request *request;
4632
4633 down(&ctrl_info->lun_reset_sem);
4634
4635 io_request = pqi_alloc_io_request(ctrl_info);
4636 io_request->io_complete_callback = pqi_reset_lun_complete;
4637 io_request->context = &wait;
4638
4639 request = io_request->iu;
4640 memset(request, 0, sizeof(*request));
4641
4642 request->header.iu_type = PQI_REQUEST_IU_TASK_MANAGEMENT;
4643 put_unaligned_le16(sizeof(*request) - PQI_REQUEST_HEADER_LENGTH,
4644 &request->header.iu_length);
4645 put_unaligned_le16(io_request->index, &request->request_id);
4646 memcpy(request->lun_number, device->scsi3addr,
4647 sizeof(request->lun_number));
4648 request->task_management_function = SOP_TASK_MANAGEMENT_LUN_RESET;
4649
4650 pqi_start_io(ctrl_info,
4651 &ctrl_info->queue_groups[PQI_DEFAULT_QUEUE_GROUP], RAID_PATH,
4652 io_request);
4653
4654 if (!wait_for_completion_io_timeout(&wait,
4655 msecs_to_jiffies(PQI_ABORT_TIMEOUT_MSECS))) {
4656 rc = -ETIMEDOUT;
4657 } else {
4658 rc = io_request->status;
4659 }
4660
4661 pqi_free_io_request(io_request);
4662 up(&ctrl_info->lun_reset_sem);
4663
4664 return rc;
4665}
4666
4667/* Performs a reset at the LUN level. */
4668
4669static int pqi_device_reset(struct pqi_ctrl_info *ctrl_info,
4670 struct pqi_scsi_dev *device)
4671{
4672 int rc;
4673
4674 pqi_check_ctrl_health(ctrl_info);
4675 if (pqi_ctrl_offline(ctrl_info))
4676 return FAILED;
4677
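	/*
	 * Fence off new I/O to the device, fail back anything still queued
	 * for it with DID_RESET, then issue the LUN reset task management
	 * request.
	 */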
4678 device->reset_in_progress = true;
4679 pqi_complete_queued_requests(ctrl_info, device);
4680 rc = pqi_reset_lun(ctrl_info, device);
4681 device->reset_in_progress = false;
4682
4683 if (rc)
4684 return FAILED;
4685
4686 return SUCCESS;
4687}
4688
4689static int pqi_eh_device_reset_handler(struct scsi_cmnd *scmd)
4690{
4691 int rc;
4692 struct pqi_ctrl_info *ctrl_info;
4693 struct pqi_scsi_dev *device;
4694
4695 ctrl_info = shost_to_hba(scmd->device->host);
4696
4697 device = scmd->device->hostdata;
4698
4699 dev_err(&ctrl_info->pci_dev->dev,
4700 "resetting scsi %d:%d:%d:%d\n",
4701 ctrl_info->scsi_host->host_no,
4702 device->bus, device->target, device->lun);
4703
4704 rc = pqi_device_reset(ctrl_info, device);
4705
4706 dev_err(&ctrl_info->pci_dev->dev,
4707 "reset of scsi %d:%d:%d:%d: %s\n",
4708 ctrl_info->scsi_host->host_no,
4709 device->bus, device->target, device->lun,
4710 rc == SUCCESS ? "SUCCESS" : "FAILED");
4711
4712 return rc;
4713}
4714
4715static int pqi_slave_alloc(struct scsi_device *sdev)
4716{
4717 struct pqi_scsi_dev *device;
4718 unsigned long flags;
4719 struct pqi_ctrl_info *ctrl_info;
4720 struct scsi_target *starget;
4721 struct sas_rphy *rphy;
4722
4723 ctrl_info = shost_to_hba(sdev->host);
4724
4725 spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);
4726
4727 if (sdev_channel(sdev) == PQI_PHYSICAL_DEVICE_BUS) {
4728 starget = scsi_target(sdev);
4729 rphy = target_to_rphy(starget);
4730 device = pqi_find_device_by_sas_rphy(ctrl_info, rphy);
4731 if (device) {
4732 device->target = sdev_id(sdev);
4733 device->lun = sdev->lun;
4734 device->target_lun_valid = true;
4735 }
4736 } else {
4737 device = pqi_find_scsi_dev(ctrl_info, sdev_channel(sdev),
4738 sdev_id(sdev), sdev->lun);
4739 }
4740
4741 if (device && device->expose_device) {
4742 sdev->hostdata = device;
4743 device->sdev = sdev;
4744 if (device->queue_depth) {
4745 device->advertised_queue_depth = device->queue_depth;
4746 scsi_change_queue_depth(sdev,
4747 device->advertised_queue_depth);
4748 }
4749 }
4750
4751 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
4752
4753 return 0;
4754}
4755
4756static int pqi_slave_configure(struct scsi_device *sdev)
4757{
4758 struct pqi_scsi_dev *device;
4759
4760 device = sdev->hostdata;
4761 if (!device->expose_device)
4762 sdev->no_uld_attach = true;
4763
4764 return 0;
4765}
4766
4767static int pqi_getpciinfo_ioctl(struct pqi_ctrl_info *ctrl_info,
4768 void __user *arg)
4769{
4770 struct pci_dev *pci_dev;
4771 u32 subsystem_vendor;
4772 u32 subsystem_device;
4773 cciss_pci_info_struct pciinfo;
4774
4775 if (!arg)
4776 return -EINVAL;
4777
4778 pci_dev = ctrl_info->pci_dev;
4779
4780 pciinfo.domain = pci_domain_nr(pci_dev->bus);
4781 pciinfo.bus = pci_dev->bus->number;
4782 pciinfo.dev_fn = pci_dev->devfn;
4783 subsystem_vendor = pci_dev->subsystem_vendor;
4784 subsystem_device = pci_dev->subsystem_device;
4785 pciinfo.board_id = ((subsystem_device << 16) & 0xffff0000) |
4786 subsystem_vendor;
4787
4788 if (copy_to_user(arg, &pciinfo, sizeof(pciinfo)))
4789 return -EFAULT;
4790
4791 return 0;
4792}
4793
4794static int pqi_getdrivver_ioctl(void __user *arg)
4795{
4796 u32 version;
4797
4798 if (!arg)
4799 return -EINVAL;
4800
4801 version = (DRIVER_MAJOR << 28) | (DRIVER_MINOR << 24) |
4802 (DRIVER_RELEASE << 16) | DRIVER_REVISION;
4803
4804 if (copy_to_user(arg, &version, sizeof(version)))
4805 return -EFAULT;
4806
4807 return 0;
4808}
4809
4810struct ciss_error_info {
4811 u8 scsi_status;
4812 int command_status;
4813 size_t sense_data_length;
4814};
4815
4816static void pqi_error_info_to_ciss(struct pqi_raid_error_info *pqi_error_info,
4817 struct ciss_error_info *ciss_error_info)
4818{
4819 int ciss_cmd_status;
4820 size_t sense_data_length;
4821
4822 switch (pqi_error_info->data_out_result) {
4823 case PQI_DATA_IN_OUT_GOOD:
4824 ciss_cmd_status = CISS_CMD_STATUS_SUCCESS;
4825 break;
4826 case PQI_DATA_IN_OUT_UNDERFLOW:
4827 ciss_cmd_status = CISS_CMD_STATUS_DATA_UNDERRUN;
4828 break;
4829 case PQI_DATA_IN_OUT_BUFFER_OVERFLOW:
4830 ciss_cmd_status = CISS_CMD_STATUS_DATA_OVERRUN;
4831 break;
4832 case PQI_DATA_IN_OUT_PROTOCOL_ERROR:
4833 case PQI_DATA_IN_OUT_BUFFER_ERROR:
4834 case PQI_DATA_IN_OUT_BUFFER_OVERFLOW_DESCRIPTOR_AREA:
4835 case PQI_DATA_IN_OUT_BUFFER_OVERFLOW_BRIDGE:
4836 case PQI_DATA_IN_OUT_ERROR:
4837 ciss_cmd_status = CISS_CMD_STATUS_PROTOCOL_ERROR;
4838 break;
4839 case PQI_DATA_IN_OUT_HARDWARE_ERROR:
4840 case PQI_DATA_IN_OUT_PCIE_FABRIC_ERROR:
4841 case PQI_DATA_IN_OUT_PCIE_COMPLETION_TIMEOUT:
4842 case PQI_DATA_IN_OUT_PCIE_COMPLETER_ABORT_RECEIVED:
4843 case PQI_DATA_IN_OUT_PCIE_UNSUPPORTED_REQUEST_RECEIVED:
4844 case PQI_DATA_IN_OUT_PCIE_ECRC_CHECK_FAILED:
4845 case PQI_DATA_IN_OUT_PCIE_UNSUPPORTED_REQUEST:
4846 case PQI_DATA_IN_OUT_PCIE_ACS_VIOLATION:
4847 case PQI_DATA_IN_OUT_PCIE_TLP_PREFIX_BLOCKED:
4848 case PQI_DATA_IN_OUT_PCIE_POISONED_MEMORY_READ:
4849 ciss_cmd_status = CISS_CMD_STATUS_HARDWARE_ERROR;
4850 break;
4851 case PQI_DATA_IN_OUT_UNSOLICITED_ABORT:
4852 ciss_cmd_status = CISS_CMD_STATUS_UNSOLICITED_ABORT;
4853 break;
4854 case PQI_DATA_IN_OUT_ABORTED:
4855 ciss_cmd_status = CISS_CMD_STATUS_ABORTED;
4856 break;
4857 case PQI_DATA_IN_OUT_TIMEOUT:
4858 ciss_cmd_status = CISS_CMD_STATUS_TIMEOUT;
4859 break;
4860 default:
4861 ciss_cmd_status = CISS_CMD_STATUS_TARGET_STATUS;
4862 break;
4863 }
4864
4865 sense_data_length =
4866 get_unaligned_le16(&pqi_error_info->sense_data_length);
4867 if (sense_data_length == 0)
4868 sense_data_length =
4869 get_unaligned_le16(&pqi_error_info->response_data_length);
4870 if (sense_data_length)
4871 if (sense_data_length > sizeof(pqi_error_info->data))
4872 sense_data_length = sizeof(pqi_error_info->data);
4873
4874 ciss_error_info->scsi_status = pqi_error_info->status;
4875 ciss_error_info->command_status = ciss_cmd_status;
4876 ciss_error_info->sense_data_length = sense_data_length;
4877}
4878
4879static int pqi_passthru_ioctl(struct pqi_ctrl_info *ctrl_info, void __user *arg)
4880{
4881 int rc;
4882 char *kernel_buffer = NULL;
4883 u16 iu_length;
4884 size_t sense_data_length;
4885 IOCTL_Command_struct iocommand;
4886 struct pqi_raid_path_request request;
4887 struct pqi_raid_error_info pqi_error_info;
4888 struct ciss_error_info ciss_error_info;
4889
4890 if (pqi_ctrl_offline(ctrl_info))
4891 return -ENXIO;
4892 if (!arg)
4893 return -EINVAL;
4894 if (!capable(CAP_SYS_RAWIO))
4895 return -EPERM;
4896 if (copy_from_user(&iocommand, arg, sizeof(iocommand)))
4897 return -EFAULT;
4898 if (iocommand.buf_size < 1 &&
4899 iocommand.Request.Type.Direction != XFER_NONE)
4900 return -EINVAL;
4901 if (iocommand.Request.CDBLen > sizeof(request.cdb))
4902 return -EINVAL;
4903 if (iocommand.Request.Type.Type != TYPE_CMD)
4904 return -EINVAL;
4905
4906 switch (iocommand.Request.Type.Direction) {
4907 case XFER_NONE:
4908 case XFER_WRITE:
4909 case XFER_READ:
4910 break;
4911 default:
4912 return -EINVAL;
4913 }
4914
4915 if (iocommand.buf_size > 0) {
4916 kernel_buffer = kmalloc(iocommand.buf_size, GFP_KERNEL);
4917 if (!kernel_buffer)
4918 return -ENOMEM;
4919 if (iocommand.Request.Type.Direction & XFER_WRITE) {
4920 if (copy_from_user(kernel_buffer, iocommand.buf,
4921 iocommand.buf_size)) {
4922 rc = -EFAULT;
4923 goto out;
4924 }
4925 } else {
4926 memset(kernel_buffer, 0, iocommand.buf_size);
4927 }
4928 }
4929
4930 memset(&request, 0, sizeof(request));
4931
4932 request.header.iu_type = PQI_REQUEST_IU_RAID_PATH_IO;
4933 iu_length = offsetof(struct pqi_raid_path_request, sg_descriptors) -
4934 PQI_REQUEST_HEADER_LENGTH;
4935 memcpy(request.lun_number, iocommand.LUN_info.LunAddrBytes,
4936 sizeof(request.lun_number));
4937 memcpy(request.cdb, iocommand.Request.CDB, iocommand.Request.CDBLen);
4938 request.additional_cdb_bytes_usage = SOP_ADDITIONAL_CDB_BYTES_0;
4939
4940 switch (iocommand.Request.Type.Direction) {
4941 case XFER_NONE:
4942 request.data_direction = SOP_NO_DIRECTION_FLAG;
4943 break;
4944 case XFER_WRITE:
4945 request.data_direction = SOP_WRITE_FLAG;
4946 break;
4947 case XFER_READ:
4948 request.data_direction = SOP_READ_FLAG;
4949 break;
4950 }
4951
4952 request.task_attribute = SOP_TASK_ATTRIBUTE_SIMPLE;
4953
4954 if (iocommand.buf_size > 0) {
4955 put_unaligned_le32(iocommand.buf_size, &request.buffer_length);
4956
4957 rc = pqi_map_single(ctrl_info->pci_dev,
4958 &request.sg_descriptors[0], kernel_buffer,
4959 iocommand.buf_size, PCI_DMA_BIDIRECTIONAL);
4960 if (rc)
4961 goto out;
4962
4963 iu_length += sizeof(request.sg_descriptors[0]);
4964 }
4965
4966 put_unaligned_le16(iu_length, &request.header.iu_length);
4967
4968 rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header,
4969 PQI_SYNC_FLAGS_INTERRUPTABLE, &pqi_error_info, NO_TIMEOUT);
4970
4971 if (iocommand.buf_size > 0)
4972 pqi_pci_unmap(ctrl_info->pci_dev, request.sg_descriptors, 1,
4973 PCI_DMA_BIDIRECTIONAL);
4974
4975 memset(&iocommand.error_info, 0, sizeof(iocommand.error_info));
4976
4977 if (rc == 0) {
4978 pqi_error_info_to_ciss(&pqi_error_info, &ciss_error_info);
4979 iocommand.error_info.ScsiStatus = ciss_error_info.scsi_status;
4980 iocommand.error_info.CommandStatus =
4981 ciss_error_info.command_status;
4982 sense_data_length = ciss_error_info.sense_data_length;
4983 if (sense_data_length) {
4984 if (sense_data_length >
4985 sizeof(iocommand.error_info.SenseInfo))
4986 sense_data_length =
4987 sizeof(iocommand.error_info.SenseInfo);
4988 memcpy(iocommand.error_info.SenseInfo,
4989 pqi_error_info.data, sense_data_length);
4990 iocommand.error_info.SenseLen = sense_data_length;
4991 }
4992 }
4993
4994 if (copy_to_user(arg, &iocommand, sizeof(iocommand))) {
4995 rc = -EFAULT;
4996 goto out;
4997 }
4998
4999 if (rc == 0 && iocommand.buf_size > 0 &&
5000 (iocommand.Request.Type.Direction & XFER_READ)) {
5001 if (copy_to_user(iocommand.buf, kernel_buffer,
5002 iocommand.buf_size)) {
5003 rc = -EFAULT;
5004 }
5005 }
5006
5007out:
5008 kfree(kernel_buffer);
5009
5010 return rc;
5011}
5012
5013static int pqi_ioctl(struct scsi_device *sdev, int cmd, void __user *arg)
5014{
5015 int rc;
5016 struct pqi_ctrl_info *ctrl_info;
5017
5018 ctrl_info = shost_to_hba(sdev->host);
5019
5020 switch (cmd) {
5021 case CCISS_DEREGDISK:
5022 case CCISS_REGNEWDISK:
5023 case CCISS_REGNEWD:
5024 rc = pqi_scan_scsi_devices(ctrl_info);
5025 break;
5026 case CCISS_GETPCIINFO:
5027 rc = pqi_getpciinfo_ioctl(ctrl_info, arg);
5028 break;
5029 case CCISS_GETDRIVVER:
5030 rc = pqi_getdrivver_ioctl(arg);
5031 break;
5032 case CCISS_PASSTHRU:
5033 rc = pqi_passthru_ioctl(ctrl_info, arg);
5034 break;
5035 default:
5036 rc = -EINVAL;
5037 break;
5038 }
5039
5040 return rc;
5041}
5042
5043static ssize_t pqi_version_show(struct device *dev,
5044 struct device_attribute *attr, char *buffer)
5045{
5046 ssize_t count = 0;
5047 struct Scsi_Host *shost;
5048 struct pqi_ctrl_info *ctrl_info;
5049
5050 shost = class_to_shost(dev);
5051 ctrl_info = shost_to_hba(shost);
5052
5053 count += snprintf(buffer + count, PAGE_SIZE - count,
5054 " driver: %s\n", DRIVER_VERSION BUILD_TIMESTAMP);
5055
5056 count += snprintf(buffer + count, PAGE_SIZE - count,
5057 "firmware: %s\n", ctrl_info->firmware_version);
5058
5059 return count;
5060}
5061
5062static ssize_t pqi_host_rescan_store(struct device *dev,
5063 struct device_attribute *attr, const char *buffer, size_t count)
5064{
5065 struct Scsi_Host *shost = class_to_shost(dev);
5066
5067 pqi_scan_start(shost);
5068
5069 return count;
5070}
5071
5072static DEVICE_ATTR(version, S_IRUGO, pqi_version_show, NULL);
5073static DEVICE_ATTR(rescan, S_IWUSR, NULL, pqi_host_rescan_store);
5074
5075static struct device_attribute *pqi_shost_attrs[] = {
5076 &dev_attr_version,
5077 &dev_attr_rescan,
5078 NULL
5079};
5080
5081static ssize_t pqi_sas_address_show(struct device *dev,
5082 struct device_attribute *attr, char *buffer)
5083{
5084 struct pqi_ctrl_info *ctrl_info;
5085 struct scsi_device *sdev;
5086 struct pqi_scsi_dev *device;
5087 unsigned long flags;
5088 u64 sas_address;
5089
5090 sdev = to_scsi_device(dev);
5091 ctrl_info = shost_to_hba(sdev->host);
5092
5093 spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);
5094
5095 device = sdev->hostdata;
5096 if (pqi_is_logical_device(device)) {
5097 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock,
5098 flags);
5099 return -ENODEV;
5100 }
5101 sas_address = device->sas_address;
5102
5103 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
5104
5105 return snprintf(buffer, PAGE_SIZE, "0x%016llx\n", sas_address);
5106}
5107
5108static ssize_t pqi_ssd_smart_path_enabled_show(struct device *dev,
5109 struct device_attribute *attr, char *buffer)
5110{
5111 struct pqi_ctrl_info *ctrl_info;
5112 struct scsi_device *sdev;
5113 struct pqi_scsi_dev *device;
5114 unsigned long flags;
5115
5116 sdev = to_scsi_device(dev);
5117 ctrl_info = shost_to_hba(sdev->host);
5118
5119 spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);
5120
5121 device = sdev->hostdata;
5122 buffer[0] = device->offload_enabled ? '1' : '0';
5123 buffer[1] = '\n';
5124 buffer[2] = '\0';
5125
5126 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
5127
5128 return 2;
5129}
5130
5131static DEVICE_ATTR(sas_address, S_IRUGO, pqi_sas_address_show, NULL);
5132static DEVICE_ATTR(ssd_smart_path_enabled, S_IRUGO,
5133 pqi_ssd_smart_path_enabled_show, NULL);
5134
5135static struct device_attribute *pqi_sdev_attrs[] = {
5136 &dev_attr_sas_address,
5137 &dev_attr_ssd_smart_path_enabled,
5138 NULL
5139};
5140
5141static struct scsi_host_template pqi_driver_template = {
5142 .module = THIS_MODULE,
5143 .name = DRIVER_NAME_SHORT,
5144 .proc_name = DRIVER_NAME_SHORT,
5145 .queuecommand = pqi_scsi_queue_command,
5146 .scan_start = pqi_scan_start,
5147 .scan_finished = pqi_scan_finished,
5148 .this_id = -1,
5149 .use_clustering = ENABLE_CLUSTERING,
5150 .eh_device_reset_handler = pqi_eh_device_reset_handler,
5151 .ioctl = pqi_ioctl,
5152 .slave_alloc = pqi_slave_alloc,
5153 .slave_configure = pqi_slave_configure,
5154 .sdev_attrs = pqi_sdev_attrs,
5155 .shost_attrs = pqi_shost_attrs,
5156};
5157
5158static int pqi_register_scsi(struct pqi_ctrl_info *ctrl_info)
5159{
5160 int rc;
5161 struct Scsi_Host *shost;
5162
5163 shost = scsi_host_alloc(&pqi_driver_template, sizeof(ctrl_info));
5164 if (!shost) {
5165 dev_err(&ctrl_info->pci_dev->dev,
5166 "scsi_host_alloc failed for controller %u\n",
5167 ctrl_info->ctrl_id);
5168 return -ENOMEM;
5169 }
5170
5171 shost->io_port = 0;
5172 shost->n_io_port = 0;
5173 shost->this_id = -1;
5174 shost->max_channel = PQI_MAX_BUS;
5175 shost->max_cmd_len = MAX_COMMAND_SIZE;
5176 shost->max_lun = ~0;
5177 shost->max_id = ~0;
5178 shost->max_sectors = ctrl_info->max_sectors;
5179 shost->can_queue = ctrl_info->scsi_ml_can_queue;
5180 shost->cmd_per_lun = shost->can_queue;
5181 shost->sg_tablesize = ctrl_info->sg_tablesize;
5182 shost->transportt = pqi_sas_transport_template;
5183 shost->irq = ctrl_info->msix_vectors[0];
5184 shost->unique_id = shost->irq;
5185 shost->nr_hw_queues = ctrl_info->num_queue_groups;
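	/*
	 * Stash the ctrl_info pointer in the host private data;
	 * shost_to_hba() retrieves it.
	 */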
5186 shost->hostdata[0] = (unsigned long)ctrl_info;
5187
5188 rc = scsi_add_host(shost, &ctrl_info->pci_dev->dev);
5189 if (rc) {
5190 dev_err(&ctrl_info->pci_dev->dev,
5191 "scsi_add_host failed for controller %u\n",
5192 ctrl_info->ctrl_id);
5193 goto free_host;
5194 }
5195
5196 rc = pqi_add_sas_host(shost, ctrl_info);
5197 if (rc) {
5198 dev_err(&ctrl_info->pci_dev->dev,
5199 "add SAS host failed for controller %u\n",
5200 ctrl_info->ctrl_id);
5201 goto remove_host;
5202 }
5203
5204 ctrl_info->scsi_host = shost;
5205
5206 return 0;
5207
5208remove_host:
5209 scsi_remove_host(shost);
5210free_host:
5211 scsi_host_put(shost);
5212
5213 return rc;
5214}
5215
5216static void pqi_unregister_scsi(struct pqi_ctrl_info *ctrl_info)
5217{
5218 struct Scsi_Host *shost;
5219
5220 pqi_delete_sas_host(ctrl_info);
5221
5222 shost = ctrl_info->scsi_host;
5223 if (!shost)
5224 return;
5225
5226 scsi_remove_host(shost);
5227 scsi_host_put(shost);
5228}
5229
5230#define PQI_RESET_ACTION_RESET 0x1
5231
5232#define PQI_RESET_TYPE_NO_RESET 0x0
5233#define PQI_RESET_TYPE_SOFT_RESET 0x1
5234#define PQI_RESET_TYPE_FIRM_RESET 0x2
5235#define PQI_RESET_TYPE_HARD_RESET 0x3
5236
5237static int pqi_reset(struct pqi_ctrl_info *ctrl_info)
5238{
5239 int rc;
5240 u32 reset_params;
5241
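	/*
	 * The reset action occupies the upper bit-field of the reset
	 * register (hence the shift); the reset type occupies the
	 * low-order bits.
	 */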
5242 reset_params = (PQI_RESET_ACTION_RESET << 5) |
5243 PQI_RESET_TYPE_HARD_RESET;
5244
5245 writel(reset_params,
5246 &ctrl_info->pqi_registers->device_reset);
5247
5248 rc = pqi_wait_for_pqi_mode_ready(ctrl_info);
5249 if (rc)
5250 dev_err(&ctrl_info->pci_dev->dev,
5251 "PQI reset failed\n");
5252
5253 return rc;
5254}
5255
5256static int pqi_get_ctrl_firmware_version(struct pqi_ctrl_info *ctrl_info)
5257{
5258 int rc;
5259 struct bmic_identify_controller *identify;
5260
5261 identify = kmalloc(sizeof(*identify), GFP_KERNEL);
5262 if (!identify)
5263 return -ENOMEM;
5264
5265 rc = pqi_identify_controller(ctrl_info, identify);
5266 if (rc)
5267 goto out;
5268
5269 memcpy(ctrl_info->firmware_version, identify->firmware_version,
5270 sizeof(identify->firmware_version));
5271 ctrl_info->firmware_version[sizeof(identify->firmware_version)] = '\0';
5272	snprintf(ctrl_info->firmware_version +
5273		strlen(ctrl_info->firmware_version),
5274		sizeof(ctrl_info->firmware_version) - strlen(ctrl_info->firmware_version),
5275		"-%u", get_unaligned_le16(&identify->firmware_build_number));
5276
5277out:
5278 kfree(identify);
5279
5280 return rc;
5281}
5282
5283static int pqi_ctrl_init(struct pqi_ctrl_info *ctrl_info)
5284{
5285 int rc;
5286
5287 /*
5288 * When the controller comes out of reset, it is always running
5289 * in legacy SIS mode. This is so that it can be compatible
5290 * with legacy drivers shipped with OSes. So we have to talk
5291	 * to it using SIS commands at first. Once we are satisfied
5292 * that the controller supports PQI, we transition it into PQI
5293 * mode.
5294 */
5295
5296 /*
5297 * Wait until the controller is ready to start accepting SIS
5298 * commands.
5299 */
5300 rc = sis_wait_for_ctrl_ready(ctrl_info);
5301 if (rc) {
5302 dev_err(&ctrl_info->pci_dev->dev,
5303 "error initializing SIS interface\n");
5304 return rc;
5305 }
5306
5307 /*
5308 * Get the controller properties. This allows us to determine
5309 * whether or not it supports PQI mode.
5310 */
5311 rc = sis_get_ctrl_properties(ctrl_info);
5312 if (rc) {
5313 dev_err(&ctrl_info->pci_dev->dev,
5314 "error obtaining controller properties\n");
5315 return rc;
5316 }
5317
5318 rc = sis_get_pqi_capabilities(ctrl_info);
5319 if (rc) {
5320 dev_err(&ctrl_info->pci_dev->dev,
5321 "error obtaining controller capabilities\n");
5322 return rc;
5323 }
5324
5325 if (ctrl_info->max_outstanding_requests > PQI_MAX_OUTSTANDING_REQUESTS)
5326 ctrl_info->max_outstanding_requests =
5327 PQI_MAX_OUTSTANDING_REQUESTS;
5328
5329 pqi_calculate_io_resources(ctrl_info);
5330
5331 rc = pqi_alloc_error_buffer(ctrl_info);
5332 if (rc) {
5333 dev_err(&ctrl_info->pci_dev->dev,
5334 "failed to allocate PQI error buffer\n");
5335 return rc;
5336 }
5337
5338 /*
5339 * If the function we are about to call succeeds, the
5340 * controller will transition from legacy SIS mode
5341 * into PQI mode.
5342 */
5343 rc = sis_init_base_struct_addr(ctrl_info);
5344 if (rc) {
5345 dev_err(&ctrl_info->pci_dev->dev,
5346 "error initializing PQI mode\n");
5347 return rc;
5348 }
5349
5350 /* Wait for the controller to complete the SIS -> PQI transition. */
5351 rc = pqi_wait_for_pqi_mode_ready(ctrl_info);
5352 if (rc) {
5353 dev_err(&ctrl_info->pci_dev->dev,
5354 "transition to PQI mode failed\n");
5355 return rc;
5356 }
5357
5358 /* From here on, we are running in PQI mode. */
5359 ctrl_info->pqi_mode_enabled = true;
5360
5361 rc = pqi_alloc_admin_queues(ctrl_info);
5362 if (rc) {
5363 dev_err(&ctrl_info->pci_dev->dev,
5364 "error allocating admin queues\n");
5365 return rc;
5366 }
5367
5368 rc = pqi_create_admin_queues(ctrl_info);
5369 if (rc) {
5370 dev_err(&ctrl_info->pci_dev->dev,
5371 "error creating admin queues\n");
5372 return rc;
5373 }
5374
5375 rc = pqi_report_device_capability(ctrl_info);
5376 if (rc) {
5377 dev_err(&ctrl_info->pci_dev->dev,
5378 "obtaining device capability failed\n");
5379 return rc;
5380 }
5381
5382 rc = pqi_validate_device_capability(ctrl_info);
5383 if (rc)
5384 return rc;
5385
5386 pqi_calculate_queue_resources(ctrl_info);
5387
5388 rc = pqi_enable_msix_interrupts(ctrl_info);
5389 if (rc)
5390 return rc;
5391
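	/*
	 * If fewer MSI-X vectors were granted than there are queue groups,
	 * shrink the vector budget and recompute the queue layout to match.
	 */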
5392 if (ctrl_info->num_msix_vectors_enabled < ctrl_info->num_queue_groups) {
5393 ctrl_info->max_msix_vectors =
5394 ctrl_info->num_msix_vectors_enabled;
5395 pqi_calculate_queue_resources(ctrl_info);
5396 }
5397
5398 rc = pqi_alloc_io_resources(ctrl_info);
5399 if (rc)
5400 return rc;
5401
5402 rc = pqi_alloc_operational_queues(ctrl_info);
5403 if (rc)
5404 return rc;
5405
5406 pqi_init_operational_queues(ctrl_info);
5407
5408 rc = pqi_request_irqs(ctrl_info);
5409 if (rc)
5410 return rc;
5411
5412 pqi_irq_set_affinity_hint(ctrl_info);
5413
5414 rc = pqi_create_queues(ctrl_info);
5415 if (rc)
5416 return rc;
5417
5418 sis_enable_msix(ctrl_info);
5419
5420 rc = pqi_configure_events(ctrl_info);
5421 if (rc) {
5422 dev_err(&ctrl_info->pci_dev->dev,
5423 "error configuring events\n");
5424 return rc;
5425 }
5426
5427 pqi_start_heartbeat_timer(ctrl_info);
5428
5429 ctrl_info->controller_online = true;
5430
5431 /* Register with the SCSI subsystem. */
5432 rc = pqi_register_scsi(ctrl_info);
5433 if (rc)
5434 return rc;
5435
5436 rc = pqi_get_ctrl_firmware_version(ctrl_info);
5437 if (rc) {
5438 dev_err(&ctrl_info->pci_dev->dev,
5439 "error obtaining firmware version\n");
5440 return rc;
5441 }
5442
5443 rc = pqi_write_driver_version_to_host_wellness(ctrl_info);
5444 if (rc) {
5445 dev_err(&ctrl_info->pci_dev->dev,
5446 "error updating host wellness\n");
5447 return rc;
5448 }
5449
5450 pqi_schedule_update_time_worker(ctrl_info);
5451
5452 pqi_scan_scsi_devices(ctrl_info);
5453
5454 return 0;
5455}
5456
5457static int pqi_pci_init(struct pqi_ctrl_info *ctrl_info)
5458{
5459 int rc;
5460 u64 mask;
5461
5462 rc = pci_enable_device(ctrl_info->pci_dev);
5463 if (rc) {
5464 dev_err(&ctrl_info->pci_dev->dev,
5465 "failed to enable PCI device\n");
5466 return rc;
5467 }
5468
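	/* Use 64-bit DMA addressing if dma_addr_t is wide enough. */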
5469 if (sizeof(dma_addr_t) > 4)
5470 mask = DMA_BIT_MASK(64);
5471 else
5472 mask = DMA_BIT_MASK(32);
5473
5474 rc = dma_set_mask(&ctrl_info->pci_dev->dev, mask);
5475 if (rc) {
5476 dev_err(&ctrl_info->pci_dev->dev, "failed to set DMA mask\n");
5477 goto disable_device;
5478 }
5479
5480 rc = pci_request_regions(ctrl_info->pci_dev, DRIVER_NAME_SHORT);
5481 if (rc) {
5482 dev_err(&ctrl_info->pci_dev->dev,
5483 "failed to obtain PCI resources\n");
5484 goto disable_device;
5485 }
5486
5487 ctrl_info->iomem_base = ioremap_nocache(pci_resource_start(
5488 ctrl_info->pci_dev, 0),
5489 sizeof(struct pqi_ctrl_registers));
5490 if (!ctrl_info->iomem_base) {
5491 dev_err(&ctrl_info->pci_dev->dev,
5492 "failed to map memory for controller registers\n");
5493 rc = -ENOMEM;
5494 goto release_regions;
5495 }
5496
5497 ctrl_info->registers = ctrl_info->iomem_base;
5498 ctrl_info->pqi_registers = &ctrl_info->registers->pqi_registers;
5499
5500 /* Enable bus mastering. */
5501 pci_set_master(ctrl_info->pci_dev);
5502
5503 pci_set_drvdata(ctrl_info->pci_dev, ctrl_info);
5504
5505 return 0;
5506
5507release_regions:
5508 pci_release_regions(ctrl_info->pci_dev);
5509disable_device:
5510 pci_disable_device(ctrl_info->pci_dev);
5511
5512 return rc;
5513}
5514
5515static void pqi_cleanup_pci_init(struct pqi_ctrl_info *ctrl_info)
5516{
5517 iounmap(ctrl_info->iomem_base);
5518 pci_release_regions(ctrl_info->pci_dev);
5519 pci_disable_device(ctrl_info->pci_dev);
5520 pci_set_drvdata(ctrl_info->pci_dev, NULL);
5521}
5522
5523static struct pqi_ctrl_info *pqi_alloc_ctrl_info(int numa_node)
5524{
5525 struct pqi_ctrl_info *ctrl_info;
5526
5527 ctrl_info = kzalloc_node(sizeof(struct pqi_ctrl_info),
5528 GFP_KERNEL, numa_node);
5529 if (!ctrl_info)
5530 return NULL;
5531
5532 mutex_init(&ctrl_info->scan_mutex);
5533
5534 INIT_LIST_HEAD(&ctrl_info->scsi_device_list);
5535 spin_lock_init(&ctrl_info->scsi_device_list_lock);
5536
5537 INIT_WORK(&ctrl_info->event_work, pqi_event_worker);
5538 atomic_set(&ctrl_info->num_interrupts, 0);
5539
5540 INIT_DELAYED_WORK(&ctrl_info->rescan_work, pqi_rescan_worker);
5541 INIT_DELAYED_WORK(&ctrl_info->update_time_work, pqi_update_time_worker);
5542
5543 sema_init(&ctrl_info->sync_request_sem,
5544 PQI_RESERVED_IO_SLOTS_SYNCHRONOUS_REQUESTS);
5545 sema_init(&ctrl_info->lun_reset_sem, PQI_RESERVED_IO_SLOTS_LUN_RESET);
5546
5547 ctrl_info->ctrl_id = atomic_inc_return(&pqi_controller_count) - 1;
5548 ctrl_info->max_msix_vectors = PQI_MAX_MSIX_VECTORS;
5549
5550 return ctrl_info;
5551}
5552
5553static inline void pqi_free_ctrl_info(struct pqi_ctrl_info *ctrl_info)
5554{
5555 kfree(ctrl_info);
5556}
5557
5558static void pqi_free_interrupts(struct pqi_ctrl_info *ctrl_info)
5559{
5560 pqi_irq_unset_affinity_hint(ctrl_info);
5561 pqi_free_irqs(ctrl_info);
5562 if (ctrl_info->num_msix_vectors_enabled)
5563 pci_disable_msix(ctrl_info->pci_dev);
5564}
5565
5566static void pqi_free_ctrl_resources(struct pqi_ctrl_info *ctrl_info)
5567{
5568 pqi_stop_heartbeat_timer(ctrl_info);
5569 pqi_free_interrupts(ctrl_info);
5570 if (ctrl_info->queue_memory_base)
5571 dma_free_coherent(&ctrl_info->pci_dev->dev,
5572 ctrl_info->queue_memory_length,
5573 ctrl_info->queue_memory_base,
5574 ctrl_info->queue_memory_base_dma_handle);
5575 if (ctrl_info->admin_queue_memory_base)
5576 dma_free_coherent(&ctrl_info->pci_dev->dev,
5577 ctrl_info->admin_queue_memory_length,
5578 ctrl_info->admin_queue_memory_base,
5579 ctrl_info->admin_queue_memory_base_dma_handle);
5580 pqi_free_all_io_requests(ctrl_info);
5581 if (ctrl_info->error_buffer)
5582 dma_free_coherent(&ctrl_info->pci_dev->dev,
5583 ctrl_info->error_buffer_length,
5584 ctrl_info->error_buffer,
5585 ctrl_info->error_buffer_dma_handle);
5586 if (ctrl_info->iomem_base)
5587 pqi_cleanup_pci_init(ctrl_info);
5588 pqi_free_ctrl_info(ctrl_info);
5589}
5590
5591static void pqi_remove_ctrl(struct pqi_ctrl_info *ctrl_info)
5592{
5593 int rc;
5594
5595 if (ctrl_info->controller_online) {
5596 cancel_delayed_work_sync(&ctrl_info->rescan_work);
5597 cancel_delayed_work_sync(&ctrl_info->update_time_work);
5598 pqi_remove_all_scsi_devices(ctrl_info);
5599 pqi_unregister_scsi(ctrl_info);
5600 ctrl_info->controller_online = false;
5601 }
5602 if (ctrl_info->pqi_mode_enabled) {
5603 sis_disable_msix(ctrl_info);
5604 rc = pqi_reset(ctrl_info);
5605 if (rc == 0)
5606 sis_reenable_sis_mode(ctrl_info);
5607 }
5608 pqi_free_ctrl_resources(ctrl_info);
5609}
5610
5611static void pqi_print_ctrl_info(struct pci_dev *pdev,
5612 const struct pci_device_id *id)
5613{
5614 char *ctrl_description;
5615
5616 if (id->driver_data) {
5617 ctrl_description = (char *)id->driver_data;
5618 } else {
5619 switch (id->subvendor) {
5620 case PCI_VENDOR_ID_HP:
5621 ctrl_description = hpe_branded_controller;
5622 break;
5623 case PCI_VENDOR_ID_ADAPTEC2:
5624 default:
5625 ctrl_description = microsemi_branded_controller;
5626 break;
5627 }
5628 }
5629
5630 dev_info(&pdev->dev, "%s found\n", ctrl_description);
5631}
5632
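/*
 * PCI probe entry point. Optionally rejects wildcard ID matches, pins the
 * device to NUMA node 0 if no node is known, allocates the controller
 * context, and runs PCI and controller initialization. Any failure is
 * funneled through pqi_remove_ctrl(), which can unwind a partially
 * completed initialization.
 */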
5633static int pqi_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
5634{
5635 int rc;
5636 int node;
5637 struct pqi_ctrl_info *ctrl_info;
5638
5639 pqi_print_ctrl_info(pdev, id);
5640
5641 if (pqi_disable_device_id_wildcards &&
5642 id->subvendor == PCI_ANY_ID &&
5643 id->subdevice == PCI_ANY_ID) {
5644 dev_warn(&pdev->dev,
5645 "controller not probed because device ID wildcards are disabled\n");
5646 return -ENODEV;
5647 }
5648
5649 if (id->subvendor == PCI_ANY_ID || id->subdevice == PCI_ANY_ID)
5650 dev_warn(&pdev->dev,
5651 "controller device ID matched using wildcards\n");
5652
5653 node = dev_to_node(&pdev->dev);
5654 if (node == NUMA_NO_NODE)
5655 set_dev_node(&pdev->dev, 0);
5656
5657 ctrl_info = pqi_alloc_ctrl_info(node);
5658 if (!ctrl_info) {
5659 dev_err(&pdev->dev,
5660 "failed to allocate controller info block\n");
5661 return -ENOMEM;
5662 }
5663
5664 ctrl_info->pci_dev = pdev;
5665
5666 rc = pqi_pci_init(ctrl_info);
5667 if (rc)
5668 goto error;
5669
5670 rc = pqi_ctrl_init(ctrl_info);
5671 if (rc)
5672 goto error;
5673
5674 return 0;
5675
5676error:
5677 pqi_remove_ctrl(ctrl_info);
5678
5679 return rc;
5680}
5681
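/*
 * PCI remove entry point: look up the controller context stashed in the
 * device's drvdata (presumably set during pqi_pci_init()) and tear it
 * down; a NULL context means there is nothing to clean up.
 */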
5682static void pqi_pci_remove(struct pci_dev *pdev)
5683{
5684 struct pqi_ctrl_info *ctrl_info;
5685
5686 ctrl_info = pci_get_drvdata(pdev);
5687 if (!ctrl_info)
5688 return;
5689
5690 pqi_remove_ctrl(ctrl_info);
5691}
5692
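/*
 * Shutdown hook: ask the controller to flush its battery-backed write
 * cache so no data is lost across the power-off. Only a warning is
 * issued if the flush fails; the same warning is emitted if no controller
 * context is found.
 */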
5693static void pqi_shutdown(struct pci_dev *pdev)
5694{
5695 int rc;
5696 struct pqi_ctrl_info *ctrl_info;
5697
5698 ctrl_info = pci_get_drvdata(pdev);
5699 if (!ctrl_info)
5700 goto error;
5701
5702 /*
5703 * Write all data in the controller's battery-backed cache to
5704 * storage.
5705 */
5706 rc = pqi_flush_cache(ctrl_info);
5707 if (rc == 0)
5708 return;
5709
5710error:
5711 dev_warn(&pdev->dev,
5712 "unable to flush controller cache\n");
5713}
5714
5715/* Define the PCI IDs for the controllers that we support. */
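/*
 * The table ends with a subvendor/subdevice wildcard so the driver also
 * binds to 0x028f boards that are not listed explicitly (subject to the
 * disable_device_id_wildcards module parameter checked in
 * pqi_pci_probe()), followed by the zeroed terminator entry.
 */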
5716static const struct pci_device_id pqi_pci_id_table[] = {
5717 {
5718 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
5719 PCI_VENDOR_ID_ADAPTEC2, 0x0110)
5720 },
5721 {
5722 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
5723 PCI_VENDOR_ID_HP, 0x0600)
5724 },
5725 {
5726 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
5727 PCI_VENDOR_ID_HP, 0x0601)
5728 },
5729 {
5730 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
5731 PCI_VENDOR_ID_HP, 0x0602)
5732 },
5733 {
5734 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
5735 PCI_VENDOR_ID_HP, 0x0603)
5736 },
5737 {
5738 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
5739 PCI_VENDOR_ID_HP, 0x0650)
5740 },
5741 {
5742 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
5743 PCI_VENDOR_ID_HP, 0x0651)
5744 },
5745 {
5746 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
5747 PCI_VENDOR_ID_HP, 0x0652)
5748 },
5749 {
5750 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
5751 PCI_VENDOR_ID_HP, 0x0653)
5752 },
5753 {
5754 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
5755 PCI_VENDOR_ID_HP, 0x0654)
5756 },
5757 {
5758 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
5759 PCI_VENDOR_ID_HP, 0x0655)
5760 },
5761 {
5762 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
5763 PCI_VENDOR_ID_HP, 0x0700)
5764 },
5765 {
5766 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
5767 PCI_VENDOR_ID_HP, 0x0701)
5768 },
5769 {
5770 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
5771 PCI_VENDOR_ID_ADAPTEC2, 0x0800)
5772 },
5773 {
5774 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
5775 PCI_VENDOR_ID_ADAPTEC2, 0x0801)
5776 },
5777 {
5778 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
5779 PCI_VENDOR_ID_ADAPTEC2, 0x0802)
5780 },
5781 {
5782 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
5783 PCI_VENDOR_ID_ADAPTEC2, 0x0803)
5784 },
5785 {
5786 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
5787 PCI_VENDOR_ID_ADAPTEC2, 0x0804)
5788 },
5789 {
5790 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
5791 PCI_VENDOR_ID_ADAPTEC2, 0x0805)
5792 },
5793 {
5794 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
5795 PCI_VENDOR_ID_ADAPTEC2, 0x0900)
5796 },
5797 {
5798 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
5799 PCI_VENDOR_ID_ADAPTEC2, 0x0901)
5800 },
5801 {
5802 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
5803 PCI_VENDOR_ID_ADAPTEC2, 0x0902)
5804 },
5805 {
5806 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
5807 PCI_VENDOR_ID_ADAPTEC2, 0x0903)
5808 },
5809 {
5810 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
5811 PCI_VENDOR_ID_ADAPTEC2, 0x0904)
5812 },
5813 {
5814 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
5815 PCI_VENDOR_ID_ADAPTEC2, 0x0905)
5816 },
5817 {
5818 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
5819 PCI_VENDOR_ID_ADAPTEC2, 0x0906)
5820 },
5821 {
5822 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
5823 PCI_VENDOR_ID_HP, 0x1001)
5824 },
5825 {
5826 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
5827 PCI_VENDOR_ID_HP, 0x1100)
5828 },
5829 {
5830 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
5831 PCI_VENDOR_ID_HP, 0x1101)
5832 },
5833 {
5834 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
5835 PCI_VENDOR_ID_HP, 0x1102)
5836 },
5837 {
5838 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
5839 PCI_VENDOR_ID_HP, 0x1150)
5840 },
5841 {
5842 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
5843 PCI_ANY_ID, PCI_ANY_ID)
5844 },
5845 { 0 }
5846};
5847
5848MODULE_DEVICE_TABLE(pci, pqi_pci_id_table);
5849
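/* PCI driver glue tying the ID table to the probe/remove/shutdown hooks. */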
5850static struct pci_driver pqi_pci_driver = {
5851 .name = DRIVER_NAME_SHORT,
5852 .id_table = pqi_pci_id_table,
5853 .probe = pqi_pci_probe,
5854 .remove = pqi_pci_remove,
5855 .shutdown = pqi_shutdown,
5856};
5857
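/*
 * Module init: attach the SAS transport template first, then register the
 * PCI driver; if registration fails, the transport template is released
 * again. pqi_cleanup() undoes both in reverse order.
 */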
5858static int __init pqi_init(void)
5859{
5860 int rc;
5861
5862 pr_info(DRIVER_NAME "\n");
5863
5864 pqi_sas_transport_template =
5865 sas_attach_transport(&pqi_sas_transport_functions);
5866 if (!pqi_sas_transport_template)
5867 return -ENODEV;
5868
5869 rc = pci_register_driver(&pqi_pci_driver);
5870 if (rc)
5871 sas_release_transport(pqi_sas_transport_template);
5872
5873 return rc;
5874}
5875
5876static void __exit pqi_cleanup(void)
5877{
5878 pci_unregister_driver(&pqi_pci_driver);
5879 sas_release_transport(pqi_sas_transport_template);
5880}
5881
5882module_init(pqi_init);
5883module_exit(pqi_cleanup);
5884
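/*
 * Compile-time layout checks. This function is never called (hence the
 * "unused" attribute); it exists only so the BUILD_BUG_ON()s verify that
 * the structure offsets and sizes match the register and message layouts
 * the PQI/BMIC interface expects.
 */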
5885static void __attribute__((unused)) verify_structures(void)
5886{
5887 BUILD_BUG_ON(offsetof(struct pqi_ctrl_registers,
5888 sis_host_to_ctrl_doorbell) != 0x20);
5889 BUILD_BUG_ON(offsetof(struct pqi_ctrl_registers,
5890 sis_interrupt_mask) != 0x34);
5891 BUILD_BUG_ON(offsetof(struct pqi_ctrl_registers,
5892 sis_ctrl_to_host_doorbell) != 0x9c);
5893 BUILD_BUG_ON(offsetof(struct pqi_ctrl_registers,
5894 sis_ctrl_to_host_doorbell_clear) != 0xa0);
5895 BUILD_BUG_ON(offsetof(struct pqi_ctrl_registers,
5896 sis_firmware_status) != 0xbc);
5897 BUILD_BUG_ON(offsetof(struct pqi_ctrl_registers,
5898 sis_mailbox) != 0x1000);
5899 BUILD_BUG_ON(offsetof(struct pqi_ctrl_registers,
5900 pqi_registers) != 0x4000);
5901
5902 BUILD_BUG_ON(offsetof(struct pqi_iu_header,
5903 iu_type) != 0x0);
5904 BUILD_BUG_ON(offsetof(struct pqi_iu_header,
5905 iu_length) != 0x2);
5906 BUILD_BUG_ON(offsetof(struct pqi_iu_header,
5907 response_queue_id) != 0x4);
5908 BUILD_BUG_ON(offsetof(struct pqi_iu_header,
5909 work_area) != 0x6);
5910 BUILD_BUG_ON(sizeof(struct pqi_iu_header) != 0x8);
5911
5912 BUILD_BUG_ON(offsetof(struct pqi_aio_error_info,
5913 status) != 0x0);
5914 BUILD_BUG_ON(offsetof(struct pqi_aio_error_info,
5915 service_response) != 0x1);
5916 BUILD_BUG_ON(offsetof(struct pqi_aio_error_info,
5917 data_present) != 0x2);
5918 BUILD_BUG_ON(offsetof(struct pqi_aio_error_info,
5919 reserved) != 0x3);
5920 BUILD_BUG_ON(offsetof(struct pqi_aio_error_info,
5921 residual_count) != 0x4);
5922 BUILD_BUG_ON(offsetof(struct pqi_aio_error_info,
5923 data_length) != 0x8);
5924 BUILD_BUG_ON(offsetof(struct pqi_aio_error_info,
5925 reserved1) != 0xa);
5926 BUILD_BUG_ON(offsetof(struct pqi_aio_error_info,
5927 data) != 0xc);
5928 BUILD_BUG_ON(sizeof(struct pqi_aio_error_info) != 0x10c);
5929
5930 BUILD_BUG_ON(offsetof(struct pqi_raid_error_info,
5931 data_in_result) != 0x0);
5932 BUILD_BUG_ON(offsetof(struct pqi_raid_error_info,
5933 data_out_result) != 0x1);
5934 BUILD_BUG_ON(offsetof(struct pqi_raid_error_info,
5935 reserved) != 0x2);
5936 BUILD_BUG_ON(offsetof(struct pqi_raid_error_info,
5937 status) != 0x5);
5938 BUILD_BUG_ON(offsetof(struct pqi_raid_error_info,
5939 status_qualifier) != 0x6);
5940 BUILD_BUG_ON(offsetof(struct pqi_raid_error_info,
5941 sense_data_length) != 0x8);
5942 BUILD_BUG_ON(offsetof(struct pqi_raid_error_info,
5943 response_data_length) != 0xa);
5944 BUILD_BUG_ON(offsetof(struct pqi_raid_error_info,
5945 data_in_transferred) != 0xc);
5946 BUILD_BUG_ON(offsetof(struct pqi_raid_error_info,
5947 data_out_transferred) != 0x10);
5948 BUILD_BUG_ON(offsetof(struct pqi_raid_error_info,
5949 data) != 0x14);
5950 BUILD_BUG_ON(sizeof(struct pqi_raid_error_info) != 0x114);
5951
5952 BUILD_BUG_ON(offsetof(struct pqi_device_registers,
5953 signature) != 0x0);
5954 BUILD_BUG_ON(offsetof(struct pqi_device_registers,
5955 function_and_status_code) != 0x8);
5956 BUILD_BUG_ON(offsetof(struct pqi_device_registers,
5957 max_admin_iq_elements) != 0x10);
5958 BUILD_BUG_ON(offsetof(struct pqi_device_registers,
5959 max_admin_oq_elements) != 0x11);
5960 BUILD_BUG_ON(offsetof(struct pqi_device_registers,
5961 admin_iq_element_length) != 0x12);
5962 BUILD_BUG_ON(offsetof(struct pqi_device_registers,
5963 admin_oq_element_length) != 0x13);
5964 BUILD_BUG_ON(offsetof(struct pqi_device_registers,
5965 max_reset_timeout) != 0x14);
5966 BUILD_BUG_ON(offsetof(struct pqi_device_registers,
5967 legacy_intx_status) != 0x18);
5968 BUILD_BUG_ON(offsetof(struct pqi_device_registers,
5969 legacy_intx_mask_set) != 0x1c);
5970 BUILD_BUG_ON(offsetof(struct pqi_device_registers,
5971 legacy_intx_mask_clear) != 0x20);
5972 BUILD_BUG_ON(offsetof(struct pqi_device_registers,
5973 device_status) != 0x40);
5974 BUILD_BUG_ON(offsetof(struct pqi_device_registers,
5975 admin_iq_pi_offset) != 0x48);
5976 BUILD_BUG_ON(offsetof(struct pqi_device_registers,
5977 admin_oq_ci_offset) != 0x50);
5978 BUILD_BUG_ON(offsetof(struct pqi_device_registers,
5979 admin_iq_element_array_addr) != 0x58);
5980 BUILD_BUG_ON(offsetof(struct pqi_device_registers,
5981 admin_oq_element_array_addr) != 0x60);
5982 BUILD_BUG_ON(offsetof(struct pqi_device_registers,
5983 admin_iq_ci_addr) != 0x68);
5984 BUILD_BUG_ON(offsetof(struct pqi_device_registers,
5985 admin_oq_pi_addr) != 0x70);
5986 BUILD_BUG_ON(offsetof(struct pqi_device_registers,
5987 admin_iq_num_elements) != 0x78);
5988 BUILD_BUG_ON(offsetof(struct pqi_device_registers,
5989 admin_oq_num_elements) != 0x79);
5990 BUILD_BUG_ON(offsetof(struct pqi_device_registers,
5991 admin_queue_int_msg_num) != 0x7a);
5992 BUILD_BUG_ON(offsetof(struct pqi_device_registers,
5993 device_error) != 0x80);
5994 BUILD_BUG_ON(offsetof(struct pqi_device_registers,
5995 error_details) != 0x88);
5996 BUILD_BUG_ON(offsetof(struct pqi_device_registers,
5997 device_reset) != 0x90);
5998 BUILD_BUG_ON(offsetof(struct pqi_device_registers,
5999 power_action) != 0x94);
6000 BUILD_BUG_ON(sizeof(struct pqi_device_registers) != 0x100);
6001
6002 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
6003 header.iu_type) != 0);
6004 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
6005 header.iu_length) != 2);
6006 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
6007 header.work_area) != 6);
6008 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
6009 request_id) != 8);
6010 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
6011 function_code) != 10);
6012 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
6013 data.report_device_capability.buffer_length) != 44);
6014 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
6015 data.report_device_capability.sg_descriptor) != 48);
6016 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
6017 data.create_operational_iq.queue_id) != 12);
6018 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
6019 data.create_operational_iq.element_array_addr) != 16);
6020 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
6021 data.create_operational_iq.ci_addr) != 24);
6022 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
6023 data.create_operational_iq.num_elements) != 32);
6024 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
6025 data.create_operational_iq.element_length) != 34);
6026 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
6027 data.create_operational_iq.queue_protocol) != 36);
6028 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
6029 data.create_operational_oq.queue_id) != 12);
6030 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
6031 data.create_operational_oq.element_array_addr) != 16);
6032 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
6033 data.create_operational_oq.pi_addr) != 24);
6034 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
6035 data.create_operational_oq.num_elements) != 32);
6036 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
6037 data.create_operational_oq.element_length) != 34);
6038 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
6039 data.create_operational_oq.queue_protocol) != 36);
6040 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
6041 data.create_operational_oq.int_msg_num) != 40);
6042 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
6043 data.create_operational_oq.coalescing_count) != 42);
6044 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
6045 data.create_operational_oq.min_coalescing_time) != 44);
6046 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
6047 data.create_operational_oq.max_coalescing_time) != 48);
6048 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
6049 data.delete_operational_queue.queue_id) != 12);
6050 BUILD_BUG_ON(sizeof(struct pqi_general_admin_request) != 64);
6051 BUILD_BUG_ON(FIELD_SIZEOF(struct pqi_general_admin_request,
6052 data.create_operational_iq) != 64 - 11);
6053 BUILD_BUG_ON(FIELD_SIZEOF(struct pqi_general_admin_request,
6054 data.create_operational_oq) != 64 - 11);
6055 BUILD_BUG_ON(FIELD_SIZEOF(struct pqi_general_admin_request,
6056 data.delete_operational_queue) != 64 - 11);
6057
6058 BUILD_BUG_ON(offsetof(struct pqi_general_admin_response,
6059 header.iu_type) != 0);
6060 BUILD_BUG_ON(offsetof(struct pqi_general_admin_response,
6061 header.iu_length) != 2);
6062 BUILD_BUG_ON(offsetof(struct pqi_general_admin_response,
6063 header.work_area) != 6);
6064 BUILD_BUG_ON(offsetof(struct pqi_general_admin_response,
6065 request_id) != 8);
6066 BUILD_BUG_ON(offsetof(struct pqi_general_admin_response,
6067 function_code) != 10);
6068 BUILD_BUG_ON(offsetof(struct pqi_general_admin_response,
6069 status) != 11);
6070 BUILD_BUG_ON(offsetof(struct pqi_general_admin_response,
6071 data.create_operational_iq.status_descriptor) != 12);
6072 BUILD_BUG_ON(offsetof(struct pqi_general_admin_response,
6073 data.create_operational_iq.iq_pi_offset) != 16);
6074 BUILD_BUG_ON(offsetof(struct pqi_general_admin_response,
6075 data.create_operational_oq.status_descriptor) != 12);
6076 BUILD_BUG_ON(offsetof(struct pqi_general_admin_response,
6077 data.create_operational_oq.oq_ci_offset) != 16);
6078 BUILD_BUG_ON(sizeof(struct pqi_general_admin_response) != 64);
6079
6080 BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
6081 header.iu_type) != 0);
6082 BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
6083 header.iu_length) != 2);
6084 BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
6085 header.response_queue_id) != 4);
6086 BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
6087 header.work_area) != 6);
6088 BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
6089 request_id) != 8);
6090 BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
6091 nexus_id) != 10);
6092 BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
6093 buffer_length) != 12);
6094 BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
6095 lun_number) != 16);
6096 BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
6097 protocol_specific) != 24);
6098 BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
6099 error_index) != 27);
6100 BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
6101 cdb) != 32);
6102 BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
6103 sg_descriptors) != 64);
6104 BUILD_BUG_ON(sizeof(struct pqi_raid_path_request) !=
6105 PQI_OPERATIONAL_IQ_ELEMENT_LENGTH);
6106
6107 BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
6108 header.iu_type) != 0);
6109 BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
6110 header.iu_length) != 2);
6111 BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
6112 header.response_queue_id) != 4);
6113 BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
6114 header.work_area) != 6);
6115 BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
6116 request_id) != 8);
6117 BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
6118 nexus_id) != 12);
6119 BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
6120 buffer_length) != 16);
6121 BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
6122 data_encryption_key_index) != 22);
6123 BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
6124 encrypt_tweak_lower) != 24);
6125 BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
6126 encrypt_tweak_upper) != 28);
6127 BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
6128 cdb) != 32);
6129 BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
6130 error_index) != 48);
6131 BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
6132 num_sg_descriptors) != 50);
6133 BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
6134 cdb_length) != 51);
6135 BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
6136 lun_number) != 52);
6137 BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
6138 sg_descriptors) != 64);
6139 BUILD_BUG_ON(sizeof(struct pqi_aio_path_request) !=
6140 PQI_OPERATIONAL_IQ_ELEMENT_LENGTH);
6141
6142 BUILD_BUG_ON(offsetof(struct pqi_io_response,
6143 header.iu_type) != 0);
6144 BUILD_BUG_ON(offsetof(struct pqi_io_response,
6145 header.iu_length) != 2);
6146 BUILD_BUG_ON(offsetof(struct pqi_io_response,
6147 request_id) != 8);
6148 BUILD_BUG_ON(offsetof(struct pqi_io_response,
6149 error_index) != 10);
6150
6151 BUILD_BUG_ON(offsetof(struct pqi_general_management_request,
6152 header.iu_type) != 0);
6153 BUILD_BUG_ON(offsetof(struct pqi_general_management_request,
6154 header.iu_length) != 2);
6155 BUILD_BUG_ON(offsetof(struct pqi_general_management_request,
6156 header.response_queue_id) != 4);
6157 BUILD_BUG_ON(offsetof(struct pqi_general_management_request,
6158 request_id) != 8);
6159 BUILD_BUG_ON(offsetof(struct pqi_general_management_request,
6160 data.report_event_configuration.buffer_length) != 12);
6161 BUILD_BUG_ON(offsetof(struct pqi_general_management_request,
6162 data.report_event_configuration.sg_descriptors) != 16);
6163 BUILD_BUG_ON(offsetof(struct pqi_general_management_request,
6164 data.set_event_configuration.global_event_oq_id) != 10);
6165 BUILD_BUG_ON(offsetof(struct pqi_general_management_request,
6166 data.set_event_configuration.buffer_length) != 12);
6167 BUILD_BUG_ON(offsetof(struct pqi_general_management_request,
6168 data.set_event_configuration.sg_descriptors) != 16);
6169
6170 BUILD_BUG_ON(offsetof(struct pqi_iu_layer_descriptor,
6171 max_inbound_iu_length) != 6);
6172 BUILD_BUG_ON(offsetof(struct pqi_iu_layer_descriptor,
6173 max_outbound_iu_length) != 14);
6174 BUILD_BUG_ON(sizeof(struct pqi_iu_layer_descriptor) != 16);
6175
6176 BUILD_BUG_ON(offsetof(struct pqi_device_capability,
6177 data_length) != 0);
6178 BUILD_BUG_ON(offsetof(struct pqi_device_capability,
6179 iq_arbitration_priority_support_bitmask) != 8);
6180 BUILD_BUG_ON(offsetof(struct pqi_device_capability,
6181 maximum_aw_a) != 9);
6182 BUILD_BUG_ON(offsetof(struct pqi_device_capability,
6183 maximum_aw_b) != 10);
6184 BUILD_BUG_ON(offsetof(struct pqi_device_capability,
6185 maximum_aw_c) != 11);
6186 BUILD_BUG_ON(offsetof(struct pqi_device_capability,
6187 max_inbound_queues) != 16);
6188 BUILD_BUG_ON(offsetof(struct pqi_device_capability,
6189 max_elements_per_iq) != 18);
6190 BUILD_BUG_ON(offsetof(struct pqi_device_capability,
6191 max_iq_element_length) != 24);
6192 BUILD_BUG_ON(offsetof(struct pqi_device_capability,
6193 min_iq_element_length) != 26);
6194 BUILD_BUG_ON(offsetof(struct pqi_device_capability,
6195 max_outbound_queues) != 30);
6196 BUILD_BUG_ON(offsetof(struct pqi_device_capability,
6197 max_elements_per_oq) != 32);
6198 BUILD_BUG_ON(offsetof(struct pqi_device_capability,
6199 intr_coalescing_time_granularity) != 34);
6200 BUILD_BUG_ON(offsetof(struct pqi_device_capability,
6201 max_oq_element_length) != 36);
6202 BUILD_BUG_ON(offsetof(struct pqi_device_capability,
6203 min_oq_element_length) != 38);
6204 BUILD_BUG_ON(offsetof(struct pqi_device_capability,
6205 iu_layer_descriptors) != 64);
6206 BUILD_BUG_ON(sizeof(struct pqi_device_capability) != 576);
6207
6208 BUILD_BUG_ON(offsetof(struct pqi_event_descriptor,
6209 event_type) != 0);
6210 BUILD_BUG_ON(offsetof(struct pqi_event_descriptor,
6211 oq_id) != 2);
6212 BUILD_BUG_ON(sizeof(struct pqi_event_descriptor) != 4);
6213
6214 BUILD_BUG_ON(offsetof(struct pqi_event_config,
6215 num_event_descriptors) != 2);
6216 BUILD_BUG_ON(offsetof(struct pqi_event_config,
6217 descriptors) != 4);
6218
6219 BUILD_BUG_ON(offsetof(struct pqi_event_response,
6220 header.iu_type) != 0);
6221 BUILD_BUG_ON(offsetof(struct pqi_event_response,
6222 header.iu_length) != 2);
6223 BUILD_BUG_ON(offsetof(struct pqi_event_response,
6224 event_type) != 8);
6225 BUILD_BUG_ON(offsetof(struct pqi_event_response,
6226 event_id) != 10);
6227 BUILD_BUG_ON(offsetof(struct pqi_event_response,
6228 additional_event_id) != 12);
6229 BUILD_BUG_ON(offsetof(struct pqi_event_response,
6230 data) != 16);
6231 BUILD_BUG_ON(sizeof(struct pqi_event_response) != 32);
6232
6233 BUILD_BUG_ON(offsetof(struct pqi_event_acknowledge_request,
6234 header.iu_type) != 0);
6235 BUILD_BUG_ON(offsetof(struct pqi_event_acknowledge_request,
6236 header.iu_length) != 2);
6237 BUILD_BUG_ON(offsetof(struct pqi_event_acknowledge_request,
6238 event_type) != 8);
6239 BUILD_BUG_ON(offsetof(struct pqi_event_acknowledge_request,
6240 event_id) != 10);
6241 BUILD_BUG_ON(offsetof(struct pqi_event_acknowledge_request,
6242 additional_event_id) != 12);
6243 BUILD_BUG_ON(sizeof(struct pqi_event_acknowledge_request) != 16);
6244
6245 BUILD_BUG_ON(offsetof(struct pqi_task_management_request,
6246 header.iu_type) != 0);
6247 BUILD_BUG_ON(offsetof(struct pqi_task_management_request,
6248 header.iu_length) != 2);
6249 BUILD_BUG_ON(offsetof(struct pqi_task_management_request,
6250 request_id) != 8);
6251 BUILD_BUG_ON(offsetof(struct pqi_task_management_request,
6252 nexus_id) != 10);
6253 BUILD_BUG_ON(offsetof(struct pqi_task_management_request,
6254 lun_number) != 16);
6255 BUILD_BUG_ON(offsetof(struct pqi_task_management_request,
6256 protocol_specific) != 24);
6257 BUILD_BUG_ON(offsetof(struct pqi_task_management_request,
6258 outbound_queue_id_to_manage) != 26);
6259 BUILD_BUG_ON(offsetof(struct pqi_task_management_request,
6260 request_id_to_manage) != 28);
6261 BUILD_BUG_ON(offsetof(struct pqi_task_management_request,
6262 task_management_function) != 30);
6263 BUILD_BUG_ON(sizeof(struct pqi_task_management_request) != 32);
6264
6265 BUILD_BUG_ON(offsetof(struct pqi_task_management_response,
6266 header.iu_type) != 0);
6267 BUILD_BUG_ON(offsetof(struct pqi_task_management_response,
6268 header.iu_length) != 2);
6269 BUILD_BUG_ON(offsetof(struct pqi_task_management_response,
6270 request_id) != 8);
6271 BUILD_BUG_ON(offsetof(struct pqi_task_management_response,
6272 nexus_id) != 10);
6273 BUILD_BUG_ON(offsetof(struct pqi_task_management_response,
6274 additional_response_info) != 12);
6275 BUILD_BUG_ON(offsetof(struct pqi_task_management_response,
6276 response_code) != 15);
6277 BUILD_BUG_ON(sizeof(struct pqi_task_management_response) != 16);
6278
6279 BUILD_BUG_ON(offsetof(struct bmic_identify_controller,
6280 configured_logical_drive_count) != 0);
6281 BUILD_BUG_ON(offsetof(struct bmic_identify_controller,
6282 configuration_signature) != 1);
6283 BUILD_BUG_ON(offsetof(struct bmic_identify_controller,
6284 firmware_version) != 5);
6285 BUILD_BUG_ON(offsetof(struct bmic_identify_controller,
6286 extended_logical_unit_count) != 154);
6287 BUILD_BUG_ON(offsetof(struct bmic_identify_controller,
6288 firmware_build_number) != 190);
6289 BUILD_BUG_ON(offsetof(struct bmic_identify_controller,
6290 controller_mode) != 292);
6291
6292 BUILD_BUG_ON(PQI_ADMIN_IQ_NUM_ELEMENTS > 255);
6293 BUILD_BUG_ON(PQI_ADMIN_OQ_NUM_ELEMENTS > 255);
6294 BUILD_BUG_ON(PQI_ADMIN_IQ_ELEMENT_LENGTH %
6295 PQI_QUEUE_ELEMENT_LENGTH_ALIGNMENT != 0);
6296 BUILD_BUG_ON(PQI_ADMIN_OQ_ELEMENT_LENGTH %
6297 PQI_QUEUE_ELEMENT_LENGTH_ALIGNMENT != 0);
6298 BUILD_BUG_ON(PQI_OPERATIONAL_IQ_ELEMENT_LENGTH > 1048560);
6299 BUILD_BUG_ON(PQI_OPERATIONAL_IQ_ELEMENT_LENGTH %
6300 PQI_QUEUE_ELEMENT_LENGTH_ALIGNMENT != 0);
6301 BUILD_BUG_ON(PQI_OPERATIONAL_OQ_ELEMENT_LENGTH > 1048560);
6302 BUILD_BUG_ON(PQI_OPERATIONAL_OQ_ELEMENT_LENGTH %
6303 PQI_QUEUE_ELEMENT_LENGTH_ALIGNMENT != 0);
6304
6305 BUILD_BUG_ON(PQI_RESERVED_IO_SLOTS >= PQI_MAX_OUTSTANDING_REQUESTS);
6306}