drivers/scsi/smartpqi/smartpqi_init.c
1 /*
2 * driver for Microsemi PQI-based storage controllers
3 * Copyright (c) 2016 Microsemi Corporation
4 * Copyright (c) 2016 PMC-Sierra, Inc.
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; version 2 of the License.
9 *
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
13 * NON INFRINGEMENT. See the GNU General Public License for more details.
14 *
15 * Questions/Comments/Bugfixes to esc.storagedev@microsemi.com
16 *
17 */
18
19 #include <linux/module.h>
20 #include <linux/kernel.h>
21 #include <linux/pci.h>
22 #include <linux/delay.h>
23 #include <linux/interrupt.h>
24 #include <linux/sched.h>
25 #include <linux/rtc.h>
26 #include <linux/bcd.h>
27 #include <linux/cciss_ioctl.h>
28 #include <scsi/scsi_host.h>
29 #include <scsi/scsi_cmnd.h>
30 #include <scsi/scsi_device.h>
31 #include <scsi/scsi_eh.h>
32 #include <scsi/scsi_transport_sas.h>
33 #include <asm/unaligned.h>
34 #include "smartpqi.h"
35 #include "smartpqi_sis.h"
36
37 #if !defined(BUILD_TIMESTAMP)
38 #define BUILD_TIMESTAMP
39 #endif
40
41 #define DRIVER_VERSION "0.9.9-100"
42 #define DRIVER_MAJOR 0
43 #define DRIVER_MINOR 9
44 #define DRIVER_RELEASE 9
45 #define DRIVER_REVISION 100
46
47 #define DRIVER_NAME "Microsemi PQI Driver (v" DRIVER_VERSION ")"
48 #define DRIVER_NAME_SHORT "smartpqi"
49
50 MODULE_AUTHOR("Microsemi");
51 MODULE_DESCRIPTION("Driver for Microsemi Smart Family Controller version "
52 DRIVER_VERSION);
53 MODULE_SUPPORTED_DEVICE("Microsemi Smart Family Controllers");
54 MODULE_VERSION(DRIVER_VERSION);
55 MODULE_LICENSE("GPL");
56
57 #define PQI_ENABLE_MULTI_QUEUE_SUPPORT 0
58
59 static char *hpe_branded_controller = "HPE Smart Array Controller";
60 static char *microsemi_branded_controller = "Microsemi Smart Family Controller";
61
62 static void pqi_take_ctrl_offline(struct pqi_ctrl_info *ctrl_info);
63 static int pqi_scan_scsi_devices(struct pqi_ctrl_info *ctrl_info);
64 static void pqi_scan_start(struct Scsi_Host *shost);
65 static void pqi_start_io(struct pqi_ctrl_info *ctrl_info,
66 struct pqi_queue_group *queue_group, enum pqi_io_path path,
67 struct pqi_io_request *io_request);
68 static int pqi_submit_raid_request_synchronous(struct pqi_ctrl_info *ctrl_info,
69 struct pqi_iu_header *request, unsigned int flags,
70 struct pqi_raid_error_info *error_info, unsigned long timeout_msecs);
71 static int pqi_aio_submit_io(struct pqi_ctrl_info *ctrl_info,
72 struct scsi_cmnd *scmd, u32 aio_handle, u8 *cdb,
73 unsigned int cdb_length, struct pqi_queue_group *queue_group,
74 struct pqi_encryption_info *encryption_info);
75
76 /* for flags argument to pqi_submit_raid_request_synchronous() */
77 #define PQI_SYNC_FLAGS_INTERRUPTABLE 0x1
78
79 static struct scsi_transport_template *pqi_sas_transport_template;
80
81 static atomic_t pqi_controller_count = ATOMIC_INIT(0);
82
83 static int pqi_disable_device_id_wildcards;
84 module_param_named(disable_device_id_wildcards,
85 pqi_disable_device_id_wildcards, int, S_IRUGO | S_IWUSR);
86 MODULE_PARM_DESC(disable_device_id_wildcards,
87 "Disable device ID wildcards.");
88
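/*
 * Human-readable names for the SA_RAID_* level values defined below;
 * pqi_raid_level_to_string() indexes this array and returns an empty
 * string for unknown levels.
 */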
89 static char *raid_levels[] = {
90 "RAID-0",
91 "RAID-4",
92 "RAID-1(1+0)",
93 "RAID-5",
94 "RAID-5+1",
95 "RAID-ADG",
96 "RAID-1(ADM)",
97 };
98
99 static char *pqi_raid_level_to_string(u8 raid_level)
100 {
101 if (raid_level < ARRAY_SIZE(raid_levels))
102 return raid_levels[raid_level];
103
104 return "";
105 }
106
107 #define SA_RAID_0 0
108 #define SA_RAID_4 1
109 #define SA_RAID_1 2 /* also used for RAID 10 */
110 #define SA_RAID_5 3 /* also used for RAID 50 */
111 #define SA_RAID_51 4
112 #define SA_RAID_6 5 /* also used for RAID 60 */
113 #define SA_RAID_ADM 6 /* also used for RAID 1+0 ADM */
114 #define SA_RAID_MAX SA_RAID_ADM
115 #define SA_RAID_UNKNOWN 0xff
116
117 static inline void pqi_scsi_done(struct scsi_cmnd *scmd)
118 {
119 scmd->scsi_done(scmd);
120 }
121
122 static inline bool pqi_scsi3addr_equal(u8 *scsi3addr1, u8 *scsi3addr2)
123 {
124 return memcmp(scsi3addr1, scsi3addr2, 8) == 0;
125 }
126
127 static inline struct pqi_ctrl_info *shost_to_hba(struct Scsi_Host *shost)
128 {
129 void *hostdata = shost_priv(shost);
130
131 return *((struct pqi_ctrl_info **)hostdata);
132 }
133
134 static inline bool pqi_is_logical_device(struct pqi_scsi_dev *device)
135 {
136 return !device->is_physical_device;
137 }
138
139 static inline bool pqi_ctrl_offline(struct pqi_ctrl_info *ctrl_info)
140 {
141 return !ctrl_info->controller_online;
142 }
143
144 static inline void pqi_check_ctrl_health(struct pqi_ctrl_info *ctrl_info)
145 {
146 if (ctrl_info->controller_online)
147 if (!sis_is_firmware_running(ctrl_info))
148 pqi_take_ctrl_offline(ctrl_info);
149 }
150
151 static inline bool pqi_is_hba_lunid(u8 *scsi3addr)
152 {
153 return pqi_scsi3addr_equal(scsi3addr, RAID_CTLR_LUNID);
154 }
155
156 static inline enum pqi_ctrl_mode pqi_get_ctrl_mode(
157 struct pqi_ctrl_info *ctrl_info)
158 {
159 return sis_read_driver_scratch(ctrl_info);
160 }
161
162 static inline void pqi_save_ctrl_mode(struct pqi_ctrl_info *ctrl_info,
163 enum pqi_ctrl_mode mode)
164 {
165 sis_write_driver_scratch(ctrl_info, mode);
166 }
167
168 #define PQI_RESCAN_WORK_INTERVAL (10 * HZ)
169
170 static inline void pqi_schedule_rescan_worker(struct pqi_ctrl_info *ctrl_info)
171 {
172 schedule_delayed_work(&ctrl_info->rescan_work,
173 PQI_RESCAN_WORK_INTERVAL);
174 }
175
176 static int pqi_map_single(struct pci_dev *pci_dev,
177 struct pqi_sg_descriptor *sg_descriptor, void *buffer,
178 size_t buffer_length, int data_direction)
179 {
180 dma_addr_t bus_address;
181
182 if (!buffer || buffer_length == 0 || data_direction == PCI_DMA_NONE)
183 return 0;
184
185 bus_address = pci_map_single(pci_dev, buffer, buffer_length,
186 data_direction);
187 if (pci_dma_mapping_error(pci_dev, bus_address))
188 return -ENOMEM;
189
190 put_unaligned_le64((u64)bus_address, &sg_descriptor->address);
191 put_unaligned_le32(buffer_length, &sg_descriptor->length);
192 put_unaligned_le32(CISS_SG_LAST, &sg_descriptor->flags);
193
194 return 0;
195 }
196
197 static void pqi_pci_unmap(struct pci_dev *pci_dev,
198 struct pqi_sg_descriptor *descriptors, int num_descriptors,
199 int data_direction)
200 {
201 int i;
202
203 if (data_direction == PCI_DMA_NONE)
204 return;
205
206 for (i = 0; i < num_descriptors; i++)
207 pci_unmap_single(pci_dev,
208 (dma_addr_t)get_unaligned_le64(&descriptors[i].address),
209 get_unaligned_le32(&descriptors[i].length),
210 data_direction);
211 }
212
213 static int pqi_build_raid_path_request(struct pqi_ctrl_info *ctrl_info,
214 struct pqi_raid_path_request *request, u8 cmd,
215 u8 *scsi3addr, void *buffer, size_t buffer_length,
216 u16 vpd_page, int *pci_direction)
217 {
218 u8 *cdb;
219 int pci_dir;
220
221 memset(request, 0, sizeof(*request));
222
223 request->header.iu_type = PQI_REQUEST_IU_RAID_PATH_IO;
224 put_unaligned_le16(offsetof(struct pqi_raid_path_request,
225 sg_descriptors[1]) - PQI_REQUEST_HEADER_LENGTH,
226 &request->header.iu_length);
227 put_unaligned_le32(buffer_length, &request->buffer_length);
228 memcpy(request->lun_number, scsi3addr, sizeof(request->lun_number));
229 request->task_attribute = SOP_TASK_ATTRIBUTE_SIMPLE;
230 request->additional_cdb_bytes_usage = SOP_ADDITIONAL_CDB_BYTES_0;
231
232 cdb = request->cdb;
233
234 switch (cmd) {
235 case INQUIRY:
236 request->data_direction = SOP_READ_FLAG;
237 cdb[0] = INQUIRY;
238 if (vpd_page & VPD_PAGE) {
239 cdb[1] = 0x1;
240 cdb[2] = (u8)vpd_page;
241 }
242 cdb[4] = (u8)buffer_length;
243 break;
244 case CISS_REPORT_LOG:
245 case CISS_REPORT_PHYS:
246 request->data_direction = SOP_READ_FLAG;
247 cdb[0] = cmd;
248 if (cmd == CISS_REPORT_PHYS)
249 cdb[1] = CISS_REPORT_PHYS_EXTENDED;
250 else
251 cdb[1] = CISS_REPORT_LOG_EXTENDED;
252 put_unaligned_be32(buffer_length, &cdb[6]);
253 break;
254 case CISS_GET_RAID_MAP:
255 request->data_direction = SOP_READ_FLAG;
256 cdb[0] = CISS_READ;
257 cdb[1] = CISS_GET_RAID_MAP;
258 put_unaligned_be32(buffer_length, &cdb[6]);
259 break;
260 case SA_CACHE_FLUSH:
261 request->data_direction = SOP_WRITE_FLAG;
262 cdb[0] = BMIC_WRITE;
263 cdb[6] = BMIC_CACHE_FLUSH;
264 put_unaligned_be16(buffer_length, &cdb[7]);
265 break;
266 case BMIC_IDENTIFY_CONTROLLER:
267 case BMIC_IDENTIFY_PHYSICAL_DEVICE:
268 request->data_direction = SOP_READ_FLAG;
269 cdb[0] = BMIC_READ;
270 cdb[6] = cmd;
271 put_unaligned_be16(buffer_length, &cdb[7]);
272 break;
273 case BMIC_WRITE_HOST_WELLNESS:
274 request->data_direction = SOP_WRITE_FLAG;
275 cdb[0] = BMIC_WRITE;
276 cdb[6] = cmd;
277 put_unaligned_be16(buffer_length, &cdb[7]);
278 break;
279 default:
280 dev_err(&ctrl_info->pci_dev->dev, "unknown command 0x%x\n",
281 cmd);
282 WARN_ON(cmd);
283 break;
284 }
285
286 switch (request->data_direction) {
287 case SOP_READ_FLAG:
288 pci_dir = PCI_DMA_FROMDEVICE;
289 break;
290 case SOP_WRITE_FLAG:
291 pci_dir = PCI_DMA_TODEVICE;
292 break;
293 case SOP_NO_DIRECTION_FLAG:
294 pci_dir = PCI_DMA_NONE;
295 break;
296 default:
297 pci_dir = PCI_DMA_BIDIRECTIONAL;
298 break;
299 }
300
301 *pci_direction = pci_dir;
302
303 return pqi_map_single(ctrl_info->pci_dev, &request->sg_descriptors[0],
304 buffer, buffer_length, pci_dir);
305 }
306
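/*
 * Claim a slot from the pre-allocated I/O request pool without taking a
 * lock: each slot carries an atomic refcount, and a slot is owned only
 * when atomic_inc_return() sees the count go from 0 to 1.
 * next_io_request_slot is merely a starting hint, so races on it are
 * harmless (hence "benignly racy" below).
 */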
307 static struct pqi_io_request *pqi_alloc_io_request(
308 struct pqi_ctrl_info *ctrl_info)
309 {
310 struct pqi_io_request *io_request;
311 u16 i = ctrl_info->next_io_request_slot; /* benignly racy */
312
313 while (1) {
314 io_request = &ctrl_info->io_request_pool[i];
315 if (atomic_inc_return(&io_request->refcount) == 1)
316 break;
317 atomic_dec(&io_request->refcount);
318 i = (i + 1) % ctrl_info->max_io_slots;
319 }
320
321 /* benignly racy */
322 ctrl_info->next_io_request_slot = (i + 1) % ctrl_info->max_io_slots;
323
324 io_request->scmd = NULL;
325 io_request->status = 0;
326 io_request->error_info = NULL;
327
328 return io_request;
329 }
330
331 static void pqi_free_io_request(struct pqi_io_request *io_request)
332 {
333 atomic_dec(&io_request->refcount);
334 }
335
336 static int pqi_identify_controller(struct pqi_ctrl_info *ctrl_info,
337 struct bmic_identify_controller *buffer)
338 {
339 int rc;
340 int pci_direction;
341 struct pqi_raid_path_request request;
342
343 rc = pqi_build_raid_path_request(ctrl_info, &request,
344 BMIC_IDENTIFY_CONTROLLER, RAID_CTLR_LUNID, buffer,
345 sizeof(*buffer), 0, &pci_direction);
346 if (rc)
347 return rc;
348
349 rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header, 0,
350 NULL, NO_TIMEOUT);
351
352 pqi_pci_unmap(ctrl_info->pci_dev, request.sg_descriptors, 1,
353 pci_direction);
354
355 return rc;
356 }
357
358 static int pqi_scsi_inquiry(struct pqi_ctrl_info *ctrl_info,
359 u8 *scsi3addr, u16 vpd_page, void *buffer, size_t buffer_length)
360 {
361 int rc;
362 int pci_direction;
363 struct pqi_raid_path_request request;
364
365 rc = pqi_build_raid_path_request(ctrl_info, &request,
366 INQUIRY, scsi3addr, buffer, buffer_length, vpd_page,
367 &pci_direction);
368 if (rc)
369 return rc;
370
371 rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header, 0,
372 NULL, NO_TIMEOUT);
373
374 pqi_pci_unmap(ctrl_info->pci_dev, request.sg_descriptors, 1,
375 pci_direction);
376
377 return rc;
378 }
379
380 static int pqi_identify_physical_device(struct pqi_ctrl_info *ctrl_info,
381 struct pqi_scsi_dev *device,
382 struct bmic_identify_physical_device *buffer,
383 size_t buffer_length)
384 {
385 int rc;
386 int pci_direction;
387 u16 bmic_device_index;
388 struct pqi_raid_path_request request;
389
390 rc = pqi_build_raid_path_request(ctrl_info, &request,
391 BMIC_IDENTIFY_PHYSICAL_DEVICE, RAID_CTLR_LUNID, buffer,
392 buffer_length, 0, &pci_direction);
393 if (rc)
394 return rc;
395
396 bmic_device_index = CISS_GET_DRIVE_NUMBER(device->scsi3addr);
397 request.cdb[2] = (u8)bmic_device_index;
398 request.cdb[9] = (u8)(bmic_device_index >> 8);
399
400 rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header,
401 0, NULL, NO_TIMEOUT);
402
403 pqi_pci_unmap(ctrl_info->pci_dev, request.sg_descriptors, 1,
404 pci_direction);
405
406 return rc;
407 }
408
409 #define SA_CACHE_FLUSH_BUFFER_LENGTH 4
410 #define PQI_FLUSH_CACHE_TIMEOUT (30 * 1000)
411
412 static int pqi_flush_cache(struct pqi_ctrl_info *ctrl_info)
413 {
414 int rc;
415 struct pqi_raid_path_request request;
416 int pci_direction;
417 u8 *buffer;
418
419 /*
420 * Don't bother trying to flush the cache if the controller is
421 * locked up.
422 */
423 if (pqi_ctrl_offline(ctrl_info))
424 return -ENXIO;
425
426 buffer = kzalloc(SA_CACHE_FLUSH_BUFFER_LENGTH, GFP_KERNEL);
427 if (!buffer)
428 return -ENOMEM;
429
430 rc = pqi_build_raid_path_request(ctrl_info, &request,
431 SA_CACHE_FLUSH, RAID_CTLR_LUNID, buffer,
432 SA_CACHE_FLUSH_BUFFER_LENGTH, 0, &pci_direction);
433 if (rc)
434 goto out;
435
436 rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header,
437 0, NULL, PQI_FLUSH_CACHE_TIMEOUT);
438
439 pqi_pci_unmap(ctrl_info->pci_dev, request.sg_descriptors, 1,
440 pci_direction);
441
442 out:
443 kfree(buffer);
444
445 return rc;
446 }
447
448 static int pqi_write_host_wellness(struct pqi_ctrl_info *ctrl_info,
449 void *buffer, size_t buffer_length)
450 {
451 int rc;
452 struct pqi_raid_path_request request;
453 int pci_direction;
454
455 rc = pqi_build_raid_path_request(ctrl_info, &request,
456 BMIC_WRITE_HOST_WELLNESS, RAID_CTLR_LUNID, buffer,
457 buffer_length, 0, &pci_direction);
458 if (rc)
459 return rc;
460
461 rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header,
462 0, NULL, NO_TIMEOUT);
463
464 pqi_pci_unmap(ctrl_info->pci_dev, request.sg_descriptors, 1,
465 pci_direction);
466
467 return rc;
468 }
469
470 #pragma pack(1)
471
472 struct bmic_host_wellness_driver_version {
473 u8 start_tag[4];
474 u8 driver_version_tag[2];
475 __le16 driver_version_length;
476 char driver_version[32];
477 u8 end_tag[2];
478 };
479
480 #pragma pack()
481
482 static int pqi_write_driver_version_to_host_wellness(
483 struct pqi_ctrl_info *ctrl_info)
484 {
485 int rc;
486 struct bmic_host_wellness_driver_version *buffer;
487 size_t buffer_length;
488
489 buffer_length = sizeof(*buffer);
490
491 buffer = kmalloc(buffer_length, GFP_KERNEL);
492 if (!buffer)
493 return -ENOMEM;
494
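/*
 * BMIC host wellness "driver version" payload layout: a "<HW>" start
 * tag, a "DV" tag with a little-endian length, the 32-byte driver
 * version string, and a "ZZ" end tag.
 */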
495 buffer->start_tag[0] = '<';
496 buffer->start_tag[1] = 'H';
497 buffer->start_tag[2] = 'W';
498 buffer->start_tag[3] = '>';
499 buffer->driver_version_tag[0] = 'D';
500 buffer->driver_version_tag[1] = 'V';
501 put_unaligned_le16(sizeof(buffer->driver_version),
502 &buffer->driver_version_length);
503 strncpy(buffer->driver_version, DRIVER_VERSION,
504 sizeof(buffer->driver_version) - 1);
505 buffer->driver_version[sizeof(buffer->driver_version) - 1] = '\0';
506 buffer->end_tag[0] = 'Z';
507 buffer->end_tag[1] = 'Z';
508
509 rc = pqi_write_host_wellness(ctrl_info, buffer, buffer_length);
510
511 kfree(buffer);
512
513 return rc;
514 }
515
516 #pragma pack(1)
517
518 struct bmic_host_wellness_time {
519 u8 start_tag[4];
520 u8 time_tag[2];
521 __le16 time_length;
522 u8 time[8];
523 u8 dont_write_tag[2];
524 u8 end_tag[2];
525 };
526
527 #pragma pack()
528
529 static int pqi_write_current_time_to_host_wellness(
530 struct pqi_ctrl_info *ctrl_info)
531 {
532 int rc;
533 struct bmic_host_wellness_time *buffer;
534 size_t buffer_length;
535 time64_t local_time;
536 unsigned int year;
537 struct timeval time;
538 struct rtc_time tm;
539
540 buffer_length = sizeof(*buffer);
541
542 buffer = kmalloc(buffer_length, GFP_KERNEL);
543 if (!buffer)
544 return -ENOMEM;
545
546 buffer->start_tag[0] = '<';
547 buffer->start_tag[1] = 'H';
548 buffer->start_tag[2] = 'W';
549 buffer->start_tag[3] = '>';
550 buffer->time_tag[0] = 'T';
551 buffer->time_tag[1] = 'D';
552 put_unaligned_le16(sizeof(buffer->time),
553 &buffer->time_length);
554
555 do_gettimeofday(&time);
556 local_time = time.tv_sec - (sys_tz.tz_minuteswest * 60);
557 rtc_time64_to_tm(local_time, &tm);
558 year = tm.tm_year + 1900;
559
560 buffer->time[0] = bin2bcd(tm.tm_hour);
561 buffer->time[1] = bin2bcd(tm.tm_min);
562 buffer->time[2] = bin2bcd(tm.tm_sec);
563 buffer->time[3] = 0;
564 buffer->time[4] = bin2bcd(tm.tm_mon + 1);
565 buffer->time[5] = bin2bcd(tm.tm_mday);
566 buffer->time[6] = bin2bcd(year / 100);
567 buffer->time[7] = bin2bcd(year % 100);
568
569 buffer->dont_write_tag[0] = 'D';
570 buffer->dont_write_tag[1] = 'W';
571 buffer->end_tag[0] = 'Z';
572 buffer->end_tag[1] = 'Z';
573
574 rc = pqi_write_host_wellness(ctrl_info, buffer, buffer_length);
575
576 kfree(buffer);
577
578 return rc;
579 }
580
581 #define PQI_UPDATE_TIME_WORK_INTERVAL (24UL * 60 * 60 * HZ)
582
583 static void pqi_update_time_worker(struct work_struct *work)
584 {
585 int rc;
586 struct pqi_ctrl_info *ctrl_info;
587
588 ctrl_info = container_of(to_delayed_work(work), struct pqi_ctrl_info,
589 update_time_work);
590
591 if (!ctrl_info) {
592 pr_warn("%s: NULL controller pointer.\n", __func__);
593 return;
594 }
595 rc = pqi_write_current_time_to_host_wellness(ctrl_info);
596 if (rc)
597 dev_warn(&ctrl_info->pci_dev->dev,
598 "error updating time on controller\n");
599
600 schedule_delayed_work(&ctrl_info->update_time_work,
601 PQI_UPDATE_TIME_WORK_INTERVAL);
602 }
603
604 static inline void pqi_schedule_update_time_worker(
605 struct pqi_ctrl_info *ctrl_info)
606 {
607 schedule_delayed_work(&ctrl_info->update_time_work, 120);
608 }
609
610 static int pqi_report_luns(struct pqi_ctrl_info *ctrl_info, u8 cmd,
611 void *buffer, size_t buffer_length)
612 {
613 int rc;
614 int pci_direction;
615 struct pqi_raid_path_request request;
616
617 rc = pqi_build_raid_path_request(ctrl_info, &request,
618 cmd, RAID_CTLR_LUNID, buffer, buffer_length, 0, &pci_direction);
619 if (rc)
620 return rc;
621
622 rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header, 0,
623 NULL, NO_TIMEOUT);
624
625 pqi_pci_unmap(ctrl_info->pci_dev, request.sg_descriptors, 1,
626 pci_direction);
627
628 return rc;
629 }
630
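/*
 * Read a CISS report LUNs list in two passes: first fetch just the
 * report_lun_header to learn the list length, then re-issue the command
 * with a buffer sized for the whole list. If the reported length grew
 * between the two calls, the full read is retried with the new size.
 */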
631 static int pqi_report_phys_logical_luns(struct pqi_ctrl_info *ctrl_info, u8 cmd,
632 void **buffer)
633 {
634 int rc;
635 size_t lun_list_length;
636 size_t lun_data_length;
637 size_t new_lun_list_length;
638 void *lun_data = NULL;
639 struct report_lun_header *report_lun_header;
640
641 report_lun_header = kmalloc(sizeof(*report_lun_header), GFP_KERNEL);
642 if (!report_lun_header) {
643 rc = -ENOMEM;
644 goto out;
645 }
646
647 rc = pqi_report_luns(ctrl_info, cmd, report_lun_header,
648 sizeof(*report_lun_header));
649 if (rc)
650 goto out;
651
652 lun_list_length = get_unaligned_be32(&report_lun_header->list_length);
653
654 again:
655 lun_data_length = sizeof(struct report_lun_header) + lun_list_length;
656
657 lun_data = kmalloc(lun_data_length, GFP_KERNEL);
658 if (!lun_data) {
659 rc = -ENOMEM;
660 goto out;
661 }
662
663 if (lun_list_length == 0) {
664 memcpy(lun_data, report_lun_header, sizeof(*report_lun_header));
665 goto out;
666 }
667
668 rc = pqi_report_luns(ctrl_info, cmd, lun_data, lun_data_length);
669 if (rc)
670 goto out;
671
672 new_lun_list_length = get_unaligned_be32(
673 &((struct report_lun_header *)lun_data)->list_length);
674
675 if (new_lun_list_length > lun_list_length) {
676 lun_list_length = new_lun_list_length;
677 kfree(lun_data);
678 goto again;
679 }
680
681 out:
682 kfree(report_lun_header);
683
684 if (rc) {
685 kfree(lun_data);
686 lun_data = NULL;
687 }
688
689 *buffer = lun_data;
690
691 return rc;
692 }
693
694 static inline int pqi_report_phys_luns(struct pqi_ctrl_info *ctrl_info,
695 void **buffer)
696 {
697 return pqi_report_phys_logical_luns(ctrl_info, CISS_REPORT_PHYS,
698 buffer);
699 }
700
701 static inline int pqi_report_logical_luns(struct pqi_ctrl_info *ctrl_info,
702 void **buffer)
703 {
704 return pqi_report_phys_logical_luns(ctrl_info, CISS_REPORT_LOG, buffer);
705 }
706
707 static int pqi_get_device_lists(struct pqi_ctrl_info *ctrl_info,
708 struct report_phys_lun_extended **physdev_list,
709 struct report_log_lun_extended **logdev_list)
710 {
711 int rc;
712 size_t logdev_list_length;
713 size_t logdev_data_length;
714 struct report_log_lun_extended *internal_logdev_list;
715 struct report_log_lun_extended *logdev_data;
716 struct report_lun_header report_lun_header;
717
718 rc = pqi_report_phys_luns(ctrl_info, (void **)physdev_list);
719 if (rc)
720 dev_err(&ctrl_info->pci_dev->dev,
721 "report physical LUNs failed\n");
722
723 rc = pqi_report_logical_luns(ctrl_info, (void **)logdev_list);
724 if (rc)
725 dev_err(&ctrl_info->pci_dev->dev,
726 "report logical LUNs failed\n");
727
728 /*
729 * Tack the controller itself onto the end of the logical device list.
730 */
731
732 logdev_data = *logdev_list;
733
734 if (logdev_data) {
735 logdev_list_length =
736 get_unaligned_be32(&logdev_data->header.list_length);
737 } else {
738 memset(&report_lun_header, 0, sizeof(report_lun_header));
739 logdev_data =
740 (struct report_log_lun_extended *)&report_lun_header;
741 logdev_list_length = 0;
742 }
743
744 logdev_data_length = sizeof(struct report_lun_header) +
745 logdev_list_length;
746
747 internal_logdev_list = kmalloc(logdev_data_length +
748 sizeof(struct report_log_lun_extended), GFP_KERNEL);
749 if (!internal_logdev_list) {
750 kfree(*logdev_list);
751 *logdev_list = NULL;
752 return -ENOMEM;
753 }
754
755 memcpy(internal_logdev_list, logdev_data, logdev_data_length);
756 memset((u8 *)internal_logdev_list + logdev_data_length, 0,
757 sizeof(struct report_log_lun_extended_entry));
758 put_unaligned_be32(logdev_list_length +
759 sizeof(struct report_log_lun_extended_entry),
760 &internal_logdev_list->header.list_length);
761
762 kfree(*logdev_list);
763 *logdev_list = internal_logdev_list;
764
765 return 0;
766 }
767
768 static inline void pqi_set_bus_target_lun(struct pqi_scsi_dev *device,
769 int bus, int target, int lun)
770 {
771 device->bus = bus;
772 device->target = target;
773 device->lun = lun;
774 }
775
776 static void pqi_assign_bus_target_lun(struct pqi_scsi_dev *device)
777 {
778 u8 *scsi3addr;
779 u32 lunid;
780
781 scsi3addr = device->scsi3addr;
782 lunid = get_unaligned_le32(scsi3addr);
783
784 if (pqi_is_hba_lunid(scsi3addr)) {
785 /* The specified device is the controller. */
786 pqi_set_bus_target_lun(device, PQI_HBA_BUS, 0, lunid & 0x3fff);
787 device->target_lun_valid = true;
788 return;
789 }
790
791 if (pqi_is_logical_device(device)) {
792 pqi_set_bus_target_lun(device, PQI_RAID_VOLUME_BUS, 0,
793 lunid & 0x3fff);
794 device->target_lun_valid = true;
795 return;
796 }
797
798 /*
799 * Defer target and LUN assignment for non-controller physical devices
800 * because the SAS transport layer will make these assignments later.
801 */
802 pqi_set_bus_target_lun(device, PQI_PHYSICAL_DEVICE_BUS, 0, 0);
803 }
804
805 static void pqi_get_raid_level(struct pqi_ctrl_info *ctrl_info,
806 struct pqi_scsi_dev *device)
807 {
808 int rc;
809 u8 raid_level;
810 u8 *buffer;
811
812 raid_level = SA_RAID_UNKNOWN;
813
814 buffer = kmalloc(64, GFP_KERNEL);
815 if (buffer) {
816 rc = pqi_scsi_inquiry(ctrl_info, device->scsi3addr,
817 VPD_PAGE | CISS_VPD_LV_DEVICE_GEOMETRY, buffer, 64);
818 if (rc == 0) {
819 raid_level = buffer[8];
820 if (raid_level > SA_RAID_MAX)
821 raid_level = SA_RAID_UNKNOWN;
822 }
823 kfree(buffer);
824 }
825
826 device->raid_level = raid_level;
827 }
828
829 static int pqi_validate_raid_map(struct pqi_ctrl_info *ctrl_info,
830 struct pqi_scsi_dev *device, struct raid_map *raid_map)
831 {
832 char *err_msg;
833 u32 raid_map_size;
834 u32 r5or6_blocks_per_row;
835 unsigned int num_phys_disks;
836 unsigned int num_raid_map_entries;
837
838 raid_map_size = get_unaligned_le32(&raid_map->structure_size);
839
840 if (raid_map_size < offsetof(struct raid_map, disk_data)) {
841 err_msg = "RAID map too small";
842 goto bad_raid_map;
843 }
844
845 if (raid_map_size > sizeof(*raid_map)) {
846 err_msg = "RAID map too large";
847 goto bad_raid_map;
848 }
849
850 num_phys_disks = get_unaligned_le16(&raid_map->layout_map_count) *
851 (get_unaligned_le16(&raid_map->data_disks_per_row) +
852 get_unaligned_le16(&raid_map->metadata_disks_per_row));
853 num_raid_map_entries = num_phys_disks *
854 get_unaligned_le16(&raid_map->row_cnt);
855
856 if (num_raid_map_entries > RAID_MAP_MAX_ENTRIES) {
857 err_msg = "invalid number of map entries in RAID map";
858 goto bad_raid_map;
859 }
860
861 if (device->raid_level == SA_RAID_1) {
862 if (get_unaligned_le16(&raid_map->layout_map_count) != 2) {
863 err_msg = "invalid RAID-1 map";
864 goto bad_raid_map;
865 }
866 } else if (device->raid_level == SA_RAID_ADM) {
867 if (get_unaligned_le16(&raid_map->layout_map_count) != 3) {
868 err_msg = "invalid RAID-1(ADM) map";
869 goto bad_raid_map;
870 }
871 } else if ((device->raid_level == SA_RAID_5 ||
872 device->raid_level == SA_RAID_6) &&
873 get_unaligned_le16(&raid_map->layout_map_count) > 1) {
874 /* RAID 50/60 */
875 r5or6_blocks_per_row =
876 get_unaligned_le16(&raid_map->strip_size) *
877 get_unaligned_le16(&raid_map->data_disks_per_row);
878 if (r5or6_blocks_per_row == 0) {
879 err_msg = "invalid RAID-5 or RAID-6 map";
880 goto bad_raid_map;
881 }
882 }
883
884 return 0;
885
886 bad_raid_map:
887 dev_warn(&ctrl_info->pci_dev->dev, "%s\n", err_msg);
888
889 return -EINVAL;
890 }
891
892 static int pqi_get_raid_map(struct pqi_ctrl_info *ctrl_info,
893 struct pqi_scsi_dev *device)
894 {
895 int rc;
896 int pci_direction;
897 struct pqi_raid_path_request request;
898 struct raid_map *raid_map;
899
900 raid_map = kmalloc(sizeof(*raid_map), GFP_KERNEL);
901 if (!raid_map)
902 return -ENOMEM;
903
904 rc = pqi_build_raid_path_request(ctrl_info, &request,
905 CISS_GET_RAID_MAP, device->scsi3addr, raid_map,
906 sizeof(*raid_map), 0, &pci_direction);
907 if (rc)
908 goto error;
909
910 rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header, 0,
911 NULL, NO_TIMEOUT);
912
913 pqi_pci_unmap(ctrl_info->pci_dev, request.sg_descriptors, 1,
914 pci_direction);
915
916 if (rc)
917 goto error;
918
919 rc = pqi_validate_raid_map(ctrl_info, device, raid_map);
920 if (rc)
921 goto error;
922
923 device->raid_map = raid_map;
924
925 return 0;
926
927 error:
928 kfree(raid_map);
929
930 return rc;
931 }
932
933 static void pqi_get_offload_status(struct pqi_ctrl_info *ctrl_info,
934 struct pqi_scsi_dev *device)
935 {
936 int rc;
937 u8 *buffer;
938 u8 offload_status;
939
940 buffer = kmalloc(64, GFP_KERNEL);
941 if (!buffer)
942 return;
943
944 rc = pqi_scsi_inquiry(ctrl_info, device->scsi3addr,
945 VPD_PAGE | CISS_VPD_LV_OFFLOAD_STATUS, buffer, 64);
946 if (rc)
947 goto out;
948
949 #define OFFLOAD_STATUS_BYTE 4
950 #define OFFLOAD_CONFIGURED_BIT 0x1
951 #define OFFLOAD_ENABLED_BIT 0x2
952
953 offload_status = buffer[OFFLOAD_STATUS_BYTE];
954 device->offload_configured =
955 !!(offload_status & OFFLOAD_CONFIGURED_BIT);
956 if (device->offload_configured) {
957 device->offload_enabled_pending =
958 !!(offload_status & OFFLOAD_ENABLED_BIT);
959 if (pqi_get_raid_map(ctrl_info, device))
960 device->offload_enabled_pending = false;
961 }
962
963 out:
964 kfree(buffer);
965 }
966
967 /*
968 * Use vendor-specific VPD to determine online/offline status of a volume.
969 */
970
971 static void pqi_get_volume_status(struct pqi_ctrl_info *ctrl_info,
972 struct pqi_scsi_dev *device)
973 {
974 int rc;
975 size_t page_length;
976 u8 volume_status = CISS_LV_STATUS_UNAVAILABLE;
977 bool volume_offline = true;
978 u32 volume_flags;
979 struct ciss_vpd_logical_volume_status *vpd;
980
981 vpd = kmalloc(sizeof(*vpd), GFP_KERNEL);
982 if (!vpd)
983 goto no_buffer;
984
985 rc = pqi_scsi_inquiry(ctrl_info, device->scsi3addr,
986 VPD_PAGE | CISS_VPD_LV_STATUS, vpd, sizeof(*vpd));
987 if (rc)
988 goto out;
989
990 page_length = offsetof(struct ciss_vpd_logical_volume_status,
991 volume_status) + vpd->page_length;
992 if (page_length < sizeof(*vpd))
993 goto out;
994
995 volume_status = vpd->volume_status;
996 volume_flags = get_unaligned_be32(&vpd->flags);
997 volume_offline = (volume_flags & CISS_LV_FLAGS_NO_HOST_IO) != 0;
998
999 out:
1000 kfree(vpd);
1001 no_buffer:
1002 device->volume_status = volume_status;
1003 device->volume_offline = volume_offline;
1004 }
1005
1006 static int pqi_get_device_info(struct pqi_ctrl_info *ctrl_info,
1007 struct pqi_scsi_dev *device)
1008 {
1009 int rc;
1010 u8 *buffer;
1011
1012 buffer = kmalloc(64, GFP_KERNEL);
1013 if (!buffer)
1014 return -ENOMEM;
1015
1016 /* Send an inquiry to the device to see what it is. */
1017 rc = pqi_scsi_inquiry(ctrl_info, device->scsi3addr, 0, buffer, 64);
1018 if (rc)
1019 goto out;
1020
1021 scsi_sanitize_inquiry_string(&buffer[8], 8);
1022 scsi_sanitize_inquiry_string(&buffer[16], 16);
1023
1024 device->devtype = buffer[0] & 0x1f;
1025 memcpy(device->vendor, &buffer[8],
1026 sizeof(device->vendor));
1027 memcpy(device->model, &buffer[16],
1028 sizeof(device->model));
1029
1030 if (pqi_is_logical_device(device) && device->devtype == TYPE_DISK) {
1031 pqi_get_raid_level(ctrl_info, device);
1032 pqi_get_offload_status(ctrl_info, device);
1033 pqi_get_volume_status(ctrl_info, device);
1034 }
1035
1036 out:
1037 kfree(buffer);
1038
1039 return rc;
1040 }
1041
1042 static void pqi_get_physical_disk_info(struct pqi_ctrl_info *ctrl_info,
1043 struct pqi_scsi_dev *device,
1044 struct bmic_identify_physical_device *id_phys)
1045 {
1046 int rc;
1047
1048 memset(id_phys, 0, sizeof(*id_phys));
1049
1050 rc = pqi_identify_physical_device(ctrl_info, device,
1051 id_phys, sizeof(*id_phys));
1052 if (rc) {
1053 device->queue_depth = PQI_PHYSICAL_DISK_DEFAULT_MAX_QUEUE_DEPTH;
1054 return;
1055 }
1056
1057 device->queue_depth =
1058 get_unaligned_le16(&id_phys->current_queue_depth_limit);
1059 device->device_type = id_phys->device_type;
1060 device->active_path_index = id_phys->active_path_number;
1061 device->path_map = id_phys->redundant_path_present_map;
1062 memcpy(&device->box,
1063 &id_phys->alternate_paths_phys_box_on_port,
1064 sizeof(device->box));
1065 memcpy(&device->phys_connector,
1066 &id_phys->alternate_paths_phys_connector,
1067 sizeof(device->phys_connector));
1068 device->bay = id_phys->phys_bay_in_box;
1069 }
1070
1071 static void pqi_show_volume_status(struct pqi_ctrl_info *ctrl_info,
1072 struct pqi_scsi_dev *device)
1073 {
1074 char *status;
1075 static const char unknown_state_str[] =
1076 "Volume is in an unknown state (%u)";
1077 char unknown_state_buffer[sizeof(unknown_state_str) + 10];
1078
1079 switch (device->volume_status) {
1080 case CISS_LV_OK:
1081 status = "Volume online";
1082 break;
1083 case CISS_LV_FAILED:
1084 status = "Volume failed";
1085 break;
1086 case CISS_LV_NOT_CONFIGURED:
1087 status = "Volume not configured";
1088 break;
1089 case CISS_LV_DEGRADED:
1090 status = "Volume degraded";
1091 break;
1092 case CISS_LV_READY_FOR_RECOVERY:
1093 status = "Volume ready for recovery operation";
1094 break;
1095 case CISS_LV_UNDERGOING_RECOVERY:
1096 status = "Volume undergoing recovery";
1097 break;
1098 case CISS_LV_WRONG_PHYSICAL_DRIVE_REPLACED:
1099 status = "Wrong physical drive was replaced";
1100 break;
1101 case CISS_LV_PHYSICAL_DRIVE_CONNECTION_PROBLEM:
1102 status = "A physical drive not properly connected";
1103 break;
1104 case CISS_LV_HARDWARE_OVERHEATING:
1105 status = "Hardware is overheating";
1106 break;
1107 case CISS_LV_HARDWARE_HAS_OVERHEATED:
1108 status = "Hardware has overheated";
1109 break;
1110 case CISS_LV_UNDERGOING_EXPANSION:
1111 status = "Volume undergoing expansion";
1112 break;
1113 case CISS_LV_NOT_AVAILABLE:
1114 status = "Volume waiting for transforming volume";
1115 break;
1116 case CISS_LV_QUEUED_FOR_EXPANSION:
1117 status = "Volume queued for expansion";
1118 break;
1119 case CISS_LV_DISABLED_SCSI_ID_CONFLICT:
1120 status = "Volume disabled due to SCSI ID conflict";
1121 break;
1122 case CISS_LV_EJECTED:
1123 status = "Volume has been ejected";
1124 break;
1125 case CISS_LV_UNDERGOING_ERASE:
1126 status = "Volume undergoing background erase";
1127 break;
1128 case CISS_LV_READY_FOR_PREDICTIVE_SPARE_REBUILD:
1129 status = "Volume ready for predictive spare rebuild";
1130 break;
1131 case CISS_LV_UNDERGOING_RPI:
1132 status = "Volume undergoing rapid parity initialization";
1133 break;
1134 case CISS_LV_PENDING_RPI:
1135 status = "Volume queued for rapid parity initialization";
1136 break;
1137 case CISS_LV_ENCRYPTED_NO_KEY:
1138 status = "Encrypted volume inaccessible - key not present";
1139 break;
1140 case CISS_LV_UNDERGOING_ENCRYPTION:
1141 status = "Volume undergoing encryption process";
1142 break;
1143 case CISS_LV_UNDERGOING_ENCRYPTION_REKEYING:
1144 status = "Volume undergoing encryption re-keying process";
1145 break;
1146 case CISS_LV_ENCRYPTED_IN_NON_ENCRYPTED_CONTROLLER:
1147 status =
1148 "Encrypted volume inaccessible - disabled on ctrl";
1149 break;
1150 case CISS_LV_PENDING_ENCRYPTION:
1151 status = "Volume pending migration to encrypted state";
1152 break;
1153 case CISS_LV_PENDING_ENCRYPTION_REKEYING:
1154 status = "Volume pending encryption rekeying";
1155 break;
1156 case CISS_LV_NOT_SUPPORTED:
1157 status = "Volume not supported on this controller";
1158 break;
1159 case CISS_LV_STATUS_UNAVAILABLE:
1160 status = "Volume status not available";
1161 break;
1162 default:
1163 snprintf(unknown_state_buffer, sizeof(unknown_state_buffer),
1164 unknown_state_str, device->volume_status);
1165 status = unknown_state_buffer;
1166 break;
1167 }
1168
1169 dev_info(&ctrl_info->pci_dev->dev,
1170 "scsi %d:%d:%d:%d %s\n",
1171 ctrl_info->scsi_host->host_no,
1172 device->bus, device->target, device->lun, status);
1173 }
1174
1175 static struct pqi_scsi_dev *pqi_find_disk_by_aio_handle(
1176 struct pqi_ctrl_info *ctrl_info, u32 aio_handle)
1177 {
1178 struct pqi_scsi_dev *device;
1179
1180 list_for_each_entry(device, &ctrl_info->scsi_device_list,
1181 scsi_device_list_entry) {
1182 if (device->devtype != TYPE_DISK && device->devtype != TYPE_ZBC)
1183 continue;
1184 if (pqi_is_logical_device(device))
1185 continue;
1186 if (device->aio_handle == aio_handle)
1187 return device;
1188 }
1189
1190 return NULL;
1191 }
1192
1193 static void pqi_update_logical_drive_queue_depth(
1194 struct pqi_ctrl_info *ctrl_info, struct pqi_scsi_dev *logical_drive)
1195 {
1196 unsigned int i;
1197 struct raid_map *raid_map;
1198 struct raid_map_disk_data *disk_data;
1199 struct pqi_scsi_dev *phys_disk;
1200 unsigned int num_phys_disks;
1201 unsigned int num_raid_map_entries;
1202 unsigned int queue_depth;
1203
1204 logical_drive->queue_depth = PQI_LOGICAL_DRIVE_DEFAULT_MAX_QUEUE_DEPTH;
1205
1206 raid_map = logical_drive->raid_map;
1207 if (!raid_map)
1208 return;
1209
1210 disk_data = raid_map->disk_data;
1211 num_phys_disks = get_unaligned_le16(&raid_map->layout_map_count) *
1212 (get_unaligned_le16(&raid_map->data_disks_per_row) +
1213 get_unaligned_le16(&raid_map->metadata_disks_per_row));
1214 num_raid_map_entries = num_phys_disks *
1215 get_unaligned_le16(&raid_map->row_cnt);
1216
1217 queue_depth = 0;
1218 for (i = 0; i < num_raid_map_entries; i++) {
1219 phys_disk = pqi_find_disk_by_aio_handle(ctrl_info,
1220 disk_data[i].aio_handle);
1221
1222 if (!phys_disk) {
1223 dev_warn(&ctrl_info->pci_dev->dev,
1224 "failed to find physical disk for logical drive %016llx\n",
1225 get_unaligned_be64(logical_drive->scsi3addr));
1226 logical_drive->offload_enabled = false;
1227 logical_drive->offload_enabled_pending = false;
1228 kfree(raid_map);
1229 logical_drive->raid_map = NULL;
1230 return;
1231 }
1232
1233 queue_depth += phys_disk->queue_depth;
1234 }
1235
1236 logical_drive->queue_depth = queue_depth;
1237 }
1238
1239 static void pqi_update_all_logical_drive_queue_depths(
1240 struct pqi_ctrl_info *ctrl_info)
1241 {
1242 struct pqi_scsi_dev *device;
1243
1244 list_for_each_entry(device, &ctrl_info->scsi_device_list,
1245 scsi_device_list_entry) {
1246 if (device->devtype != TYPE_DISK && device->devtype != TYPE_ZBC)
1247 continue;
1248 if (!pqi_is_logical_device(device))
1249 continue;
1250 pqi_update_logical_drive_queue_depth(ctrl_info, device);
1251 }
1252 }
1253
1254 static void pqi_rescan_worker(struct work_struct *work)
1255 {
1256 struct pqi_ctrl_info *ctrl_info;
1257
1258 ctrl_info = container_of(to_delayed_work(work), struct pqi_ctrl_info,
1259 rescan_work);
1260
1261 pqi_scan_scsi_devices(ctrl_info);
1262 }
1263
1264 static int pqi_add_device(struct pqi_ctrl_info *ctrl_info,
1265 struct pqi_scsi_dev *device)
1266 {
1267 int rc;
1268
1269 if (pqi_is_logical_device(device))
1270 rc = scsi_add_device(ctrl_info->scsi_host, device->bus,
1271 device->target, device->lun);
1272 else
1273 rc = pqi_add_sas_device(ctrl_info->sas_host, device);
1274
1275 return rc;
1276 }
1277
1278 static inline void pqi_remove_device(struct pqi_ctrl_info *ctrl_info,
1279 struct pqi_scsi_dev *device)
1280 {
1281 if (pqi_is_logical_device(device))
1282 scsi_remove_device(device->sdev);
1283 else
1284 pqi_remove_sas_device(device);
1285 }
1286
1287 /* Assumes the SCSI device list lock is held. */
1288
1289 static struct pqi_scsi_dev *pqi_find_scsi_dev(struct pqi_ctrl_info *ctrl_info,
1290 int bus, int target, int lun)
1291 {
1292 struct pqi_scsi_dev *device;
1293
1294 list_for_each_entry(device, &ctrl_info->scsi_device_list,
1295 scsi_device_list_entry)
1296 if (device->bus == bus && device->target == target &&
1297 device->lun == lun)
1298 return device;
1299
1300 return NULL;
1301 }
1302
1303 static inline bool pqi_device_equal(struct pqi_scsi_dev *dev1,
1304 struct pqi_scsi_dev *dev2)
1305 {
1306 if (dev1->is_physical_device != dev2->is_physical_device)
1307 return false;
1308
1309 if (dev1->is_physical_device)
1310 return dev1->wwid == dev2->wwid;
1311
1312 return memcmp(dev1->volume_id, dev2->volume_id,
1313 sizeof(dev1->volume_id)) == 0;
1314 }
1315
1316 enum pqi_find_result {
1317 DEVICE_NOT_FOUND,
1318 DEVICE_CHANGED,
1319 DEVICE_SAME,
1320 };
1321
1322 static enum pqi_find_result pqi_scsi_find_entry(struct pqi_ctrl_info *ctrl_info,
1323 struct pqi_scsi_dev *device_to_find,
1324 struct pqi_scsi_dev **matching_device)
1325 {
1326 struct pqi_scsi_dev *device;
1327
1328 list_for_each_entry(device, &ctrl_info->scsi_device_list,
1329 scsi_device_list_entry) {
1330 if (pqi_scsi3addr_equal(device_to_find->scsi3addr,
1331 device->scsi3addr)) {
1332 *matching_device = device;
1333 if (pqi_device_equal(device_to_find, device)) {
1334 if (device_to_find->volume_offline)
1335 return DEVICE_CHANGED;
1336 return DEVICE_SAME;
1337 }
1338 return DEVICE_CHANGED;
1339 }
1340 }
1341
1342 return DEVICE_NOT_FOUND;
1343 }
1344
1345 static void pqi_dev_info(struct pqi_ctrl_info *ctrl_info,
1346 char *action, struct pqi_scsi_dev *device)
1347 {
1348 dev_info(&ctrl_info->pci_dev->dev,
1349 "%s scsi %d:%d:%d:%d: %s %.8s %.16s %-12s SSDSmartPathCap%c En%c Exp%c qd=%d\n",
1350 action,
1351 ctrl_info->scsi_host->host_no,
1352 device->bus,
1353 device->target,
1354 device->lun,
1355 scsi_device_type(device->devtype),
1356 device->vendor,
1357 device->model,
1358 pqi_raid_level_to_string(device->raid_level),
1359 device->offload_configured ? '+' : '-',
1360 device->offload_enabled_pending ? '+' : '-',
1361 device->expose_device ? '+' : '-',
1362 device->queue_depth);
1363 }
1364
1365 /* Assumes the SCSI device list lock is held. */
1366
1367 static void pqi_scsi_update_device(struct pqi_scsi_dev *existing_device,
1368 struct pqi_scsi_dev *new_device)
1369 {
1370 existing_device->devtype = new_device->devtype;
1371 existing_device->device_type = new_device->device_type;
1372 existing_device->bus = new_device->bus;
1373 if (new_device->target_lun_valid) {
1374 existing_device->target = new_device->target;
1375 existing_device->lun = new_device->lun;
1376 existing_device->target_lun_valid = true;
1377 }
1378
1379 /* By definition, the scsi3addr and wwid fields are already the same. */
1380
1381 existing_device->is_physical_device = new_device->is_physical_device;
1382 existing_device->expose_device = new_device->expose_device;
1383 existing_device->no_uld_attach = new_device->no_uld_attach;
1384 existing_device->aio_enabled = new_device->aio_enabled;
1385 memcpy(existing_device->vendor, new_device->vendor,
1386 sizeof(existing_device->vendor));
1387 memcpy(existing_device->model, new_device->model,
1388 sizeof(existing_device->model));
1389 existing_device->sas_address = new_device->sas_address;
1390 existing_device->raid_level = new_device->raid_level;
1391 existing_device->queue_depth = new_device->queue_depth;
1392 existing_device->aio_handle = new_device->aio_handle;
1393 existing_device->volume_status = new_device->volume_status;
1394 existing_device->active_path_index = new_device->active_path_index;
1395 existing_device->path_map = new_device->path_map;
1396 existing_device->bay = new_device->bay;
1397 memcpy(existing_device->box, new_device->box,
1398 sizeof(existing_device->box));
1399 memcpy(existing_device->phys_connector, new_device->phys_connector,
1400 sizeof(existing_device->phys_connector));
1401 existing_device->offload_configured = new_device->offload_configured;
1402 existing_device->offload_enabled = false;
1403 existing_device->offload_enabled_pending =
1404 new_device->offload_enabled_pending;
1405 existing_device->offload_to_mirror = 0;
1406 kfree(existing_device->raid_map);
1407 existing_device->raid_map = new_device->raid_map;
1408
1409 /* To prevent this from being freed later. */
1410 new_device->raid_map = NULL;
1411 }
1412
1413 static inline void pqi_free_device(struct pqi_scsi_dev *device)
1414 {
1415 if (device) {
1416 kfree(device->raid_map);
1417 kfree(device);
1418 }
1419 }
1420
1421 /*
1422 * Called when exposing a new device to the OS fails in order to re-adjust
1423 * our internal SCSI device list to match the SCSI ML's view.
1424 */
1425
1426 static inline void pqi_fixup_botched_add(struct pqi_ctrl_info *ctrl_info,
1427 struct pqi_scsi_dev *device)
1428 {
1429 unsigned long flags;
1430
1431 spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);
1432 list_del(&device->scsi_device_list_entry);
1433 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
1434
1435 /* Allow the device structure to be freed later. */
1436 device->keep_device = false;
1437 }
1438
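/*
 * Reconcile the freshly reported devices with the driver's internal
 * list: mark every existing device as gone, classify each new device as
 * SAME/CHANGED/NOT FOUND, then queue departures on a delete list and
 * arrivals on an add list so that the actual SCSI ML add/remove calls
 * can be made after the spinlock is dropped.
 */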
1439 static void pqi_update_device_list(struct pqi_ctrl_info *ctrl_info,
1440 struct pqi_scsi_dev *new_device_list[], unsigned int num_new_devices)
1441 {
1442 int rc;
1443 unsigned int i;
1444 unsigned long flags;
1445 enum pqi_find_result find_result;
1446 struct pqi_scsi_dev *device;
1447 struct pqi_scsi_dev *next;
1448 struct pqi_scsi_dev *matching_device;
1449 struct list_head add_list;
1450 struct list_head delete_list;
1451
1452 INIT_LIST_HEAD(&add_list);
1453 INIT_LIST_HEAD(&delete_list);
1454
1455 /*
1456 * The idea here is to do as little work as possible while holding the
1457 * spinlock. That's why we go to great pains to defer anything other
1458 * than updating the internal device list until after we release the
1459 * spinlock.
1460 */
1461
1462 spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);
1463
1464 /* Assume that all devices in the existing list have gone away. */
1465 list_for_each_entry(device, &ctrl_info->scsi_device_list,
1466 scsi_device_list_entry)
1467 device->device_gone = true;
1468
1469 for (i = 0; i < num_new_devices; i++) {
1470 device = new_device_list[i];
1471
1472 find_result = pqi_scsi_find_entry(ctrl_info, device,
1473 &matching_device);
1474
1475 switch (find_result) {
1476 case DEVICE_SAME:
1477 /*
1478 * The newly found device is already in the existing
1479 * device list.
1480 */
1481 device->new_device = false;
1482 matching_device->device_gone = false;
1483 pqi_scsi_update_device(matching_device, device);
1484 break;
1485 case DEVICE_NOT_FOUND:
1486 /*
1487 * The newly found device is NOT in the existing device
1488 * list.
1489 */
1490 device->new_device = true;
1491 break;
1492 case DEVICE_CHANGED:
1493 /*
1494 * The original device has gone away and we need to add
1495 * the new device.
1496 */
1497 device->new_device = true;
1498 break;
1499 default:
1500 WARN_ON(find_result);
1501 break;
1502 }
1503 }
1504
1505 /* Process all devices that have gone away. */
1506 list_for_each_entry_safe(device, next, &ctrl_info->scsi_device_list,
1507 scsi_device_list_entry) {
1508 if (device->device_gone) {
1509 list_del(&device->scsi_device_list_entry);
1510 list_add_tail(&device->delete_list_entry, &delete_list);
1511 }
1512 }
1513
1514 /* Process all new devices. */
1515 for (i = 0; i < num_new_devices; i++) {
1516 device = new_device_list[i];
1517 if (!device->new_device)
1518 continue;
1519 if (device->volume_offline)
1520 continue;
1521 list_add_tail(&device->scsi_device_list_entry,
1522 &ctrl_info->scsi_device_list);
1523 list_add_tail(&device->add_list_entry, &add_list);
1524 /* To prevent this device structure from being freed later. */
1525 device->keep_device = true;
1526 }
1527
1528 pqi_update_all_logical_drive_queue_depths(ctrl_info);
1529
1530 list_for_each_entry(device, &ctrl_info->scsi_device_list,
1531 scsi_device_list_entry)
1532 device->offload_enabled =
1533 device->offload_enabled_pending;
1534
1535 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
1536
1537 /* Remove all devices that have gone away. */
1538 list_for_each_entry_safe(device, next, &delete_list,
1539 delete_list_entry) {
1540 if (device->sdev)
1541 pqi_remove_device(ctrl_info, device);
1542 if (device->volume_offline) {
1543 pqi_dev_info(ctrl_info, "offline", device);
1544 pqi_show_volume_status(ctrl_info, device);
1545 } else {
1546 pqi_dev_info(ctrl_info, "removed", device);
1547 }
1548 list_del(&device->delete_list_entry);
1549 pqi_free_device(device);
1550 }
1551
1552 /*
1553 * Notify the SCSI ML if the queue depth of any existing device has
1554 * changed.
1555 */
1556 list_for_each_entry(device, &ctrl_info->scsi_device_list,
1557 scsi_device_list_entry) {
1558 if (device->sdev && device->queue_depth !=
1559 device->advertised_queue_depth) {
1560 device->advertised_queue_depth = device->queue_depth;
1561 scsi_change_queue_depth(device->sdev,
1562 device->advertised_queue_depth);
1563 }
1564 }
1565
1566 /* Expose any new devices. */
1567 list_for_each_entry_safe(device, next, &add_list, add_list_entry) {
1568 if (device->expose_device && !device->sdev) {
1569 rc = pqi_add_device(ctrl_info, device);
1570 if (rc) {
1571 dev_warn(&ctrl_info->pci_dev->dev,
1572 "scsi %d:%d:%d:%d addition failed, device not added\n",
1573 ctrl_info->scsi_host->host_no,
1574 device->bus, device->target,
1575 device->lun);
1576 pqi_fixup_botched_add(ctrl_info, device);
1577 continue;
1578 }
1579 }
1580 pqi_dev_info(ctrl_info, "added", device);
1581 }
1582 }
1583
1584 static bool pqi_is_supported_device(struct pqi_scsi_dev *device)
1585 {
1586 bool is_supported = false;
1587
1588 switch (device->devtype) {
1589 case TYPE_DISK:
1590 case TYPE_ZBC:
1591 case TYPE_TAPE:
1592 case TYPE_MEDIUM_CHANGER:
1593 case TYPE_ENCLOSURE:
1594 is_supported = true;
1595 break;
1596 case TYPE_RAID:
1597 /*
1598 * Only support the HBA controller itself as a RAID
1599 * controller. If it's a RAID controller other than
1600 * the HBA itself (an external RAID controller, MSA500
1601 * or similar), we don't support it.
1602 */
1603 if (pqi_is_hba_lunid(device->scsi3addr))
1604 is_supported = true;
1605 break;
1606 }
1607
1608 return is_supported;
1609 }
1610
1611 static inline bool pqi_skip_device(u8 *scsi3addr,
1612 struct report_phys_lun_extended_entry *phys_lun_ext_entry)
1613 {
1614 u8 device_flags;
1615
1616 if (!MASKED_DEVICE(scsi3addr))
1617 return false;
1618
1619 /* The device is masked. */
1620
1621 device_flags = phys_lun_ext_entry->device_flags;
1622
1623 if (device_flags & REPORT_PHYS_LUN_DEV_FLAG_NON_DISK) {
1624 /*
1625 * It's a non-disk device. We ignore all devices of this type
1626 * when they're masked.
1627 */
1628 return true;
1629 }
1630
1631 return false;
1632 }
1633
1634 static inline bool pqi_expose_device(struct pqi_scsi_dev *device)
1635 {
1636 /* Expose all devices except for physical devices that are masked. */
1637 if (device->is_physical_device && MASKED_DEVICE(device->scsi3addr))
1638 return false;
1639
1640 return true;
1641 }
1642
1643 static int pqi_update_scsi_devices(struct pqi_ctrl_info *ctrl_info)
1644 {
1645 int i;
1646 int rc;
1647 struct list_head new_device_list_head;
1648 struct report_phys_lun_extended *physdev_list = NULL;
1649 struct report_log_lun_extended *logdev_list = NULL;
1650 struct report_phys_lun_extended_entry *phys_lun_ext_entry;
1651 struct report_log_lun_extended_entry *log_lun_ext_entry;
1652 struct bmic_identify_physical_device *id_phys = NULL;
1653 u32 num_physicals;
1654 u32 num_logicals;
1655 struct pqi_scsi_dev **new_device_list = NULL;
1656 struct pqi_scsi_dev *device;
1657 struct pqi_scsi_dev *next;
1658 unsigned int num_new_devices;
1659 unsigned int num_valid_devices;
1660 bool is_physical_device;
1661 u8 *scsi3addr;
1662 static char *out_of_memory_msg =
1663 "out of memory, device discovery stopped";
1664
1665 INIT_LIST_HEAD(&new_device_list_head);
1666
1667 rc = pqi_get_device_lists(ctrl_info, &physdev_list, &logdev_list);
1668 if (rc)
1669 goto out;
1670
1671 if (physdev_list)
1672 num_physicals =
1673 get_unaligned_be32(&physdev_list->header.list_length)
1674 / sizeof(physdev_list->lun_entries[0]);
1675 else
1676 num_physicals = 0;
1677
1678 if (logdev_list)
1679 num_logicals =
1680 get_unaligned_be32(&logdev_list->header.list_length)
1681 / sizeof(logdev_list->lun_entries[0]);
1682 else
1683 num_logicals = 0;
1684
1685 if (num_physicals) {
1686 /*
1687 * We need this buffer for calls to pqi_get_physical_disk_info()
1688 * below. We allocate it here instead of inside
1689 * pqi_get_physical_disk_info() because it's a fairly large
1690 * buffer.
1691 */
1692 id_phys = kmalloc(sizeof(*id_phys), GFP_KERNEL);
1693 if (!id_phys) {
1694 dev_warn(&ctrl_info->pci_dev->dev, "%s\n",
1695 out_of_memory_msg);
1696 rc = -ENOMEM;
1697 goto out;
1698 }
1699 }
1700
1701 num_new_devices = num_physicals + num_logicals;
1702
1703 new_device_list = kmalloc(sizeof(*new_device_list) *
1704 num_new_devices, GFP_KERNEL);
1705 if (!new_device_list) {
1706 dev_warn(&ctrl_info->pci_dev->dev, "%s\n", out_of_memory_msg);
1707 rc = -ENOMEM;
1708 goto out;
1709 }
1710
1711 for (i = 0; i < num_new_devices; i++) {
1712 device = kzalloc(sizeof(*device), GFP_KERNEL);
1713 if (!device) {
1714 dev_warn(&ctrl_info->pci_dev->dev, "%s\n",
1715 out_of_memory_msg);
1716 rc = -ENOMEM;
1717 goto out;
1718 }
1719 list_add_tail(&device->new_device_list_entry,
1720 &new_device_list_head);
1721 }
1722
1723 device = NULL;
1724 num_valid_devices = 0;
1725
1726 for (i = 0; i < num_new_devices; i++) {
1727
1728 if (i < num_physicals) {
1729 is_physical_device = true;
1730 phys_lun_ext_entry = &physdev_list->lun_entries[i];
1731 log_lun_ext_entry = NULL;
1732 scsi3addr = phys_lun_ext_entry->lunid;
1733 } else {
1734 is_physical_device = false;
1735 phys_lun_ext_entry = NULL;
1736 log_lun_ext_entry =
1737 &logdev_list->lun_entries[i - num_physicals];
1738 scsi3addr = log_lun_ext_entry->lunid;
1739 }
1740
1741 if (is_physical_device &&
1742 pqi_skip_device(scsi3addr, phys_lun_ext_entry))
1743 continue;
1744
1745 if (device)
1746 device = list_next_entry(device, new_device_list_entry);
1747 else
1748 device = list_first_entry(&new_device_list_head,
1749 struct pqi_scsi_dev, new_device_list_entry);
1750
1751 memcpy(device->scsi3addr, scsi3addr, sizeof(device->scsi3addr));
1752 device->is_physical_device = is_physical_device;
1753 device->raid_level = SA_RAID_UNKNOWN;
1754
1755 /* Gather information about the device. */
1756 rc = pqi_get_device_info(ctrl_info, device);
1757 if (rc == -ENOMEM) {
1758 dev_warn(&ctrl_info->pci_dev->dev, "%s\n",
1759 out_of_memory_msg);
1760 goto out;
1761 }
1762 if (rc) {
1763 dev_warn(&ctrl_info->pci_dev->dev,
1764 "obtaining device info failed, skipping device %016llx\n",
1765 get_unaligned_be64(device->scsi3addr));
1766 rc = 0;
1767 continue;
1768 }
1769
1770 if (!pqi_is_supported_device(device))
1771 continue;
1772
1773 pqi_assign_bus_target_lun(device);
1774
1775 device->expose_device = pqi_expose_device(device);
1776
1777 if (device->is_physical_device) {
1778 device->wwid = phys_lun_ext_entry->wwid;
1779 if ((phys_lun_ext_entry->device_flags &
1780 REPORT_PHYS_LUN_DEV_FLAG_AIO_ENABLED) &&
1781 phys_lun_ext_entry->aio_handle)
1782 device->aio_enabled = true;
1783 } else {
1784 memcpy(device->volume_id, log_lun_ext_entry->volume_id,
1785 sizeof(device->volume_id));
1786 }
1787
1788 switch (device->devtype) {
1789 case TYPE_DISK:
1790 case TYPE_ZBC:
1791 case TYPE_ENCLOSURE:
1792 if (device->is_physical_device) {
1793 device->sas_address =
1794 get_unaligned_be64(&device->wwid);
1795 if (device->devtype == TYPE_DISK ||
1796 device->devtype == TYPE_ZBC) {
1797 device->aio_handle =
1798 phys_lun_ext_entry->aio_handle;
1799 pqi_get_physical_disk_info(ctrl_info,
1800 device, id_phys);
1801 }
1802 }
1803 break;
1804 }
1805
1806 new_device_list[num_valid_devices++] = device;
1807 }
1808
1809 pqi_update_device_list(ctrl_info, new_device_list, num_valid_devices);
1810
1811 out:
1812 list_for_each_entry_safe(device, next, &new_device_list_head,
1813 new_device_list_entry) {
1814 if (device->keep_device)
1815 continue;
1816 list_del(&device->new_device_list_entry);
1817 pqi_free_device(device);
1818 }
1819
1820 kfree(new_device_list);
1821 kfree(physdev_list);
1822 kfree(logdev_list);
1823 kfree(id_phys);
1824
1825 return rc;
1826 }
1827
1828 static void pqi_remove_all_scsi_devices(struct pqi_ctrl_info *ctrl_info)
1829 {
1830 unsigned long flags;
1831 struct pqi_scsi_dev *device;
1832 struct pqi_scsi_dev *next;
1833
1834 spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);
1835
1836 list_for_each_entry_safe(device, next, &ctrl_info->scsi_device_list,
1837 scsi_device_list_entry) {
1838 if (device->sdev)
1839 pqi_remove_device(ctrl_info, device);
1840 list_del(&device->scsi_device_list_entry);
1841 pqi_free_device(device);
1842 }
1843
1844 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
1845 }
1846
1847 static int pqi_scan_scsi_devices(struct pqi_ctrl_info *ctrl_info)
1848 {
1849 int rc;
1850
1851 if (pqi_ctrl_offline(ctrl_info))
1852 return -ENXIO;
1853
1854 mutex_lock(&ctrl_info->scan_mutex);
1855
1856 rc = pqi_update_scsi_devices(ctrl_info);
1857 if (rc)
1858 pqi_schedule_rescan_worker(ctrl_info);
1859
1860 mutex_unlock(&ctrl_info->scan_mutex);
1861
1862 return rc;
1863 }
1864
1865 static void pqi_scan_start(struct Scsi_Host *shost)
1866 {
1867 pqi_scan_scsi_devices(shost_to_hba(shost));
1868 }
1869
1870 /* Returns TRUE if scan is finished. */
1871
1872 static int pqi_scan_finished(struct Scsi_Host *shost,
1873 unsigned long elapsed_time)
1874 {
1875 struct pqi_ctrl_info *ctrl_info;
1876
1877 ctrl_info = shost_priv(shost);
1878
1879 return !mutex_is_locked(&ctrl_info->scan_mutex);
1880 }
1881
1882 static inline void pqi_set_encryption_info(
1883 struct pqi_encryption_info *encryption_info, struct raid_map *raid_map,
1884 u64 first_block)
1885 {
1886 u32 volume_blk_size;
1887
1888 /*
1889 * Set the encryption tweak values based on logical block address.
1890 * If the block size is 512, the tweak value is equal to the LBA.
1891 * For other block sizes, tweak value is (LBA * block size) / 512.
1892 */
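/*
 * Illustrative example: with a 4096-byte volume block size, LBA 100
 * yields a tweak base of (100 * 4096) / 512 = 800; with a 512-byte
 * block size the tweak base is simply the LBA.
 */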
1893 volume_blk_size = get_unaligned_le32(&raid_map->volume_blk_size);
1894 if (volume_blk_size != 512)
1895 first_block = (first_block * volume_blk_size) / 512;
1896
1897 encryption_info->data_encryption_key_index =
1898 get_unaligned_le16(&raid_map->data_encryption_key_index);
1899 encryption_info->encrypt_tweak_lower = lower_32_bits(first_block);
1900 encryption_info->encrypt_tweak_upper = upper_32_bits(first_block);
1901 }
1902
1903 /*
1904 * Attempt to perform offload RAID mapping for a logical volume I/O.
1905 */
1906
1907 #define PQI_RAID_BYPASS_INELIGIBLE 1
1908
1909 static int pqi_raid_bypass_submit_scsi_cmd(struct pqi_ctrl_info *ctrl_info,
1910 struct pqi_scsi_dev *device, struct scsi_cmnd *scmd,
1911 struct pqi_queue_group *queue_group)
1912 {
1913 struct raid_map *raid_map;
1914 bool is_write = false;
1915 u32 map_index;
1916 u64 first_block;
1917 u64 last_block;
1918 u32 block_cnt;
1919 u32 blocks_per_row;
1920 u64 first_row;
1921 u64 last_row;
1922 u32 first_row_offset;
1923 u32 last_row_offset;
1924 u32 first_column;
1925 u32 last_column;
1926 u64 r0_first_row;
1927 u64 r0_last_row;
1928 u32 r5or6_blocks_per_row;
1929 u64 r5or6_first_row;
1930 u64 r5or6_last_row;
1931 u32 r5or6_first_row_offset;
1932 u32 r5or6_last_row_offset;
1933 u32 r5or6_first_column;
1934 u32 r5or6_last_column;
1935 u16 data_disks_per_row;
1936 u32 total_disks_per_row;
1937 u16 layout_map_count;
1938 u32 stripesize;
1939 u16 strip_size;
1940 u32 first_group;
1941 u32 last_group;
1942 u32 current_group;
1943 u32 map_row;
1944 u32 aio_handle;
1945 u64 disk_block;
1946 u32 disk_block_cnt;
1947 u8 cdb[16];
1948 u8 cdb_length;
1949 int offload_to_mirror;
1950 struct pqi_encryption_info *encryption_info_ptr;
1951 struct pqi_encryption_info encryption_info;
1952 #if BITS_PER_LONG == 32
1953 u64 tmpdiv;
1954 #endif
1955
1956 /* Check for valid opcode, get LBA and block count. */
1957 switch (scmd->cmnd[0]) {
1958 case WRITE_6:
1959 is_write = true;
1960 /* fall through */
1961 case READ_6:
1962 first_block = (u64)get_unaligned_be16(&scmd->cmnd[2]);
1963 block_cnt = (u32)scmd->cmnd[4];
1964 if (block_cnt == 0)
1965 block_cnt = 256;
1966 break;
1967 case WRITE_10:
1968 is_write = true;
1969 /* fall through */
1970 case READ_10:
1971 first_block = (u64)get_unaligned_be32(&scmd->cmnd[2]);
1972 block_cnt = (u32)get_unaligned_be16(&scmd->cmnd[7]);
1973 break;
1974 case WRITE_12:
1975 is_write = true;
1976 /* fall through */
1977 case READ_12:
1978 first_block = (u64)get_unaligned_be32(&scmd->cmnd[2]);
1979 block_cnt = get_unaligned_be32(&scmd->cmnd[6]);
1980 break;
1981 case WRITE_16:
1982 is_write = true;
1983 /* fall through */
1984 case READ_16:
1985 first_block = get_unaligned_be64(&scmd->cmnd[2]);
1986 block_cnt = get_unaligned_be32(&scmd->cmnd[10]);
1987 break;
1988 default:
1989 /* Process via normal I/O path. */
1990 return PQI_RAID_BYPASS_INELIGIBLE;
1991 }
1992
1993 /* Check for write to non-RAID-0. */
1994 if (is_write && device->raid_level != SA_RAID_0)
1995 return PQI_RAID_BYPASS_INELIGIBLE;
1996
1997 if (unlikely(block_cnt == 0))
1998 return PQI_RAID_BYPASS_INELIGIBLE;
1999
2000 last_block = first_block + block_cnt - 1;
2001 raid_map = device->raid_map;
2002
2003 /* Check for invalid block or wraparound. */
2004 if (last_block >= get_unaligned_le64(&raid_map->volume_blk_cnt) ||
2005 last_block < first_block)
2006 return PQI_RAID_BYPASS_INELIGIBLE;
2007
2008 data_disks_per_row = get_unaligned_le16(&raid_map->data_disks_per_row);
2009 strip_size = get_unaligned_le16(&raid_map->strip_size);
2010 layout_map_count = get_unaligned_le16(&raid_map->layout_map_count);
2011
2012 /* Calculate stripe information for the request. */
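/*
 * For example, with strip_size = 128 and 3 data disks per row,
 * blocks_per_row = 384, so LBA 1000 lands in row 2 (1000 / 384) at row
 * offset 232 (1000 - 768), i.e. column 1 (232 / 128).
 */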
2013 blocks_per_row = data_disks_per_row * strip_size;
2014 #if BITS_PER_LONG == 32
2015 tmpdiv = first_block;
2016 do_div(tmpdiv, blocks_per_row);
2017 first_row = tmpdiv;
2018 tmpdiv = last_block;
2019 do_div(tmpdiv, blocks_per_row);
2020 last_row = tmpdiv;
2021 first_row_offset = (u32)(first_block - (first_row * blocks_per_row));
2022 last_row_offset = (u32)(last_block - (last_row * blocks_per_row));
2023 tmpdiv = first_row_offset;
2024 do_div(tmpdiv, strip_size);
2025 first_column = tmpdiv;
2026 tmpdiv = last_row_offset;
2027 do_div(tmpdiv, strip_size);
2028 last_column = tmpdiv;
2029 #else
2030 first_row = first_block / blocks_per_row;
2031 last_row = last_block / blocks_per_row;
2032 first_row_offset = (u32)(first_block - (first_row * blocks_per_row));
2033 last_row_offset = (u32)(last_block - (last_row * blocks_per_row));
2034 first_column = first_row_offset / strip_size;
2035 last_column = last_row_offset / strip_size;
2036 #endif
2037
2038 	/* If this isn't a single row/column then give it to the controller. */
2039 if (first_row != last_row || first_column != last_column)
2040 return PQI_RAID_BYPASS_INELIGIBLE;
2041
2042 /* Proceeding with driver mapping. */
2043 total_disks_per_row = data_disks_per_row +
2044 get_unaligned_le16(&raid_map->metadata_disks_per_row);
2045 map_row = ((u32)(first_row >> raid_map->parity_rotation_shift)) %
2046 get_unaligned_le16(&raid_map->row_cnt);
2047 map_index = (map_row * total_disks_per_row) + first_column;
2048
2049 /* RAID 1 */
2050 if (device->raid_level == SA_RAID_1) {
2051 if (device->offload_to_mirror)
2052 map_index += data_disks_per_row;
2053 device->offload_to_mirror = !device->offload_to_mirror;
2054 } else if (device->raid_level == SA_RAID_ADM) {
2055 /* RAID ADM */
2056 /*
2057 * Handles N-way mirrors (R1-ADM) and R10 with # of drives
2058 * divisible by 3.
2059 */
2060 offload_to_mirror = device->offload_to_mirror;
2061 if (offload_to_mirror == 0) {
2062 /* use physical disk in the first mirrored group. */
2063 map_index %= data_disks_per_row;
2064 } else {
2065 do {
2066 /*
2067 * Determine mirror group that map_index
2068 * indicates.
2069 */
2070 current_group = map_index / data_disks_per_row;
2071
2072 if (offload_to_mirror != current_group) {
2073 if (current_group <
2074 layout_map_count - 1) {
2075 /*
2076 * Select raid index from
2077 * next group.
2078 */
2079 map_index += data_disks_per_row;
2080 current_group++;
2081 } else {
2082 /*
2083 * Select raid index from first
2084 * group.
2085 */
2086 map_index %= data_disks_per_row;
2087 current_group = 0;
2088 }
2089 }
2090 } while (offload_to_mirror != current_group);
2091 }
2092
2093 /* Set mirror group to use next time. */
2094 offload_to_mirror =
2095 (offload_to_mirror >= layout_map_count - 1) ?
2096 0 : offload_to_mirror + 1;
2097 WARN_ON(offload_to_mirror >= layout_map_count);
2098 device->offload_to_mirror = offload_to_mirror;
2099 /*
2100 * Avoid direct use of device->offload_to_mirror within this
2101 * function since multiple threads might simultaneously
2102 	 * increment it beyond the range of device->layout_map_count - 1.
2103 */
2104 } else if ((device->raid_level == SA_RAID_5 ||
2105 device->raid_level == SA_RAID_6) && layout_map_count > 1) {
2106 /* RAID 50/60 */
2107 /* Verify first and last block are in same RAID group */
2108 r5or6_blocks_per_row = strip_size * data_disks_per_row;
2109 stripesize = r5or6_blocks_per_row * layout_map_count;
2110 #if BITS_PER_LONG == 32
2111 tmpdiv = first_block;
2112 first_group = do_div(tmpdiv, stripesize);
2113 tmpdiv = first_group;
2114 do_div(tmpdiv, r5or6_blocks_per_row);
2115 first_group = tmpdiv;
2116 tmpdiv = last_block;
2117 last_group = do_div(tmpdiv, stripesize);
2118 tmpdiv = last_group;
2119 do_div(tmpdiv, r5or6_blocks_per_row);
2120 last_group = tmpdiv;
2121 #else
2122 first_group = (first_block % stripesize) / r5or6_blocks_per_row;
2123 last_group = (last_block % stripesize) / r5or6_blocks_per_row;
2124 #endif
2125 if (first_group != last_group)
2126 return PQI_RAID_BYPASS_INELIGIBLE;
2127
2128 /* Verify request is in a single row of RAID 5/6 */
2129 #if BITS_PER_LONG == 32
2130 tmpdiv = first_block;
2131 do_div(tmpdiv, stripesize);
2132 first_row = r5or6_first_row = r0_first_row = tmpdiv;
2133 tmpdiv = last_block;
2134 do_div(tmpdiv, stripesize);
2135 r5or6_last_row = r0_last_row = tmpdiv;
2136 #else
2137 first_row = r5or6_first_row = r0_first_row =
2138 first_block / stripesize;
2139 r5or6_last_row = r0_last_row = last_block / stripesize;
2140 #endif
2141 if (r5or6_first_row != r5or6_last_row)
2142 return PQI_RAID_BYPASS_INELIGIBLE;
2143
2144 /* Verify request is in a single column */
2145 #if BITS_PER_LONG == 32
2146 tmpdiv = first_block;
2147 first_row_offset = do_div(tmpdiv, stripesize);
2148 tmpdiv = first_row_offset;
2149 first_row_offset = (u32)do_div(tmpdiv, r5or6_blocks_per_row);
2150 r5or6_first_row_offset = first_row_offset;
2151 tmpdiv = last_block;
2152 r5or6_last_row_offset = do_div(tmpdiv, stripesize);
2153 tmpdiv = r5or6_last_row_offset;
2154 r5or6_last_row_offset = do_div(tmpdiv, r5or6_blocks_per_row);
2155 tmpdiv = r5or6_first_row_offset;
2156 do_div(tmpdiv, strip_size);
2157 first_column = r5or6_first_column = tmpdiv;
2158 tmpdiv = r5or6_last_row_offset;
2159 do_div(tmpdiv, strip_size);
2160 r5or6_last_column = tmpdiv;
2161 #else
2162 first_row_offset = r5or6_first_row_offset =
2163 (u32)((first_block % stripesize) %
2164 r5or6_blocks_per_row);
2165
2166 r5or6_last_row_offset =
2167 (u32)((last_block % stripesize) %
2168 r5or6_blocks_per_row);
2169
2170 first_column = r5or6_first_row_offset / strip_size;
2171 r5or6_first_column = first_column;
2172 r5or6_last_column = r5or6_last_row_offset / strip_size;
2173 #endif
2174 if (r5or6_first_column != r5or6_last_column)
2175 return PQI_RAID_BYPASS_INELIGIBLE;
2176
2177 /* Request is eligible */
2178 map_row =
2179 ((u32)(first_row >> raid_map->parity_rotation_shift)) %
2180 get_unaligned_le16(&raid_map->row_cnt);
2181
2182 map_index = (first_group *
2183 (get_unaligned_le16(&raid_map->row_cnt) *
2184 total_disks_per_row)) +
2185 (map_row * total_disks_per_row) + first_column;
2186 }
2187
2188 if (unlikely(map_index >= RAID_MAP_MAX_ENTRIES))
2189 return PQI_RAID_BYPASS_INELIGIBLE;
2190
2191 aio_handle = raid_map->disk_data[map_index].aio_handle;
2192 disk_block = get_unaligned_le64(&raid_map->disk_starting_blk) +
2193 first_row * strip_size +
2194 (first_row_offset - first_column * strip_size);
2195 disk_block_cnt = block_cnt;
2196
2197 /* Handle differing logical/physical block sizes. */
2198 if (raid_map->phys_blk_shift) {
2199 disk_block <<= raid_map->phys_blk_shift;
2200 disk_block_cnt <<= raid_map->phys_blk_shift;
2201 }
2202
2203 if (unlikely(disk_block_cnt > 0xffff))
2204 return PQI_RAID_BYPASS_INELIGIBLE;
2205
2206 /* Build the new CDB for the physical disk I/O. */
2207 if (disk_block > 0xffffffff) {
2208 cdb[0] = is_write ? WRITE_16 : READ_16;
2209 cdb[1] = 0;
2210 put_unaligned_be64(disk_block, &cdb[2]);
2211 put_unaligned_be32(disk_block_cnt, &cdb[10]);
2212 cdb[14] = 0;
2213 cdb[15] = 0;
2214 cdb_length = 16;
2215 } else {
2216 cdb[0] = is_write ? WRITE_10 : READ_10;
2217 cdb[1] = 0;
2218 put_unaligned_be32((u32)disk_block, &cdb[2]);
2219 cdb[6] = 0;
2220 put_unaligned_be16((u16)disk_block_cnt, &cdb[7]);
2221 cdb[9] = 0;
2222 cdb_length = 10;
2223 }
2224
2225 if (get_unaligned_le16(&raid_map->flags) &
2226 RAID_MAP_ENCRYPTION_ENABLED) {
2227 pqi_set_encryption_info(&encryption_info, raid_map,
2228 first_block);
2229 encryption_info_ptr = &encryption_info;
2230 } else {
2231 encryption_info_ptr = NULL;
2232 }
2233
2234 return pqi_aio_submit_io(ctrl_info, scmd, aio_handle,
2235 cdb, cdb_length, queue_group, encryption_info_ptr);
2236 }
2237
2238 #define PQI_STATUS_IDLE 0x0
2239
2240 #define PQI_CREATE_ADMIN_QUEUE_PAIR 1
2241 #define PQI_DELETE_ADMIN_QUEUE_PAIR 2
2242
2243 #define PQI_DEVICE_STATE_POWER_ON_AND_RESET 0x0
2244 #define PQI_DEVICE_STATE_STATUS_AVAILABLE 0x1
2245 #define PQI_DEVICE_STATE_ALL_REGISTERS_READY 0x2
2246 #define PQI_DEVICE_STATE_ADMIN_QUEUE_PAIR_READY 0x3
2247 #define PQI_DEVICE_STATE_ERROR 0x4
2248
2249 #define PQI_MODE_READY_TIMEOUT_SECS 30
2250 #define PQI_MODE_READY_POLL_INTERVAL_MSECS 1
2251
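/*
 * Poll the PQI registers until the device signature is present, the
 * function and status code reports IDLE, and the device status reports
 * all registers ready, giving up after PQI_MODE_READY_TIMEOUT_SECS.
 */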
2252 static int pqi_wait_for_pqi_mode_ready(struct pqi_ctrl_info *ctrl_info)
2253 {
2254 struct pqi_device_registers __iomem *pqi_registers;
2255 unsigned long timeout;
2256 u64 signature;
2257 u8 status;
2258
2259 pqi_registers = ctrl_info->pqi_registers;
2260 timeout = (PQI_MODE_READY_TIMEOUT_SECS * HZ) + jiffies;
2261
2262 while (1) {
2263 signature = readq(&pqi_registers->signature);
2264 if (memcmp(&signature, PQI_DEVICE_SIGNATURE,
2265 sizeof(signature)) == 0)
2266 break;
2267 if (time_after(jiffies, timeout)) {
2268 dev_err(&ctrl_info->pci_dev->dev,
2269 "timed out waiting for PQI signature\n");
2270 return -ETIMEDOUT;
2271 }
2272 msleep(PQI_MODE_READY_POLL_INTERVAL_MSECS);
2273 }
2274
2275 while (1) {
2276 status = readb(&pqi_registers->function_and_status_code);
2277 if (status == PQI_STATUS_IDLE)
2278 break;
2279 if (time_after(jiffies, timeout)) {
2280 dev_err(&ctrl_info->pci_dev->dev,
2281 "timed out waiting for PQI IDLE\n");
2282 return -ETIMEDOUT;
2283 }
2284 msleep(PQI_MODE_READY_POLL_INTERVAL_MSECS);
2285 }
2286
2287 while (1) {
2288 if (readl(&pqi_registers->device_status) ==
2289 PQI_DEVICE_STATE_ALL_REGISTERS_READY)
2290 break;
2291 if (time_after(jiffies, timeout)) {
2292 dev_err(&ctrl_info->pci_dev->dev,
2293 "timed out waiting for PQI all registers ready\n");
2294 return -ETIMEDOUT;
2295 }
2296 msleep(PQI_MODE_READY_POLL_INTERVAL_MSECS);
2297 }
2298
2299 return 0;
2300 }
2301
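/*
 * The firmware has disabled the AIO path for this device; clear
 * offload_enabled so that subsequent I/O is no longer sent via RAID bypass.
 */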
2302 static inline void pqi_aio_path_disabled(struct pqi_io_request *io_request)
2303 {
2304 struct pqi_scsi_dev *device;
2305
2306 device = io_request->scmd->device->hostdata;
2307 device->offload_enabled = false;
2308 }
2309
2310 static inline void pqi_take_device_offline(struct scsi_device *sdev)
2311 {
2312 struct pqi_ctrl_info *ctrl_info;
2313 struct pqi_scsi_dev *device;
2314
2315 if (scsi_device_online(sdev)) {
2316 scsi_device_set_state(sdev, SDEV_OFFLINE);
2317 ctrl_info = shost_to_hba(sdev->host);
2318 schedule_delayed_work(&ctrl_info->rescan_work, 0);
2319 device = sdev->hostdata;
2320 dev_err(&ctrl_info->pci_dev->dev, "offlined scsi %d:%d:%d:%d\n",
2321 ctrl_info->scsi_host->host_no, device->bus,
2322 device->target, device->lun);
2323 }
2324 }
2325
2326 static void pqi_process_raid_io_error(struct pqi_io_request *io_request)
2327 {
2328 u8 scsi_status;
2329 u8 host_byte;
2330 struct scsi_cmnd *scmd;
2331 struct pqi_raid_error_info *error_info;
2332 size_t sense_data_length;
2333 int residual_count;
2334 int xfer_count;
2335 struct scsi_sense_hdr sshdr;
2336
2337 scmd = io_request->scmd;
2338 if (!scmd)
2339 return;
2340
2341 error_info = io_request->error_info;
2342 scsi_status = error_info->status;
2343 host_byte = DID_OK;
2344
2345 if (error_info->data_out_result == PQI_DATA_IN_OUT_UNDERFLOW) {
2346 xfer_count =
2347 get_unaligned_le32(&error_info->data_out_transferred);
2348 residual_count = scsi_bufflen(scmd) - xfer_count;
2349 scsi_set_resid(scmd, residual_count);
2350 if (xfer_count < scmd->underflow)
2351 host_byte = DID_SOFT_ERROR;
2352 }
2353
2354 sense_data_length = get_unaligned_le16(&error_info->sense_data_length);
2355 if (sense_data_length == 0)
2356 sense_data_length =
2357 get_unaligned_le16(&error_info->response_data_length);
2358 if (sense_data_length) {
2359 if (sense_data_length > sizeof(error_info->data))
2360 sense_data_length = sizeof(error_info->data);
2361
2362 if (scsi_status == SAM_STAT_CHECK_CONDITION &&
2363 scsi_normalize_sense(error_info->data,
2364 sense_data_length, &sshdr) &&
2365 sshdr.sense_key == HARDWARE_ERROR &&
2366 sshdr.asc == 0x3e &&
2367 sshdr.ascq == 0x1) {
2368 pqi_take_device_offline(scmd->device);
2369 host_byte = DID_NO_CONNECT;
2370 }
2371
2372 if (sense_data_length > SCSI_SENSE_BUFFERSIZE)
2373 sense_data_length = SCSI_SENSE_BUFFERSIZE;
2374 memcpy(scmd->sense_buffer, error_info->data,
2375 sense_data_length);
2376 }
2377
2378 scmd->result = scsi_status;
2379 set_host_byte(scmd, host_byte);
2380 }
2381
2382 static void pqi_process_aio_io_error(struct pqi_io_request *io_request)
2383 {
2384 u8 scsi_status;
2385 u8 host_byte;
2386 struct scsi_cmnd *scmd;
2387 struct pqi_aio_error_info *error_info;
2388 size_t sense_data_length;
2389 int residual_count;
2390 int xfer_count;
2391 bool device_offline;
2392
2393 scmd = io_request->scmd;
2394 error_info = io_request->error_info;
2395 host_byte = DID_OK;
2396 sense_data_length = 0;
2397 device_offline = false;
2398
2399 switch (error_info->service_response) {
2400 case PQI_AIO_SERV_RESPONSE_COMPLETE:
2401 scsi_status = error_info->status;
2402 break;
2403 case PQI_AIO_SERV_RESPONSE_FAILURE:
2404 switch (error_info->status) {
2405 case PQI_AIO_STATUS_IO_ABORTED:
2406 scsi_status = SAM_STAT_TASK_ABORTED;
2407 break;
2408 case PQI_AIO_STATUS_UNDERRUN:
2409 scsi_status = SAM_STAT_GOOD;
2410 residual_count = get_unaligned_le32(
2411 &error_info->residual_count);
2412 scsi_set_resid(scmd, residual_count);
2413 xfer_count = scsi_bufflen(scmd) - residual_count;
2414 if (xfer_count < scmd->underflow)
2415 host_byte = DID_SOFT_ERROR;
2416 break;
2417 case PQI_AIO_STATUS_OVERRUN:
2418 scsi_status = SAM_STAT_GOOD;
2419 break;
2420 case PQI_AIO_STATUS_AIO_PATH_DISABLED:
2421 pqi_aio_path_disabled(io_request);
2422 scsi_status = SAM_STAT_GOOD;
2423 io_request->status = -EAGAIN;
2424 break;
2425 case PQI_AIO_STATUS_NO_PATH_TO_DEVICE:
2426 case PQI_AIO_STATUS_INVALID_DEVICE:
2427 device_offline = true;
2428 pqi_take_device_offline(scmd->device);
2429 host_byte = DID_NO_CONNECT;
2430 scsi_status = SAM_STAT_CHECK_CONDITION;
2431 break;
2432 case PQI_AIO_STATUS_IO_ERROR:
2433 default:
2434 scsi_status = SAM_STAT_CHECK_CONDITION;
2435 break;
2436 }
2437 break;
2438 case PQI_AIO_SERV_RESPONSE_TMF_COMPLETE:
2439 case PQI_AIO_SERV_RESPONSE_TMF_SUCCEEDED:
2440 scsi_status = SAM_STAT_GOOD;
2441 break;
2442 case PQI_AIO_SERV_RESPONSE_TMF_REJECTED:
2443 case PQI_AIO_SERV_RESPONSE_TMF_INCORRECT_LUN:
2444 default:
2445 scsi_status = SAM_STAT_CHECK_CONDITION;
2446 break;
2447 }
2448
2449 if (error_info->data_present) {
2450 sense_data_length =
2451 get_unaligned_le16(&error_info->data_length);
2452 if (sense_data_length) {
2453 if (sense_data_length > sizeof(error_info->data))
2454 sense_data_length = sizeof(error_info->data);
2455 if (sense_data_length > SCSI_SENSE_BUFFERSIZE)
2456 sense_data_length = SCSI_SENSE_BUFFERSIZE;
2457 memcpy(scmd->sense_buffer, error_info->data,
2458 sense_data_length);
2459 }
2460 }
2461
2462 if (device_offline && sense_data_length == 0)
2463 scsi_build_sense_buffer(0, scmd->sense_buffer, HARDWARE_ERROR,
2464 0x3e, 0x1);
2465
2466 scmd->result = scsi_status;
2467 set_host_byte(scmd, host_byte);
2468 }
2469
2470 static void pqi_process_io_error(unsigned int iu_type,
2471 struct pqi_io_request *io_request)
2472 {
2473 switch (iu_type) {
2474 case PQI_RESPONSE_IU_RAID_PATH_IO_ERROR:
2475 pqi_process_raid_io_error(io_request);
2476 break;
2477 case PQI_RESPONSE_IU_AIO_PATH_IO_ERROR:
2478 pqi_process_aio_io_error(io_request);
2479 break;
2480 }
2481 }
2482
2483 static int pqi_interpret_task_management_response(
2484 struct pqi_task_management_response *response)
2485 {
2486 int rc;
2487
2488 switch (response->response_code) {
2489 case SOP_TMF_COMPLETE:
2490 case SOP_TMF_FUNCTION_SUCCEEDED:
2491 rc = 0;
2492 break;
2493 default:
2494 rc = -EIO;
2495 break;
2496 }
2497
2498 return rc;
2499 }
2500
2501 static unsigned int pqi_process_io_intr(struct pqi_ctrl_info *ctrl_info,
2502 struct pqi_queue_group *queue_group)
2503 {
2504 unsigned int num_responses;
2505 pqi_index_t oq_pi;
2506 pqi_index_t oq_ci;
2507 struct pqi_io_request *io_request;
2508 struct pqi_io_response *response;
2509 u16 request_id;
2510
2511 num_responses = 0;
2512 oq_ci = queue_group->oq_ci_copy;
2513
2514 while (1) {
2515 oq_pi = *queue_group->oq_pi;
2516 if (oq_pi == oq_ci)
2517 break;
2518
2519 num_responses++;
2520 response = queue_group->oq_element_array +
2521 (oq_ci * PQI_OPERATIONAL_OQ_ELEMENT_LENGTH);
2522
2523 request_id = get_unaligned_le16(&response->request_id);
2524 WARN_ON(request_id >= ctrl_info->max_io_slots);
2525
2526 io_request = &ctrl_info->io_request_pool[request_id];
2527 WARN_ON(atomic_read(&io_request->refcount) == 0);
2528
2529 switch (response->header.iu_type) {
2530 case PQI_RESPONSE_IU_RAID_PATH_IO_SUCCESS:
2531 case PQI_RESPONSE_IU_AIO_PATH_IO_SUCCESS:
2532 case PQI_RESPONSE_IU_GENERAL_MANAGEMENT:
2533 break;
2534 case PQI_RESPONSE_IU_TASK_MANAGEMENT:
2535 io_request->status =
2536 pqi_interpret_task_management_response(
2537 (void *)response);
2538 break;
2539 case PQI_RESPONSE_IU_AIO_PATH_DISABLED:
2540 pqi_aio_path_disabled(io_request);
2541 io_request->status = -EAGAIN;
2542 break;
2543 case PQI_RESPONSE_IU_RAID_PATH_IO_ERROR:
2544 case PQI_RESPONSE_IU_AIO_PATH_IO_ERROR:
2545 io_request->error_info = ctrl_info->error_buffer +
2546 (get_unaligned_le16(&response->error_index) *
2547 PQI_ERROR_BUFFER_ELEMENT_LENGTH);
2548 pqi_process_io_error(response->header.iu_type,
2549 io_request);
2550 break;
2551 default:
2552 dev_err(&ctrl_info->pci_dev->dev,
2553 "unexpected IU type: 0x%x\n",
2554 response->header.iu_type);
2555 WARN_ON(response->header.iu_type);
2556 break;
2557 }
2558
2559 io_request->io_complete_callback(io_request,
2560 io_request->context);
2561
2562 /*
2563 * Note that the I/O request structure CANNOT BE TOUCHED after
2564 * returning from the I/O completion callback!
2565 */
2566
2567 oq_ci = (oq_ci + 1) % ctrl_info->num_elements_per_oq;
2568 }
2569
2570 if (num_responses) {
2571 queue_group->oq_ci_copy = oq_ci;
2572 writel(oq_ci, queue_group->oq_ci);
2573 }
2574
2575 return num_responses;
2576 }
2577
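/*
 * Number of free elements in a circular queue with producer index pi and
 * consumer index ci; one slot is always kept unused so that a full queue
 * can be distinguished from an empty one. For example, with 8 elements,
 * pi == 5 and ci == 2, three elements are in use and 8 - 3 - 1 = 4 are free.
 */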
2578 static inline unsigned int pqi_num_elements_free(unsigned int pi,
2579 unsigned int ci,
2580 unsigned int elements_in_queue)
2581 {
2582 unsigned int num_elements_used;
2583
2584 if (pi >= ci)
2585 num_elements_used = pi - ci;
2586 else
2587 num_elements_used = elements_in_queue - ci + pi;
2588
2589 return elements_in_queue - num_elements_used - 1;
2590 }
2591
2592 #define PQI_EVENT_ACK_TIMEOUT 30
2593
2594 static void pqi_start_event_ack(struct pqi_ctrl_info *ctrl_info,
2595 struct pqi_event_acknowledge_request *iu, size_t iu_length)
2596 {
2597 pqi_index_t iq_pi;
2598 pqi_index_t iq_ci;
2599 unsigned long flags;
2600 void *next_element;
2601 unsigned long timeout;
2602 struct pqi_queue_group *queue_group;
2603
2604 queue_group = &ctrl_info->queue_groups[PQI_DEFAULT_QUEUE_GROUP];
2605 put_unaligned_le16(queue_group->oq_id, &iu->header.response_queue_id);
2606
2607 timeout = (PQI_EVENT_ACK_TIMEOUT * HZ) + jiffies;
2608
2609 while (1) {
2610 spin_lock_irqsave(&queue_group->submit_lock[RAID_PATH], flags);
2611
2612 iq_pi = queue_group->iq_pi_copy[RAID_PATH];
2613 iq_ci = *queue_group->iq_ci[RAID_PATH];
2614
2615 if (pqi_num_elements_free(iq_pi, iq_ci,
2616 ctrl_info->num_elements_per_iq))
2617 break;
2618
2619 spin_unlock_irqrestore(
2620 &queue_group->submit_lock[RAID_PATH], flags);
2621
2622 if (time_after(jiffies, timeout)) {
2623 dev_err(&ctrl_info->pci_dev->dev,
2624 "sending event acknowledge timed out\n");
2625 return;
2626 }
2627 }
2628
2629 next_element = queue_group->iq_element_array[RAID_PATH] +
2630 (iq_pi * PQI_OPERATIONAL_IQ_ELEMENT_LENGTH);
2631
2632 memcpy(next_element, iu, iu_length);
2633
2634 iq_pi = (iq_pi + 1) % ctrl_info->num_elements_per_iq;
2635
2636 queue_group->iq_pi_copy[RAID_PATH] = iq_pi;
2637
2638 /*
2639 * This write notifies the controller that an IU is available to be
2640 * processed.
2641 */
2642 writel(iq_pi, queue_group->iq_pi[RAID_PATH]);
2643
2644 spin_unlock_irqrestore(&queue_group->submit_lock[RAID_PATH], flags);
2645
2646 /*
2647 * We have to special-case this type of request because the firmware
2648 * does not generate an interrupt when this type of request completes.
2649 * Therefore, we have to poll until we see that the firmware has
2650 * consumed the request before we move on.
2651 */
2652
2653 timeout = (PQI_EVENT_ACK_TIMEOUT * HZ) + jiffies;
2654
2655 while (1) {
2656 if (*queue_group->iq_ci[RAID_PATH] == iq_pi)
2657 break;
2658 if (time_after(jiffies, timeout)) {
2659 dev_err(&ctrl_info->pci_dev->dev,
2660 "completing event acknowledge timed out\n");
2661 break;
2662 }
2663 usleep_range(1000, 2000);
2664 }
2665 }
2666
2667 static void pqi_acknowledge_event(struct pqi_ctrl_info *ctrl_info,
2668 struct pqi_event *event)
2669 {
2670 struct pqi_event_acknowledge_request request;
2671
2672 memset(&request, 0, sizeof(request));
2673
2674 request.header.iu_type = PQI_REQUEST_IU_ACKNOWLEDGE_VENDOR_EVENT;
2675 put_unaligned_le16(sizeof(request) - PQI_REQUEST_HEADER_LENGTH,
2676 &request.header.iu_length);
2677 request.event_type = event->event_type;
2678 request.event_id = event->event_id;
2679 request.additional_event_id = event->additional_event_id;
2680
2681 pqi_start_event_ack(ctrl_info, &request, sizeof(request));
2682 }
2683
2684 static void pqi_event_worker(struct work_struct *work)
2685 {
2686 unsigned int i;
2687 struct pqi_ctrl_info *ctrl_info;
2688 struct pqi_event *pending_event;
2689 bool got_non_heartbeat_event = false;
2690
2691 ctrl_info = container_of(work, struct pqi_ctrl_info, event_work);
2692
2693 pending_event = ctrl_info->pending_events;
2694 for (i = 0; i < PQI_NUM_SUPPORTED_EVENTS; i++) {
2695 if (pending_event->pending) {
2696 pending_event->pending = false;
2697 pqi_acknowledge_event(ctrl_info, pending_event);
2698 if (i != PQI_EVENT_HEARTBEAT)
2699 got_non_heartbeat_event = true;
2700 }
2701 pending_event++;
2702 }
2703
2704 if (got_non_heartbeat_event)
2705 pqi_schedule_rescan_worker(ctrl_info);
2706 }
2707
2708 static void pqi_take_ctrl_offline(struct pqi_ctrl_info *ctrl_info)
2709 {
2710 unsigned int i;
2711 unsigned int path;
2712 struct pqi_queue_group *queue_group;
2713 unsigned long flags;
2714 struct pqi_io_request *io_request;
2715 struct pqi_io_request *next;
2716 struct scsi_cmnd *scmd;
2717
2718 ctrl_info->controller_online = false;
2719 dev_err(&ctrl_info->pci_dev->dev, "controller offline\n");
2720
2721 for (i = 0; i < ctrl_info->num_queue_groups; i++) {
2722 queue_group = &ctrl_info->queue_groups[i];
2723
2724 for (path = 0; path < 2; path++) {
2725 spin_lock_irqsave(
2726 &queue_group->submit_lock[path], flags);
2727
2728 list_for_each_entry_safe(io_request, next,
2729 &queue_group->request_list[path],
2730 request_list_entry) {
2731
2732 scmd = io_request->scmd;
2733 if (scmd) {
2734 set_host_byte(scmd, DID_NO_CONNECT);
2735 pqi_scsi_done(scmd);
2736 }
2737
2738 list_del(&io_request->request_list_entry);
2739 }
2740
2741 spin_unlock_irqrestore(
2742 &queue_group->submit_lock[path], flags);
2743 }
2744 }
2745 }
2746
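/*
 * Heartbeat timer: if the interrupt count has not advanced since the last
 * tick, bump the heartbeat counter and kick the event worker; after
 * PQI_MAX_HEARTBEAT_REQUESTS consecutive silent intervals the controller
 * is taken offline.
 */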
2747 #define PQI_HEARTBEAT_TIMER_INTERVAL (5 * HZ)
2748 #define PQI_MAX_HEARTBEAT_REQUESTS 5
2749
2750 static void pqi_heartbeat_timer_handler(unsigned long data)
2751 {
2752 int num_interrupts;
2753 struct pqi_ctrl_info *ctrl_info = (struct pqi_ctrl_info *)data;
2754
2755 num_interrupts = atomic_read(&ctrl_info->num_interrupts);
2756
2757 if (num_interrupts == ctrl_info->previous_num_interrupts) {
2758 ctrl_info->num_heartbeats_requested++;
2759 if (ctrl_info->num_heartbeats_requested >
2760 PQI_MAX_HEARTBEAT_REQUESTS) {
2761 pqi_take_ctrl_offline(ctrl_info);
2762 return;
2763 }
2764 ctrl_info->pending_events[PQI_EVENT_HEARTBEAT].pending = true;
2765 schedule_work(&ctrl_info->event_work);
2766 } else {
2767 ctrl_info->num_heartbeats_requested = 0;
2768 }
2769
2770 ctrl_info->previous_num_interrupts = num_interrupts;
2771 mod_timer(&ctrl_info->heartbeat_timer,
2772 jiffies + PQI_HEARTBEAT_TIMER_INTERVAL);
2773 }
2774
2775 static void pqi_start_heartbeat_timer(struct pqi_ctrl_info *ctrl_info)
2776 {
2777 ctrl_info->previous_num_interrupts =
2778 atomic_read(&ctrl_info->num_interrupts);
2779
2780 init_timer(&ctrl_info->heartbeat_timer);
2781 ctrl_info->heartbeat_timer.expires =
2782 jiffies + PQI_HEARTBEAT_TIMER_INTERVAL;
2783 ctrl_info->heartbeat_timer.data = (unsigned long)ctrl_info;
2784 ctrl_info->heartbeat_timer.function = pqi_heartbeat_timer_handler;
2785 add_timer(&ctrl_info->heartbeat_timer);
2786 ctrl_info->heartbeat_timer_started = true;
2787 }
2788
2789 static inline void pqi_stop_heartbeat_timer(struct pqi_ctrl_info *ctrl_info)
2790 {
2791 if (ctrl_info->heartbeat_timer_started)
2792 del_timer_sync(&ctrl_info->heartbeat_timer);
2793 }
2794
2795 static int pqi_event_type_to_event_index(unsigned int event_type)
2796 {
2797 int index;
2798
2799 switch (event_type) {
2800 case PQI_EVENT_TYPE_HEARTBEAT:
2801 index = PQI_EVENT_HEARTBEAT;
2802 break;
2803 case PQI_EVENT_TYPE_HOTPLUG:
2804 index = PQI_EVENT_HOTPLUG;
2805 break;
2806 case PQI_EVENT_TYPE_HARDWARE:
2807 index = PQI_EVENT_HARDWARE;
2808 break;
2809 case PQI_EVENT_TYPE_PHYSICAL_DEVICE:
2810 index = PQI_EVENT_PHYSICAL_DEVICE;
2811 break;
2812 case PQI_EVENT_TYPE_LOGICAL_DEVICE:
2813 index = PQI_EVENT_LOGICAL_DEVICE;
2814 break;
2815 case PQI_EVENT_TYPE_AIO_STATE_CHANGE:
2816 index = PQI_EVENT_AIO_STATE_CHANGE;
2817 break;
2818 case PQI_EVENT_TYPE_AIO_CONFIG_CHANGE:
2819 index = PQI_EVENT_AIO_CONFIG_CHANGE;
2820 break;
2821 default:
2822 index = -1;
2823 break;
2824 }
2825
2826 return index;
2827 }
2828
2829 static unsigned int pqi_process_event_intr(struct pqi_ctrl_info *ctrl_info)
2830 {
2831 unsigned int num_events;
2832 pqi_index_t oq_pi;
2833 pqi_index_t oq_ci;
2834 struct pqi_event_queue *event_queue;
2835 struct pqi_event_response *response;
2836 struct pqi_event *pending_event;
2837 bool need_delayed_work;
2838 int event_index;
2839
2840 event_queue = &ctrl_info->event_queue;
2841 num_events = 0;
2842 need_delayed_work = false;
2843 oq_ci = event_queue->oq_ci_copy;
2844
2845 while (1) {
2846 oq_pi = *event_queue->oq_pi;
2847 if (oq_pi == oq_ci)
2848 break;
2849
2850 num_events++;
2851 response = event_queue->oq_element_array +
2852 (oq_ci * PQI_EVENT_OQ_ELEMENT_LENGTH);
2853
2854 event_index =
2855 pqi_event_type_to_event_index(response->event_type);
2856
2857 if (event_index >= 0) {
2858 if (response->request_acknowlege) {
2859 pending_event =
2860 &ctrl_info->pending_events[event_index];
2861 pending_event->event_type =
2862 response->event_type;
2863 pending_event->event_id = response->event_id;
2864 pending_event->additional_event_id =
2865 response->additional_event_id;
2866 if (event_index != PQI_EVENT_HEARTBEAT) {
2867 pending_event->pending = true;
2868 need_delayed_work = true;
2869 }
2870 }
2871 }
2872
2873 oq_ci = (oq_ci + 1) % PQI_NUM_EVENT_QUEUE_ELEMENTS;
2874 }
2875
2876 if (num_events) {
2877 event_queue->oq_ci_copy = oq_ci;
2878 writel(oq_ci, event_queue->oq_ci);
2879
2880 if (need_delayed_work)
2881 schedule_work(&ctrl_info->event_work);
2882 }
2883
2884 return num_events;
2885 }
2886
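/*
 * Per-queue-group MSI-X handler: drain I/O responses for this queue group,
 * drain event responses if this vector also serves the event queue, then
 * restart any queued I/O on both the RAID and AIO paths.
 */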
2887 static irqreturn_t pqi_irq_handler(int irq, void *data)
2888 {
2889 struct pqi_ctrl_info *ctrl_info;
2890 struct pqi_queue_group *queue_group;
2891 unsigned int num_responses_handled;
2892
2893 queue_group = data;
2894 ctrl_info = queue_group->ctrl_info;
2895
2896 if (!ctrl_info || !queue_group->oq_ci)
2897 return IRQ_NONE;
2898
2899 num_responses_handled = pqi_process_io_intr(ctrl_info, queue_group);
2900
2901 if (irq == ctrl_info->event_irq)
2902 num_responses_handled += pqi_process_event_intr(ctrl_info);
2903
2904 if (num_responses_handled)
2905 atomic_inc(&ctrl_info->num_interrupts);
2906
2907 pqi_start_io(ctrl_info, queue_group, RAID_PATH, NULL);
2908 pqi_start_io(ctrl_info, queue_group, AIO_PATH, NULL);
2909
2910 return IRQ_HANDLED;
2911 }
2912
2913 static int pqi_request_irqs(struct pqi_ctrl_info *ctrl_info)
2914 {
2915 int i;
2916 int rc;
2917
2918 ctrl_info->event_irq = ctrl_info->msix_vectors[0];
2919
2920 for (i = 0; i < ctrl_info->num_msix_vectors_enabled; i++) {
2921 rc = request_irq(ctrl_info->msix_vectors[i],
2922 pqi_irq_handler, 0,
2923 DRIVER_NAME_SHORT, ctrl_info->intr_data[i]);
2924 if (rc) {
2925 dev_err(&ctrl_info->pci_dev->dev,
2926 "irq %u init failed with error %d\n",
2927 ctrl_info->msix_vectors[i], rc);
2928 return rc;
2929 }
2930 ctrl_info->num_msix_vectors_initialized++;
2931 }
2932
2933 return 0;
2934 }
2935
2936 static void pqi_free_irqs(struct pqi_ctrl_info *ctrl_info)
2937 {
2938 int i;
2939
2940 for (i = 0; i < ctrl_info->num_msix_vectors_initialized; i++)
2941 free_irq(ctrl_info->msix_vectors[i],
2942 ctrl_info->intr_data[i]);
2943 }
2944
2945 static int pqi_enable_msix_interrupts(struct pqi_ctrl_info *ctrl_info)
2946 {
2947 unsigned int i;
2948 int max_vectors;
2949 int num_vectors_enabled;
2950 struct msix_entry msix_entries[PQI_MAX_MSIX_VECTORS];
2951
2952 max_vectors = ctrl_info->num_queue_groups;
2953
2954 for (i = 0; i < max_vectors; i++)
2955 msix_entries[i].entry = i;
2956
2957 num_vectors_enabled = pci_enable_msix_range(ctrl_info->pci_dev,
2958 msix_entries, PQI_MIN_MSIX_VECTORS, max_vectors);
2959
2960 if (num_vectors_enabled < 0) {
2961 dev_err(&ctrl_info->pci_dev->dev,
2962 "MSI-X init failed with error %d\n",
2963 num_vectors_enabled);
2964 return num_vectors_enabled;
2965 }
2966
2967 ctrl_info->num_msix_vectors_enabled = num_vectors_enabled;
2968 for (i = 0; i < num_vectors_enabled; i++) {
2969 ctrl_info->msix_vectors[i] = msix_entries[i].vector;
2970 ctrl_info->intr_data[i] = &ctrl_info->queue_groups[i];
2971 }
2972
2973 return 0;
2974 }
2975
2976 static void pqi_irq_set_affinity_hint(struct pqi_ctrl_info *ctrl_info)
2977 {
2978 int i;
2979 int rc;
2980 int cpu;
2981
2982 cpu = cpumask_first(cpu_online_mask);
2983 for (i = 0; i < ctrl_info->num_msix_vectors_initialized; i++) {
2984 rc = irq_set_affinity_hint(ctrl_info->msix_vectors[i],
2985 get_cpu_mask(cpu));
2986 if (rc)
2987 dev_err(&ctrl_info->pci_dev->dev,
2988 "error %d setting affinity hint for irq vector %u\n",
2989 rc, ctrl_info->msix_vectors[i]);
2990 cpu = cpumask_next(cpu, cpu_online_mask);
2991 }
2992 }
2993
2994 static void pqi_irq_unset_affinity_hint(struct pqi_ctrl_info *ctrl_info)
2995 {
2996 int i;
2997
2998 for (i = 0; i < ctrl_info->num_msix_vectors_initialized; i++)
2999 irq_set_affinity_hint(ctrl_info->msix_vectors[i], NULL);
3000 }
3001
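/*
 * Size all of the operational queue element arrays and queue indexes,
 * allocate them as a single DMA-coherent region, and carve out the
 * per-queue-group CPU pointers and bus addresses.
 */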
3002 static int pqi_alloc_operational_queues(struct pqi_ctrl_info *ctrl_info)
3003 {
3004 unsigned int i;
3005 size_t alloc_length;
3006 size_t element_array_length_per_iq;
3007 size_t element_array_length_per_oq;
3008 void *element_array;
3009 void *next_queue_index;
3010 void *aligned_pointer;
3011 unsigned int num_inbound_queues;
3012 unsigned int num_outbound_queues;
3013 unsigned int num_queue_indexes;
3014 struct pqi_queue_group *queue_group;
3015
3016 element_array_length_per_iq =
3017 PQI_OPERATIONAL_IQ_ELEMENT_LENGTH *
3018 ctrl_info->num_elements_per_iq;
3019 element_array_length_per_oq =
3020 PQI_OPERATIONAL_OQ_ELEMENT_LENGTH *
3021 ctrl_info->num_elements_per_oq;
3022 num_inbound_queues = ctrl_info->num_queue_groups * 2;
3023 num_outbound_queues = ctrl_info->num_queue_groups;
3024 num_queue_indexes = (ctrl_info->num_queue_groups * 3) + 1;
3025
3026 aligned_pointer = NULL;
3027
3028 for (i = 0; i < num_inbound_queues; i++) {
3029 aligned_pointer = PTR_ALIGN(aligned_pointer,
3030 PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT);
3031 aligned_pointer += element_array_length_per_iq;
3032 }
3033
3034 for (i = 0; i < num_outbound_queues; i++) {
3035 aligned_pointer = PTR_ALIGN(aligned_pointer,
3036 PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT);
3037 aligned_pointer += element_array_length_per_oq;
3038 }
3039
3040 aligned_pointer = PTR_ALIGN(aligned_pointer,
3041 PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT);
3042 aligned_pointer += PQI_NUM_EVENT_QUEUE_ELEMENTS *
3043 PQI_EVENT_OQ_ELEMENT_LENGTH;
3044
3045 for (i = 0; i < num_queue_indexes; i++) {
3046 aligned_pointer = PTR_ALIGN(aligned_pointer,
3047 PQI_OPERATIONAL_INDEX_ALIGNMENT);
3048 aligned_pointer += sizeof(pqi_index_t);
3049 }
3050
3051 alloc_length = (size_t)aligned_pointer +
3052 PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT;
3053
3054 ctrl_info->queue_memory_base =
3055 dma_zalloc_coherent(&ctrl_info->pci_dev->dev,
3056 alloc_length,
3057 &ctrl_info->queue_memory_base_dma_handle, GFP_KERNEL);
3058
3059 if (!ctrl_info->queue_memory_base) {
3060 dev_err(&ctrl_info->pci_dev->dev,
3061 "failed to allocate memory for PQI admin queues\n");
3062 return -ENOMEM;
3063 }
3064
3065 ctrl_info->queue_memory_length = alloc_length;
3066
3067 element_array = PTR_ALIGN(ctrl_info->queue_memory_base,
3068 PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT);
3069
3070 for (i = 0; i < ctrl_info->num_queue_groups; i++) {
3071 queue_group = &ctrl_info->queue_groups[i];
3072 queue_group->iq_element_array[RAID_PATH] = element_array;
3073 queue_group->iq_element_array_bus_addr[RAID_PATH] =
3074 ctrl_info->queue_memory_base_dma_handle +
3075 (element_array - ctrl_info->queue_memory_base);
3076 element_array += element_array_length_per_iq;
3077 element_array = PTR_ALIGN(element_array,
3078 PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT);
3079 queue_group->iq_element_array[AIO_PATH] = element_array;
3080 queue_group->iq_element_array_bus_addr[AIO_PATH] =
3081 ctrl_info->queue_memory_base_dma_handle +
3082 (element_array - ctrl_info->queue_memory_base);
3083 element_array += element_array_length_per_iq;
3084 element_array = PTR_ALIGN(element_array,
3085 PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT);
3086 }
3087
3088 for (i = 0; i < ctrl_info->num_queue_groups; i++) {
3089 queue_group = &ctrl_info->queue_groups[i];
3090 queue_group->oq_element_array = element_array;
3091 queue_group->oq_element_array_bus_addr =
3092 ctrl_info->queue_memory_base_dma_handle +
3093 (element_array - ctrl_info->queue_memory_base);
3094 element_array += element_array_length_per_oq;
3095 element_array = PTR_ALIGN(element_array,
3096 PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT);
3097 }
3098
3099 ctrl_info->event_queue.oq_element_array = element_array;
3100 ctrl_info->event_queue.oq_element_array_bus_addr =
3101 ctrl_info->queue_memory_base_dma_handle +
3102 (element_array - ctrl_info->queue_memory_base);
3103 element_array += PQI_NUM_EVENT_QUEUE_ELEMENTS *
3104 PQI_EVENT_OQ_ELEMENT_LENGTH;
3105
3106 next_queue_index = PTR_ALIGN(element_array,
3107 PQI_OPERATIONAL_INDEX_ALIGNMENT);
3108
3109 for (i = 0; i < ctrl_info->num_queue_groups; i++) {
3110 queue_group = &ctrl_info->queue_groups[i];
3111 queue_group->iq_ci[RAID_PATH] = next_queue_index;
3112 queue_group->iq_ci_bus_addr[RAID_PATH] =
3113 ctrl_info->queue_memory_base_dma_handle +
3114 (next_queue_index - ctrl_info->queue_memory_base);
3115 next_queue_index += sizeof(pqi_index_t);
3116 next_queue_index = PTR_ALIGN(next_queue_index,
3117 PQI_OPERATIONAL_INDEX_ALIGNMENT);
3118 queue_group->iq_ci[AIO_PATH] = next_queue_index;
3119 queue_group->iq_ci_bus_addr[AIO_PATH] =
3120 ctrl_info->queue_memory_base_dma_handle +
3121 (next_queue_index - ctrl_info->queue_memory_base);
3122 next_queue_index += sizeof(pqi_index_t);
3123 next_queue_index = PTR_ALIGN(next_queue_index,
3124 PQI_OPERATIONAL_INDEX_ALIGNMENT);
3125 queue_group->oq_pi = next_queue_index;
3126 queue_group->oq_pi_bus_addr =
3127 ctrl_info->queue_memory_base_dma_handle +
3128 (next_queue_index - ctrl_info->queue_memory_base);
3129 next_queue_index += sizeof(pqi_index_t);
3130 next_queue_index = PTR_ALIGN(next_queue_index,
3131 PQI_OPERATIONAL_INDEX_ALIGNMENT);
3132 }
3133
3134 ctrl_info->event_queue.oq_pi = next_queue_index;
3135 ctrl_info->event_queue.oq_pi_bus_addr =
3136 ctrl_info->queue_memory_base_dma_handle +
3137 (next_queue_index - ctrl_info->queue_memory_base);
3138
3139 return 0;
3140 }
3141
3142 static void pqi_init_operational_queues(struct pqi_ctrl_info *ctrl_info)
3143 {
3144 unsigned int i;
3145 u16 next_iq_id = PQI_MIN_OPERATIONAL_QUEUE_ID;
3146 u16 next_oq_id = PQI_MIN_OPERATIONAL_QUEUE_ID;
3147
3148 /*
3149 * Initialize the backpointers to the controller structure in
3150 * each operational queue group structure.
3151 */
3152 for (i = 0; i < ctrl_info->num_queue_groups; i++)
3153 ctrl_info->queue_groups[i].ctrl_info = ctrl_info;
3154
3155 /*
3156 * Assign IDs to all operational queues. Note that the IDs
3157 * assigned to operational IQs are independent of the IDs
3158 * assigned to operational OQs.
3159 */
3160 ctrl_info->event_queue.oq_id = next_oq_id++;
3161 for (i = 0; i < ctrl_info->num_queue_groups; i++) {
3162 ctrl_info->queue_groups[i].iq_id[RAID_PATH] = next_iq_id++;
3163 ctrl_info->queue_groups[i].iq_id[AIO_PATH] = next_iq_id++;
3164 ctrl_info->queue_groups[i].oq_id = next_oq_id++;
3165 }
3166
3167 /*
3168 * Assign MSI-X table entry indexes to all queues. Note that the
3169 * interrupt for the event queue is shared with the first queue group.
3170 */
3171 ctrl_info->event_queue.int_msg_num = 0;
3172 for (i = 0; i < ctrl_info->num_queue_groups; i++)
3173 ctrl_info->queue_groups[i].int_msg_num = i;
3174
3175 for (i = 0; i < ctrl_info->num_queue_groups; i++) {
3176 spin_lock_init(&ctrl_info->queue_groups[i].submit_lock[0]);
3177 spin_lock_init(&ctrl_info->queue_groups[i].submit_lock[1]);
3178 INIT_LIST_HEAD(&ctrl_info->queue_groups[i].request_list[0]);
3179 INIT_LIST_HEAD(&ctrl_info->queue_groups[i].request_list[1]);
3180 }
3181 }
3182
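/*
 * Allocate one DMA-coherent block for the aligned admin queue pair and
 * record the CPU and bus addresses of its element arrays and indexes.
 */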
3183 static int pqi_alloc_admin_queues(struct pqi_ctrl_info *ctrl_info)
3184 {
3185 size_t alloc_length;
3186 struct pqi_admin_queues_aligned *admin_queues_aligned;
3187 struct pqi_admin_queues *admin_queues;
3188
3189 alloc_length = sizeof(struct pqi_admin_queues_aligned) +
3190 PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT;
3191
3192 ctrl_info->admin_queue_memory_base =
3193 dma_zalloc_coherent(&ctrl_info->pci_dev->dev,
3194 alloc_length,
3195 &ctrl_info->admin_queue_memory_base_dma_handle,
3196 GFP_KERNEL);
3197
3198 if (!ctrl_info->admin_queue_memory_base)
3199 return -ENOMEM;
3200
3201 ctrl_info->admin_queue_memory_length = alloc_length;
3202
3203 admin_queues = &ctrl_info->admin_queues;
3204 admin_queues_aligned = PTR_ALIGN(ctrl_info->admin_queue_memory_base,
3205 PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT);
3206 admin_queues->iq_element_array =
3207 &admin_queues_aligned->iq_element_array;
3208 admin_queues->oq_element_array =
3209 &admin_queues_aligned->oq_element_array;
3210 admin_queues->iq_ci = &admin_queues_aligned->iq_ci;
3211 admin_queues->oq_pi = &admin_queues_aligned->oq_pi;
3212
3213 admin_queues->iq_element_array_bus_addr =
3214 ctrl_info->admin_queue_memory_base_dma_handle +
3215 (admin_queues->iq_element_array -
3216 ctrl_info->admin_queue_memory_base);
3217 admin_queues->oq_element_array_bus_addr =
3218 ctrl_info->admin_queue_memory_base_dma_handle +
3219 (admin_queues->oq_element_array -
3220 ctrl_info->admin_queue_memory_base);
3221 admin_queues->iq_ci_bus_addr =
3222 ctrl_info->admin_queue_memory_base_dma_handle +
3223 ((void *)admin_queues->iq_ci -
3224 ctrl_info->admin_queue_memory_base);
3225 admin_queues->oq_pi_bus_addr =
3226 ctrl_info->admin_queue_memory_base_dma_handle +
3227 ((void *)admin_queues->oq_pi -
3228 ctrl_info->admin_queue_memory_base);
3229
3230 return 0;
3231 }
3232
3233 #define PQI_ADMIN_QUEUE_CREATE_TIMEOUT_JIFFIES HZ
3234 #define PQI_ADMIN_QUEUE_CREATE_POLL_INTERVAL_MSECS 1
3235
3236 static int pqi_create_admin_queues(struct pqi_ctrl_info *ctrl_info)
3237 {
3238 struct pqi_device_registers __iomem *pqi_registers;
3239 struct pqi_admin_queues *admin_queues;
3240 unsigned long timeout;
3241 u8 status;
3242 u32 reg;
3243
3244 pqi_registers = ctrl_info->pqi_registers;
3245 admin_queues = &ctrl_info->admin_queues;
3246
3247 writeq((u64)admin_queues->iq_element_array_bus_addr,
3248 &pqi_registers->admin_iq_element_array_addr);
3249 writeq((u64)admin_queues->oq_element_array_bus_addr,
3250 &pqi_registers->admin_oq_element_array_addr);
3251 writeq((u64)admin_queues->iq_ci_bus_addr,
3252 &pqi_registers->admin_iq_ci_addr);
3253 writeq((u64)admin_queues->oq_pi_bus_addr,
3254 &pqi_registers->admin_oq_pi_addr);
3255
3256 reg = PQI_ADMIN_IQ_NUM_ELEMENTS |
3257 (PQI_ADMIN_OQ_NUM_ELEMENTS) << 8 |
3258 (admin_queues->int_msg_num << 16);
3259 writel(reg, &pqi_registers->admin_iq_num_elements);
3260 writel(PQI_CREATE_ADMIN_QUEUE_PAIR,
3261 &pqi_registers->function_and_status_code);
3262
3263 timeout = PQI_ADMIN_QUEUE_CREATE_TIMEOUT_JIFFIES + jiffies;
3264 while (1) {
3265 status = readb(&pqi_registers->function_and_status_code);
3266 if (status == PQI_STATUS_IDLE)
3267 break;
3268 if (time_after(jiffies, timeout))
3269 return -ETIMEDOUT;
3270 msleep(PQI_ADMIN_QUEUE_CREATE_POLL_INTERVAL_MSECS);
3271 }
3272
3273 /*
3274 * The offset registers are not initialized to the correct
3275 * offsets until *after* the create admin queue pair command
3276 * completes successfully.
3277 */
3278 admin_queues->iq_pi = ctrl_info->iomem_base +
3279 PQI_DEVICE_REGISTERS_OFFSET +
3280 readq(&pqi_registers->admin_iq_pi_offset);
3281 admin_queues->oq_ci = ctrl_info->iomem_base +
3282 PQI_DEVICE_REGISTERS_OFFSET +
3283 readq(&pqi_registers->admin_oq_ci_offset);
3284
3285 return 0;
3286 }
3287
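/*
 * Copy an admin request into the next free admin IQ element and ring the
 * producer index doorbell to hand it to the controller.
 */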
3288 static void pqi_submit_admin_request(struct pqi_ctrl_info *ctrl_info,
3289 struct pqi_general_admin_request *request)
3290 {
3291 struct pqi_admin_queues *admin_queues;
3292 void *next_element;
3293 pqi_index_t iq_pi;
3294
3295 admin_queues = &ctrl_info->admin_queues;
3296 iq_pi = admin_queues->iq_pi_copy;
3297
3298 next_element = admin_queues->iq_element_array +
3299 (iq_pi * PQI_ADMIN_IQ_ELEMENT_LENGTH);
3300
3301 memcpy(next_element, request, sizeof(*request));
3302
3303 iq_pi = (iq_pi + 1) % PQI_ADMIN_IQ_NUM_ELEMENTS;
3304 admin_queues->iq_pi_copy = iq_pi;
3305
3306 /*
3307 * This write notifies the controller that an IU is available to be
3308 * processed.
3309 */
3310 writel(iq_pi, admin_queues->iq_pi);
3311 }
3312
3313 static int pqi_poll_for_admin_response(struct pqi_ctrl_info *ctrl_info,
3314 struct pqi_general_admin_response *response)
3315 {
3316 struct pqi_admin_queues *admin_queues;
3317 pqi_index_t oq_pi;
3318 pqi_index_t oq_ci;
3319 unsigned long timeout;
3320
3321 admin_queues = &ctrl_info->admin_queues;
3322 oq_ci = admin_queues->oq_ci_copy;
3323
3324 timeout = (3 * HZ) + jiffies;
3325
3326 while (1) {
3327 oq_pi = *admin_queues->oq_pi;
3328 if (oq_pi != oq_ci)
3329 break;
3330 if (time_after(jiffies, timeout)) {
3331 dev_err(&ctrl_info->pci_dev->dev,
3332 "timed out waiting for admin response\n");
3333 return -ETIMEDOUT;
3334 }
3335 usleep_range(1000, 2000);
3336 }
3337
3338 memcpy(response, admin_queues->oq_element_array +
3339 (oq_ci * PQI_ADMIN_OQ_ELEMENT_LENGTH), sizeof(*response));
3340
3341 oq_ci = (oq_ci + 1) % PQI_ADMIN_OQ_NUM_ELEMENTS;
3342 admin_queues->oq_ci_copy = oq_ci;
3343 writel(oq_ci, admin_queues->oq_ci);
3344
3345 return 0;
3346 }
3347
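/*
 * Add the new request (if any) to the submission list for this path, then
 * copy as many queued IUs as will fit into the inbound queue, handling
 * wrap-around at the end of the element array, and ring the doorbell once
 * if anything was queued.
 */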
3348 static void pqi_start_io(struct pqi_ctrl_info *ctrl_info,
3349 struct pqi_queue_group *queue_group, enum pqi_io_path path,
3350 struct pqi_io_request *io_request)
3351 {
3352 struct pqi_io_request *next;
3353 void *next_element;
3354 pqi_index_t iq_pi;
3355 pqi_index_t iq_ci;
3356 size_t iu_length;
3357 unsigned long flags;
3358 unsigned int num_elements_needed;
3359 unsigned int num_elements_to_end_of_queue;
3360 size_t copy_count;
3361 struct pqi_iu_header *request;
3362
3363 spin_lock_irqsave(&queue_group->submit_lock[path], flags);
3364
3365 if (io_request)
3366 list_add_tail(&io_request->request_list_entry,
3367 &queue_group->request_list[path]);
3368
3369 iq_pi = queue_group->iq_pi_copy[path];
3370
3371 list_for_each_entry_safe(io_request, next,
3372 &queue_group->request_list[path], request_list_entry) {
3373
3374 request = io_request->iu;
3375
3376 iu_length = get_unaligned_le16(&request->iu_length) +
3377 PQI_REQUEST_HEADER_LENGTH;
3378 num_elements_needed =
3379 DIV_ROUND_UP(iu_length,
3380 PQI_OPERATIONAL_IQ_ELEMENT_LENGTH);
3381
3382 iq_ci = *queue_group->iq_ci[path];
3383
3384 if (num_elements_needed > pqi_num_elements_free(iq_pi, iq_ci,
3385 ctrl_info->num_elements_per_iq))
3386 break;
3387
3388 put_unaligned_le16(queue_group->oq_id,
3389 &request->response_queue_id);
3390
3391 next_element = queue_group->iq_element_array[path] +
3392 (iq_pi * PQI_OPERATIONAL_IQ_ELEMENT_LENGTH);
3393
3394 num_elements_to_end_of_queue =
3395 ctrl_info->num_elements_per_iq - iq_pi;
3396
3397 if (num_elements_needed <= num_elements_to_end_of_queue) {
3398 memcpy(next_element, request, iu_length);
3399 } else {
3400 copy_count = num_elements_to_end_of_queue *
3401 PQI_OPERATIONAL_IQ_ELEMENT_LENGTH;
3402 memcpy(next_element, request, copy_count);
3403 memcpy(queue_group->iq_element_array[path],
3404 (u8 *)request + copy_count,
3405 iu_length - copy_count);
3406 }
3407
3408 iq_pi = (iq_pi + num_elements_needed) %
3409 ctrl_info->num_elements_per_iq;
3410
3411 list_del(&io_request->request_list_entry);
3412 }
3413
3414 if (iq_pi != queue_group->iq_pi_copy[path]) {
3415 queue_group->iq_pi_copy[path] = iq_pi;
3416 /*
3417 * This write notifies the controller that one or more IUs are
3418 * available to be processed.
3419 */
3420 writel(iq_pi, queue_group->iq_pi[path]);
3421 }
3422
3423 spin_unlock_irqrestore(&queue_group->submit_lock[path], flags);
3424 }
3425
3426 static void pqi_raid_synchronous_complete(struct pqi_io_request *io_request,
3427 void *context)
3428 {
3429 struct completion *waiting = context;
3430
3431 complete(waiting);
3432 }
3433
3434 static int pqi_submit_raid_request_synchronous_with_io_request(
3435 struct pqi_ctrl_info *ctrl_info, struct pqi_io_request *io_request,
3436 unsigned long timeout_msecs)
3437 {
3438 int rc = 0;
3439 DECLARE_COMPLETION_ONSTACK(wait);
3440
3441 io_request->io_complete_callback = pqi_raid_synchronous_complete;
3442 io_request->context = &wait;
3443
3444 pqi_start_io(ctrl_info,
3445 &ctrl_info->queue_groups[PQI_DEFAULT_QUEUE_GROUP], RAID_PATH,
3446 io_request);
3447
3448 if (timeout_msecs == NO_TIMEOUT) {
3449 wait_for_completion_io(&wait);
3450 } else {
3451 if (!wait_for_completion_io_timeout(&wait,
3452 msecs_to_jiffies(timeout_msecs))) {
3453 dev_warn(&ctrl_info->pci_dev->dev,
3454 "command timed out\n");
3455 rc = -ETIMEDOUT;
3456 }
3457 }
3458
3459 return rc;
3460 }
3461
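/*
 * Send a RAID path request and wait for it to complete. Submissions are
 * serialized by sync_request_sem, and any caller-supplied timeout is
 * reduced by the time spent blocked on the semaphore.
 */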
3462 static int pqi_submit_raid_request_synchronous(struct pqi_ctrl_info *ctrl_info,
3463 struct pqi_iu_header *request, unsigned int flags,
3464 struct pqi_raid_error_info *error_info, unsigned long timeout_msecs)
3465 {
3466 int rc;
3467 struct pqi_io_request *io_request;
3468 unsigned long start_jiffies;
3469 unsigned long msecs_blocked;
3470 size_t iu_length;
3471
3472 /*
3473 * Note that specifying PQI_SYNC_FLAGS_INTERRUPTABLE and a timeout value
3474 * are mutually exclusive.
3475 */
3476
3477 if (flags & PQI_SYNC_FLAGS_INTERRUPTABLE) {
3478 if (down_interruptible(&ctrl_info->sync_request_sem))
3479 return -ERESTARTSYS;
3480 } else {
3481 if (timeout_msecs == NO_TIMEOUT) {
3482 down(&ctrl_info->sync_request_sem);
3483 } else {
3484 start_jiffies = jiffies;
3485 if (down_timeout(&ctrl_info->sync_request_sem,
3486 msecs_to_jiffies(timeout_msecs)))
3487 return -ETIMEDOUT;
3488 msecs_blocked =
3489 jiffies_to_msecs(jiffies - start_jiffies);
3490 if (msecs_blocked >= timeout_msecs)
3491 return -ETIMEDOUT;
3492 timeout_msecs -= msecs_blocked;
3493 }
3494 }
3495
3496 io_request = pqi_alloc_io_request(ctrl_info);
3497
3498 put_unaligned_le16(io_request->index,
3499 &(((struct pqi_raid_path_request *)request)->request_id));
3500
3501 if (request->iu_type == PQI_REQUEST_IU_RAID_PATH_IO)
3502 ((struct pqi_raid_path_request *)request)->error_index =
3503 ((struct pqi_raid_path_request *)request)->request_id;
3504
3505 iu_length = get_unaligned_le16(&request->iu_length) +
3506 PQI_REQUEST_HEADER_LENGTH;
3507 memcpy(io_request->iu, request, iu_length);
3508
3509 rc = pqi_submit_raid_request_synchronous_with_io_request(ctrl_info,
3510 io_request, timeout_msecs);
3511
3512 if (error_info) {
3513 if (io_request->error_info)
3514 memcpy(error_info, io_request->error_info,
3515 sizeof(*error_info));
3516 else
3517 memset(error_info, 0, sizeof(*error_info));
3518 } else if (rc == 0 && io_request->error_info) {
3519 u8 scsi_status;
3520 struct pqi_raid_error_info *raid_error_info;
3521
3522 raid_error_info = io_request->error_info;
3523 scsi_status = raid_error_info->status;
3524
3525 if (scsi_status == SAM_STAT_CHECK_CONDITION &&
3526 raid_error_info->data_out_result ==
3527 PQI_DATA_IN_OUT_UNDERFLOW)
3528 scsi_status = SAM_STAT_GOOD;
3529
3530 if (scsi_status != SAM_STAT_GOOD)
3531 rc = -EIO;
3532 }
3533
3534 pqi_free_io_request(io_request);
3535
3536 up(&ctrl_info->sync_request_sem);
3537
3538 return rc;
3539 }
3540
3541 static int pqi_validate_admin_response(
3542 struct pqi_general_admin_response *response, u8 expected_function_code)
3543 {
3544 if (response->header.iu_type != PQI_RESPONSE_IU_GENERAL_ADMIN)
3545 return -EINVAL;
3546
3547 if (get_unaligned_le16(&response->header.iu_length) !=
3548 PQI_GENERAL_ADMIN_IU_LENGTH)
3549 return -EINVAL;
3550
3551 if (response->function_code != expected_function_code)
3552 return -EINVAL;
3553
3554 if (response->status != PQI_GENERAL_ADMIN_STATUS_SUCCESS)
3555 return -EINVAL;
3556
3557 return 0;
3558 }
3559
3560 static int pqi_submit_admin_request_synchronous(
3561 struct pqi_ctrl_info *ctrl_info,
3562 struct pqi_general_admin_request *request,
3563 struct pqi_general_admin_response *response)
3564 {
3565 int rc;
3566
3567 pqi_submit_admin_request(ctrl_info, request);
3568
3569 rc = pqi_poll_for_admin_response(ctrl_info, response);
3570
3571 if (rc == 0)
3572 rc = pqi_validate_admin_response(response,
3573 request->function_code);
3574
3575 return rc;
3576 }
3577
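/*
 * Issue the REPORT DEVICE CAPABILITY admin command and cache the
 * controller's queue and IU size limits for later validation.
 */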
3578 static int pqi_report_device_capability(struct pqi_ctrl_info *ctrl_info)
3579 {
3580 int rc;
3581 struct pqi_general_admin_request request;
3582 struct pqi_general_admin_response response;
3583 struct pqi_device_capability *capability;
3584 struct pqi_iu_layer_descriptor *sop_iu_layer_descriptor;
3585
3586 capability = kmalloc(sizeof(*capability), GFP_KERNEL);
3587 if (!capability)
3588 return -ENOMEM;
3589
3590 memset(&request, 0, sizeof(request));
3591
3592 request.header.iu_type = PQI_REQUEST_IU_GENERAL_ADMIN;
3593 put_unaligned_le16(PQI_GENERAL_ADMIN_IU_LENGTH,
3594 &request.header.iu_length);
3595 request.function_code =
3596 PQI_GENERAL_ADMIN_FUNCTION_REPORT_DEVICE_CAPABILITY;
3597 put_unaligned_le32(sizeof(*capability),
3598 &request.data.report_device_capability.buffer_length);
3599
3600 rc = pqi_map_single(ctrl_info->pci_dev,
3601 &request.data.report_device_capability.sg_descriptor,
3602 capability, sizeof(*capability),
3603 PCI_DMA_FROMDEVICE);
3604 if (rc)
3605 goto out;
3606
3607 rc = pqi_submit_admin_request_synchronous(ctrl_info, &request,
3608 &response);
3609
3610 pqi_pci_unmap(ctrl_info->pci_dev,
3611 &request.data.report_device_capability.sg_descriptor, 1,
3612 PCI_DMA_FROMDEVICE);
3613
3614 if (rc)
3615 goto out;
3616
3617 if (response.status != PQI_GENERAL_ADMIN_STATUS_SUCCESS) {
3618 rc = -EIO;
3619 goto out;
3620 }
3621
3622 ctrl_info->max_inbound_queues =
3623 get_unaligned_le16(&capability->max_inbound_queues);
3624 ctrl_info->max_elements_per_iq =
3625 get_unaligned_le16(&capability->max_elements_per_iq);
3626 ctrl_info->max_iq_element_length =
3627 get_unaligned_le16(&capability->max_iq_element_length)
3628 * 16;
3629 ctrl_info->max_outbound_queues =
3630 get_unaligned_le16(&capability->max_outbound_queues);
3631 ctrl_info->max_elements_per_oq =
3632 get_unaligned_le16(&capability->max_elements_per_oq);
3633 ctrl_info->max_oq_element_length =
3634 get_unaligned_le16(&capability->max_oq_element_length)
3635 * 16;
3636
3637 sop_iu_layer_descriptor =
3638 &capability->iu_layer_descriptors[PQI_PROTOCOL_SOP];
3639
3640 ctrl_info->max_inbound_iu_length_per_firmware =
3641 get_unaligned_le16(
3642 &sop_iu_layer_descriptor->max_inbound_iu_length);
3643 ctrl_info->inbound_spanning_supported =
3644 sop_iu_layer_descriptor->inbound_spanning_supported;
3645 ctrl_info->outbound_spanning_supported =
3646 sop_iu_layer_descriptor->outbound_spanning_supported;
3647
3648 out:
3649 kfree(capability);
3650
3651 return rc;
3652 }
3653
3654 static int pqi_validate_device_capability(struct pqi_ctrl_info *ctrl_info)
3655 {
3656 if (ctrl_info->max_iq_element_length <
3657 PQI_OPERATIONAL_IQ_ELEMENT_LENGTH) {
3658 dev_err(&ctrl_info->pci_dev->dev,
3659 "max. inbound queue element length of %d is less than the required length of %d\n",
3660 ctrl_info->max_iq_element_length,
3661 PQI_OPERATIONAL_IQ_ELEMENT_LENGTH);
3662 return -EINVAL;
3663 }
3664
3665 if (ctrl_info->max_oq_element_length <
3666 PQI_OPERATIONAL_OQ_ELEMENT_LENGTH) {
3667 dev_err(&ctrl_info->pci_dev->dev,
3668 "max. outbound queue element length of %d is less than the required length of %d\n",
3669 ctrl_info->max_oq_element_length,
3670 PQI_OPERATIONAL_OQ_ELEMENT_LENGTH);
3671 return -EINVAL;
3672 }
3673
3674 if (ctrl_info->max_inbound_iu_length_per_firmware <
3675 PQI_OPERATIONAL_IQ_ELEMENT_LENGTH) {
3676 dev_err(&ctrl_info->pci_dev->dev,
3677 "max. inbound IU length of %u is less than the min. required length of %d\n",
3678 ctrl_info->max_inbound_iu_length_per_firmware,
3679 PQI_OPERATIONAL_IQ_ELEMENT_LENGTH);
3680 return -EINVAL;
3681 }
3682
3683 if (!ctrl_info->inbound_spanning_supported) {
3684 dev_err(&ctrl_info->pci_dev->dev,
3685 "the controller does not support inbound spanning\n");
3686 return -EINVAL;
3687 }
3688
3689 if (ctrl_info->outbound_spanning_supported) {
3690 dev_err(&ctrl_info->pci_dev->dev,
3691 "the controller supports outbound spanning but this driver does not\n");
3692 return -EINVAL;
3693 }
3694
3695 return 0;
3696 }
3697
3698 static int pqi_delete_operational_queue(struct pqi_ctrl_info *ctrl_info,
3699 bool inbound_queue, u16 queue_id)
3700 {
3701 struct pqi_general_admin_request request;
3702 struct pqi_general_admin_response response;
3703
3704 memset(&request, 0, sizeof(request));
3705 request.header.iu_type = PQI_REQUEST_IU_GENERAL_ADMIN;
3706 put_unaligned_le16(PQI_GENERAL_ADMIN_IU_LENGTH,
3707 &request.header.iu_length);
3708 if (inbound_queue)
3709 request.function_code =
3710 PQI_GENERAL_ADMIN_FUNCTION_DELETE_IQ;
3711 else
3712 request.function_code =
3713 PQI_GENERAL_ADMIN_FUNCTION_DELETE_OQ;
3714 put_unaligned_le16(queue_id,
3715 &request.data.delete_operational_queue.queue_id);
3716
3717 return pqi_submit_admin_request_synchronous(ctrl_info, &request,
3718 &response);
3719 }
3720
3721 static int pqi_create_event_queue(struct pqi_ctrl_info *ctrl_info)
3722 {
3723 int rc;
3724 struct pqi_event_queue *event_queue;
3725 struct pqi_general_admin_request request;
3726 struct pqi_general_admin_response response;
3727
3728 event_queue = &ctrl_info->event_queue;
3729
3730 /*
3731 	 * Create the OQ (Outbound Queue - device to host queue) dedicated
3732 	 * to events.
3733 */
3734 memset(&request, 0, sizeof(request));
3735 request.header.iu_type = PQI_REQUEST_IU_GENERAL_ADMIN;
3736 put_unaligned_le16(PQI_GENERAL_ADMIN_IU_LENGTH,
3737 &request.header.iu_length);
3738 request.function_code = PQI_GENERAL_ADMIN_FUNCTION_CREATE_OQ;
3739 put_unaligned_le16(event_queue->oq_id,
3740 &request.data.create_operational_oq.queue_id);
3741 put_unaligned_le64((u64)event_queue->oq_element_array_bus_addr,
3742 &request.data.create_operational_oq.element_array_addr);
3743 put_unaligned_le64((u64)event_queue->oq_pi_bus_addr,
3744 &request.data.create_operational_oq.pi_addr);
3745 put_unaligned_le16(PQI_NUM_EVENT_QUEUE_ELEMENTS,
3746 &request.data.create_operational_oq.num_elements);
3747 put_unaligned_le16(PQI_EVENT_OQ_ELEMENT_LENGTH / 16,
3748 &request.data.create_operational_oq.element_length);
3749 request.data.create_operational_oq.queue_protocol = PQI_PROTOCOL_SOP;
3750 put_unaligned_le16(event_queue->int_msg_num,
3751 &request.data.create_operational_oq.int_msg_num);
3752
3753 rc = pqi_submit_admin_request_synchronous(ctrl_info, &request,
3754 &response);
3755 if (rc)
3756 return rc;
3757
3758 event_queue->oq_ci = ctrl_info->iomem_base +
3759 PQI_DEVICE_REGISTERS_OFFSET +
3760 get_unaligned_le64(
3761 &response.data.create_operational_oq.oq_ci_offset);
3762
3763 return 0;
3764 }
3765
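/*
 * Bring up the next queue group: an inbound queue for the RAID path,
 * an inbound queue for the AIO path (flagged as such via the
 * CHANGE_IQ_PROPERTY admin request), and one outbound queue shared by
 * both paths. Queues already created are deleted if a later step fails.
 */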
3766 static int pqi_create_queue_group(struct pqi_ctrl_info *ctrl_info)
3767 {
3768 unsigned int i;
3769 int rc;
3770 struct pqi_queue_group *queue_group;
3771 struct pqi_general_admin_request request;
3772 struct pqi_general_admin_response response;
3773
3774 i = ctrl_info->num_active_queue_groups;
3775 queue_group = &ctrl_info->queue_groups[i];
3776
3777 /*
3778 * Create IQ (Inbound Queue - host to device queue) for
3779 * RAID path.
3780 */
3781 memset(&request, 0, sizeof(request));
3782 request.header.iu_type = PQI_REQUEST_IU_GENERAL_ADMIN;
3783 put_unaligned_le16(PQI_GENERAL_ADMIN_IU_LENGTH,
3784 &request.header.iu_length);
3785 request.function_code = PQI_GENERAL_ADMIN_FUNCTION_CREATE_IQ;
3786 put_unaligned_le16(queue_group->iq_id[RAID_PATH],
3787 &request.data.create_operational_iq.queue_id);
3788 put_unaligned_le64(
3789 (u64)queue_group->iq_element_array_bus_addr[RAID_PATH],
3790 &request.data.create_operational_iq.element_array_addr);
3791 put_unaligned_le64((u64)queue_group->iq_ci_bus_addr[RAID_PATH],
3792 &request.data.create_operational_iq.ci_addr);
3793 put_unaligned_le16(ctrl_info->num_elements_per_iq,
3794 &request.data.create_operational_iq.num_elements);
3795 put_unaligned_le16(PQI_OPERATIONAL_IQ_ELEMENT_LENGTH / 16,
3796 &request.data.create_operational_iq.element_length);
3797 request.data.create_operational_iq.queue_protocol = PQI_PROTOCOL_SOP;
3798
3799 rc = pqi_submit_admin_request_synchronous(ctrl_info, &request,
3800 &response);
3801 if (rc) {
3802 dev_err(&ctrl_info->pci_dev->dev,
3803 "error creating inbound RAID queue\n");
3804 return rc;
3805 }
3806
3807 queue_group->iq_pi[RAID_PATH] = ctrl_info->iomem_base +
3808 PQI_DEVICE_REGISTERS_OFFSET +
3809 get_unaligned_le64(
3810 &response.data.create_operational_iq.iq_pi_offset);
3811
3812 /*
3813 * Create IQ (Inbound Queue - host to device queue) for
3814 * Advanced I/O (AIO) path.
3815 */
3816 memset(&request, 0, sizeof(request));
3817 request.header.iu_type = PQI_REQUEST_IU_GENERAL_ADMIN;
3818 put_unaligned_le16(PQI_GENERAL_ADMIN_IU_LENGTH,
3819 &request.header.iu_length);
3820 request.function_code = PQI_GENERAL_ADMIN_FUNCTION_CREATE_IQ;
3821 put_unaligned_le16(queue_group->iq_id[AIO_PATH],
3822 &request.data.create_operational_iq.queue_id);
3823 put_unaligned_le64((u64)queue_group->
3824 iq_element_array_bus_addr[AIO_PATH],
3825 &request.data.create_operational_iq.element_array_addr);
3826 put_unaligned_le64((u64)queue_group->iq_ci_bus_addr[AIO_PATH],
3827 &request.data.create_operational_iq.ci_addr);
3828 put_unaligned_le16(ctrl_info->num_elements_per_iq,
3829 &request.data.create_operational_iq.num_elements);
3830 put_unaligned_le16(PQI_OPERATIONAL_IQ_ELEMENT_LENGTH / 16,
3831 &request.data.create_operational_iq.element_length);
3832 request.data.create_operational_iq.queue_protocol = PQI_PROTOCOL_SOP;
3833
3834 rc = pqi_submit_admin_request_synchronous(ctrl_info, &request,
3835 &response);
3836 if (rc) {
3837 dev_err(&ctrl_info->pci_dev->dev,
3838 "error creating inbound AIO queue\n");
3839 goto delete_inbound_queue_raid;
3840 }
3841
3842 queue_group->iq_pi[AIO_PATH] = ctrl_info->iomem_base +
3843 PQI_DEVICE_REGISTERS_OFFSET +
3844 get_unaligned_le64(
3845 &response.data.create_operational_iq.iq_pi_offset);
3846
3847 /*
3848 * Designate the 2nd IQ as the AIO path. By default, all IQs are
3849 * assumed to be for RAID path I/O unless we change the queue's
3850 * property.
3851 */
3852 memset(&request, 0, sizeof(request));
3853 request.header.iu_type = PQI_REQUEST_IU_GENERAL_ADMIN;
3854 put_unaligned_le16(PQI_GENERAL_ADMIN_IU_LENGTH,
3855 &request.header.iu_length);
3856 request.function_code = PQI_GENERAL_ADMIN_FUNCTION_CHANGE_IQ_PROPERTY;
3857 put_unaligned_le16(queue_group->iq_id[AIO_PATH],
3858 &request.data.change_operational_iq_properties.queue_id);
3859 put_unaligned_le32(PQI_IQ_PROPERTY_IS_AIO_QUEUE,
3860 &request.data.change_operational_iq_properties.vendor_specific);
3861
3862 rc = pqi_submit_admin_request_synchronous(ctrl_info, &request,
3863 &response);
3864 if (rc) {
3865 dev_err(&ctrl_info->pci_dev->dev,
3866 "error changing queue property\n");
3867 goto delete_inbound_queue_aio;
3868 }
3869
3870 /*
3871 * Create OQ (Outbound Queue - device to host queue).
3872 */
3873 memset(&request, 0, sizeof(request));
3874 request.header.iu_type = PQI_REQUEST_IU_GENERAL_ADMIN;
3875 put_unaligned_le16(PQI_GENERAL_ADMIN_IU_LENGTH,
3876 &request.header.iu_length);
3877 request.function_code = PQI_GENERAL_ADMIN_FUNCTION_CREATE_OQ;
3878 put_unaligned_le16(queue_group->oq_id,
3879 &request.data.create_operational_oq.queue_id);
3880 put_unaligned_le64((u64)queue_group->oq_element_array_bus_addr,
3881 &request.data.create_operational_oq.element_array_addr);
3882 put_unaligned_le64((u64)queue_group->oq_pi_bus_addr,
3883 &request.data.create_operational_oq.pi_addr);
3884 put_unaligned_le16(ctrl_info->num_elements_per_oq,
3885 &request.data.create_operational_oq.num_elements);
3886 put_unaligned_le16(PQI_OPERATIONAL_OQ_ELEMENT_LENGTH / 16,
3887 &request.data.create_operational_oq.element_length);
3888 request.data.create_operational_oq.queue_protocol = PQI_PROTOCOL_SOP;
3889 put_unaligned_le16(queue_group->int_msg_num,
3890 &request.data.create_operational_oq.int_msg_num);
3891
3892 rc = pqi_submit_admin_request_synchronous(ctrl_info, &request,
3893 &response);
3894 if (rc) {
3895 dev_err(&ctrl_info->pci_dev->dev,
3896 "error creating outbound queue\n");
3897 goto delete_inbound_queue_aio;
3898 }
3899
3900 queue_group->oq_ci = ctrl_info->iomem_base +
3901 PQI_DEVICE_REGISTERS_OFFSET +
3902 get_unaligned_le64(
3903 &response.data.create_operational_oq.oq_ci_offset);
3904
3905 ctrl_info->num_active_queue_groups++;
3906
3907 return 0;
3908
3909 delete_inbound_queue_aio:
3910 pqi_delete_operational_queue(ctrl_info, true,
3911 queue_group->iq_id[AIO_PATH]);
3912
3913 delete_inbound_queue_raid:
3914 pqi_delete_operational_queue(ctrl_info, true,
3915 queue_group->iq_id[RAID_PATH]);
3916
3917 return rc;
3918 }
3919
3920 static int pqi_create_queues(struct pqi_ctrl_info *ctrl_info)
3921 {
3922 int rc;
3923 unsigned int i;
3924
3925 rc = pqi_create_event_queue(ctrl_info);
3926 if (rc) {
3927 dev_err(&ctrl_info->pci_dev->dev,
3928 "error creating event queue\n");
3929 return rc;
3930 }
3931
3932 for (i = 0; i < ctrl_info->num_queue_groups; i++) {
3933 rc = pqi_create_queue_group(ctrl_info);
3934 if (rc) {
3935 dev_err(&ctrl_info->pci_dev->dev,
3936 "error creating queue group number %u/%u\n",
3937 i, ctrl_info->num_queue_groups);
3938 return rc;
3939 }
3940 }
3941
3942 return 0;
3943 }
3944
3945 #define PQI_REPORT_EVENT_CONFIG_BUFFER_LENGTH \
3946 (offsetof(struct pqi_event_config, descriptors) + \
3947 (PQI_MAX_EVENT_DESCRIPTORS * sizeof(struct pqi_event_descriptor)))
3948
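/*
 * Event configuration is a read-modify-write sequence: fetch the
 * controller's current event descriptors, point every descriptor at
 * the driver's event queue (oq_id), then write the updated
 * configuration back with the SET_VENDOR_EVENT_CONFIG request.
 */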
3949 static int pqi_configure_events(struct pqi_ctrl_info *ctrl_info)
3950 {
3951 int rc;
3952 unsigned int i;
3953 struct pqi_event_config *event_config;
3954 struct pqi_general_management_request request;
3955
3956 event_config = kmalloc(PQI_REPORT_EVENT_CONFIG_BUFFER_LENGTH,
3957 GFP_KERNEL);
3958 if (!event_config)
3959 return -ENOMEM;
3960
3961 memset(&request, 0, sizeof(request));
3962
3963 request.header.iu_type = PQI_REQUEST_IU_REPORT_VENDOR_EVENT_CONFIG;
3964 put_unaligned_le16(offsetof(struct pqi_general_management_request,
3965 data.report_event_configuration.sg_descriptors[1]) -
3966 PQI_REQUEST_HEADER_LENGTH, &request.header.iu_length);
3967 put_unaligned_le32(PQI_REPORT_EVENT_CONFIG_BUFFER_LENGTH,
3968 &request.data.report_event_configuration.buffer_length);
3969
3970 rc = pqi_map_single(ctrl_info->pci_dev,
3971 request.data.report_event_configuration.sg_descriptors,
3972 event_config, PQI_REPORT_EVENT_CONFIG_BUFFER_LENGTH,
3973 PCI_DMA_FROMDEVICE);
3974 if (rc)
3975 goto out;
3976
3977 rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header,
3978 0, NULL, NO_TIMEOUT);
3979
3980 pqi_pci_unmap(ctrl_info->pci_dev,
3981 request.data.report_event_configuration.sg_descriptors, 1,
3982 PCI_DMA_FROMDEVICE);
3983
3984 if (rc)
3985 goto out;
3986
3987 for (i = 0; i < event_config->num_event_descriptors; i++)
3988 put_unaligned_le16(ctrl_info->event_queue.oq_id,
3989 &event_config->descriptors[i].oq_id);
3990
3991 memset(&request, 0, sizeof(request));
3992
3993 request.header.iu_type = PQI_REQUEST_IU_SET_VENDOR_EVENT_CONFIG;
3994 put_unaligned_le16(offsetof(struct pqi_general_management_request,
3995 data.report_event_configuration.sg_descriptors[1]) -
3996 PQI_REQUEST_HEADER_LENGTH, &request.header.iu_length);
3997 put_unaligned_le32(PQI_REPORT_EVENT_CONFIG_BUFFER_LENGTH,
3998 &request.data.report_event_configuration.buffer_length);
3999
4000 rc = pqi_map_single(ctrl_info->pci_dev,
4001 request.data.report_event_configuration.sg_descriptors,
4002 event_config, PQI_REPORT_EVENT_CONFIG_BUFFER_LENGTH,
4003 PCI_DMA_TODEVICE);
4004 if (rc)
4005 goto out;
4006
4007 rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header, 0,
4008 NULL, NO_TIMEOUT);
4009
4010 pqi_pci_unmap(ctrl_info->pci_dev,
4011 request.data.report_event_configuration.sg_descriptors, 1,
4012 PCI_DMA_TODEVICE);
4013
4014 out:
4015 kfree(event_config);
4016
4017 return rc;
4018 }
4019
4020 static void pqi_free_all_io_requests(struct pqi_ctrl_info *ctrl_info)
4021 {
4022 unsigned int i;
4023 struct device *dev;
4024 size_t sg_chain_buffer_length;
4025 struct pqi_io_request *io_request;
4026
4027 if (!ctrl_info->io_request_pool)
4028 return;
4029
4030 dev = &ctrl_info->pci_dev->dev;
4031 sg_chain_buffer_length = ctrl_info->sg_chain_buffer_length;
4032 io_request = ctrl_info->io_request_pool;
4033
4034 for (i = 0; i < ctrl_info->max_io_slots; i++) {
4035 kfree(io_request->iu);
4036 if (!io_request->sg_chain_buffer)
4037 break;
4038 dma_free_coherent(dev, sg_chain_buffer_length,
4039 io_request->sg_chain_buffer,
4040 io_request->sg_chain_buffer_dma_handle);
4041 io_request++;
4042 }
4043
4044 kfree(ctrl_info->io_request_pool);
4045 ctrl_info->io_request_pool = NULL;
4046 }
4047
4048 static inline int pqi_alloc_error_buffer(struct pqi_ctrl_info *ctrl_info)
4049 {
4050 ctrl_info->error_buffer = dma_zalloc_coherent(&ctrl_info->pci_dev->dev,
4051 ctrl_info->error_buffer_length,
4052 &ctrl_info->error_buffer_dma_handle, GFP_KERNEL);
4053
4054 if (!ctrl_info->error_buffer)
4055 return -ENOMEM;
4056
4057 return 0;
4058 }
4059
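/*
 * Allocate the pool of pqi_io_request structures, giving each slot a
 * kmalloc'ed IU buffer and a DMA-coherent scatter-gather chain buffer.
 * pqi_free_all_io_requests() stops at the first entry with a NULL
 * sg_chain_buffer, so a partially initialized pool unwinds correctly.
 */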
4060 static int pqi_alloc_io_resources(struct pqi_ctrl_info *ctrl_info)
4061 {
4062 unsigned int i;
4063 void *sg_chain_buffer;
4064 size_t sg_chain_buffer_length;
4065 dma_addr_t sg_chain_buffer_dma_handle;
4066 struct device *dev;
4067 struct pqi_io_request *io_request;
4068
4069 ctrl_info->io_request_pool = kzalloc(ctrl_info->max_io_slots *
4070 sizeof(ctrl_info->io_request_pool[0]), GFP_KERNEL);
4071
4072 if (!ctrl_info->io_request_pool) {
4073 dev_err(&ctrl_info->pci_dev->dev,
4074 "failed to allocate I/O request pool\n");
4075 goto error;
4076 }
4077
4078 dev = &ctrl_info->pci_dev->dev;
4079 sg_chain_buffer_length = ctrl_info->sg_chain_buffer_length;
4080 io_request = ctrl_info->io_request_pool;
4081
4082 for (i = 0; i < ctrl_info->max_io_slots; i++) {
4083 io_request->iu =
4084 kmalloc(ctrl_info->max_inbound_iu_length, GFP_KERNEL);
4085
4086 if (!io_request->iu) {
4087 dev_err(&ctrl_info->pci_dev->dev,
4088 "failed to allocate IU buffers\n");
4089 goto error;
4090 }
4091
4092 sg_chain_buffer = dma_alloc_coherent(dev,
4093 sg_chain_buffer_length, &sg_chain_buffer_dma_handle,
4094 GFP_KERNEL);
4095
4096 if (!sg_chain_buffer) {
4097 dev_err(&ctrl_info->pci_dev->dev,
4098 "failed to allocate PQI scatter-gather chain buffers\n");
4099 goto error;
4100 }
4101
4102 io_request->index = i;
4103 io_request->sg_chain_buffer = sg_chain_buffer;
4104 io_request->sg_chain_buffer_dma_handle =
4105 sg_chain_buffer_dma_handle;
4106 io_request++;
4107 }
4108
4109 return 0;
4110
4111 error:
4112 pqi_free_all_io_requests(ctrl_info);
4113
4114 return -ENOMEM;
4115 }
4116
4117 /*
4118 * Calculate required resources that are sized based on max. outstanding
4119 * requests and max. transfer size.
4120 */
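/*
 * Illustrative example (assuming a PAGE_SIZE of 4 KiB, an effective
 * max_transfer_size of 1 MiB, and max_sg_entries >= 257):
 * 1 MiB / 4 KiB = 256 pages, +1 for a non-page-aligned buffer = 257
 * SG entries, so the chain buffer holds 257 descriptors and
 * max_sectors becomes (257 - 1) * 4096 / 512 = 2048.
 */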
4121
4122 static void pqi_calculate_io_resources(struct pqi_ctrl_info *ctrl_info)
4123 {
4124 u32 max_transfer_size;
4125 u32 max_sg_entries;
4126
4127 ctrl_info->scsi_ml_can_queue =
4128 ctrl_info->max_outstanding_requests - PQI_RESERVED_IO_SLOTS;
4129 ctrl_info->max_io_slots = ctrl_info->max_outstanding_requests;
4130
4131 ctrl_info->error_buffer_length =
4132 ctrl_info->max_io_slots * PQI_ERROR_BUFFER_ELEMENT_LENGTH;
4133
4134 max_transfer_size =
4135 min(ctrl_info->max_transfer_size, PQI_MAX_TRANSFER_SIZE);
4136
4137 max_sg_entries = max_transfer_size / PAGE_SIZE;
4138
4139 /* +1 to cover when the buffer is not page-aligned. */
4140 max_sg_entries++;
4141
4142 max_sg_entries = min(ctrl_info->max_sg_entries, max_sg_entries);
4143
4144 max_transfer_size = (max_sg_entries - 1) * PAGE_SIZE;
4145
4146 ctrl_info->sg_chain_buffer_length =
4147 max_sg_entries * sizeof(struct pqi_sg_descriptor);
4148 ctrl_info->sg_tablesize = max_sg_entries;
4149 ctrl_info->max_sectors = max_transfer_size / 512;
4150 }
4151
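/*
 * Size the operational queues: one queue group (RAID IQ + AIO IQ + OQ)
 * per online CPU, capped by the number of MSI-X vectors and by the
 * controller's inbound/outbound queue limits, then derive the element
 * counts and the number of SG descriptors that fit in a single IU.
 */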
4152 static void pqi_calculate_queue_resources(struct pqi_ctrl_info *ctrl_info)
4153 {
4154 int num_cpus;
4155 int max_queue_groups;
4156 int num_queue_groups;
4157 u16 num_elements_per_iq;
4158 u16 num_elements_per_oq;
4159
4160 max_queue_groups = min(ctrl_info->max_inbound_queues / 2,
4161 ctrl_info->max_outbound_queues - 1);
4162 max_queue_groups = min(max_queue_groups, PQI_MAX_QUEUE_GROUPS);
4163
4164 num_cpus = num_online_cpus();
4165 num_queue_groups = min(num_cpus, ctrl_info->max_msix_vectors);
4166 num_queue_groups = min(num_queue_groups, max_queue_groups);
4167
4168 ctrl_info->num_queue_groups = num_queue_groups;
4169
4170 /*
4171 * Round the max. inbound IU length down to a whole multiple of
4172 * our inbound element length.
4173 */
4174 ctrl_info->max_inbound_iu_length =
4175 (ctrl_info->max_inbound_iu_length_per_firmware /
4176 PQI_OPERATIONAL_IQ_ELEMENT_LENGTH) *
4177 PQI_OPERATIONAL_IQ_ELEMENT_LENGTH;
4178
4179 num_elements_per_iq =
4180 (ctrl_info->max_inbound_iu_length /
4181 PQI_OPERATIONAL_IQ_ELEMENT_LENGTH);
4182
4183 /* Add one because one element in each queue is unusable. */
4184 num_elements_per_iq++;
4185
4186 num_elements_per_iq = min(num_elements_per_iq,
4187 ctrl_info->max_elements_per_iq);
4188
4189 num_elements_per_oq = ((num_elements_per_iq - 1) * 2) + 1;
4190 num_elements_per_oq = min(num_elements_per_oq,
4191 ctrl_info->max_elements_per_oq);
4192
4193 ctrl_info->num_elements_per_iq = num_elements_per_iq;
4194 ctrl_info->num_elements_per_oq = num_elements_per_oq;
4195
4196 ctrl_info->max_sg_per_iu =
4197 ((ctrl_info->max_inbound_iu_length -
4198 PQI_OPERATIONAL_IQ_ELEMENT_LENGTH) /
4199 sizeof(struct pqi_sg_descriptor)) +
4200 PQI_MAX_EMBEDDED_SG_DESCRIPTORS;
4201 }
4202
4203 static inline void pqi_set_sg_descriptor(
4204 struct pqi_sg_descriptor *sg_descriptor, struct scatterlist *sg)
4205 {
4206 u64 address = (u64)sg_dma_address(sg);
4207 unsigned int length = sg_dma_len(sg);
4208
4209 put_unaligned_le64(address, &sg_descriptor->address);
4210 put_unaligned_le32(length, &sg_descriptor->length);
4211 put_unaligned_le32(0, &sg_descriptor->flags);
4212 }
4213
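/*
 * Translate the command's DMA-mapped scatterlist into SOP SG
 * descriptors embedded in the IU. If the list is longer than the
 * embedded space, one embedded slot becomes a CISS_SG_CHAIN descriptor
 * pointing at the request's pre-allocated chain buffer, and
 * CISS_SG_LAST marks the final descriptor.
 */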
4214 static int pqi_build_raid_sg_list(struct pqi_ctrl_info *ctrl_info,
4215 struct pqi_raid_path_request *request, struct scsi_cmnd *scmd,
4216 struct pqi_io_request *io_request)
4217 {
4218 int i;
4219 u16 iu_length;
4220 int sg_count;
4221 bool chained;
4222 unsigned int num_sg_in_iu;
4223 unsigned int max_sg_per_iu;
4224 struct scatterlist *sg;
4225 struct pqi_sg_descriptor *sg_descriptor;
4226
4227 sg_count = scsi_dma_map(scmd);
4228 if (sg_count < 0)
4229 return sg_count;
4230
4231 iu_length = offsetof(struct pqi_raid_path_request, sg_descriptors) -
4232 PQI_REQUEST_HEADER_LENGTH;
4233
4234 if (sg_count == 0)
4235 goto out;
4236
4237 sg = scsi_sglist(scmd);
4238 sg_descriptor = request->sg_descriptors;
4239 max_sg_per_iu = ctrl_info->max_sg_per_iu - 1;
4240 chained = false;
4241 num_sg_in_iu = 0;
4242 i = 0;
4243
4244 while (1) {
4245 pqi_set_sg_descriptor(sg_descriptor, sg);
4246 if (!chained)
4247 num_sg_in_iu++;
4248 i++;
4249 if (i == sg_count)
4250 break;
4251 sg_descriptor++;
4252 if (i == max_sg_per_iu) {
4253 put_unaligned_le64(
4254 (u64)io_request->sg_chain_buffer_dma_handle,
4255 &sg_descriptor->address);
4256 put_unaligned_le32((sg_count - num_sg_in_iu)
4257 * sizeof(*sg_descriptor),
4258 &sg_descriptor->length);
4259 put_unaligned_le32(CISS_SG_CHAIN,
4260 &sg_descriptor->flags);
4261 chained = true;
4262 num_sg_in_iu++;
4263 sg_descriptor = io_request->sg_chain_buffer;
4264 }
4265 sg = sg_next(sg);
4266 }
4267
4268 put_unaligned_le32(CISS_SG_LAST, &sg_descriptor->flags);
4269 request->partial = chained;
4270 iu_length += num_sg_in_iu * sizeof(*sg_descriptor);
4271
4272 out:
4273 put_unaligned_le16(iu_length, &request->header.iu_length);
4274
4275 return 0;
4276 }
4277
4278 static int pqi_build_aio_sg_list(struct pqi_ctrl_info *ctrl_info,
4279 struct pqi_aio_path_request *request, struct scsi_cmnd *scmd,
4280 struct pqi_io_request *io_request)
4281 {
4282 int i;
4283 u16 iu_length;
4284 int sg_count;
4285 bool chained;
4286 unsigned int num_sg_in_iu;
4287 unsigned int max_sg_per_iu;
4288 struct scatterlist *sg;
4289 struct pqi_sg_descriptor *sg_descriptor;
4290
4291 sg_count = scsi_dma_map(scmd);
4292 if (sg_count < 0)
4293 return sg_count;
4294
4295 iu_length = offsetof(struct pqi_aio_path_request, sg_descriptors) -
4296 PQI_REQUEST_HEADER_LENGTH;
4297 num_sg_in_iu = 0;
4298
4299 if (sg_count == 0)
4300 goto out;
4301
4302 sg = scsi_sglist(scmd);
4303 sg_descriptor = request->sg_descriptors;
4304 max_sg_per_iu = ctrl_info->max_sg_per_iu - 1;
4305 chained = false;
4306 i = 0;
4307
4308 while (1) {
4309 pqi_set_sg_descriptor(sg_descriptor, sg);
4310 if (!chained)
4311 num_sg_in_iu++;
4312 i++;
4313 if (i == sg_count)
4314 break;
4315 sg_descriptor++;
4316 if (i == max_sg_per_iu) {
4317 put_unaligned_le64(
4318 (u64)io_request->sg_chain_buffer_dma_handle,
4319 &sg_descriptor->address);
4320 put_unaligned_le32((sg_count - num_sg_in_iu)
4321 * sizeof(*sg_descriptor),
4322 &sg_descriptor->length);
4323 put_unaligned_le32(CISS_SG_CHAIN,
4324 &sg_descriptor->flags);
4325 chained = true;
4326 num_sg_in_iu++;
4327 sg_descriptor = io_request->sg_chain_buffer;
4328 }
4329 sg = sg_next(sg);
4330 }
4331
4332 put_unaligned_le32(CISS_SG_LAST, &sg_descriptor->flags);
4333 request->partial = chained;
4334 iu_length += num_sg_in_iu * sizeof(*sg_descriptor);
4335
4336 out:
4337 put_unaligned_le16(iu_length, &request->header.iu_length);
4338 request->num_sg_descriptors = num_sg_in_iu;
4339
4340 return 0;
4341 }
4342
4343 static void pqi_raid_io_complete(struct pqi_io_request *io_request,
4344 void *context)
4345 {
4346 struct scsi_cmnd *scmd;
4347
4348 scmd = io_request->scmd;
4349 pqi_free_io_request(io_request);
4350 scsi_dma_unmap(scmd);
4351 pqi_scsi_done(scmd);
4352 }
4353
4354 static int pqi_raid_submit_scsi_cmd(struct pqi_ctrl_info *ctrl_info,
4355 struct pqi_scsi_dev *device, struct scsi_cmnd *scmd,
4356 struct pqi_queue_group *queue_group)
4357 {
4358 int rc;
4359 size_t cdb_length;
4360 struct pqi_io_request *io_request;
4361 struct pqi_raid_path_request *request;
4362
4363 io_request = pqi_alloc_io_request(ctrl_info);
4364 io_request->io_complete_callback = pqi_raid_io_complete;
4365 io_request->scmd = scmd;
4366
4367 scmd->host_scribble = (unsigned char *)io_request;
4368
4369 request = io_request->iu;
4370 memset(request, 0,
4371 offsetof(struct pqi_raid_path_request, sg_descriptors));
4372
4373 request->header.iu_type = PQI_REQUEST_IU_RAID_PATH_IO;
4374 put_unaligned_le32(scsi_bufflen(scmd), &request->buffer_length);
4375 request->task_attribute = SOP_TASK_ATTRIBUTE_SIMPLE;
4376 put_unaligned_le16(io_request->index, &request->request_id);
4377 request->error_index = request->request_id;
4378 memcpy(request->lun_number, device->scsi3addr,
4379 sizeof(request->lun_number));
4380
4381 cdb_length = min_t(size_t, scmd->cmd_len, sizeof(request->cdb));
4382 memcpy(request->cdb, scmd->cmnd, cdb_length);
4383
4384 switch (cdb_length) {
4385 case 6:
4386 case 10:
4387 case 12:
4388 case 16:
4389 /* No bytes in the Additional CDB bytes field */
4390 request->additional_cdb_bytes_usage =
4391 SOP_ADDITIONAL_CDB_BYTES_0;
4392 break;
4393 case 20:
4394 /* 4 bytes in the Additional CDB bytes field */
4395 request->additional_cdb_bytes_usage =
4396 SOP_ADDITIONAL_CDB_BYTES_4;
4397 break;
4398 case 24:
4399 /* 8 bytes in the Additional CDB bytes field */
4400 request->additional_cdb_bytes_usage =
4401 SOP_ADDITIONAL_CDB_BYTES_8;
4402 break;
4403 case 28:
4404 /* 12 bytes in the Additional CDB bytes field */
4405 request->additional_cdb_bytes_usage =
4406 SOP_ADDITIONAL_CDB_BYTES_12;
4407 break;
4408 case 32:
4409 default:
4410 /* 16 bytes in the Additional CDB bytes field */
4411 request->additional_cdb_bytes_usage =
4412 SOP_ADDITIONAL_CDB_BYTES_16;
4413 break;
4414 }
4415
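/*
 * The SOP data-direction flags describe the transfer from the
 * controller's point of view: for a host write (DMA_TO_DEVICE) the
 * controller reads the data from host memory, hence SOP_READ_FLAG,
 * and vice versa.
 */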
4416 switch (scmd->sc_data_direction) {
4417 case DMA_TO_DEVICE:
4418 request->data_direction = SOP_READ_FLAG;
4419 break;
4420 case DMA_FROM_DEVICE:
4421 request->data_direction = SOP_WRITE_FLAG;
4422 break;
4423 case DMA_NONE:
4424 request->data_direction = SOP_NO_DIRECTION_FLAG;
4425 break;
4426 case DMA_BIDIRECTIONAL:
4427 request->data_direction = SOP_BIDIRECTIONAL;
4428 break;
4429 default:
4430 dev_err(&ctrl_info->pci_dev->dev,
4431 "unknown data direction: %d\n",
4432 scmd->sc_data_direction);
4433 WARN_ON(scmd->sc_data_direction);
4434 break;
4435 }
4436
4437 rc = pqi_build_raid_sg_list(ctrl_info, request, scmd, io_request);
4438 if (rc) {
4439 pqi_free_io_request(io_request);
4440 return SCSI_MLQUEUE_HOST_BUSY;
4441 }
4442
4443 pqi_start_io(ctrl_info, queue_group, RAID_PATH, io_request);
4444
4445 return 0;
4446 }
4447
4448 static void pqi_aio_io_complete(struct pqi_io_request *io_request,
4449 void *context)
4450 {
4451 struct scsi_cmnd *scmd;
4452
4453 scmd = io_request->scmd;
4454 scsi_dma_unmap(scmd);
4455 if (io_request->status == -EAGAIN)
4456 set_host_byte(scmd, DID_IMM_RETRY);
4457 pqi_free_io_request(io_request);
4458 pqi_scsi_done(scmd);
4459 }
4460
4461 static inline int pqi_aio_submit_scsi_cmd(struct pqi_ctrl_info *ctrl_info,
4462 struct pqi_scsi_dev *device, struct scsi_cmnd *scmd,
4463 struct pqi_queue_group *queue_group)
4464 {
4465 return pqi_aio_submit_io(ctrl_info, scmd, device->aio_handle,
4466 scmd->cmnd, scmd->cmd_len, queue_group, NULL);
4467 }
4468
4469 static int pqi_aio_submit_io(struct pqi_ctrl_info *ctrl_info,
4470 struct scsi_cmnd *scmd, u32 aio_handle, u8 *cdb,
4471 unsigned int cdb_length, struct pqi_queue_group *queue_group,
4472 struct pqi_encryption_info *encryption_info)
4473 {
4474 int rc;
4475 struct pqi_io_request *io_request;
4476 struct pqi_aio_path_request *request;
4477
4478 io_request = pqi_alloc_io_request(ctrl_info);
4479 io_request->io_complete_callback = pqi_aio_io_complete;
4480 io_request->scmd = scmd;
4481
4482 scmd->host_scribble = (unsigned char *)io_request;
4483
4484 request = io_request->iu;
4485 memset(request, 0,
4486 offsetof(struct pqi_raid_path_request, sg_descriptors));
4487
4488 request->header.iu_type = PQI_REQUEST_IU_AIO_PATH_IO;
4489 put_unaligned_le32(aio_handle, &request->nexus_id);
4490 put_unaligned_le32(scsi_bufflen(scmd), &request->buffer_length);
4491 request->task_attribute = SOP_TASK_ATTRIBUTE_SIMPLE;
4492 put_unaligned_le16(io_request->index, &request->request_id);
4493 request->error_index = request->request_id;
4494 if (cdb_length > sizeof(request->cdb))
4495 cdb_length = sizeof(request->cdb);
4496 request->cdb_length = cdb_length;
4497 memcpy(request->cdb, cdb, cdb_length);
4498
4499 switch (scmd->sc_data_direction) {
4500 case DMA_TO_DEVICE:
4501 request->data_direction = SOP_READ_FLAG;
4502 break;
4503 case DMA_FROM_DEVICE:
4504 request->data_direction = SOP_WRITE_FLAG;
4505 break;
4506 case DMA_NONE:
4507 request->data_direction = SOP_NO_DIRECTION_FLAG;
4508 break;
4509 case DMA_BIDIRECTIONAL:
4510 request->data_direction = SOP_BIDIRECTIONAL;
4511 break;
4512 default:
4513 dev_err(&ctrl_info->pci_dev->dev,
4514 "unknown data direction: %d\n",
4515 scmd->sc_data_direction);
4516 WARN_ON(scmd->sc_data_direction);
4517 break;
4518 }
4519
4520 if (encryption_info) {
4521 request->encryption_enable = true;
4522 put_unaligned_le16(encryption_info->data_encryption_key_index,
4523 &request->data_encryption_key_index);
4524 put_unaligned_le32(encryption_info->encrypt_tweak_lower,
4525 &request->encrypt_tweak_lower);
4526 put_unaligned_le32(encryption_info->encrypt_tweak_upper,
4527 &request->encrypt_tweak_upper);
4528 }
4529
4530 rc = pqi_build_aio_sg_list(ctrl_info, request, scmd, io_request);
4531 if (rc) {
4532 pqi_free_io_request(io_request);
4533 return SCSI_MLQUEUE_HOST_BUSY;
4534 }
4535
4536 pqi_start_io(ctrl_info, queue_group, AIO_PATH, io_request);
4537
4538 return 0;
4539 }
4540
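/*
 * SCSI midlayer queuecommand entry point. Selects a queue group from
 * the request's blk-mq hardware queue, attempts the RAID bypass (AIO)
 * path for offload-enabled logical devices handling filesystem
 * requests, and otherwise submits the command on the RAID path.
 */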
4541 static int pqi_scsi_queue_command(struct Scsi_Host *shost,
4542 struct scsi_cmnd *scmd)
4543 {
4544 int rc;
4545 struct pqi_ctrl_info *ctrl_info;
4546 struct pqi_scsi_dev *device;
4547 u16 hwq;
4548 struct pqi_queue_group *queue_group;
4549 bool raid_bypassed;
4550
4551 device = scmd->device->hostdata;
4552 ctrl_info = shost_to_hba(shost);
4553
4554 if (pqi_ctrl_offline(ctrl_info)) {
4555 set_host_byte(scmd, DID_NO_CONNECT);
4556 pqi_scsi_done(scmd);
4557 return 0;
4558 }
4559
4560 hwq = blk_mq_unique_tag_to_hwq(blk_mq_unique_tag(scmd->request));
4561 if (hwq >= ctrl_info->num_queue_groups)
4562 hwq = 0;
4563
4564 queue_group = &ctrl_info->queue_groups[hwq];
4565
4566 if (pqi_is_logical_device(device)) {
4567 raid_bypassed = false;
4568 if (device->offload_enabled &&
4569 scmd->request->cmd_type == REQ_TYPE_FS) {
4570 rc = pqi_raid_bypass_submit_scsi_cmd(ctrl_info, device,
4571 scmd, queue_group);
4572 if (rc == 0 ||
4573 rc == SCSI_MLQUEUE_HOST_BUSY ||
4574 rc == SAM_STAT_CHECK_CONDITION ||
4575 rc == SAM_STAT_RESERVATION_CONFLICT)
4576 raid_bypassed = true;
4577 }
4578 if (!raid_bypassed)
4579 rc = pqi_raid_submit_scsi_cmd(ctrl_info, device, scmd,
4580 queue_group);
4581 } else {
4582 if (device->aio_enabled)
4583 rc = pqi_aio_submit_scsi_cmd(ctrl_info, device, scmd,
4584 queue_group);
4585 else
4586 rc = pqi_raid_submit_scsi_cmd(ctrl_info, device, scmd,
4587 queue_group);
4588 }
4589
4590 return rc;
4591 }
4592
4593 static void pqi_lun_reset_complete(struct pqi_io_request *io_request,
4594 void *context)
4595 {
4596 struct completion *waiting = context;
4597
4598 complete(waiting);
4599 }
4600
4601 #define PQI_LUN_RESET_TIMEOUT_SECS 10
4602
4603 static int pqi_wait_for_lun_reset_completion(struct pqi_ctrl_info *ctrl_info,
4604 struct pqi_scsi_dev *device, struct completion *wait)
4605 {
4606 int rc;
4607 unsigned int wait_secs = 0;
4608
4609 while (1) {
4610 if (wait_for_completion_io_timeout(wait,
4611 PQI_LUN_RESET_TIMEOUT_SECS * HZ)) {
4612 rc = 0;
4613 break;
4614 }
4615
4616 pqi_check_ctrl_health(ctrl_info);
4617 if (pqi_ctrl_offline(ctrl_info)) {
4618 rc = -ETIMEDOUT;
4619 break;
4620 }
4621
4622 wait_secs += PQI_LUN_RESET_TIMEOUT_SECS;
4623
4624 dev_err(&ctrl_info->pci_dev->dev,
4625 "resetting scsi %d:%d:%d:%d - waiting %u seconds\n",
4626 ctrl_info->scsi_host->host_no, device->bus,
4627 device->target, device->lun, wait_secs);
4628 }
4629
4630 return rc;
4631 }
4632
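/*
 * Issue a SOP LUN RESET task management request on the RAID path of
 * the default queue group and wait for its completion. lun_reset_sem
 * limits outstanding resets to the I/O slots reserved for them.
 */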
4633 static int pqi_lun_reset(struct pqi_ctrl_info *ctrl_info,
4634 struct pqi_scsi_dev *device)
4635 {
4636 int rc;
4637 struct pqi_io_request *io_request;
4638 DECLARE_COMPLETION_ONSTACK(wait);
4639 struct pqi_task_management_request *request;
4640
4641 down(&ctrl_info->lun_reset_sem);
4642
4643 io_request = pqi_alloc_io_request(ctrl_info);
4644 io_request->io_complete_callback = pqi_lun_reset_complete;
4645 io_request->context = &wait;
4646
4647 request = io_request->iu;
4648 memset(request, 0, sizeof(*request));
4649
4650 request->header.iu_type = PQI_REQUEST_IU_TASK_MANAGEMENT;
4651 put_unaligned_le16(sizeof(*request) - PQI_REQUEST_HEADER_LENGTH,
4652 &request->header.iu_length);
4653 put_unaligned_le16(io_request->index, &request->request_id);
4654 memcpy(request->lun_number, device->scsi3addr,
4655 sizeof(request->lun_number));
4656 request->task_management_function = SOP_TASK_MANAGEMENT_LUN_RESET;
4657
4658 pqi_start_io(ctrl_info,
4659 &ctrl_info->queue_groups[PQI_DEFAULT_QUEUE_GROUP], RAID_PATH,
4660 io_request);
4661
4662 rc = pqi_wait_for_lun_reset_completion(ctrl_info, device, &wait);
4663 if (rc == 0)
4664 rc = io_request->status;
4665
4666 pqi_free_io_request(io_request);
4667 up(&ctrl_info->lun_reset_sem);
4668
4669 return rc;
4670 }
4671
4672 /* Performs a reset at the LUN level. */
4673
4674 static int pqi_device_reset(struct pqi_ctrl_info *ctrl_info,
4675 struct pqi_scsi_dev *device)
4676 {
4677 int rc;
4678
4679 pqi_check_ctrl_health(ctrl_info);
4680 if (pqi_ctrl_offline(ctrl_info))
4681 return FAILED;
4682
4683 rc = pqi_lun_reset(ctrl_info, device);
4684
4685 return rc == 0 ? SUCCESS : FAILED;
4686 }
4687
4688 static int pqi_eh_device_reset_handler(struct scsi_cmnd *scmd)
4689 {
4690 int rc;
4691 struct pqi_ctrl_info *ctrl_info;
4692 struct pqi_scsi_dev *device;
4693
4694 ctrl_info = shost_to_hba(scmd->device->host);
4695 device = scmd->device->hostdata;
4696
4697 dev_err(&ctrl_info->pci_dev->dev,
4698 "resetting scsi %d:%d:%d:%d\n",
4699 ctrl_info->scsi_host->host_no,
4700 device->bus, device->target, device->lun);
4701
4702 rc = pqi_device_reset(ctrl_info, device);
4703
4704 dev_err(&ctrl_info->pci_dev->dev,
4705 "reset of scsi %d:%d:%d:%d: %s\n",
4706 ctrl_info->scsi_host->host_no,
4707 device->bus, device->target, device->lun,
4708 rc == SUCCESS ? "SUCCESS" : "FAILED");
4709
4710 return rc;
4711 }
4712
4713 static int pqi_slave_alloc(struct scsi_device *sdev)
4714 {
4715 struct pqi_scsi_dev *device;
4716 unsigned long flags;
4717 struct pqi_ctrl_info *ctrl_info;
4718 struct scsi_target *starget;
4719 struct sas_rphy *rphy;
4720
4721 ctrl_info = shost_to_hba(sdev->host);
4722
4723 spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);
4724
4725 if (sdev_channel(sdev) == PQI_PHYSICAL_DEVICE_BUS) {
4726 starget = scsi_target(sdev);
4727 rphy = target_to_rphy(starget);
4728 device = pqi_find_device_by_sas_rphy(ctrl_info, rphy);
4729 if (device) {
4730 device->target = sdev_id(sdev);
4731 device->lun = sdev->lun;
4732 device->target_lun_valid = true;
4733 }
4734 } else {
4735 device = pqi_find_scsi_dev(ctrl_info, sdev_channel(sdev),
4736 sdev_id(sdev), sdev->lun);
4737 }
4738
4739 if (device && device->expose_device) {
4740 sdev->hostdata = device;
4741 device->sdev = sdev;
4742 if (device->queue_depth) {
4743 device->advertised_queue_depth = device->queue_depth;
4744 scsi_change_queue_depth(sdev,
4745 device->advertised_queue_depth);
4746 }
4747 }
4748
4749 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
4750
4751 return 0;
4752 }
4753
4754 static int pqi_slave_configure(struct scsi_device *sdev)
4755 {
4756 struct pqi_scsi_dev *device;
4757
4758 device = sdev->hostdata;
4759 if (!device->expose_device)
4760 sdev->no_uld_attach = true;
4761
4762 return 0;
4763 }
4764
4765 static int pqi_getpciinfo_ioctl(struct pqi_ctrl_info *ctrl_info,
4766 void __user *arg)
4767 {
4768 struct pci_dev *pci_dev;
4769 u32 subsystem_vendor;
4770 u32 subsystem_device;
4771 cciss_pci_info_struct pciinfo;
4772
4773 if (!arg)
4774 return -EINVAL;
4775
4776 pci_dev = ctrl_info->pci_dev;
4777
4778 pciinfo.domain = pci_domain_nr(pci_dev->bus);
4779 pciinfo.bus = pci_dev->bus->number;
4780 pciinfo.dev_fn = pci_dev->devfn;
4781 subsystem_vendor = pci_dev->subsystem_vendor;
4782 subsystem_device = pci_dev->subsystem_device;
4783 pciinfo.board_id = ((subsystem_device << 16) & 0xffff0000) |
4784 subsystem_vendor;
4785
4786 if (copy_to_user(arg, &pciinfo, sizeof(pciinfo)))
4787 return -EFAULT;
4788
4789 return 0;
4790 }
4791
4792 static int pqi_getdrivver_ioctl(void __user *arg)
4793 {
4794 u32 version;
4795
4796 if (!arg)
4797 return -EINVAL;
4798
4799 version = (DRIVER_MAJOR << 28) | (DRIVER_MINOR << 24) |
4800 (DRIVER_RELEASE << 16) | DRIVER_REVISION;
4801
4802 if (copy_to_user(arg, &version, sizeof(version)))
4803 return -EFAULT;
4804
4805 return 0;
4806 }
4807
4808 struct ciss_error_info {
4809 u8 scsi_status;
4810 int command_status;
4811 size_t sense_data_length;
4812 };
4813
4814 static void pqi_error_info_to_ciss(struct pqi_raid_error_info *pqi_error_info,
4815 struct ciss_error_info *ciss_error_info)
4816 {
4817 int ciss_cmd_status;
4818 size_t sense_data_length;
4819
4820 switch (pqi_error_info->data_out_result) {
4821 case PQI_DATA_IN_OUT_GOOD:
4822 ciss_cmd_status = CISS_CMD_STATUS_SUCCESS;
4823 break;
4824 case PQI_DATA_IN_OUT_UNDERFLOW:
4825 ciss_cmd_status = CISS_CMD_STATUS_DATA_UNDERRUN;
4826 break;
4827 case PQI_DATA_IN_OUT_BUFFER_OVERFLOW:
4828 ciss_cmd_status = CISS_CMD_STATUS_DATA_OVERRUN;
4829 break;
4830 case PQI_DATA_IN_OUT_PROTOCOL_ERROR:
4831 case PQI_DATA_IN_OUT_BUFFER_ERROR:
4832 case PQI_DATA_IN_OUT_BUFFER_OVERFLOW_DESCRIPTOR_AREA:
4833 case PQI_DATA_IN_OUT_BUFFER_OVERFLOW_BRIDGE:
4834 case PQI_DATA_IN_OUT_ERROR:
4835 ciss_cmd_status = CISS_CMD_STATUS_PROTOCOL_ERROR;
4836 break;
4837 case PQI_DATA_IN_OUT_HARDWARE_ERROR:
4838 case PQI_DATA_IN_OUT_PCIE_FABRIC_ERROR:
4839 case PQI_DATA_IN_OUT_PCIE_COMPLETION_TIMEOUT:
4840 case PQI_DATA_IN_OUT_PCIE_COMPLETER_ABORT_RECEIVED:
4841 case PQI_DATA_IN_OUT_PCIE_UNSUPPORTED_REQUEST_RECEIVED:
4842 case PQI_DATA_IN_OUT_PCIE_ECRC_CHECK_FAILED:
4843 case PQI_DATA_IN_OUT_PCIE_UNSUPPORTED_REQUEST:
4844 case PQI_DATA_IN_OUT_PCIE_ACS_VIOLATION:
4845 case PQI_DATA_IN_OUT_PCIE_TLP_PREFIX_BLOCKED:
4846 case PQI_DATA_IN_OUT_PCIE_POISONED_MEMORY_READ:
4847 ciss_cmd_status = CISS_CMD_STATUS_HARDWARE_ERROR;
4848 break;
4849 case PQI_DATA_IN_OUT_UNSOLICITED_ABORT:
4850 ciss_cmd_status = CISS_CMD_STATUS_UNSOLICITED_ABORT;
4851 break;
4852 case PQI_DATA_IN_OUT_ABORTED:
4853 ciss_cmd_status = CISS_CMD_STATUS_ABORTED;
4854 break;
4855 case PQI_DATA_IN_OUT_TIMEOUT:
4856 ciss_cmd_status = CISS_CMD_STATUS_TIMEOUT;
4857 break;
4858 default:
4859 ciss_cmd_status = CISS_CMD_STATUS_TARGET_STATUS;
4860 break;
4861 }
4862
4863 sense_data_length =
4864 get_unaligned_le16(&pqi_error_info->sense_data_length);
4865 if (sense_data_length == 0)
4866 sense_data_length =
4867 get_unaligned_le16(&pqi_error_info->response_data_length);
4868 if (sense_data_length)
4869 if (sense_data_length > sizeof(pqi_error_info->data))
4870 sense_data_length = sizeof(pqi_error_info->data);
4871
4872 ciss_error_info->scsi_status = pqi_error_info->status;
4873 ciss_error_info->command_status = ciss_cmd_status;
4874 ciss_error_info->sense_data_length = sense_data_length;
4875 }
4876
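/*
 * CCISS_PASSTHRU handler: copy in the user's IOCTL_Command_struct,
 * stage an optional data buffer, submit a synchronous RAID path
 * request, convert any PQI error information to CISS form, and copy
 * the results (and any read data) back to user space.
 */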
4877 static int pqi_passthru_ioctl(struct pqi_ctrl_info *ctrl_info, void __user *arg)
4878 {
4879 int rc;
4880 char *kernel_buffer = NULL;
4881 u16 iu_length;
4882 size_t sense_data_length;
4883 IOCTL_Command_struct iocommand;
4884 struct pqi_raid_path_request request;
4885 struct pqi_raid_error_info pqi_error_info;
4886 struct ciss_error_info ciss_error_info;
4887
4888 if (pqi_ctrl_offline(ctrl_info))
4889 return -ENXIO;
4890 if (!arg)
4891 return -EINVAL;
4892 if (!capable(CAP_SYS_RAWIO))
4893 return -EPERM;
4894 if (copy_from_user(&iocommand, arg, sizeof(iocommand)))
4895 return -EFAULT;
4896 if (iocommand.buf_size < 1 &&
4897 iocommand.Request.Type.Direction != XFER_NONE)
4898 return -EINVAL;
4899 if (iocommand.Request.CDBLen > sizeof(request.cdb))
4900 return -EINVAL;
4901 if (iocommand.Request.Type.Type != TYPE_CMD)
4902 return -EINVAL;
4903
4904 switch (iocommand.Request.Type.Direction) {
4905 case XFER_NONE:
4906 case XFER_WRITE:
4907 case XFER_READ:
4908 break;
4909 default:
4910 return -EINVAL;
4911 }
4912
4913 if (iocommand.buf_size > 0) {
4914 kernel_buffer = kmalloc(iocommand.buf_size, GFP_KERNEL);
4915 if (!kernel_buffer)
4916 return -ENOMEM;
4917 if (iocommand.Request.Type.Direction & XFER_WRITE) {
4918 if (copy_from_user(kernel_buffer, iocommand.buf,
4919 iocommand.buf_size)) {
4920 rc = -EFAULT;
4921 goto out;
4922 }
4923 } else {
4924 memset(kernel_buffer, 0, iocommand.buf_size);
4925 }
4926 }
4927
4928 memset(&request, 0, sizeof(request));
4929
4930 request.header.iu_type = PQI_REQUEST_IU_RAID_PATH_IO;
4931 iu_length = offsetof(struct pqi_raid_path_request, sg_descriptors) -
4932 PQI_REQUEST_HEADER_LENGTH;
4933 memcpy(request.lun_number, iocommand.LUN_info.LunAddrBytes,
4934 sizeof(request.lun_number));
4935 memcpy(request.cdb, iocommand.Request.CDB, iocommand.Request.CDBLen);
4936 request.additional_cdb_bytes_usage = SOP_ADDITIONAL_CDB_BYTES_0;
4937
4938 switch (iocommand.Request.Type.Direction) {
4939 case XFER_NONE:
4940 request.data_direction = SOP_NO_DIRECTION_FLAG;
4941 break;
4942 case XFER_WRITE:
4943 request.data_direction = SOP_WRITE_FLAG;
4944 break;
4945 case XFER_READ:
4946 request.data_direction = SOP_READ_FLAG;
4947 break;
4948 }
4949
4950 request.task_attribute = SOP_TASK_ATTRIBUTE_SIMPLE;
4951
4952 if (iocommand.buf_size > 0) {
4953 put_unaligned_le32(iocommand.buf_size, &request.buffer_length);
4954
4955 rc = pqi_map_single(ctrl_info->pci_dev,
4956 &request.sg_descriptors[0], kernel_buffer,
4957 iocommand.buf_size, PCI_DMA_BIDIRECTIONAL);
4958 if (rc)
4959 goto out;
4960
4961 iu_length += sizeof(request.sg_descriptors[0]);
4962 }
4963
4964 put_unaligned_le16(iu_length, &request.header.iu_length);
4965
4966 rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header,
4967 PQI_SYNC_FLAGS_INTERRUPTABLE, &pqi_error_info, NO_TIMEOUT);
4968
4969 if (iocommand.buf_size > 0)
4970 pqi_pci_unmap(ctrl_info->pci_dev, request.sg_descriptors, 1,
4971 PCI_DMA_BIDIRECTIONAL);
4972
4973 memset(&iocommand.error_info, 0, sizeof(iocommand.error_info));
4974
4975 if (rc == 0) {
4976 pqi_error_info_to_ciss(&pqi_error_info, &ciss_error_info);
4977 iocommand.error_info.ScsiStatus = ciss_error_info.scsi_status;
4978 iocommand.error_info.CommandStatus =
4979 ciss_error_info.command_status;
4980 sense_data_length = ciss_error_info.sense_data_length;
4981 if (sense_data_length) {
4982 if (sense_data_length >
4983 sizeof(iocommand.error_info.SenseInfo))
4984 sense_data_length =
4985 sizeof(iocommand.error_info.SenseInfo);
4986 memcpy(iocommand.error_info.SenseInfo,
4987 pqi_error_info.data, sense_data_length);
4988 iocommand.error_info.SenseLen = sense_data_length;
4989 }
4990 }
4991
4992 if (copy_to_user(arg, &iocommand, sizeof(iocommand))) {
4993 rc = -EFAULT;
4994 goto out;
4995 }
4996
4997 if (rc == 0 && iocommand.buf_size > 0 &&
4998 (iocommand.Request.Type.Direction & XFER_READ)) {
4999 if (copy_to_user(iocommand.buf, kernel_buffer,
5000 iocommand.buf_size)) {
5001 rc = -EFAULT;
5002 }
5003 }
5004
5005 out:
5006 kfree(kernel_buffer);
5007
5008 return rc;
5009 }
5010
5011 static int pqi_ioctl(struct scsi_device *sdev, int cmd, void __user *arg)
5012 {
5013 int rc;
5014 struct pqi_ctrl_info *ctrl_info;
5015
5016 ctrl_info = shost_to_hba(sdev->host);
5017
5018 switch (cmd) {
5019 case CCISS_DEREGDISK:
5020 case CCISS_REGNEWDISK:
5021 case CCISS_REGNEWD:
5022 rc = pqi_scan_scsi_devices(ctrl_info);
5023 break;
5024 case CCISS_GETPCIINFO:
5025 rc = pqi_getpciinfo_ioctl(ctrl_info, arg);
5026 break;
5027 case CCISS_GETDRIVVER:
5028 rc = pqi_getdrivver_ioctl(arg);
5029 break;
5030 case CCISS_PASSTHRU:
5031 rc = pqi_passthru_ioctl(ctrl_info, arg);
5032 break;
5033 default:
5034 rc = -EINVAL;
5035 break;
5036 }
5037
5038 return rc;
5039 }
5040
5041 static ssize_t pqi_version_show(struct device *dev,
5042 struct device_attribute *attr, char *buffer)
5043 {
5044 ssize_t count = 0;
5045 struct Scsi_Host *shost;
5046 struct pqi_ctrl_info *ctrl_info;
5047
5048 shost = class_to_shost(dev);
5049 ctrl_info = shost_to_hba(shost);
5050
5051 count += snprintf(buffer + count, PAGE_SIZE - count,
5052 " driver: %s\n", DRIVER_VERSION BUILD_TIMESTAMP);
5053
5054 count += snprintf(buffer + count, PAGE_SIZE - count,
5055 "firmware: %s\n", ctrl_info->firmware_version);
5056
5057 return count;
5058 }
5059
5060 static ssize_t pqi_host_rescan_store(struct device *dev,
5061 struct device_attribute *attr, const char *buffer, size_t count)
5062 {
5063 struct Scsi_Host *shost = class_to_shost(dev);
5064
5065 pqi_scan_start(shost);
5066
5067 return count;
5068 }
5069
5070 static DEVICE_ATTR(version, S_IRUGO, pqi_version_show, NULL);
5071 static DEVICE_ATTR(rescan, S_IWUSR, NULL, pqi_host_rescan_store);
5072
5073 static struct device_attribute *pqi_shost_attrs[] = {
5074 &dev_attr_version,
5075 &dev_attr_rescan,
5076 NULL
5077 };
5078
5079 static ssize_t pqi_sas_address_show(struct device *dev,
5080 struct device_attribute *attr, char *buffer)
5081 {
5082 struct pqi_ctrl_info *ctrl_info;
5083 struct scsi_device *sdev;
5084 struct pqi_scsi_dev *device;
5085 unsigned long flags;
5086 u64 sas_address;
5087
5088 sdev = to_scsi_device(dev);
5089 ctrl_info = shost_to_hba(sdev->host);
5090
5091 spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);
5092
5093 device = sdev->hostdata;
5094 if (pqi_is_logical_device(device)) {
5095 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock,
5096 flags);
5097 return -ENODEV;
5098 }
5099 sas_address = device->sas_address;
5100
5101 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
5102
5103 return snprintf(buffer, PAGE_SIZE, "0x%016llx\n", sas_address);
5104 }
5105
5106 static ssize_t pqi_ssd_smart_path_enabled_show(struct device *dev,
5107 struct device_attribute *attr, char *buffer)
5108 {
5109 struct pqi_ctrl_info *ctrl_info;
5110 struct scsi_device *sdev;
5111 struct pqi_scsi_dev *device;
5112 unsigned long flags;
5113
5114 sdev = to_scsi_device(dev);
5115 ctrl_info = shost_to_hba(sdev->host);
5116
5117 spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);
5118
5119 device = sdev->hostdata;
5120 buffer[0] = device->offload_enabled ? '1' : '0';
5121 buffer[1] = '\n';
5122 buffer[2] = '\0';
5123
5124 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
5125
5126 return 2;
5127 }
5128
5129 static DEVICE_ATTR(sas_address, S_IRUGO, pqi_sas_address_show, NULL);
5130 static DEVICE_ATTR(ssd_smart_path_enabled, S_IRUGO,
5131 pqi_ssd_smart_path_enabled_show, NULL);
5132
5133 static struct device_attribute *pqi_sdev_attrs[] = {
5134 &dev_attr_sas_address,
5135 &dev_attr_ssd_smart_path_enabled,
5136 NULL
5137 };
5138
5139 static struct scsi_host_template pqi_driver_template = {
5140 .module = THIS_MODULE,
5141 .name = DRIVER_NAME_SHORT,
5142 .proc_name = DRIVER_NAME_SHORT,
5143 .queuecommand = pqi_scsi_queue_command,
5144 .scan_start = pqi_scan_start,
5145 .scan_finished = pqi_scan_finished,
5146 .this_id = -1,
5147 .use_clustering = ENABLE_CLUSTERING,
5148 .eh_device_reset_handler = pqi_eh_device_reset_handler,
5149 .ioctl = pqi_ioctl,
5150 .slave_alloc = pqi_slave_alloc,
5151 .slave_configure = pqi_slave_configure,
5152 .sdev_attrs = pqi_sdev_attrs,
5153 .shost_attrs = pqi_shost_attrs,
5154 };
5155
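/*
 * Allocate and register the Scsi_Host. Only a pointer to the
 * controller's pqi_ctrl_info is stored in hostdata (hence the
 * pointer-sized sizeof(ctrl_info) private area); the SAS transport
 * host is attached afterwards.
 */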
5156 static int pqi_register_scsi(struct pqi_ctrl_info *ctrl_info)
5157 {
5158 int rc;
5159 struct Scsi_Host *shost;
5160
5161 shost = scsi_host_alloc(&pqi_driver_template, sizeof(ctrl_info));
5162 if (!shost) {
5163 dev_err(&ctrl_info->pci_dev->dev,
5164 "scsi_host_alloc failed for controller %u\n",
5165 ctrl_info->ctrl_id);
5166 return -ENOMEM;
5167 }
5168
5169 shost->io_port = 0;
5170 shost->n_io_port = 0;
5171 shost->this_id = -1;
5172 shost->max_channel = PQI_MAX_BUS;
5173 shost->max_cmd_len = MAX_COMMAND_SIZE;
5174 shost->max_lun = ~0;
5175 shost->max_id = ~0;
5176 shost->max_sectors = ctrl_info->max_sectors;
5177 shost->can_queue = ctrl_info->scsi_ml_can_queue;
5178 shost->cmd_per_lun = shost->can_queue;
5179 shost->sg_tablesize = ctrl_info->sg_tablesize;
5180 shost->transportt = pqi_sas_transport_template;
5181 shost->irq = ctrl_info->msix_vectors[0];
5182 shost->unique_id = shost->irq;
5183 shost->nr_hw_queues = ctrl_info->num_queue_groups;
5184 shost->hostdata[0] = (unsigned long)ctrl_info;
5185
5186 rc = scsi_add_host(shost, &ctrl_info->pci_dev->dev);
5187 if (rc) {
5188 dev_err(&ctrl_info->pci_dev->dev,
5189 "scsi_add_host failed for controller %u\n",
5190 ctrl_info->ctrl_id);
5191 goto free_host;
5192 }
5193
5194 rc = pqi_add_sas_host(shost, ctrl_info);
5195 if (rc) {
5196 dev_err(&ctrl_info->pci_dev->dev,
5197 "add SAS host failed for controller %u\n",
5198 ctrl_info->ctrl_id);
5199 goto remove_host;
5200 }
5201
5202 ctrl_info->scsi_host = shost;
5203
5204 return 0;
5205
5206 remove_host:
5207 scsi_remove_host(shost);
5208 free_host:
5209 scsi_host_put(shost);
5210
5211 return rc;
5212 }
5213
5214 static void pqi_unregister_scsi(struct pqi_ctrl_info *ctrl_info)
5215 {
5216 struct Scsi_Host *shost;
5217
5218 pqi_delete_sas_host(ctrl_info);
5219
5220 shost = ctrl_info->scsi_host;
5221 if (!shost)
5222 return;
5223
5224 scsi_remove_host(shost);
5225 scsi_host_put(shost);
5226 }
5227
5228 #define PQI_RESET_ACTION_RESET 0x1
5229
5230 #define PQI_RESET_TYPE_NO_RESET 0x0
5231 #define PQI_RESET_TYPE_SOFT_RESET 0x1
5232 #define PQI_RESET_TYPE_FIRM_RESET 0x2
5233 #define PQI_RESET_TYPE_HARD_RESET 0x3
5234
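/*
 * Request a hard PQI reset by writing the reset action (shifted into
 * bit position 5) together with the hard-reset type into the
 * device_reset register, then call pqi_wait_for_pqi_mode_ready() to
 * wait for the operation to complete.
 */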
5235 static int pqi_reset(struct pqi_ctrl_info *ctrl_info)
5236 {
5237 int rc;
5238 u32 reset_params;
5239
5240 reset_params = (PQI_RESET_ACTION_RESET << 5) |
5241 PQI_RESET_TYPE_HARD_RESET;
5242
5243 writel(reset_params,
5244 &ctrl_info->pqi_registers->device_reset);
5245
5246 rc = pqi_wait_for_pqi_mode_ready(ctrl_info);
5247 if (rc)
5248 dev_err(&ctrl_info->pci_dev->dev,
5249 "PQI reset failed\n");
5250
5251 return rc;
5252 }
5253
5254 static int pqi_get_ctrl_firmware_version(struct pqi_ctrl_info *ctrl_info)
5255 {
5256 int rc;
5257 struct bmic_identify_controller *identify;
5258
5259 identify = kmalloc(sizeof(*identify), GFP_KERNEL);
5260 if (!identify)
5261 return -ENOMEM;
5262
5263 rc = pqi_identify_controller(ctrl_info, identify);
5264 if (rc)
5265 goto out;
5266
5267 memcpy(ctrl_info->firmware_version, identify->firmware_version,
5268 sizeof(identify->firmware_version));
5269 ctrl_info->firmware_version[sizeof(identify->firmware_version)] = '\0';
5270 snprintf(ctrl_info->firmware_version +
5271 strlen(ctrl_info->firmware_version),
5272 sizeof(ctrl_info->firmware_version),
5273 "-%u", get_unaligned_le16(&identify->firmware_build_number));
5274
5275 out:
5276 kfree(identify);
5277
5278 return rc;
5279 }
5280
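/*
 * kdump/kexec support: if the crashed kernel left the controller in
 * PQI mode, disable MSI-X through SIS, reset the controller, and
 * re-enable SIS mode so that initialization can proceed as if the
 * controller had just come out of reset.
 */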
5281 static int pqi_kdump_init(struct pqi_ctrl_info *ctrl_info)
5282 {
5283 if (!sis_is_firmware_running(ctrl_info))
5284 return -ENXIO;
5285
5286 if (pqi_get_ctrl_mode(ctrl_info) == PQI_MODE) {
5287 sis_disable_msix(ctrl_info);
5288 if (pqi_reset(ctrl_info) == 0)
5289 sis_reenable_sis_mode(ctrl_info);
5290 }
5291
5292 return 0;
5293 }
5294
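/*
 * Main controller bring-up sequence: (optionally) quiesce a
 * kdump-inherited controller, use SIS to fetch the controller
 * properties and PQI capabilities, allocate the error buffer, switch
 * the controller into PQI mode, create the admin queues, validate the
 * reported device capability, size and allocate the operational queues
 * and I/O resources, enable MSI-X and IRQs, configure events, start
 * the heartbeat timer, register with the SCSI midlayer, and kick off
 * the initial device scan.
 */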
5295 static int pqi_ctrl_init(struct pqi_ctrl_info *ctrl_info)
5296 {
5297 int rc;
5298
5299 if (reset_devices) {
5300 rc = pqi_kdump_init(ctrl_info);
5301 if (rc)
5302 return rc;
5303 }
5304
5305 /*
5306 * When the controller comes out of reset, it is always running
5307 * in legacy SIS mode so that it remains compatible with legacy
5308 * drivers shipped with OSes. We therefore have to talk to it using
5309 * SIS commands at first. Once we are satisfied that the controller
5310 * supports PQI, we transition it into PQI
5311 * mode.
5312 */
5313
5314 /*
5315 * Wait until the controller is ready to start accepting SIS
5316 * commands.
5317 */
5318 rc = sis_wait_for_ctrl_ready(ctrl_info);
5319 if (rc) {
5320 dev_err(&ctrl_info->pci_dev->dev,
5321 "error initializing SIS interface\n");
5322 return rc;
5323 }
5324
5325 /*
5326 * Get the controller properties. This allows us to determine
5327 * whether or not it supports PQI mode.
5328 */
5329 rc = sis_get_ctrl_properties(ctrl_info);
5330 if (rc) {
5331 dev_err(&ctrl_info->pci_dev->dev,
5332 "error obtaining controller properties\n");
5333 return rc;
5334 }
5335
5336 rc = sis_get_pqi_capabilities(ctrl_info);
5337 if (rc) {
5338 dev_err(&ctrl_info->pci_dev->dev,
5339 "error obtaining controller capabilities\n");
5340 return rc;
5341 }
5342
5343 if (ctrl_info->max_outstanding_requests > PQI_MAX_OUTSTANDING_REQUESTS)
5344 ctrl_info->max_outstanding_requests =
5345 PQI_MAX_OUTSTANDING_REQUESTS;
5346
5347 pqi_calculate_io_resources(ctrl_info);
5348
5349 rc = pqi_alloc_error_buffer(ctrl_info);
5350 if (rc) {
5351 dev_err(&ctrl_info->pci_dev->dev,
5352 "failed to allocate PQI error buffer\n");
5353 return rc;
5354 }
5355
5356 /*
5357 * If the function we are about to call succeeds, the
5358 * controller will transition from legacy SIS mode
5359 * into PQI mode.
5360 */
5361 rc = sis_init_base_struct_addr(ctrl_info);
5362 if (rc) {
5363 dev_err(&ctrl_info->pci_dev->dev,
5364 "error initializing PQI mode\n");
5365 return rc;
5366 }
5367
5368 /* Wait for the controller to complete the SIS -> PQI transition. */
5369 rc = pqi_wait_for_pqi_mode_ready(ctrl_info);
5370 if (rc) {
5371 dev_err(&ctrl_info->pci_dev->dev,
5372 "transition to PQI mode failed\n");
5373 return rc;
5374 }
5375
5376 /* From here on, we are running in PQI mode. */
5377 ctrl_info->pqi_mode_enabled = true;
5378 pqi_save_ctrl_mode(ctrl_info, PQI_MODE);
5379
5380 rc = pqi_alloc_admin_queues(ctrl_info);
5381 if (rc) {
5382 dev_err(&ctrl_info->pci_dev->dev,
5383 "error allocating admin queues\n");
5384 return rc;
5385 }
5386
5387 rc = pqi_create_admin_queues(ctrl_info);
5388 if (rc) {
5389 dev_err(&ctrl_info->pci_dev->dev,
5390 "error creating admin queues\n");
5391 return rc;
5392 }
5393
5394 rc = pqi_report_device_capability(ctrl_info);
5395 if (rc) {
5396 dev_err(&ctrl_info->pci_dev->dev,
5397 "obtaining device capability failed\n");
5398 return rc;
5399 }
5400
5401 rc = pqi_validate_device_capability(ctrl_info);
5402 if (rc)
5403 return rc;
5404
5405 pqi_calculate_queue_resources(ctrl_info);
5406
5407 rc = pqi_enable_msix_interrupts(ctrl_info);
5408 if (rc)
5409 return rc;
5410
5411 if (ctrl_info->num_msix_vectors_enabled < ctrl_info->num_queue_groups) {
5412 ctrl_info->max_msix_vectors =
5413 ctrl_info->num_msix_vectors_enabled;
5414 pqi_calculate_queue_resources(ctrl_info);
5415 }
5416
5417 rc = pqi_alloc_io_resources(ctrl_info);
5418 if (rc)
5419 return rc;
5420
5421 rc = pqi_alloc_operational_queues(ctrl_info);
5422 if (rc)
5423 return rc;
5424
5425 pqi_init_operational_queues(ctrl_info);
5426
5427 rc = pqi_request_irqs(ctrl_info);
5428 if (rc)
5429 return rc;
5430
5431 pqi_irq_set_affinity_hint(ctrl_info);
5432
5433 rc = pqi_create_queues(ctrl_info);
5434 if (rc)
5435 return rc;
5436
5437 sis_enable_msix(ctrl_info);
5438
5439 rc = pqi_configure_events(ctrl_info);
5440 if (rc) {
5441 dev_err(&ctrl_info->pci_dev->dev,
5442 "error configuring events\n");
5443 return rc;
5444 }
5445
5446 pqi_start_heartbeat_timer(ctrl_info);
5447
5448 ctrl_info->controller_online = true;
5449
5450 /* Register with the SCSI subsystem. */
5451 rc = pqi_register_scsi(ctrl_info);
5452 if (rc)
5453 return rc;
5454
5455 rc = pqi_get_ctrl_firmware_version(ctrl_info);
5456 if (rc) {
5457 dev_err(&ctrl_info->pci_dev->dev,
5458 "error obtaining firmware version\n");
5459 return rc;
5460 }
5461
5462 rc = pqi_write_driver_version_to_host_wellness(ctrl_info);
5463 if (rc) {
5464 dev_err(&ctrl_info->pci_dev->dev,
5465 "error updating host wellness\n");
5466 return rc;
5467 }
5468
5469 pqi_schedule_update_time_worker(ctrl_info);
5470
5471 pqi_scan_scsi_devices(ctrl_info);
5472
5473 return 0;
5474 }
5475
5476 static int pqi_pci_init(struct pqi_ctrl_info *ctrl_info)
5477 {
5478 int rc;
5479 u64 mask;
5480
5481 rc = pci_enable_device(ctrl_info->pci_dev);
5482 if (rc) {
5483 dev_err(&ctrl_info->pci_dev->dev,
5484 "failed to enable PCI device\n");
5485 return rc;
5486 }
5487
5488 if (sizeof(dma_addr_t) > 4)
5489 mask = DMA_BIT_MASK(64);
5490 else
5491 mask = DMA_BIT_MASK(32);
5492
5493 rc = dma_set_mask(&ctrl_info->pci_dev->dev, mask);
5494 if (rc) {
5495 dev_err(&ctrl_info->pci_dev->dev, "failed to set DMA mask\n");
5496 goto disable_device;
5497 }
5498
5499 rc = pci_request_regions(ctrl_info->pci_dev, DRIVER_NAME_SHORT);
5500 if (rc) {
5501 dev_err(&ctrl_info->pci_dev->dev,
5502 "failed to obtain PCI resources\n");
5503 goto disable_device;
5504 }
5505
5506 ctrl_info->iomem_base = ioremap_nocache(pci_resource_start(
5507 ctrl_info->pci_dev, 0),
5508 sizeof(struct pqi_ctrl_registers));
5509 if (!ctrl_info->iomem_base) {
5510 dev_err(&ctrl_info->pci_dev->dev,
5511 "failed to map memory for controller registers\n");
5512 rc = -ENOMEM;
5513 goto release_regions;
5514 }
5515
5516 ctrl_info->registers = ctrl_info->iomem_base;
5517 ctrl_info->pqi_registers = &ctrl_info->registers->pqi_registers;
5518
5519 /* Enable bus mastering. */
5520 pci_set_master(ctrl_info->pci_dev);
5521
5522 pci_set_drvdata(ctrl_info->pci_dev, ctrl_info);
5523
5524 return 0;
5525
5526 release_regions:
5527 pci_release_regions(ctrl_info->pci_dev);
5528 disable_device:
5529 pci_disable_device(ctrl_info->pci_dev);
5530
5531 return rc;
5532 }
5533
5534 static void pqi_cleanup_pci_init(struct pqi_ctrl_info *ctrl_info)
5535 {
5536 iounmap(ctrl_info->iomem_base);
5537 pci_release_regions(ctrl_info->pci_dev);
5538 pci_disable_device(ctrl_info->pci_dev);
5539 pci_set_drvdata(ctrl_info->pci_dev, NULL);
5540 }
5541
5542 static struct pqi_ctrl_info *pqi_alloc_ctrl_info(int numa_node)
5543 {
5544 struct pqi_ctrl_info *ctrl_info;
5545
5546 ctrl_info = kzalloc_node(sizeof(struct pqi_ctrl_info),
5547 GFP_KERNEL, numa_node);
5548 if (!ctrl_info)
5549 return NULL;
5550
5551 mutex_init(&ctrl_info->scan_mutex);
5552
5553 INIT_LIST_HEAD(&ctrl_info->scsi_device_list);
5554 spin_lock_init(&ctrl_info->scsi_device_list_lock);
5555
5556 INIT_WORK(&ctrl_info->event_work, pqi_event_worker);
5557 atomic_set(&ctrl_info->num_interrupts, 0);
5558
5559 INIT_DELAYED_WORK(&ctrl_info->rescan_work, pqi_rescan_worker);
5560 INIT_DELAYED_WORK(&ctrl_info->update_time_work, pqi_update_time_worker);
5561
5562 sema_init(&ctrl_info->sync_request_sem,
5563 PQI_RESERVED_IO_SLOTS_SYNCHRONOUS_REQUESTS);
5564 sema_init(&ctrl_info->lun_reset_sem, PQI_RESERVED_IO_SLOTS_LUN_RESET);
5565
5566 ctrl_info->ctrl_id = atomic_inc_return(&pqi_controller_count) - 1;
5567 ctrl_info->max_msix_vectors = PQI_MAX_MSIX_VECTORS;
5568
5569 return ctrl_info;
5570 }
5571
5572 static inline void pqi_free_ctrl_info(struct pqi_ctrl_info *ctrl_info)
5573 {
5574 kfree(ctrl_info);
5575 }
5576
5577 static void pqi_free_interrupts(struct pqi_ctrl_info *ctrl_info)
5578 {
5579 pqi_irq_unset_affinity_hint(ctrl_info);
5580 pqi_free_irqs(ctrl_info);
5581 if (ctrl_info->num_msix_vectors_enabled)
5582 pci_disable_msix(ctrl_info->pci_dev);
5583 }
5584
5585 static void pqi_free_ctrl_resources(struct pqi_ctrl_info *ctrl_info)
5586 {
5587 pqi_stop_heartbeat_timer(ctrl_info);
5588 pqi_free_interrupts(ctrl_info);
5589 if (ctrl_info->queue_memory_base)
5590 dma_free_coherent(&ctrl_info->pci_dev->dev,
5591 ctrl_info->queue_memory_length,
5592 ctrl_info->queue_memory_base,
5593 ctrl_info->queue_memory_base_dma_handle);
5594 if (ctrl_info->admin_queue_memory_base)
5595 dma_free_coherent(&ctrl_info->pci_dev->dev,
5596 ctrl_info->admin_queue_memory_length,
5597 ctrl_info->admin_queue_memory_base,
5598 ctrl_info->admin_queue_memory_base_dma_handle);
5599 pqi_free_all_io_requests(ctrl_info);
5600 if (ctrl_info->error_buffer)
5601 dma_free_coherent(&ctrl_info->pci_dev->dev,
5602 ctrl_info->error_buffer_length,
5603 ctrl_info->error_buffer,
5604 ctrl_info->error_buffer_dma_handle);
5605 if (ctrl_info->iomem_base)
5606 pqi_cleanup_pci_init(ctrl_info);
5607 pqi_free_ctrl_info(ctrl_info);
5608 }
5609
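/*
 * Controller teardown: cancel the rescan and time-update workers,
 * remove the SCSI devices and host, return the controller to SIS mode
 * if it is still in PQI mode, and release all driver resources.
 */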
5610 static void pqi_remove_ctrl(struct pqi_ctrl_info *ctrl_info)
5611 {
5612 int rc;
5613
5614 if (ctrl_info->controller_online) {
5615 cancel_delayed_work_sync(&ctrl_info->rescan_work);
5616 cancel_delayed_work_sync(&ctrl_info->update_time_work);
5617 pqi_remove_all_scsi_devices(ctrl_info);
5618 pqi_unregister_scsi(ctrl_info);
5619 ctrl_info->controller_online = false;
5620 }
5621 if (ctrl_info->pqi_mode_enabled) {
5622 sis_disable_msix(ctrl_info);
5623 rc = pqi_reset(ctrl_info);
5624 if (rc == 0)
5625 sis_reenable_sis_mode(ctrl_info);
5626 }
5627 pqi_free_ctrl_resources(ctrl_info);
5628 }
5629
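/*
 * Log which family of controller was found.  A description supplied via
 * id->driver_data wins; otherwise the branding string is chosen from the
 * PCI subsystem vendor ID (HP vs. Microsemi/Adaptec).
 */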
5630 static void pqi_print_ctrl_info(struct pci_dev *pdev,
5631 const struct pci_device_id *id)
5632 {
5633 char *ctrl_description;
5634
5635 if (id->driver_data) {
5636 ctrl_description = (char *)id->driver_data;
5637 } else {
5638 switch (id->subvendor) {
5639 case PCI_VENDOR_ID_HP:
5640 ctrl_description = hpe_branded_controller;
5641 break;
5642 case PCI_VENDOR_ID_ADAPTEC2:
5643 default:
5644 ctrl_description = microsemi_branded_controller;
5645 break;
5646 }
5647 }
5648
5649 dev_info(&pdev->dev, "%s found\n", ctrl_description);
5650 }
5651
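/*
 * Probe entry point: optionally reject wildcard ID matches, fall back to
 * NUMA node 0 when no node is known for the device, allocate the
 * controller context, then run PCI-level and controller-level
 * initialization.  Any failure funnels through pqi_remove_ctrl() to
 * unwind whatever was set up.
 */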
5652 static int pqi_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
5653 {
5654 int rc;
5655 int node;
5656 struct pqi_ctrl_info *ctrl_info;
5657
5658 pqi_print_ctrl_info(pdev, id);
5659
5660 if (pqi_disable_device_id_wildcards &&
5661 id->subvendor == PCI_ANY_ID &&
5662 id->subdevice == PCI_ANY_ID) {
5663 dev_warn(&pdev->dev,
5664 "controller not probed because device ID wildcards are disabled\n");
5665 return -ENODEV;
5666 }
5667
5668 if (id->subvendor == PCI_ANY_ID || id->subdevice == PCI_ANY_ID)
5669 dev_warn(&pdev->dev,
5670 "controller device ID matched using wildcards\n");
5671
5672 node = dev_to_node(&pdev->dev);
5673 if (node == NUMA_NO_NODE)
5674 set_dev_node(&pdev->dev, 0);
5675
5676 ctrl_info = pqi_alloc_ctrl_info(node);
5677 if (!ctrl_info) {
5678 dev_err(&pdev->dev,
5679 "failed to allocate controller info block\n");
5680 return -ENOMEM;
5681 }
5682
5683 ctrl_info->pci_dev = pdev;
5684
5685 rc = pqi_pci_init(ctrl_info);
5686 if (rc)
5687 goto error;
5688
5689 rc = pqi_ctrl_init(ctrl_info);
5690 if (rc)
5691 goto error;
5692
5693 return 0;
5694
5695 error:
5696 pqi_remove_ctrl(ctrl_info);
5697
5698 return rc;
5699 }
5700
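/* Remove path: tear down the controller bound to this PCI device. */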
5701 static void pqi_pci_remove(struct pci_dev *pdev)
5702 {
5703 struct pqi_ctrl_info *ctrl_info;
5704
5705 ctrl_info = pci_get_drvdata(pdev);
5706 if (!ctrl_info)
5707 return;
5708
5709 pqi_remove_ctrl(ctrl_info);
5710 }
5711
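/*
 * Shutdown path: ask the controller to commit its battery-backed write
 * cache to storage.  Both a missing driver context and a failed flush
 * fall through to the same warning.
 */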
5712 static void pqi_shutdown(struct pci_dev *pdev)
5713 {
5714 int rc;
5715 struct pqi_ctrl_info *ctrl_info;
5716
5717 ctrl_info = pci_get_drvdata(pdev);
5718 if (!ctrl_info)
5719 goto error;
5720
5721 /*
5722 * Write all data in the controller's battery-backed cache to
5723 * storage.
5724 */
5725 rc = pqi_flush_cache(ctrl_info);
5726 if (rc == 0)
5727 return;
5728
5729 error:
5730 dev_warn(&pdev->dev,
5731 "unable to flush controller cache\n");
5732 }
5733
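/*
 * Each entry matches on vendor, device, subsystem vendor, and subsystem
 * device.  PCI_DEVICE_SUB() is the generic kernel helper and expands
 * roughly to:
 *
 *	.vendor = (vend), .device = (dev),
 *	.subvendor = (subvend), .subdevice = (subdev)
 *
 * The final PCI_ANY_ID/PCI_ANY_ID entry is the wildcard match that the
 * disable_device_id_wildcards module parameter can veto in
 * pqi_pci_probe().
 */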
5734 /* Define the PCI IDs for the controllers that we support. */
5735 static const struct pci_device_id pqi_pci_id_table[] = {
5736 {
5737 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
5738 PCI_VENDOR_ID_ADAPTEC2, 0x0110)
5739 },
5740 {
5741 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
5742 PCI_VENDOR_ID_HP, 0x0600)
5743 },
5744 {
5745 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
5746 PCI_VENDOR_ID_HP, 0x0601)
5747 },
5748 {
5749 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
5750 PCI_VENDOR_ID_HP, 0x0602)
5751 },
5752 {
5753 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
5754 PCI_VENDOR_ID_HP, 0x0603)
5755 },
5756 {
5757 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
5758 PCI_VENDOR_ID_HP, 0x0650)
5759 },
5760 {
5761 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
5762 PCI_VENDOR_ID_HP, 0x0651)
5763 },
5764 {
5765 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
5766 PCI_VENDOR_ID_HP, 0x0652)
5767 },
5768 {
5769 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
5770 PCI_VENDOR_ID_HP, 0x0653)
5771 },
5772 {
5773 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
5774 PCI_VENDOR_ID_HP, 0x0654)
5775 },
5776 {
5777 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
5778 PCI_VENDOR_ID_HP, 0x0655)
5779 },
5780 {
5781 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
5782 PCI_VENDOR_ID_HP, 0x0700)
5783 },
5784 {
5785 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
5786 PCI_VENDOR_ID_HP, 0x0701)
5787 },
5788 {
5789 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
5790 PCI_VENDOR_ID_ADAPTEC2, 0x0800)
5791 },
5792 {
5793 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
5794 PCI_VENDOR_ID_ADAPTEC2, 0x0801)
5795 },
5796 {
5797 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
5798 PCI_VENDOR_ID_ADAPTEC2, 0x0802)
5799 },
5800 {
5801 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
5802 PCI_VENDOR_ID_ADAPTEC2, 0x0803)
5803 },
5804 {
5805 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
5806 PCI_VENDOR_ID_ADAPTEC2, 0x0804)
5807 },
5808 {
5809 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
5810 PCI_VENDOR_ID_ADAPTEC2, 0x0805)
5811 },
5812 {
5813 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
5814 PCI_VENDOR_ID_ADAPTEC2, 0x0900)
5815 },
5816 {
5817 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
5818 PCI_VENDOR_ID_ADAPTEC2, 0x0901)
5819 },
5820 {
5821 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
5822 PCI_VENDOR_ID_ADAPTEC2, 0x0902)
5823 },
5824 {
5825 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
5826 PCI_VENDOR_ID_ADAPTEC2, 0x0903)
5827 },
5828 {
5829 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
5830 PCI_VENDOR_ID_ADAPTEC2, 0x0904)
5831 },
5832 {
5833 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
5834 PCI_VENDOR_ID_ADAPTEC2, 0x0905)
5835 },
5836 {
5837 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
5838 PCI_VENDOR_ID_ADAPTEC2, 0x0906)
5839 },
5840 {
5841 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
5842 PCI_VENDOR_ID_HP, 0x1001)
5843 },
5844 {
5845 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
5846 PCI_VENDOR_ID_HP, 0x1100)
5847 },
5848 {
5849 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
5850 PCI_VENDOR_ID_HP, 0x1101)
5851 },
5852 {
5853 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
5854 PCI_VENDOR_ID_HP, 0x1102)
5855 },
5856 {
5857 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
5858 PCI_VENDOR_ID_HP, 0x1150)
5859 },
5860 {
5861 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
5862 PCI_ANY_ID, PCI_ANY_ID)
5863 },
5864 { 0 }
5865 };
5866
5867 MODULE_DEVICE_TABLE(pci, pqi_pci_id_table);
5868
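/*
 * Glue the probe/remove/shutdown callbacks and the ID table into a
 * pci_driver so the PCI core can bind this driver to matching devices.
 */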
5869 static struct pci_driver pqi_pci_driver = {
5870 .name = DRIVER_NAME_SHORT,
5871 .id_table = pqi_pci_id_table,
5872 .probe = pqi_pci_probe,
5873 .remove = pqi_pci_remove,
5874 .shutdown = pqi_shutdown,
5875 };
5876
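/*
 * Module load: attach the SAS transport template first, then register
 * the PCI driver; if registration fails, release the transport so the
 * module unloads cleanly.
 */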
5877 static int __init pqi_init(void)
5878 {
5879 int rc;
5880
5881 pr_info(DRIVER_NAME "\n");
5882
5883 pqi_sas_transport_template =
5884 sas_attach_transport(&pqi_sas_transport_functions);
5885 if (!pqi_sas_transport_template)
5886 return -ENODEV;
5887
5888 rc = pci_register_driver(&pqi_pci_driver);
5889 if (rc)
5890 sas_release_transport(pqi_sas_transport_template);
5891
5892 return rc;
5893 }
5894
5895 static void __exit pqi_cleanup(void)
5896 {
5897 pci_unregister_driver(&pqi_pci_driver);
5898 sas_release_transport(pqi_sas_transport_template);
5899 }
5900
5901 module_init(pqi_init);
5902 module_exit(pqi_cleanup);
5903
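/*
 * Never called at runtime; it exists only so the BUILD_BUG_ON()
 * compile-time assertions below verify that the structures shared with
 * the controller firmware keep the exact offsets and sizes the PQI/SIS
 * interface expects.  BUILD_BUG_ON(cond) breaks the build when cond is
 * true; for example, BUILD_BUG_ON(sizeof(struct pqi_iu_header) != 0x8)
 * fails to compile if padding ever changes the header size.
 */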
5904 static void __attribute__((unused)) verify_structures(void)
5905 {
5906 BUILD_BUG_ON(offsetof(struct pqi_ctrl_registers,
5907 sis_host_to_ctrl_doorbell) != 0x20);
5908 BUILD_BUG_ON(offsetof(struct pqi_ctrl_registers,
5909 sis_interrupt_mask) != 0x34);
5910 BUILD_BUG_ON(offsetof(struct pqi_ctrl_registers,
5911 sis_ctrl_to_host_doorbell) != 0x9c);
5912 BUILD_BUG_ON(offsetof(struct pqi_ctrl_registers,
5913 sis_ctrl_to_host_doorbell_clear) != 0xa0);
5914 BUILD_BUG_ON(offsetof(struct pqi_ctrl_registers,
5915 sis_driver_scratch) != 0xb0);
5916 BUILD_BUG_ON(offsetof(struct pqi_ctrl_registers,
5917 sis_firmware_status) != 0xbc);
5918 BUILD_BUG_ON(offsetof(struct pqi_ctrl_registers,
5919 sis_mailbox) != 0x1000);
5920 BUILD_BUG_ON(offsetof(struct pqi_ctrl_registers,
5921 pqi_registers) != 0x4000);
5922
5923 BUILD_BUG_ON(offsetof(struct pqi_iu_header,
5924 iu_type) != 0x0);
5925 BUILD_BUG_ON(offsetof(struct pqi_iu_header,
5926 iu_length) != 0x2);
5927 BUILD_BUG_ON(offsetof(struct pqi_iu_header,
5928 response_queue_id) != 0x4);
5929 BUILD_BUG_ON(offsetof(struct pqi_iu_header,
5930 work_area) != 0x6);
5931 BUILD_BUG_ON(sizeof(struct pqi_iu_header) != 0x8);
5932
5933 BUILD_BUG_ON(offsetof(struct pqi_aio_error_info,
5934 status) != 0x0);
5935 BUILD_BUG_ON(offsetof(struct pqi_aio_error_info,
5936 service_response) != 0x1);
5937 BUILD_BUG_ON(offsetof(struct pqi_aio_error_info,
5938 data_present) != 0x2);
5939 BUILD_BUG_ON(offsetof(struct pqi_aio_error_info,
5940 reserved) != 0x3);
5941 BUILD_BUG_ON(offsetof(struct pqi_aio_error_info,
5942 residual_count) != 0x4);
5943 BUILD_BUG_ON(offsetof(struct pqi_aio_error_info,
5944 data_length) != 0x8);
5945 BUILD_BUG_ON(offsetof(struct pqi_aio_error_info,
5946 reserved1) != 0xa);
5947 BUILD_BUG_ON(offsetof(struct pqi_aio_error_info,
5948 data) != 0xc);
5949 BUILD_BUG_ON(sizeof(struct pqi_aio_error_info) != 0x10c);
5950
5951 BUILD_BUG_ON(offsetof(struct pqi_raid_error_info,
5952 data_in_result) != 0x0);
5953 BUILD_BUG_ON(offsetof(struct pqi_raid_error_info,
5954 data_out_result) != 0x1);
5955 BUILD_BUG_ON(offsetof(struct pqi_raid_error_info,
5956 reserved) != 0x2);
5957 BUILD_BUG_ON(offsetof(struct pqi_raid_error_info,
5958 status) != 0x5);
5959 BUILD_BUG_ON(offsetof(struct pqi_raid_error_info,
5960 status_qualifier) != 0x6);
5961 BUILD_BUG_ON(offsetof(struct pqi_raid_error_info,
5962 sense_data_length) != 0x8);
5963 BUILD_BUG_ON(offsetof(struct pqi_raid_error_info,
5964 response_data_length) != 0xa);
5965 BUILD_BUG_ON(offsetof(struct pqi_raid_error_info,
5966 data_in_transferred) != 0xc);
5967 BUILD_BUG_ON(offsetof(struct pqi_raid_error_info,
5968 data_out_transferred) != 0x10);
5969 BUILD_BUG_ON(offsetof(struct pqi_raid_error_info,
5970 data) != 0x14);
5971 BUILD_BUG_ON(sizeof(struct pqi_raid_error_info) != 0x114);
5972
5973 BUILD_BUG_ON(offsetof(struct pqi_device_registers,
5974 signature) != 0x0);
5975 BUILD_BUG_ON(offsetof(struct pqi_device_registers,
5976 function_and_status_code) != 0x8);
5977 BUILD_BUG_ON(offsetof(struct pqi_device_registers,
5978 max_admin_iq_elements) != 0x10);
5979 BUILD_BUG_ON(offsetof(struct pqi_device_registers,
5980 max_admin_oq_elements) != 0x11);
5981 BUILD_BUG_ON(offsetof(struct pqi_device_registers,
5982 admin_iq_element_length) != 0x12);
5983 BUILD_BUG_ON(offsetof(struct pqi_device_registers,
5984 admin_oq_element_length) != 0x13);
5985 BUILD_BUG_ON(offsetof(struct pqi_device_registers,
5986 max_reset_timeout) != 0x14);
5987 BUILD_BUG_ON(offsetof(struct pqi_device_registers,
5988 legacy_intx_status) != 0x18);
5989 BUILD_BUG_ON(offsetof(struct pqi_device_registers,
5990 legacy_intx_mask_set) != 0x1c);
5991 BUILD_BUG_ON(offsetof(struct pqi_device_registers,
5992 legacy_intx_mask_clear) != 0x20);
5993 BUILD_BUG_ON(offsetof(struct pqi_device_registers,
5994 device_status) != 0x40);
5995 BUILD_BUG_ON(offsetof(struct pqi_device_registers,
5996 admin_iq_pi_offset) != 0x48);
5997 BUILD_BUG_ON(offsetof(struct pqi_device_registers,
5998 admin_oq_ci_offset) != 0x50);
5999 BUILD_BUG_ON(offsetof(struct pqi_device_registers,
6000 admin_iq_element_array_addr) != 0x58);
6001 BUILD_BUG_ON(offsetof(struct pqi_device_registers,
6002 admin_oq_element_array_addr) != 0x60);
6003 BUILD_BUG_ON(offsetof(struct pqi_device_registers,
6004 admin_iq_ci_addr) != 0x68);
6005 BUILD_BUG_ON(offsetof(struct pqi_device_registers,
6006 admin_oq_pi_addr) != 0x70);
6007 BUILD_BUG_ON(offsetof(struct pqi_device_registers,
6008 admin_iq_num_elements) != 0x78);
6009 BUILD_BUG_ON(offsetof(struct pqi_device_registers,
6010 admin_oq_num_elements) != 0x79);
6011 BUILD_BUG_ON(offsetof(struct pqi_device_registers,
6012 admin_queue_int_msg_num) != 0x7a);
6013 BUILD_BUG_ON(offsetof(struct pqi_device_registers,
6014 device_error) != 0x80);
6015 BUILD_BUG_ON(offsetof(struct pqi_device_registers,
6016 error_details) != 0x88);
6017 BUILD_BUG_ON(offsetof(struct pqi_device_registers,
6018 device_reset) != 0x90);
6019 BUILD_BUG_ON(offsetof(struct pqi_device_registers,
6020 power_action) != 0x94);
6021 BUILD_BUG_ON(sizeof(struct pqi_device_registers) != 0x100);
6022
6023 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
6024 header.iu_type) != 0);
6025 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
6026 header.iu_length) != 2);
6027 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
6028 header.work_area) != 6);
6029 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
6030 request_id) != 8);
6031 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
6032 function_code) != 10);
6033 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
6034 data.report_device_capability.buffer_length) != 44);
6035 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
6036 data.report_device_capability.sg_descriptor) != 48);
6037 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
6038 data.create_operational_iq.queue_id) != 12);
6039 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
6040 data.create_operational_iq.element_array_addr) != 16);
6041 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
6042 data.create_operational_iq.ci_addr) != 24);
6043 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
6044 data.create_operational_iq.num_elements) != 32);
6045 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
6046 data.create_operational_iq.element_length) != 34);
6047 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
6048 data.create_operational_iq.queue_protocol) != 36);
6049 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
6050 data.create_operational_oq.queue_id) != 12);
6051 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
6052 data.create_operational_oq.element_array_addr) != 16);
6053 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
6054 data.create_operational_oq.pi_addr) != 24);
6055 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
6056 data.create_operational_oq.num_elements) != 32);
6057 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
6058 data.create_operational_oq.element_length) != 34);
6059 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
6060 data.create_operational_oq.queue_protocol) != 36);
6061 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
6062 data.create_operational_oq.int_msg_num) != 40);
6063 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
6064 data.create_operational_oq.coalescing_count) != 42);
6065 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
6066 data.create_operational_oq.min_coalescing_time) != 44);
6067 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
6068 data.create_operational_oq.max_coalescing_time) != 48);
6069 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
6070 data.delete_operational_queue.queue_id) != 12);
6071 BUILD_BUG_ON(sizeof(struct pqi_general_admin_request) != 64);
6072 BUILD_BUG_ON(FIELD_SIZEOF(struct pqi_general_admin_request,
6073 data.create_operational_iq) != 64 - 11);
6074 BUILD_BUG_ON(FIELD_SIZEOF(struct pqi_general_admin_request,
6075 data.create_operational_oq) != 64 - 11);
6076 BUILD_BUG_ON(FIELD_SIZEOF(struct pqi_general_admin_request,
6077 data.delete_operational_queue) != 64 - 11);
6078
6079 BUILD_BUG_ON(offsetof(struct pqi_general_admin_response,
6080 header.iu_type) != 0);
6081 BUILD_BUG_ON(offsetof(struct pqi_general_admin_response,
6082 header.iu_length) != 2);
6083 BUILD_BUG_ON(offsetof(struct pqi_general_admin_response,
6084 header.work_area) != 6);
6085 BUILD_BUG_ON(offsetof(struct pqi_general_admin_response,
6086 request_id) != 8);
6087 BUILD_BUG_ON(offsetof(struct pqi_general_admin_response,
6088 function_code) != 10);
6089 BUILD_BUG_ON(offsetof(struct pqi_general_admin_response,
6090 status) != 11);
6091 BUILD_BUG_ON(offsetof(struct pqi_general_admin_response,
6092 data.create_operational_iq.status_descriptor) != 12);
6093 BUILD_BUG_ON(offsetof(struct pqi_general_admin_response,
6094 data.create_operational_iq.iq_pi_offset) != 16);
6095 BUILD_BUG_ON(offsetof(struct pqi_general_admin_response,
6096 data.create_operational_oq.status_descriptor) != 12);
6097 BUILD_BUG_ON(offsetof(struct pqi_general_admin_response,
6098 data.create_operational_oq.oq_ci_offset) != 16);
6099 BUILD_BUG_ON(sizeof(struct pqi_general_admin_response) != 64);
6100
6101 BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
6102 header.iu_type) != 0);
6103 BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
6104 header.iu_length) != 2);
6105 BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
6106 header.response_queue_id) != 4);
6107 BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
6108 header.work_area) != 6);
6109 BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
6110 request_id) != 8);
6111 BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
6112 nexus_id) != 10);
6113 BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
6114 buffer_length) != 12);
6115 BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
6116 lun_number) != 16);
6117 BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
6118 protocol_specific) != 24);
6119 BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
6120 error_index) != 27);
6121 BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
6122 cdb) != 32);
6123 BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
6124 sg_descriptors) != 64);
6125 BUILD_BUG_ON(sizeof(struct pqi_raid_path_request) !=
6126 PQI_OPERATIONAL_IQ_ELEMENT_LENGTH);
6127
6128 BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
6129 header.iu_type) != 0);
6130 BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
6131 header.iu_length) != 2);
6132 BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
6133 header.response_queue_id) != 4);
6134 BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
6135 header.work_area) != 6);
6136 BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
6137 request_id) != 8);
6138 BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
6139 nexus_id) != 12);
6140 BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
6141 buffer_length) != 16);
6142 BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
6143 data_encryption_key_index) != 22);
6144 BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
6145 encrypt_tweak_lower) != 24);
6146 BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
6147 encrypt_tweak_upper) != 28);
6148 BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
6149 cdb) != 32);
6150 BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
6151 error_index) != 48);
6152 BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
6153 num_sg_descriptors) != 50);
6154 BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
6155 cdb_length) != 51);
6156 BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
6157 lun_number) != 52);
6158 BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
6159 sg_descriptors) != 64);
6160 BUILD_BUG_ON(sizeof(struct pqi_aio_path_request) !=
6161 PQI_OPERATIONAL_IQ_ELEMENT_LENGTH);
6162
6163 BUILD_BUG_ON(offsetof(struct pqi_io_response,
6164 header.iu_type) != 0);
6165 BUILD_BUG_ON(offsetof(struct pqi_io_response,
6166 header.iu_length) != 2);
6167 BUILD_BUG_ON(offsetof(struct pqi_io_response,
6168 request_id) != 8);
6169 BUILD_BUG_ON(offsetof(struct pqi_io_response,
6170 error_index) != 10);
6171
6172 BUILD_BUG_ON(offsetof(struct pqi_general_management_request,
6173 header.iu_type) != 0);
6174 BUILD_BUG_ON(offsetof(struct pqi_general_management_request,
6175 header.iu_length) != 2);
6176 BUILD_BUG_ON(offsetof(struct pqi_general_management_request,
6177 header.response_queue_id) != 4);
6178 BUILD_BUG_ON(offsetof(struct pqi_general_management_request,
6179 request_id) != 8);
6180 BUILD_BUG_ON(offsetof(struct pqi_general_management_request,
6181 data.report_event_configuration.buffer_length) != 12);
6182 BUILD_BUG_ON(offsetof(struct pqi_general_management_request,
6183 data.report_event_configuration.sg_descriptors) != 16);
6184 BUILD_BUG_ON(offsetof(struct pqi_general_management_request,
6185 data.set_event_configuration.global_event_oq_id) != 10);
6186 BUILD_BUG_ON(offsetof(struct pqi_general_management_request,
6187 data.set_event_configuration.buffer_length) != 12);
6188 BUILD_BUG_ON(offsetof(struct pqi_general_management_request,
6189 data.set_event_configuration.sg_descriptors) != 16);
6190
6191 BUILD_BUG_ON(offsetof(struct pqi_iu_layer_descriptor,
6192 max_inbound_iu_length) != 6);
6193 BUILD_BUG_ON(offsetof(struct pqi_iu_layer_descriptor,
6194 max_outbound_iu_length) != 14);
6195 BUILD_BUG_ON(sizeof(struct pqi_iu_layer_descriptor) != 16);
6196
6197 BUILD_BUG_ON(offsetof(struct pqi_device_capability,
6198 data_length) != 0);
6199 BUILD_BUG_ON(offsetof(struct pqi_device_capability,
6200 iq_arbitration_priority_support_bitmask) != 8);
6201 BUILD_BUG_ON(offsetof(struct pqi_device_capability,
6202 maximum_aw_a) != 9);
6203 BUILD_BUG_ON(offsetof(struct pqi_device_capability,
6204 maximum_aw_b) != 10);
6205 BUILD_BUG_ON(offsetof(struct pqi_device_capability,
6206 maximum_aw_c) != 11);
6207 BUILD_BUG_ON(offsetof(struct pqi_device_capability,
6208 max_inbound_queues) != 16);
6209 BUILD_BUG_ON(offsetof(struct pqi_device_capability,
6210 max_elements_per_iq) != 18);
6211 BUILD_BUG_ON(offsetof(struct pqi_device_capability,
6212 max_iq_element_length) != 24);
6213 BUILD_BUG_ON(offsetof(struct pqi_device_capability,
6214 min_iq_element_length) != 26);
6215 BUILD_BUG_ON(offsetof(struct pqi_device_capability,
6216 max_outbound_queues) != 30);
6217 BUILD_BUG_ON(offsetof(struct pqi_device_capability,
6218 max_elements_per_oq) != 32);
6219 BUILD_BUG_ON(offsetof(struct pqi_device_capability,
6220 intr_coalescing_time_granularity) != 34);
6221 BUILD_BUG_ON(offsetof(struct pqi_device_capability,
6222 max_oq_element_length) != 36);
6223 BUILD_BUG_ON(offsetof(struct pqi_device_capability,
6224 min_oq_element_length) != 38);
6225 BUILD_BUG_ON(offsetof(struct pqi_device_capability,
6226 iu_layer_descriptors) != 64);
6227 BUILD_BUG_ON(sizeof(struct pqi_device_capability) != 576);
6228
6229 BUILD_BUG_ON(offsetof(struct pqi_event_descriptor,
6230 event_type) != 0);
6231 BUILD_BUG_ON(offsetof(struct pqi_event_descriptor,
6232 oq_id) != 2);
6233 BUILD_BUG_ON(sizeof(struct pqi_event_descriptor) != 4);
6234
6235 BUILD_BUG_ON(offsetof(struct pqi_event_config,
6236 num_event_descriptors) != 2);
6237 BUILD_BUG_ON(offsetof(struct pqi_event_config,
6238 descriptors) != 4);
6239
6240 BUILD_BUG_ON(offsetof(struct pqi_event_response,
6241 header.iu_type) != 0);
6242 BUILD_BUG_ON(offsetof(struct pqi_event_response,
6243 header.iu_length) != 2);
6244 BUILD_BUG_ON(offsetof(struct pqi_event_response,
6245 event_type) != 8);
6246 BUILD_BUG_ON(offsetof(struct pqi_event_response,
6247 event_id) != 10);
6248 BUILD_BUG_ON(offsetof(struct pqi_event_response,
6249 additional_event_id) != 12);
6250 BUILD_BUG_ON(offsetof(struct pqi_event_response,
6251 data) != 16);
6252 BUILD_BUG_ON(sizeof(struct pqi_event_response) != 32);
6253
6254 BUILD_BUG_ON(offsetof(struct pqi_event_acknowledge_request,
6255 header.iu_type) != 0);
6256 BUILD_BUG_ON(offsetof(struct pqi_event_acknowledge_request,
6257 header.iu_length) != 2);
6258 BUILD_BUG_ON(offsetof(struct pqi_event_acknowledge_request,
6259 event_type) != 8);
6260 BUILD_BUG_ON(offsetof(struct pqi_event_acknowledge_request,
6261 event_id) != 10);
6262 BUILD_BUG_ON(offsetof(struct pqi_event_acknowledge_request,
6263 additional_event_id) != 12);
6264 BUILD_BUG_ON(sizeof(struct pqi_event_acknowledge_request) != 16);
6265
6266 BUILD_BUG_ON(offsetof(struct pqi_task_management_request,
6267 header.iu_type) != 0);
6268 BUILD_BUG_ON(offsetof(struct pqi_task_management_request,
6269 header.iu_length) != 2);
6270 BUILD_BUG_ON(offsetof(struct pqi_task_management_request,
6271 request_id) != 8);
6272 BUILD_BUG_ON(offsetof(struct pqi_task_management_request,
6273 nexus_id) != 10);
6274 BUILD_BUG_ON(offsetof(struct pqi_task_management_request,
6275 lun_number) != 16);
6276 BUILD_BUG_ON(offsetof(struct pqi_task_management_request,
6277 protocol_specific) != 24);
6278 BUILD_BUG_ON(offsetof(struct pqi_task_management_request,
6279 outbound_queue_id_to_manage) != 26);
6280 BUILD_BUG_ON(offsetof(struct pqi_task_management_request,
6281 request_id_to_manage) != 28);
6282 BUILD_BUG_ON(offsetof(struct pqi_task_management_request,
6283 task_management_function) != 30);
6284 BUILD_BUG_ON(sizeof(struct pqi_task_management_request) != 32);
6285
6286 BUILD_BUG_ON(offsetof(struct pqi_task_management_response,
6287 header.iu_type) != 0);
6288 BUILD_BUG_ON(offsetof(struct pqi_task_management_response,
6289 header.iu_length) != 2);
6290 BUILD_BUG_ON(offsetof(struct pqi_task_management_response,
6291 request_id) != 8);
6292 BUILD_BUG_ON(offsetof(struct pqi_task_management_response,
6293 nexus_id) != 10);
6294 BUILD_BUG_ON(offsetof(struct pqi_task_management_response,
6295 additional_response_info) != 12);
6296 BUILD_BUG_ON(offsetof(struct pqi_task_management_response,
6297 response_code) != 15);
6298 BUILD_BUG_ON(sizeof(struct pqi_task_management_response) != 16);
6299
6300 BUILD_BUG_ON(offsetof(struct bmic_identify_controller,
6301 configured_logical_drive_count) != 0);
6302 BUILD_BUG_ON(offsetof(struct bmic_identify_controller,
6303 configuration_signature) != 1);
6304 BUILD_BUG_ON(offsetof(struct bmic_identify_controller,
6305 firmware_version) != 5);
6306 BUILD_BUG_ON(offsetof(struct bmic_identify_controller,
6307 extended_logical_unit_count) != 154);
6308 BUILD_BUG_ON(offsetof(struct bmic_identify_controller,
6309 firmware_build_number) != 190);
6310 BUILD_BUG_ON(offsetof(struct bmic_identify_controller,
6311 controller_mode) != 292);
6312
6313 BUILD_BUG_ON(PQI_ADMIN_IQ_NUM_ELEMENTS > 255);
6314 BUILD_BUG_ON(PQI_ADMIN_OQ_NUM_ELEMENTS > 255);
6315 BUILD_BUG_ON(PQI_ADMIN_IQ_ELEMENT_LENGTH %
6316 PQI_QUEUE_ELEMENT_LENGTH_ALIGNMENT != 0);
6317 BUILD_BUG_ON(PQI_ADMIN_OQ_ELEMENT_LENGTH %
6318 PQI_QUEUE_ELEMENT_LENGTH_ALIGNMENT != 0);
6319 BUILD_BUG_ON(PQI_OPERATIONAL_IQ_ELEMENT_LENGTH > 1048560);
6320 BUILD_BUG_ON(PQI_OPERATIONAL_IQ_ELEMENT_LENGTH %
6321 PQI_QUEUE_ELEMENT_LENGTH_ALIGNMENT != 0);
6322 BUILD_BUG_ON(PQI_OPERATIONAL_OQ_ELEMENT_LENGTH > 1048560);
6323 BUILD_BUG_ON(PQI_OPERATIONAL_OQ_ELEMENT_LENGTH %
6324 PQI_QUEUE_ELEMENT_LENGTH_ALIGNMENT != 0);
6325
6326 BUILD_BUG_ON(PQI_RESERVED_IO_SLOTS >= PQI_MAX_OUTSTANDING_REQUESTS);
6327 }