1/*
2 * driver for Microsemi PQI-based storage controllers
3 * Copyright (c) 2016 Microsemi Corporation
4 * Copyright (c) 2016 PMC-Sierra, Inc.
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; version 2 of the License.
9 *
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
13 * NON INFRINGEMENT. See the GNU General Public License for more details.
14 *
15 * Questions/Comments/Bugfixes to esc.storagedev@microsemi.com
16 *
17 */
18
19#include <linux/module.h>
20#include <linux/kernel.h>
21#include <linux/pci.h>
22#include <linux/delay.h>
23#include <linux/interrupt.h>
24#include <linux/sched.h>
25#include <linux/rtc.h>
26#include <linux/bcd.h>
27#include <linux/cciss_ioctl.h>
28#include <scsi/scsi_host.h>
29#include <scsi/scsi_cmnd.h>
30#include <scsi/scsi_device.h>
31#include <scsi/scsi_eh.h>
32#include <scsi/scsi_transport_sas.h>
33#include <asm/unaligned.h>
34#include "smartpqi.h"
35#include "smartpqi_sis.h"
36
37#if !defined(BUILD_TIMESTAMP)
38#define BUILD_TIMESTAMP
39#endif
40
 41#define DRIVER_VERSION		"0.9.13-370"
 42#define DRIVER_MAJOR		0
 43#define DRIVER_MINOR		9
 44#define DRIVER_RELEASE		13
 45#define DRIVER_REVISION		370
46
47#define DRIVER_NAME "Microsemi PQI Driver (v" DRIVER_VERSION ")"
48#define DRIVER_NAME_SHORT "smartpqi"
49
50MODULE_AUTHOR("Microsemi");
51MODULE_DESCRIPTION("Driver for Microsemi Smart Family Controller version "
52 DRIVER_VERSION);
53MODULE_SUPPORTED_DEVICE("Microsemi Smart Family Controllers");
54MODULE_VERSION(DRIVER_VERSION);
55MODULE_LICENSE("GPL");
56
57#define PQI_ENABLE_MULTI_QUEUE_SUPPORT 0
58
59static char *hpe_branded_controller = "HPE Smart Array Controller";
60static char *microsemi_branded_controller = "Microsemi Smart Family Controller";
61
62static void pqi_take_ctrl_offline(struct pqi_ctrl_info *ctrl_info);
63static int pqi_scan_scsi_devices(struct pqi_ctrl_info *ctrl_info);
64static void pqi_scan_start(struct Scsi_Host *shost);
65static void pqi_start_io(struct pqi_ctrl_info *ctrl_info,
66 struct pqi_queue_group *queue_group, enum pqi_io_path path,
67 struct pqi_io_request *io_request);
68static int pqi_submit_raid_request_synchronous(struct pqi_ctrl_info *ctrl_info,
69 struct pqi_iu_header *request, unsigned int flags,
70 struct pqi_raid_error_info *error_info, unsigned long timeout_msecs);
71static int pqi_aio_submit_io(struct pqi_ctrl_info *ctrl_info,
72 struct scsi_cmnd *scmd, u32 aio_handle, u8 *cdb,
73 unsigned int cdb_length, struct pqi_queue_group *queue_group,
74 struct pqi_encryption_info *encryption_info);
75
76/* for flags argument to pqi_submit_raid_request_synchronous() */
77#define PQI_SYNC_FLAGS_INTERRUPTABLE 0x1
78
79static struct scsi_transport_template *pqi_sas_transport_template;
80
81static atomic_t pqi_controller_count = ATOMIC_INIT(0);
82
83static int pqi_disable_device_id_wildcards;
84module_param_named(disable_device_id_wildcards,
85 pqi_disable_device_id_wildcards, int, S_IRUGO | S_IWUSR);
86MODULE_PARM_DESC(disable_device_id_wildcards,
87 "Disable device ID wildcards.");
88
89static char *raid_levels[] = {
90 "RAID-0",
91 "RAID-4",
92 "RAID-1(1+0)",
93 "RAID-5",
94 "RAID-5+1",
95 "RAID-ADG",
96 "RAID-1(ADM)",
97};
98
99static char *pqi_raid_level_to_string(u8 raid_level)
100{
101 if (raid_level < ARRAY_SIZE(raid_levels))
102 return raid_levels[raid_level];
103
104 return "";
105}
106
107#define SA_RAID_0 0
108#define SA_RAID_4 1
109#define SA_RAID_1 2 /* also used for RAID 10 */
110#define SA_RAID_5 3 /* also used for RAID 50 */
111#define SA_RAID_51 4
112#define SA_RAID_6 5 /* also used for RAID 60 */
113#define SA_RAID_ADM 6 /* also used for RAID 1+0 ADM */
114#define SA_RAID_MAX SA_RAID_ADM
115#define SA_RAID_UNKNOWN 0xff
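/* The SA_RAID_* values above double as indices into raid_levels[]. */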
116
117static inline void pqi_scsi_done(struct scsi_cmnd *scmd)
118{
119 scmd->scsi_done(scmd);
120}
121
122static inline bool pqi_scsi3addr_equal(u8 *scsi3addr1, u8 *scsi3addr2)
123{
124 return memcmp(scsi3addr1, scsi3addr2, 8) == 0;
125}
126
127static inline struct pqi_ctrl_info *shost_to_hba(struct Scsi_Host *shost)
128{
129 void *hostdata = shost_priv(shost);
130
131 return *((struct pqi_ctrl_info **)hostdata);
132}
133
134static inline bool pqi_is_logical_device(struct pqi_scsi_dev *device)
135{
136 return !device->is_physical_device;
137}
138
139static inline bool pqi_ctrl_offline(struct pqi_ctrl_info *ctrl_info)
140{
141 return !ctrl_info->controller_online;
142}
143
144static inline void pqi_check_ctrl_health(struct pqi_ctrl_info *ctrl_info)
145{
146 if (ctrl_info->controller_online)
147 if (!sis_is_firmware_running(ctrl_info))
148 pqi_take_ctrl_offline(ctrl_info);
149}
150
151static inline bool pqi_is_hba_lunid(u8 *scsi3addr)
152{
153 return pqi_scsi3addr_equal(scsi3addr, RAID_CTLR_LUNID);
154}
155
156static inline enum pqi_ctrl_mode pqi_get_ctrl_mode(
157 struct pqi_ctrl_info *ctrl_info)
158{
159 return sis_read_driver_scratch(ctrl_info);
160}
161
162static inline void pqi_save_ctrl_mode(struct pqi_ctrl_info *ctrl_info,
163 enum pqi_ctrl_mode mode)
164{
165 sis_write_driver_scratch(ctrl_info, mode);
166}
167
168#define PQI_RESCAN_WORK_INTERVAL (10 * HZ)
169
170static inline void pqi_schedule_rescan_worker(struct pqi_ctrl_info *ctrl_info)
171{
172 schedule_delayed_work(&ctrl_info->rescan_work,
173 PQI_RESCAN_WORK_INTERVAL);
174}
175
176static int pqi_map_single(struct pci_dev *pci_dev,
177 struct pqi_sg_descriptor *sg_descriptor, void *buffer,
178 size_t buffer_length, int data_direction)
179{
180 dma_addr_t bus_address;
181
182 if (!buffer || buffer_length == 0 || data_direction == PCI_DMA_NONE)
183 return 0;
184
185 bus_address = pci_map_single(pci_dev, buffer, buffer_length,
186 data_direction);
187 if (pci_dma_mapping_error(pci_dev, bus_address))
188 return -ENOMEM;
189
190 put_unaligned_le64((u64)bus_address, &sg_descriptor->address);
191 put_unaligned_le32(buffer_length, &sg_descriptor->length);
192 put_unaligned_le32(CISS_SG_LAST, &sg_descriptor->flags);
193
194 return 0;
195}
196
197static void pqi_pci_unmap(struct pci_dev *pci_dev,
198 struct pqi_sg_descriptor *descriptors, int num_descriptors,
199 int data_direction)
200{
201 int i;
202
203 if (data_direction == PCI_DMA_NONE)
204 return;
205
206 for (i = 0; i < num_descriptors; i++)
207 pci_unmap_single(pci_dev,
208 (dma_addr_t)get_unaligned_le64(&descriptors[i].address),
209 get_unaligned_le32(&descriptors[i].length),
210 data_direction);
211}
212
213static int pqi_build_raid_path_request(struct pqi_ctrl_info *ctrl_info,
214 struct pqi_raid_path_request *request, u8 cmd,
215 u8 *scsi3addr, void *buffer, size_t buffer_length,
216 u16 vpd_page, int *pci_direction)
217{
218 u8 *cdb;
219 int pci_dir;
220
221 memset(request, 0, sizeof(*request));
222
223 request->header.iu_type = PQI_REQUEST_IU_RAID_PATH_IO;
224 put_unaligned_le16(offsetof(struct pqi_raid_path_request,
225 sg_descriptors[1]) - PQI_REQUEST_HEADER_LENGTH,
226 &request->header.iu_length);
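	/*
	 * The IU length counts everything after the PQI header up to and
	 * including the first (and only) embedded SG descriptor.
	 */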
227 put_unaligned_le32(buffer_length, &request->buffer_length);
228 memcpy(request->lun_number, scsi3addr, sizeof(request->lun_number));
229 request->task_attribute = SOP_TASK_ATTRIBUTE_SIMPLE;
230 request->additional_cdb_bytes_usage = SOP_ADDITIONAL_CDB_BYTES_0;
231
232 cdb = request->cdb;
233
234 switch (cmd) {
235 case INQUIRY:
236 request->data_direction = SOP_READ_FLAG;
237 cdb[0] = INQUIRY;
238 if (vpd_page & VPD_PAGE) {
239 cdb[1] = 0x1;
240 cdb[2] = (u8)vpd_page;
241 }
242 cdb[4] = (u8)buffer_length;
243 break;
244 case CISS_REPORT_LOG:
245 case CISS_REPORT_PHYS:
246 request->data_direction = SOP_READ_FLAG;
247 cdb[0] = cmd;
248 if (cmd == CISS_REPORT_PHYS)
249 cdb[1] = CISS_REPORT_PHYS_EXTENDED;
250 else
251 cdb[1] = CISS_REPORT_LOG_EXTENDED;
252 put_unaligned_be32(buffer_length, &cdb[6]);
253 break;
254 case CISS_GET_RAID_MAP:
255 request->data_direction = SOP_READ_FLAG;
256 cdb[0] = CISS_READ;
257 cdb[1] = CISS_GET_RAID_MAP;
258 put_unaligned_be32(buffer_length, &cdb[6]);
259 break;
260 case SA_CACHE_FLUSH:
261 request->data_direction = SOP_WRITE_FLAG;
262 cdb[0] = BMIC_WRITE;
263 cdb[6] = BMIC_CACHE_FLUSH;
264 put_unaligned_be16(buffer_length, &cdb[7]);
265 break;
266 case BMIC_IDENTIFY_CONTROLLER:
267 case BMIC_IDENTIFY_PHYSICAL_DEVICE:
268 request->data_direction = SOP_READ_FLAG;
269 cdb[0] = BMIC_READ;
270 cdb[6] = cmd;
271 put_unaligned_be16(buffer_length, &cdb[7]);
272 break;
273 case BMIC_WRITE_HOST_WELLNESS:
274 request->data_direction = SOP_WRITE_FLAG;
275 cdb[0] = BMIC_WRITE;
276 cdb[6] = cmd;
277 put_unaligned_be16(buffer_length, &cdb[7]);
278 break;
279 default:
 280		dev_err(&ctrl_info->pci_dev->dev, "unknown command 0x%x\n",
281 cmd);
282 WARN_ON(cmd);
283 break;
284 }
285
286 switch (request->data_direction) {
287 case SOP_READ_FLAG:
288 pci_dir = PCI_DMA_FROMDEVICE;
289 break;
290 case SOP_WRITE_FLAG:
291 pci_dir = PCI_DMA_TODEVICE;
292 break;
293 case SOP_NO_DIRECTION_FLAG:
294 pci_dir = PCI_DMA_NONE;
295 break;
296 default:
297 pci_dir = PCI_DMA_BIDIRECTIONAL;
298 break;
299 }
300
301 *pci_direction = pci_dir;
302
303 return pqi_map_single(ctrl_info->pci_dev, &request->sg_descriptors[0],
304 buffer, buffer_length, pci_dir);
305}
306
307static struct pqi_io_request *pqi_alloc_io_request(
308 struct pqi_ctrl_info *ctrl_info)
309{
310 struct pqi_io_request *io_request;
311 u16 i = ctrl_info->next_io_request_slot; /* benignly racy */
312
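	/*
	 * The racy read above only biases which slot is tried first;
	 * atomic_inc_return() in the loop below still guarantees exclusive
	 * ownership of whichever slot is ultimately returned.
	 */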
313 while (1) {
314 io_request = &ctrl_info->io_request_pool[i];
315 if (atomic_inc_return(&io_request->refcount) == 1)
316 break;
317 atomic_dec(&io_request->refcount);
318 i = (i + 1) % ctrl_info->max_io_slots;
319 }
320
321 /* benignly racy */
322 ctrl_info->next_io_request_slot = (i + 1) % ctrl_info->max_io_slots;
323
324 io_request->scmd = NULL;
325 io_request->status = 0;
326 io_request->error_info = NULL;
327
328 return io_request;
329}
330
331static void pqi_free_io_request(struct pqi_io_request *io_request)
332{
333 atomic_dec(&io_request->refcount);
334}
335
336static int pqi_identify_controller(struct pqi_ctrl_info *ctrl_info,
337 struct bmic_identify_controller *buffer)
338{
339 int rc;
340 int pci_direction;
341 struct pqi_raid_path_request request;
342
343 rc = pqi_build_raid_path_request(ctrl_info, &request,
344 BMIC_IDENTIFY_CONTROLLER, RAID_CTLR_LUNID, buffer,
345 sizeof(*buffer), 0, &pci_direction);
346 if (rc)
347 return rc;
348
349 rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header, 0,
350 NULL, NO_TIMEOUT);
351
352 pqi_pci_unmap(ctrl_info->pci_dev, request.sg_descriptors, 1,
353 pci_direction);
354
355 return rc;
356}
357
358static int pqi_scsi_inquiry(struct pqi_ctrl_info *ctrl_info,
359 u8 *scsi3addr, u16 vpd_page, void *buffer, size_t buffer_length)
360{
361 int rc;
362 int pci_direction;
363 struct pqi_raid_path_request request;
364
365 rc = pqi_build_raid_path_request(ctrl_info, &request,
366 INQUIRY, scsi3addr, buffer, buffer_length, vpd_page,
367 &pci_direction);
368 if (rc)
369 return rc;
370
371 rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header, 0,
372 NULL, NO_TIMEOUT);
373
374 pqi_pci_unmap(ctrl_info->pci_dev, request.sg_descriptors, 1,
375 pci_direction);
376
377 return rc;
378}
379
380static int pqi_identify_physical_device(struct pqi_ctrl_info *ctrl_info,
381 struct pqi_scsi_dev *device,
382 struct bmic_identify_physical_device *buffer,
383 size_t buffer_length)
384{
385 int rc;
386 int pci_direction;
387 u16 bmic_device_index;
388 struct pqi_raid_path_request request;
389
390 rc = pqi_build_raid_path_request(ctrl_info, &request,
391 BMIC_IDENTIFY_PHYSICAL_DEVICE, RAID_CTLR_LUNID, buffer,
392 buffer_length, 0, &pci_direction);
393 if (rc)
394 return rc;
395
396 bmic_device_index = CISS_GET_DRIVE_NUMBER(device->scsi3addr);
397 request.cdb[2] = (u8)bmic_device_index;
398 request.cdb[9] = (u8)(bmic_device_index >> 8);
399
400 rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header,
401 0, NULL, NO_TIMEOUT);
402
403 pqi_pci_unmap(ctrl_info->pci_dev, request.sg_descriptors, 1,
404 pci_direction);
405
406 return rc;
407}
408
409#define SA_CACHE_FLUSH_BUFFER_LENGTH 4
410
411static int pqi_flush_cache(struct pqi_ctrl_info *ctrl_info)
412{
413 int rc;
414 struct pqi_raid_path_request request;
415 int pci_direction;
416 u8 *buffer;
417
418 /*
419 * Don't bother trying to flush the cache if the controller is
420 * locked up.
421 */
422 if (pqi_ctrl_offline(ctrl_info))
423 return -ENXIO;
424
425 buffer = kzalloc(SA_CACHE_FLUSH_BUFFER_LENGTH, GFP_KERNEL);
426 if (!buffer)
427 return -ENOMEM;
428
429 rc = pqi_build_raid_path_request(ctrl_info, &request,
430 SA_CACHE_FLUSH, RAID_CTLR_LUNID, buffer,
431 SA_CACHE_FLUSH_BUFFER_LENGTH, 0, &pci_direction);
432 if (rc)
433 goto out;
434
435 rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header,
 436		0, NULL, NO_TIMEOUT);
437
438 pqi_pci_unmap(ctrl_info->pci_dev, request.sg_descriptors, 1,
439 pci_direction);
440
441out:
442 kfree(buffer);
443
444 return rc;
445}
446
447static int pqi_write_host_wellness(struct pqi_ctrl_info *ctrl_info,
448 void *buffer, size_t buffer_length)
449{
450 int rc;
451 struct pqi_raid_path_request request;
452 int pci_direction;
453
454 rc = pqi_build_raid_path_request(ctrl_info, &request,
455 BMIC_WRITE_HOST_WELLNESS, RAID_CTLR_LUNID, buffer,
456 buffer_length, 0, &pci_direction);
457 if (rc)
458 return rc;
459
460 rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header,
461 0, NULL, NO_TIMEOUT);
462
463 pqi_pci_unmap(ctrl_info->pci_dev, request.sg_descriptors, 1,
464 pci_direction);
465
466 return rc;
467}
468
469#pragma pack(1)
470
471struct bmic_host_wellness_driver_version {
472 u8 start_tag[4];
473 u8 driver_version_tag[2];
474 __le16 driver_version_length;
475 char driver_version[32];
476 u8 end_tag[2];
477};
478
479#pragma pack()
480
481static int pqi_write_driver_version_to_host_wellness(
482 struct pqi_ctrl_info *ctrl_info)
483{
484 int rc;
485 struct bmic_host_wellness_driver_version *buffer;
486 size_t buffer_length;
487
488 buffer_length = sizeof(*buffer);
489
490 buffer = kmalloc(buffer_length, GFP_KERNEL);
491 if (!buffer)
492 return -ENOMEM;
493
494 buffer->start_tag[0] = '<';
495 buffer->start_tag[1] = 'H';
496 buffer->start_tag[2] = 'W';
497 buffer->start_tag[3] = '>';
498 buffer->driver_version_tag[0] = 'D';
499 buffer->driver_version_tag[1] = 'V';
500 put_unaligned_le16(sizeof(buffer->driver_version),
501 &buffer->driver_version_length);
502 strncpy(buffer->driver_version, DRIVER_VERSION,
503 sizeof(buffer->driver_version) - 1);
504 buffer->driver_version[sizeof(buffer->driver_version) - 1] = '\0';
505 buffer->end_tag[0] = 'Z';
506 buffer->end_tag[1] = 'Z';
507
508 rc = pqi_write_host_wellness(ctrl_info, buffer, buffer_length);
509
510 kfree(buffer);
511
512 return rc;
513}
514
515#pragma pack(1)
516
517struct bmic_host_wellness_time {
518 u8 start_tag[4];
519 u8 time_tag[2];
520 __le16 time_length;
521 u8 time[8];
522 u8 dont_write_tag[2];
523 u8 end_tag[2];
524};
525
526#pragma pack()
527
528static int pqi_write_current_time_to_host_wellness(
529 struct pqi_ctrl_info *ctrl_info)
530{
531 int rc;
532 struct bmic_host_wellness_time *buffer;
533 size_t buffer_length;
534 time64_t local_time;
535 unsigned int year;
536 struct timeval time;
537 struct rtc_time tm;
538
539 buffer_length = sizeof(*buffer);
540
541 buffer = kmalloc(buffer_length, GFP_KERNEL);
542 if (!buffer)
543 return -ENOMEM;
544
545 buffer->start_tag[0] = '<';
546 buffer->start_tag[1] = 'H';
547 buffer->start_tag[2] = 'W';
548 buffer->start_tag[3] = '>';
549 buffer->time_tag[0] = 'T';
550 buffer->time_tag[1] = 'D';
551 put_unaligned_le16(sizeof(buffer->time),
552 &buffer->time_length);
553
554 do_gettimeofday(&time);
555 local_time = time.tv_sec - (sys_tz.tz_minuteswest * 60);
556 rtc_time64_to_tm(local_time, &tm);
557 year = tm.tm_year + 1900;
558
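	/* time[] layout: BCD hour, minute, second, reserved 0, month, day, century, year. */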
559 buffer->time[0] = bin2bcd(tm.tm_hour);
560 buffer->time[1] = bin2bcd(tm.tm_min);
561 buffer->time[2] = bin2bcd(tm.tm_sec);
562 buffer->time[3] = 0;
563 buffer->time[4] = bin2bcd(tm.tm_mon + 1);
564 buffer->time[5] = bin2bcd(tm.tm_mday);
565 buffer->time[6] = bin2bcd(year / 100);
566 buffer->time[7] = bin2bcd(year % 100);
567
568 buffer->dont_write_tag[0] = 'D';
569 buffer->dont_write_tag[1] = 'W';
570 buffer->end_tag[0] = 'Z';
571 buffer->end_tag[1] = 'Z';
572
573 rc = pqi_write_host_wellness(ctrl_info, buffer, buffer_length);
574
575 kfree(buffer);
576
577 return rc;
578}
579
580#define PQI_UPDATE_TIME_WORK_INTERVAL (24UL * 60 * 60 * HZ)
581
582static void pqi_update_time_worker(struct work_struct *work)
583{
584 int rc;
585 struct pqi_ctrl_info *ctrl_info;
586
587 ctrl_info = container_of(to_delayed_work(work), struct pqi_ctrl_info,
588 update_time_work);
589
590 rc = pqi_write_current_time_to_host_wellness(ctrl_info);
591 if (rc)
592 dev_warn(&ctrl_info->pci_dev->dev,
593 "error updating time on controller\n");
594
595 schedule_delayed_work(&ctrl_info->update_time_work,
596 PQI_UPDATE_TIME_WORK_INTERVAL);
597}
598
599static inline void pqi_schedule_update_time_worker(
 600	struct pqi_ctrl_info *ctrl_info)
 601{
 602	schedule_delayed_work(&ctrl_info->update_time_work, 0);
603}
604
605static int pqi_report_luns(struct pqi_ctrl_info *ctrl_info, u8 cmd,
606 void *buffer, size_t buffer_length)
607{
608 int rc;
609 int pci_direction;
610 struct pqi_raid_path_request request;
611
612 rc = pqi_build_raid_path_request(ctrl_info, &request,
613 cmd, RAID_CTLR_LUNID, buffer, buffer_length, 0, &pci_direction);
614 if (rc)
615 return rc;
616
617 rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header, 0,
618 NULL, NO_TIMEOUT);
619
620 pqi_pci_unmap(ctrl_info->pci_dev, request.sg_descriptors, 1,
621 pci_direction);
622
623 return rc;
624}
625
626static int pqi_report_phys_logical_luns(struct pqi_ctrl_info *ctrl_info, u8 cmd,
627 void **buffer)
628{
629 int rc;
630 size_t lun_list_length;
631 size_t lun_data_length;
632 size_t new_lun_list_length;
633 void *lun_data = NULL;
634 struct report_lun_header *report_lun_header;
635
636 report_lun_header = kmalloc(sizeof(*report_lun_header), GFP_KERNEL);
637 if (!report_lun_header) {
638 rc = -ENOMEM;
639 goto out;
640 }
641
642 rc = pqi_report_luns(ctrl_info, cmd, report_lun_header,
643 sizeof(*report_lun_header));
644 if (rc)
645 goto out;
646
647 lun_list_length = get_unaligned_be32(&report_lun_header->list_length);
648
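	/*
	 * Allocate room for the length reported above, then re-issue the
	 * request; if the list grew in the meantime, loop back and retry
	 * with the larger size.
	 */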
649again:
650 lun_data_length = sizeof(struct report_lun_header) + lun_list_length;
651
652 lun_data = kmalloc(lun_data_length, GFP_KERNEL);
653 if (!lun_data) {
654 rc = -ENOMEM;
655 goto out;
656 }
657
658 if (lun_list_length == 0) {
659 memcpy(lun_data, report_lun_header, sizeof(*report_lun_header));
660 goto out;
661 }
662
663 rc = pqi_report_luns(ctrl_info, cmd, lun_data, lun_data_length);
664 if (rc)
665 goto out;
666
667 new_lun_list_length = get_unaligned_be32(
668 &((struct report_lun_header *)lun_data)->list_length);
669
670 if (new_lun_list_length > lun_list_length) {
671 lun_list_length = new_lun_list_length;
672 kfree(lun_data);
673 goto again;
674 }
675
676out:
677 kfree(report_lun_header);
678
679 if (rc) {
680 kfree(lun_data);
681 lun_data = NULL;
682 }
683
684 *buffer = lun_data;
685
686 return rc;
687}
688
689static inline int pqi_report_phys_luns(struct pqi_ctrl_info *ctrl_info,
690 void **buffer)
691{
692 return pqi_report_phys_logical_luns(ctrl_info, CISS_REPORT_PHYS,
693 buffer);
694}
695
696static inline int pqi_report_logical_luns(struct pqi_ctrl_info *ctrl_info,
697 void **buffer)
698{
699 return pqi_report_phys_logical_luns(ctrl_info, CISS_REPORT_LOG, buffer);
700}
701
702static int pqi_get_device_lists(struct pqi_ctrl_info *ctrl_info,
703 struct report_phys_lun_extended **physdev_list,
704 struct report_log_lun_extended **logdev_list)
705{
706 int rc;
707 size_t logdev_list_length;
708 size_t logdev_data_length;
709 struct report_log_lun_extended *internal_logdev_list;
710 struct report_log_lun_extended *logdev_data;
711 struct report_lun_header report_lun_header;
712
713 rc = pqi_report_phys_luns(ctrl_info, (void **)physdev_list);
714 if (rc)
715 dev_err(&ctrl_info->pci_dev->dev,
716 "report physical LUNs failed\n");
717
718 rc = pqi_report_logical_luns(ctrl_info, (void **)logdev_list);
719 if (rc)
720 dev_err(&ctrl_info->pci_dev->dev,
721 "report logical LUNs failed\n");
722
723 /*
724 * Tack the controller itself onto the end of the logical device list.
725 */
726
727 logdev_data = *logdev_list;
728
729 if (logdev_data) {
730 logdev_list_length =
731 get_unaligned_be32(&logdev_data->header.list_length);
732 } else {
733 memset(&report_lun_header, 0, sizeof(report_lun_header));
734 logdev_data =
735 (struct report_log_lun_extended *)&report_lun_header;
736 logdev_list_length = 0;
737 }
738
739 logdev_data_length = sizeof(struct report_lun_header) +
740 logdev_list_length;
741
742 internal_logdev_list = kmalloc(logdev_data_length +
743 sizeof(struct report_log_lun_extended), GFP_KERNEL);
744 if (!internal_logdev_list) {
745 kfree(*logdev_list);
746 *logdev_list = NULL;
747 return -ENOMEM;
748 }
749
750 memcpy(internal_logdev_list, logdev_data, logdev_data_length);
751 memset((u8 *)internal_logdev_list + logdev_data_length, 0,
752 sizeof(struct report_log_lun_extended_entry));
753 put_unaligned_be32(logdev_list_length +
754 sizeof(struct report_log_lun_extended_entry),
755 &internal_logdev_list->header.list_length);
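	/*
	 * The zeroed entry appended above stands in for the controller
	 * itself; its all-zero LUN ID matches RAID_CTLR_LUNID.
	 */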
756
757 kfree(*logdev_list);
758 *logdev_list = internal_logdev_list;
759
760 return 0;
761}
762
763static inline void pqi_set_bus_target_lun(struct pqi_scsi_dev *device,
764 int bus, int target, int lun)
765{
766 device->bus = bus;
767 device->target = target;
768 device->lun = lun;
769}
770
771static void pqi_assign_bus_target_lun(struct pqi_scsi_dev *device)
772{
773 u8 *scsi3addr;
774 u32 lunid;
775
776 scsi3addr = device->scsi3addr;
777 lunid = get_unaligned_le32(scsi3addr);
778
779 if (pqi_is_hba_lunid(scsi3addr)) {
780 /* The specified device is the controller. */
781 pqi_set_bus_target_lun(device, PQI_HBA_BUS, 0, lunid & 0x3fff);
782 device->target_lun_valid = true;
783 return;
784 }
785
786 if (pqi_is_logical_device(device)) {
787 pqi_set_bus_target_lun(device, PQI_RAID_VOLUME_BUS, 0,
788 lunid & 0x3fff);
789 device->target_lun_valid = true;
790 return;
791 }
792
793 /*
794 * Defer target and LUN assignment for non-controller physical devices
795 * because the SAS transport layer will make these assignments later.
796 */
797 pqi_set_bus_target_lun(device, PQI_PHYSICAL_DEVICE_BUS, 0, 0);
798}
799
800static void pqi_get_raid_level(struct pqi_ctrl_info *ctrl_info,
801 struct pqi_scsi_dev *device)
802{
803 int rc;
804 u8 raid_level;
805 u8 *buffer;
806
807 raid_level = SA_RAID_UNKNOWN;
808
809 buffer = kmalloc(64, GFP_KERNEL);
810 if (buffer) {
811 rc = pqi_scsi_inquiry(ctrl_info, device->scsi3addr,
812 VPD_PAGE | CISS_VPD_LV_DEVICE_GEOMETRY, buffer, 64);
813 if (rc == 0) {
814 raid_level = buffer[8];
815 if (raid_level > SA_RAID_MAX)
816 raid_level = SA_RAID_UNKNOWN;
817 }
818 kfree(buffer);
819 }
820
821 device->raid_level = raid_level;
822}
823
824static int pqi_validate_raid_map(struct pqi_ctrl_info *ctrl_info,
825 struct pqi_scsi_dev *device, struct raid_map *raid_map)
826{
827 char *err_msg;
828 u32 raid_map_size;
829 u32 r5or6_blocks_per_row;
830 unsigned int num_phys_disks;
831 unsigned int num_raid_map_entries;
832
833 raid_map_size = get_unaligned_le32(&raid_map->structure_size);
834
835 if (raid_map_size < offsetof(struct raid_map, disk_data)) {
836 err_msg = "RAID map too small";
837 goto bad_raid_map;
838 }
839
840 if (raid_map_size > sizeof(*raid_map)) {
841 err_msg = "RAID map too large";
842 goto bad_raid_map;
843 }
844
845 num_phys_disks = get_unaligned_le16(&raid_map->layout_map_count) *
846 (get_unaligned_le16(&raid_map->data_disks_per_row) +
847 get_unaligned_le16(&raid_map->metadata_disks_per_row));
848 num_raid_map_entries = num_phys_disks *
849 get_unaligned_le16(&raid_map->row_cnt);
850
851 if (num_raid_map_entries > RAID_MAP_MAX_ENTRIES) {
852 err_msg = "invalid number of map entries in RAID map";
853 goto bad_raid_map;
854 }
855
856 if (device->raid_level == SA_RAID_1) {
857 if (get_unaligned_le16(&raid_map->layout_map_count) != 2) {
858 err_msg = "invalid RAID-1 map";
859 goto bad_raid_map;
860 }
861 } else if (device->raid_level == SA_RAID_ADM) {
862 if (get_unaligned_le16(&raid_map->layout_map_count) != 3) {
863 err_msg = "invalid RAID-1(ADM) map";
864 goto bad_raid_map;
865 }
866 } else if ((device->raid_level == SA_RAID_5 ||
867 device->raid_level == SA_RAID_6) &&
868 get_unaligned_le16(&raid_map->layout_map_count) > 1) {
869 /* RAID 50/60 */
870 r5or6_blocks_per_row =
871 get_unaligned_le16(&raid_map->strip_size) *
872 get_unaligned_le16(&raid_map->data_disks_per_row);
873 if (r5or6_blocks_per_row == 0) {
874 err_msg = "invalid RAID-5 or RAID-6 map";
875 goto bad_raid_map;
876 }
877 }
878
879 return 0;
880
881bad_raid_map:
882 dev_warn(&ctrl_info->pci_dev->dev, "%s\n", err_msg);
883
884 return -EINVAL;
885}
886
887static int pqi_get_raid_map(struct pqi_ctrl_info *ctrl_info,
888 struct pqi_scsi_dev *device)
889{
890 int rc;
891 int pci_direction;
892 struct pqi_raid_path_request request;
893 struct raid_map *raid_map;
894
895 raid_map = kmalloc(sizeof(*raid_map), GFP_KERNEL);
896 if (!raid_map)
897 return -ENOMEM;
898
899 rc = pqi_build_raid_path_request(ctrl_info, &request,
900 CISS_GET_RAID_MAP, device->scsi3addr, raid_map,
901 sizeof(*raid_map), 0, &pci_direction);
902 if (rc)
903 goto error;
904
905 rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header, 0,
906 NULL, NO_TIMEOUT);
907
908 pqi_pci_unmap(ctrl_info->pci_dev, request.sg_descriptors, 1,
909 pci_direction);
910
911 if (rc)
912 goto error;
913
914 rc = pqi_validate_raid_map(ctrl_info, device, raid_map);
915 if (rc)
916 goto error;
917
918 device->raid_map = raid_map;
919
920 return 0;
921
922error:
923 kfree(raid_map);
924
925 return rc;
926}
927
928static void pqi_get_offload_status(struct pqi_ctrl_info *ctrl_info,
929 struct pqi_scsi_dev *device)
930{
931 int rc;
932 u8 *buffer;
933 u8 offload_status;
934
935 buffer = kmalloc(64, GFP_KERNEL);
936 if (!buffer)
937 return;
938
939 rc = pqi_scsi_inquiry(ctrl_info, device->scsi3addr,
940 VPD_PAGE | CISS_VPD_LV_OFFLOAD_STATUS, buffer, 64);
941 if (rc)
942 goto out;
943
944#define OFFLOAD_STATUS_BYTE 4
945#define OFFLOAD_CONFIGURED_BIT 0x1
946#define OFFLOAD_ENABLED_BIT 0x2
947
948 offload_status = buffer[OFFLOAD_STATUS_BYTE];
949 device->offload_configured =
950 !!(offload_status & OFFLOAD_CONFIGURED_BIT);
951 if (device->offload_configured) {
952 device->offload_enabled_pending =
953 !!(offload_status & OFFLOAD_ENABLED_BIT);
954 if (pqi_get_raid_map(ctrl_info, device))
955 device->offload_enabled_pending = false;
956 }
957
958out:
959 kfree(buffer);
960}
961
962/*
963 * Use vendor-specific VPD to determine online/offline status of a volume.
964 */
965
966static void pqi_get_volume_status(struct pqi_ctrl_info *ctrl_info,
967 struct pqi_scsi_dev *device)
968{
969 int rc;
970 size_t page_length;
971 u8 volume_status = CISS_LV_STATUS_UNAVAILABLE;
972 bool volume_offline = true;
973 u32 volume_flags;
974 struct ciss_vpd_logical_volume_status *vpd;
975
976 vpd = kmalloc(sizeof(*vpd), GFP_KERNEL);
977 if (!vpd)
978 goto no_buffer;
979
980 rc = pqi_scsi_inquiry(ctrl_info, device->scsi3addr,
981 VPD_PAGE | CISS_VPD_LV_STATUS, vpd, sizeof(*vpd));
982 if (rc)
983 goto out;
984
985 page_length = offsetof(struct ciss_vpd_logical_volume_status,
986 volume_status) + vpd->page_length;
987 if (page_length < sizeof(*vpd))
988 goto out;
989
990 volume_status = vpd->volume_status;
991 volume_flags = get_unaligned_be32(&vpd->flags);
992 volume_offline = (volume_flags & CISS_LV_FLAGS_NO_HOST_IO) != 0;
993
994out:
995 kfree(vpd);
996no_buffer:
997 device->volume_status = volume_status;
998 device->volume_offline = volume_offline;
999}
1000
1001static int pqi_get_device_info(struct pqi_ctrl_info *ctrl_info,
1002 struct pqi_scsi_dev *device)
1003{
1004 int rc;
1005 u8 *buffer;
1006
1007 buffer = kmalloc(64, GFP_KERNEL);
1008 if (!buffer)
1009 return -ENOMEM;
1010
1011 /* Send an inquiry to the device to see what it is. */
1012 rc = pqi_scsi_inquiry(ctrl_info, device->scsi3addr, 0, buffer, 64);
1013 if (rc)
1014 goto out;
1015
1016 scsi_sanitize_inquiry_string(&buffer[8], 8);
1017 scsi_sanitize_inquiry_string(&buffer[16], 16);
1018
1019 device->devtype = buffer[0] & 0x1f;
1020 memcpy(device->vendor, &buffer[8],
1021 sizeof(device->vendor));
1022 memcpy(device->model, &buffer[16],
1023 sizeof(device->model));
1024
1025 if (pqi_is_logical_device(device) && device->devtype == TYPE_DISK) {
1026 pqi_get_raid_level(ctrl_info, device);
1027 pqi_get_offload_status(ctrl_info, device);
1028 pqi_get_volume_status(ctrl_info, device);
1029 }
1030
1031out:
1032 kfree(buffer);
1033
1034 return rc;
1035}
1036
1037static void pqi_get_physical_disk_info(struct pqi_ctrl_info *ctrl_info,
1038 struct pqi_scsi_dev *device,
1039 struct bmic_identify_physical_device *id_phys)
1040{
1041 int rc;
1042
1043 memset(id_phys, 0, sizeof(*id_phys));
1044
1045 rc = pqi_identify_physical_device(ctrl_info, device,
1046 id_phys, sizeof(*id_phys));
1047 if (rc) {
1048 device->queue_depth = PQI_PHYSICAL_DISK_DEFAULT_MAX_QUEUE_DEPTH;
1049 return;
1050 }
1051
1052 device->queue_depth =
1053 get_unaligned_le16(&id_phys->current_queue_depth_limit);
1054 device->device_type = id_phys->device_type;
1055 device->active_path_index = id_phys->active_path_number;
1056 device->path_map = id_phys->redundant_path_present_map;
1057 memcpy(&device->box,
1058 &id_phys->alternate_paths_phys_box_on_port,
1059 sizeof(device->box));
1060 memcpy(&device->phys_connector,
1061 &id_phys->alternate_paths_phys_connector,
1062 sizeof(device->phys_connector));
1063 device->bay = id_phys->phys_bay_in_box;
1064}
1065
1066static void pqi_show_volume_status(struct pqi_ctrl_info *ctrl_info,
1067 struct pqi_scsi_dev *device)
1068{
1069 char *status;
1070 static const char unknown_state_str[] =
1071 "Volume is in an unknown state (%u)";
1072 char unknown_state_buffer[sizeof(unknown_state_str) + 10];
1073
1074 switch (device->volume_status) {
1075 case CISS_LV_OK:
1076 status = "Volume online";
1077 break;
1078 case CISS_LV_FAILED:
1079 status = "Volume failed";
1080 break;
1081 case CISS_LV_NOT_CONFIGURED:
1082 status = "Volume not configured";
1083 break;
1084 case CISS_LV_DEGRADED:
1085 status = "Volume degraded";
1086 break;
1087 case CISS_LV_READY_FOR_RECOVERY:
1088 status = "Volume ready for recovery operation";
1089 break;
1090 case CISS_LV_UNDERGOING_RECOVERY:
1091 status = "Volume undergoing recovery";
1092 break;
1093 case CISS_LV_WRONG_PHYSICAL_DRIVE_REPLACED:
1094 status = "Wrong physical drive was replaced";
1095 break;
1096 case CISS_LV_PHYSICAL_DRIVE_CONNECTION_PROBLEM:
1097 status = "A physical drive not properly connected";
1098 break;
1099 case CISS_LV_HARDWARE_OVERHEATING:
1100 status = "Hardware is overheating";
1101 break;
1102 case CISS_LV_HARDWARE_HAS_OVERHEATED:
1103 status = "Hardware has overheated";
1104 break;
1105 case CISS_LV_UNDERGOING_EXPANSION:
1106 status = "Volume undergoing expansion";
1107 break;
1108 case CISS_LV_NOT_AVAILABLE:
1109 status = "Volume waiting for transforming volume";
1110 break;
1111 case CISS_LV_QUEUED_FOR_EXPANSION:
1112 status = "Volume queued for expansion";
1113 break;
1114 case CISS_LV_DISABLED_SCSI_ID_CONFLICT:
1115 status = "Volume disabled due to SCSI ID conflict";
1116 break;
1117 case CISS_LV_EJECTED:
1118 status = "Volume has been ejected";
1119 break;
1120 case CISS_LV_UNDERGOING_ERASE:
1121 status = "Volume undergoing background erase";
1122 break;
1123 case CISS_LV_READY_FOR_PREDICTIVE_SPARE_REBUILD:
1124 status = "Volume ready for predictive spare rebuild";
1125 break;
1126 case CISS_LV_UNDERGOING_RPI:
1127 status = "Volume undergoing rapid parity initialization";
1128 break;
1129 case CISS_LV_PENDING_RPI:
1130 status = "Volume queued for rapid parity initialization";
1131 break;
1132 case CISS_LV_ENCRYPTED_NO_KEY:
1133 status = "Encrypted volume inaccessible - key not present";
1134 break;
1135 case CISS_LV_UNDERGOING_ENCRYPTION:
1136 status = "Volume undergoing encryption process";
1137 break;
1138 case CISS_LV_UNDERGOING_ENCRYPTION_REKEYING:
1139 status = "Volume undergoing encryption re-keying process";
1140 break;
1141 case CISS_LV_ENCRYPTED_IN_NON_ENCRYPTED_CONTROLLER:
1142 status =
1143 "Encrypted volume inaccessible - disabled on ctrl";
1144 break;
1145 case CISS_LV_PENDING_ENCRYPTION:
1146 status = "Volume pending migration to encrypted state";
1147 break;
1148 case CISS_LV_PENDING_ENCRYPTION_REKEYING:
1149 status = "Volume pending encryption rekeying";
1150 break;
1151 case CISS_LV_NOT_SUPPORTED:
1152 status = "Volume not supported on this controller";
1153 break;
1154 case CISS_LV_STATUS_UNAVAILABLE:
1155 status = "Volume status not available";
1156 break;
1157 default:
1158 snprintf(unknown_state_buffer, sizeof(unknown_state_buffer),
1159 unknown_state_str, device->volume_status);
1160 status = unknown_state_buffer;
1161 break;
1162 }
1163
1164 dev_info(&ctrl_info->pci_dev->dev,
1165 "scsi %d:%d:%d:%d %s\n",
1166 ctrl_info->scsi_host->host_no,
1167 device->bus, device->target, device->lun, status);
1168}
1169
1170static struct pqi_scsi_dev *pqi_find_disk_by_aio_handle(
1171 struct pqi_ctrl_info *ctrl_info, u32 aio_handle)
1172{
1173 struct pqi_scsi_dev *device;
1174
1175 list_for_each_entry(device, &ctrl_info->scsi_device_list,
1176 scsi_device_list_entry) {
1177 if (device->devtype != TYPE_DISK && device->devtype != TYPE_ZBC)
1178 continue;
1179 if (pqi_is_logical_device(device))
1180 continue;
1181 if (device->aio_handle == aio_handle)
1182 return device;
1183 }
1184
1185 return NULL;
1186}
1187
1188static void pqi_update_logical_drive_queue_depth(
1189 struct pqi_ctrl_info *ctrl_info, struct pqi_scsi_dev *logical_drive)
1190{
1191 unsigned int i;
1192 struct raid_map *raid_map;
1193 struct raid_map_disk_data *disk_data;
1194 struct pqi_scsi_dev *phys_disk;
1195 unsigned int num_phys_disks;
1196 unsigned int num_raid_map_entries;
1197 unsigned int queue_depth;
1198
1199 logical_drive->queue_depth = PQI_LOGICAL_DRIVE_DEFAULT_MAX_QUEUE_DEPTH;
1200
1201 raid_map = logical_drive->raid_map;
1202 if (!raid_map)
1203 return;
1204
1205 disk_data = raid_map->disk_data;
1206 num_phys_disks = get_unaligned_le16(&raid_map->layout_map_count) *
1207 (get_unaligned_le16(&raid_map->data_disks_per_row) +
1208 get_unaligned_le16(&raid_map->metadata_disks_per_row));
1209 num_raid_map_entries = num_phys_disks *
1210 get_unaligned_le16(&raid_map->row_cnt);
1211
1212 queue_depth = 0;
1213 for (i = 0; i < num_raid_map_entries; i++) {
1214 phys_disk = pqi_find_disk_by_aio_handle(ctrl_info,
1215 disk_data[i].aio_handle);
1216
1217 if (!phys_disk) {
1218 dev_warn(&ctrl_info->pci_dev->dev,
1219 "failed to find physical disk for logical drive %016llx\n",
1220 get_unaligned_be64(logical_drive->scsi3addr));
1221 logical_drive->offload_enabled = false;
1222 logical_drive->offload_enabled_pending = false;
1223 kfree(raid_map);
1224 logical_drive->raid_map = NULL;
1225 return;
1226 }
1227
1228 queue_depth += phys_disk->queue_depth;
1229 }
1230
1231 logical_drive->queue_depth = queue_depth;
1232}
1233
1234static void pqi_update_all_logical_drive_queue_depths(
1235 struct pqi_ctrl_info *ctrl_info)
1236{
1237 struct pqi_scsi_dev *device;
1238
1239 list_for_each_entry(device, &ctrl_info->scsi_device_list,
1240 scsi_device_list_entry) {
1241 if (device->devtype != TYPE_DISK && device->devtype != TYPE_ZBC)
1242 continue;
1243 if (!pqi_is_logical_device(device))
1244 continue;
1245 pqi_update_logical_drive_queue_depth(ctrl_info, device);
1246 }
1247}
1248
1249static void pqi_rescan_worker(struct work_struct *work)
1250{
1251 struct pqi_ctrl_info *ctrl_info;
1252
1253 ctrl_info = container_of(to_delayed_work(work), struct pqi_ctrl_info,
1254 rescan_work);
1255
1256 pqi_scan_scsi_devices(ctrl_info);
1257}
1258
1259static int pqi_add_device(struct pqi_ctrl_info *ctrl_info,
1260 struct pqi_scsi_dev *device)
1261{
1262 int rc;
1263
1264 if (pqi_is_logical_device(device))
1265 rc = scsi_add_device(ctrl_info->scsi_host, device->bus,
1266 device->target, device->lun);
1267 else
1268 rc = pqi_add_sas_device(ctrl_info->sas_host, device);
1269
1270 return rc;
1271}
1272
1273static inline void pqi_remove_device(struct pqi_ctrl_info *ctrl_info,
1274 struct pqi_scsi_dev *device)
1275{
1276 if (pqi_is_logical_device(device))
1277 scsi_remove_device(device->sdev);
1278 else
1279 pqi_remove_sas_device(device);
1280}
1281
1282/* Assumes the SCSI device list lock is held. */
1283
1284static struct pqi_scsi_dev *pqi_find_scsi_dev(struct pqi_ctrl_info *ctrl_info,
1285 int bus, int target, int lun)
1286{
1287 struct pqi_scsi_dev *device;
1288
1289 list_for_each_entry(device, &ctrl_info->scsi_device_list,
1290 scsi_device_list_entry)
1291 if (device->bus == bus && device->target == target &&
1292 device->lun == lun)
1293 return device;
1294
1295 return NULL;
1296}
1297
1298static inline bool pqi_device_equal(struct pqi_scsi_dev *dev1,
1299 struct pqi_scsi_dev *dev2)
1300{
1301 if (dev1->is_physical_device != dev2->is_physical_device)
1302 return false;
1303
1304 if (dev1->is_physical_device)
1305 return dev1->wwid == dev2->wwid;
1306
1307 return memcmp(dev1->volume_id, dev2->volume_id,
1308 sizeof(dev1->volume_id)) == 0;
1309}
1310
1311enum pqi_find_result {
1312 DEVICE_NOT_FOUND,
1313 DEVICE_CHANGED,
1314 DEVICE_SAME,
1315};
1316
1317static enum pqi_find_result pqi_scsi_find_entry(struct pqi_ctrl_info *ctrl_info,
1318 struct pqi_scsi_dev *device_to_find,
1319 struct pqi_scsi_dev **matching_device)
1320{
1321 struct pqi_scsi_dev *device;
1322
1323 list_for_each_entry(device, &ctrl_info->scsi_device_list,
1324 scsi_device_list_entry) {
1325 if (pqi_scsi3addr_equal(device_to_find->scsi3addr,
1326 device->scsi3addr)) {
1327 *matching_device = device;
1328 if (pqi_device_equal(device_to_find, device)) {
1329 if (device_to_find->volume_offline)
1330 return DEVICE_CHANGED;
1331 return DEVICE_SAME;
1332 }
1333 return DEVICE_CHANGED;
1334 }
1335 }
1336
1337 return DEVICE_NOT_FOUND;
1338}
1339
1340static void pqi_dev_info(struct pqi_ctrl_info *ctrl_info,
1341 char *action, struct pqi_scsi_dev *device)
1342{
1343 dev_info(&ctrl_info->pci_dev->dev,
1344 "%s scsi %d:%d:%d:%d: %s %.8s %.16s %-12s SSDSmartPathCap%c En%c Exp%c qd=%d\n",
1345 action,
1346 ctrl_info->scsi_host->host_no,
1347 device->bus,
1348 device->target,
1349 device->lun,
1350 scsi_device_type(device->devtype),
1351 device->vendor,
1352 device->model,
1353 pqi_raid_level_to_string(device->raid_level),
1354 device->offload_configured ? '+' : '-',
1355 device->offload_enabled_pending ? '+' : '-',
1356 device->expose_device ? '+' : '-',
1357 device->queue_depth);
1358}
1359
1360/* Assumes the SCSI device list lock is held. */
1361
1362static void pqi_scsi_update_device(struct pqi_scsi_dev *existing_device,
1363 struct pqi_scsi_dev *new_device)
1364{
1365 existing_device->devtype = new_device->devtype;
1366 existing_device->device_type = new_device->device_type;
1367 existing_device->bus = new_device->bus;
1368 if (new_device->target_lun_valid) {
1369 existing_device->target = new_device->target;
1370 existing_device->lun = new_device->lun;
1371 existing_device->target_lun_valid = true;
1372 }
1373
1374 /* By definition, the scsi3addr and wwid fields are already the same. */
1375
1376 existing_device->is_physical_device = new_device->is_physical_device;
1377 existing_device->expose_device = new_device->expose_device;
1378 existing_device->no_uld_attach = new_device->no_uld_attach;
1379 existing_device->aio_enabled = new_device->aio_enabled;
1380 memcpy(existing_device->vendor, new_device->vendor,
1381 sizeof(existing_device->vendor));
1382 memcpy(existing_device->model, new_device->model,
1383 sizeof(existing_device->model));
1384 existing_device->sas_address = new_device->sas_address;
1385 existing_device->raid_level = new_device->raid_level;
1386 existing_device->queue_depth = new_device->queue_depth;
1387 existing_device->aio_handle = new_device->aio_handle;
1388 existing_device->volume_status = new_device->volume_status;
1389 existing_device->active_path_index = new_device->active_path_index;
1390 existing_device->path_map = new_device->path_map;
1391 existing_device->bay = new_device->bay;
1392 memcpy(existing_device->box, new_device->box,
1393 sizeof(existing_device->box));
1394 memcpy(existing_device->phys_connector, new_device->phys_connector,
1395 sizeof(existing_device->phys_connector));
1396 existing_device->offload_configured = new_device->offload_configured;
1397 existing_device->offload_enabled = false;
1398 existing_device->offload_enabled_pending =
1399 new_device->offload_enabled_pending;
1400 existing_device->offload_to_mirror = 0;
1401 kfree(existing_device->raid_map);
1402 existing_device->raid_map = new_device->raid_map;
1403
1404 /* To prevent this from being freed later. */
1405 new_device->raid_map = NULL;
1406}
1407
1408static inline void pqi_free_device(struct pqi_scsi_dev *device)
1409{
1410 if (device) {
1411 kfree(device->raid_map);
1412 kfree(device);
1413 }
1414}
1415
1416/*
1417 * Called when exposing a new device to the OS fails in order to re-adjust
1418 * our internal SCSI device list to match the SCSI ML's view.
1419 */
1420
1421static inline void pqi_fixup_botched_add(struct pqi_ctrl_info *ctrl_info,
1422 struct pqi_scsi_dev *device)
1423{
1424 unsigned long flags;
1425
1426 spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);
1427 list_del(&device->scsi_device_list_entry);
1428 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
1429
1430 /* Allow the device structure to be freed later. */
1431 device->keep_device = false;
1432}
1433
1434static void pqi_update_device_list(struct pqi_ctrl_info *ctrl_info,
1435 struct pqi_scsi_dev *new_device_list[], unsigned int num_new_devices)
1436{
1437 int rc;
1438 unsigned int i;
1439 unsigned long flags;
1440 enum pqi_find_result find_result;
1441 struct pqi_scsi_dev *device;
1442 struct pqi_scsi_dev *next;
1443 struct pqi_scsi_dev *matching_device;
1444 struct list_head add_list;
1445 struct list_head delete_list;
1446
1447 INIT_LIST_HEAD(&add_list);
1448 INIT_LIST_HEAD(&delete_list);
1449
1450 /*
1451 * The idea here is to do as little work as possible while holding the
1452 * spinlock. That's why we go to great pains to defer anything other
1453 * than updating the internal device list until after we release the
1454 * spinlock.
1455 */
1456
1457 spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);
1458
1459 /* Assume that all devices in the existing list have gone away. */
1460 list_for_each_entry(device, &ctrl_info->scsi_device_list,
1461 scsi_device_list_entry)
1462 device->device_gone = true;
1463
1464 for (i = 0; i < num_new_devices; i++) {
1465 device = new_device_list[i];
1466
1467 find_result = pqi_scsi_find_entry(ctrl_info, device,
1468 &matching_device);
1469
1470 switch (find_result) {
1471 case DEVICE_SAME:
1472 /*
1473 * The newly found device is already in the existing
1474 * device list.
1475 */
1476 device->new_device = false;
1477 matching_device->device_gone = false;
1478 pqi_scsi_update_device(matching_device, device);
1479 break;
1480 case DEVICE_NOT_FOUND:
1481 /*
1482 * The newly found device is NOT in the existing device
1483 * list.
1484 */
1485 device->new_device = true;
1486 break;
1487 case DEVICE_CHANGED:
1488 /*
1489 * The original device has gone away and we need to add
1490 * the new device.
1491 */
1492 device->new_device = true;
1493 break;
1494 default:
1495 WARN_ON(find_result);
1496 break;
1497 }
1498 }
1499
1500 /* Process all devices that have gone away. */
1501 list_for_each_entry_safe(device, next, &ctrl_info->scsi_device_list,
1502 scsi_device_list_entry) {
1503 if (device->device_gone) {
1504 list_del(&device->scsi_device_list_entry);
1505 list_add_tail(&device->delete_list_entry, &delete_list);
1506 }
1507 }
1508
1509 /* Process all new devices. */
1510 for (i = 0; i < num_new_devices; i++) {
1511 device = new_device_list[i];
1512 if (!device->new_device)
1513 continue;
1514 if (device->volume_offline)
1515 continue;
1516 list_add_tail(&device->scsi_device_list_entry,
1517 &ctrl_info->scsi_device_list);
1518 list_add_tail(&device->add_list_entry, &add_list);
1519 /* To prevent this device structure from being freed later. */
1520 device->keep_device = true;
1521 }
1522
1523 pqi_update_all_logical_drive_queue_depths(ctrl_info);
1524
1525 list_for_each_entry(device, &ctrl_info->scsi_device_list,
1526 scsi_device_list_entry)
1527 device->offload_enabled =
1528 device->offload_enabled_pending;
1529
1530 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
1531
1532 /* Remove all devices that have gone away. */
1533 list_for_each_entry_safe(device, next, &delete_list,
1534 delete_list_entry) {
1535 if (device->sdev)
1536 pqi_remove_device(ctrl_info, device);
1537 if (device->volume_offline) {
1538 pqi_dev_info(ctrl_info, "offline", device);
1539 pqi_show_volume_status(ctrl_info, device);
1540 } else {
1541 pqi_dev_info(ctrl_info, "removed", device);
1542 }
1543 list_del(&device->delete_list_entry);
1544 pqi_free_device(device);
1545 }
1546
1547 /*
1548 * Notify the SCSI ML if the queue depth of any existing device has
1549 * changed.
1550 */
1551 list_for_each_entry(device, &ctrl_info->scsi_device_list,
1552 scsi_device_list_entry) {
1553 if (device->sdev && device->queue_depth !=
1554 device->advertised_queue_depth) {
1555 device->advertised_queue_depth = device->queue_depth;
1556 scsi_change_queue_depth(device->sdev,
1557 device->advertised_queue_depth);
1558 }
1559 }
1560
1561 /* Expose any new devices. */
1562 list_for_each_entry_safe(device, next, &add_list, add_list_entry) {
1563 if (device->expose_device && !device->sdev) {
1564 rc = pqi_add_device(ctrl_info, device);
1565 if (rc) {
1566 dev_warn(&ctrl_info->pci_dev->dev,
1567 "scsi %d:%d:%d:%d addition failed, device not added\n",
1568 ctrl_info->scsi_host->host_no,
1569 device->bus, device->target,
1570 device->lun);
1571 pqi_fixup_botched_add(ctrl_info, device);
1572 continue;
1573 }
1574 }
1575 pqi_dev_info(ctrl_info, "added", device);
1576 }
1577}
1578
1579static bool pqi_is_supported_device(struct pqi_scsi_dev *device)
1580{
1581 bool is_supported = false;
1582
1583 switch (device->devtype) {
1584 case TYPE_DISK:
1585 case TYPE_ZBC:
1586 case TYPE_TAPE:
1587 case TYPE_MEDIUM_CHANGER:
1588 case TYPE_ENCLOSURE:
1589 is_supported = true;
1590 break;
1591 case TYPE_RAID:
1592 /*
1593 * Only support the HBA controller itself as a RAID
1594 * controller. If it's a RAID controller other than
1595 * the HBA itself (an external RAID controller, MSA500
1596 * or similar), we don't support it.
1597 */
1598 if (pqi_is_hba_lunid(device->scsi3addr))
1599 is_supported = true;
1600 break;
1601 }
1602
1603 return is_supported;
1604}
1605
1606static inline bool pqi_skip_device(u8 *scsi3addr,
1607 struct report_phys_lun_extended_entry *phys_lun_ext_entry)
1608{
1609 u8 device_flags;
1610
1611 if (!MASKED_DEVICE(scsi3addr))
1612 return false;
1613
1614 /* The device is masked. */
1615
1616 device_flags = phys_lun_ext_entry->device_flags;
1617
1618 if (device_flags & REPORT_PHYS_LUN_DEV_FLAG_NON_DISK) {
1619 /*
1620 * It's a non-disk device. We ignore all devices of this type
1621 * when they're masked.
1622 */
1623 return true;
1624 }
1625
1626 return false;
1627}
1628
1629static inline bool pqi_expose_device(struct pqi_scsi_dev *device)
1630{
1631 /* Expose all devices except for physical devices that are masked. */
1632 if (device->is_physical_device && MASKED_DEVICE(device->scsi3addr))
1633 return false;
1634
1635 return true;
1636}
1637
1638static int pqi_update_scsi_devices(struct pqi_ctrl_info *ctrl_info)
1639{
1640 int i;
1641 int rc;
1642 struct list_head new_device_list_head;
1643 struct report_phys_lun_extended *physdev_list = NULL;
1644 struct report_log_lun_extended *logdev_list = NULL;
1645 struct report_phys_lun_extended_entry *phys_lun_ext_entry;
1646 struct report_log_lun_extended_entry *log_lun_ext_entry;
1647 struct bmic_identify_physical_device *id_phys = NULL;
1648 u32 num_physicals;
1649 u32 num_logicals;
1650 struct pqi_scsi_dev **new_device_list = NULL;
1651 struct pqi_scsi_dev *device;
1652 struct pqi_scsi_dev *next;
1653 unsigned int num_new_devices;
1654 unsigned int num_valid_devices;
1655 bool is_physical_device;
1656 u8 *scsi3addr;
1657 static char *out_of_memory_msg =
1658 "out of memory, device discovery stopped";
1659
1660 INIT_LIST_HEAD(&new_device_list_head);
1661
1662 rc = pqi_get_device_lists(ctrl_info, &physdev_list, &logdev_list);
1663 if (rc)
1664 goto out;
1665
1666 if (physdev_list)
1667 num_physicals =
1668 get_unaligned_be32(&physdev_list->header.list_length)
1669 / sizeof(physdev_list->lun_entries[0]);
1670 else
1671 num_physicals = 0;
1672
1673 if (logdev_list)
1674 num_logicals =
1675 get_unaligned_be32(&logdev_list->header.list_length)
1676 / sizeof(logdev_list->lun_entries[0]);
1677 else
1678 num_logicals = 0;
1679
1680 if (num_physicals) {
1681 /*
1682 * We need this buffer for calls to pqi_get_physical_disk_info()
1683 * below. We allocate it here instead of inside
1684 * pqi_get_physical_disk_info() because it's a fairly large
1685 * buffer.
1686 */
1687 id_phys = kmalloc(sizeof(*id_phys), GFP_KERNEL);
1688 if (!id_phys) {
1689 dev_warn(&ctrl_info->pci_dev->dev, "%s\n",
1690 out_of_memory_msg);
1691 rc = -ENOMEM;
1692 goto out;
1693 }
1694 }
1695
1696 num_new_devices = num_physicals + num_logicals;
1697
1698 new_device_list = kmalloc(sizeof(*new_device_list) *
1699 num_new_devices, GFP_KERNEL);
1700 if (!new_device_list) {
1701 dev_warn(&ctrl_info->pci_dev->dev, "%s\n", out_of_memory_msg);
1702 rc = -ENOMEM;
1703 goto out;
1704 }
1705
1706 for (i = 0; i < num_new_devices; i++) {
1707 device = kzalloc(sizeof(*device), GFP_KERNEL);
1708 if (!device) {
1709 dev_warn(&ctrl_info->pci_dev->dev, "%s\n",
1710 out_of_memory_msg);
1711 rc = -ENOMEM;
1712 goto out;
1713 }
1714 list_add_tail(&device->new_device_list_entry,
1715 &new_device_list_head);
1716 }
1717
1718 device = NULL;
1719 num_valid_devices = 0;
1720
1721 for (i = 0; i < num_new_devices; i++) {
1722
1723 if (i < num_physicals) {
1724 is_physical_device = true;
1725 phys_lun_ext_entry = &physdev_list->lun_entries[i];
1726 log_lun_ext_entry = NULL;
1727 scsi3addr = phys_lun_ext_entry->lunid;
1728 } else {
1729 is_physical_device = false;
1730 phys_lun_ext_entry = NULL;
1731 log_lun_ext_entry =
1732 &logdev_list->lun_entries[i - num_physicals];
1733 scsi3addr = log_lun_ext_entry->lunid;
1734 }
1735
1736 if (is_physical_device &&
1737 pqi_skip_device(scsi3addr, phys_lun_ext_entry))
1738 continue;
1739
1740 if (device)
1741 device = list_next_entry(device, new_device_list_entry);
1742 else
1743 device = list_first_entry(&new_device_list_head,
1744 struct pqi_scsi_dev, new_device_list_entry);
1745
1746 memcpy(device->scsi3addr, scsi3addr, sizeof(device->scsi3addr));
1747 device->is_physical_device = is_physical_device;
1748 device->raid_level = SA_RAID_UNKNOWN;
1749
1750 /* Gather information about the device. */
1751 rc = pqi_get_device_info(ctrl_info, device);
1752 if (rc == -ENOMEM) {
1753 dev_warn(&ctrl_info->pci_dev->dev, "%s\n",
1754 out_of_memory_msg);
1755 goto out;
1756 }
1757 if (rc) {
1758 dev_warn(&ctrl_info->pci_dev->dev,
1759 "obtaining device info failed, skipping device %016llx\n",
1760 get_unaligned_be64(device->scsi3addr));
1761 rc = 0;
1762 continue;
1763 }
1764
1765 if (!pqi_is_supported_device(device))
1766 continue;
1767
1768 pqi_assign_bus_target_lun(device);
1769
1770 device->expose_device = pqi_expose_device(device);
1771
1772 if (device->is_physical_device) {
1773 device->wwid = phys_lun_ext_entry->wwid;
1774 if ((phys_lun_ext_entry->device_flags &
1775 REPORT_PHYS_LUN_DEV_FLAG_AIO_ENABLED) &&
1776 phys_lun_ext_entry->aio_handle)
1777 device->aio_enabled = true;
1778 } else {
1779 memcpy(device->volume_id, log_lun_ext_entry->volume_id,
1780 sizeof(device->volume_id));
1781 }
1782
1783 switch (device->devtype) {
1784 case TYPE_DISK:
1785 case TYPE_ZBC:
1786 case TYPE_ENCLOSURE:
1787 if (device->is_physical_device) {
1788 device->sas_address =
1789 get_unaligned_be64(&device->wwid);
1790 if (device->devtype == TYPE_DISK ||
1791 device->devtype == TYPE_ZBC) {
1792 device->aio_handle =
1793 phys_lun_ext_entry->aio_handle;
1794 pqi_get_physical_disk_info(ctrl_info,
1795 device, id_phys);
1796 }
1797 }
1798 break;
1799 }
1800
1801 new_device_list[num_valid_devices++] = device;
1802 }
1803
1804 pqi_update_device_list(ctrl_info, new_device_list, num_valid_devices);
1805
1806out:
1807 list_for_each_entry_safe(device, next, &new_device_list_head,
1808 new_device_list_entry) {
1809 if (device->keep_device)
1810 continue;
1811 list_del(&device->new_device_list_entry);
1812 pqi_free_device(device);
1813 }
1814
1815 kfree(new_device_list);
1816 kfree(physdev_list);
1817 kfree(logdev_list);
1818 kfree(id_phys);
1819
1820 return rc;
1821}
1822
1823static void pqi_remove_all_scsi_devices(struct pqi_ctrl_info *ctrl_info)
1824{
1825 unsigned long flags;
1826 struct pqi_scsi_dev *device;
1827 struct pqi_scsi_dev *next;
1828
1829 spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);
1830
1831 list_for_each_entry_safe(device, next, &ctrl_info->scsi_device_list,
1832 scsi_device_list_entry) {
1833 if (device->sdev)
1834 pqi_remove_device(ctrl_info, device);
1835 list_del(&device->scsi_device_list_entry);
1836 pqi_free_device(device);
1837 }
1838
1839 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
1840}
1841
1842static int pqi_scan_scsi_devices(struct pqi_ctrl_info *ctrl_info)
1843{
1844 int rc;
1845
1846 if (pqi_ctrl_offline(ctrl_info))
1847 return -ENXIO;
1848
1849 mutex_lock(&ctrl_info->scan_mutex);
1850
1851 rc = pqi_update_scsi_devices(ctrl_info);
1852 if (rc)
1853 pqi_schedule_rescan_worker(ctrl_info);
1854
1855 mutex_unlock(&ctrl_info->scan_mutex);
1856
1857 return rc;
1858}
1859
1860static void pqi_scan_start(struct Scsi_Host *shost)
1861{
1862 pqi_scan_scsi_devices(shost_to_hba(shost));
1863}
1864
1865/* Returns TRUE if scan is finished. */
1866
1867static int pqi_scan_finished(struct Scsi_Host *shost,
1868 unsigned long elapsed_time)
1869{
1870 struct pqi_ctrl_info *ctrl_info;
1871
1872 ctrl_info = shost_priv(shost);
1873
1874 return !mutex_is_locked(&ctrl_info->scan_mutex);
1875}
1876
1877static inline void pqi_set_encryption_info(
1878 struct pqi_encryption_info *encryption_info, struct raid_map *raid_map,
1879 u64 first_block)
1880{
1881 u32 volume_blk_size;
1882
1883 /*
1884 * Set the encryption tweak values based on logical block address.
1885 * If the block size is 512, the tweak value is equal to the LBA.
1886 * For other block sizes, tweak value is (LBA * block size) / 512.
1887 */
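	/* Example: with a 4096-byte block size, LBA 100 yields a tweak of (100 * 4096) / 512 = 800. */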
1888 volume_blk_size = get_unaligned_le32(&raid_map->volume_blk_size);
1889 if (volume_blk_size != 512)
1890 first_block = (first_block * volume_blk_size) / 512;
1891
1892 encryption_info->data_encryption_key_index =
1893 get_unaligned_le16(&raid_map->data_encryption_key_index);
1894 encryption_info->encrypt_tweak_lower = lower_32_bits(first_block);
1895 encryption_info->encrypt_tweak_upper = upper_32_bits(first_block);
1896}
1897
1898/*
1899 * Attempt to perform offload RAID mapping for a logical volume I/O.
1900 */
1901
1902#define PQI_RAID_BYPASS_INELIGIBLE 1
1903
1904static int pqi_raid_bypass_submit_scsi_cmd(struct pqi_ctrl_info *ctrl_info,
1905 struct pqi_scsi_dev *device, struct scsi_cmnd *scmd,
1906 struct pqi_queue_group *queue_group)
1907{
1908 struct raid_map *raid_map;
1909 bool is_write = false;
1910 u32 map_index;
1911 u64 first_block;
1912 u64 last_block;
1913 u32 block_cnt;
1914 u32 blocks_per_row;
1915 u64 first_row;
1916 u64 last_row;
1917 u32 first_row_offset;
1918 u32 last_row_offset;
1919 u32 first_column;
1920 u32 last_column;
1921 u64 r0_first_row;
1922 u64 r0_last_row;
1923 u32 r5or6_blocks_per_row;
1924 u64 r5or6_first_row;
1925 u64 r5or6_last_row;
1926 u32 r5or6_first_row_offset;
1927 u32 r5or6_last_row_offset;
1928 u32 r5or6_first_column;
1929 u32 r5or6_last_column;
1930 u16 data_disks_per_row;
1931 u32 total_disks_per_row;
1932 u16 layout_map_count;
1933 u32 stripesize;
1934 u16 strip_size;
1935 u32 first_group;
1936 u32 last_group;
1937 u32 current_group;
1938 u32 map_row;
1939 u32 aio_handle;
1940 u64 disk_block;
1941 u32 disk_block_cnt;
1942 u8 cdb[16];
1943 u8 cdb_length;
1944 int offload_to_mirror;
1945 struct pqi_encryption_info *encryption_info_ptr;
1946 struct pqi_encryption_info encryption_info;
1947#if BITS_PER_LONG == 32
1948 u64 tmpdiv;
1949#endif
1950
1951 /* Check for valid opcode, get LBA and block count. */
1952 switch (scmd->cmnd[0]) {
1953 case WRITE_6:
1954 is_write = true;
1955 /* fall through */
1956 case READ_6:
1957 first_block = (u64)(((scmd->cmnd[1] & 0x1f) << 16) |
1958 (scmd->cmnd[2] << 8) | scmd->cmnd[3]);
1959 block_cnt = (u32)scmd->cmnd[4];
1960 if (block_cnt == 0)
1961 block_cnt = 256;
1962 break;
1963 case WRITE_10:
1964 is_write = true;
1965 /* fall through */
1966 case READ_10:
1967 first_block = (u64)get_unaligned_be32(&scmd->cmnd[2]);
1968 block_cnt = (u32)get_unaligned_be16(&scmd->cmnd[7]);
1969 break;
1970 case WRITE_12:
1971 is_write = true;
1972 /* fall through */
1973 case READ_12:
1974 first_block = (u64)get_unaligned_be32(&scmd->cmnd[2]);
1975 block_cnt = get_unaligned_be32(&scmd->cmnd[6]);
1976 break;
1977 case WRITE_16:
1978 is_write = true;
1979 /* fall through */
1980 case READ_16:
1981 first_block = get_unaligned_be64(&scmd->cmnd[2]);
1982 block_cnt = get_unaligned_be32(&scmd->cmnd[10]);
1983 break;
1984 default:
1985 /* Process via normal I/O path. */
1986 return PQI_RAID_BYPASS_INELIGIBLE;
1987 }
1988
1989 /* Check for write to non-RAID-0. */
1990 if (is_write && device->raid_level != SA_RAID_0)
1991 return PQI_RAID_BYPASS_INELIGIBLE;
1992
1993 if (unlikely(block_cnt == 0))
1994 return PQI_RAID_BYPASS_INELIGIBLE;
1995
1996 last_block = first_block + block_cnt - 1;
1997 raid_map = device->raid_map;
1998
1999 /* Check for invalid block or wraparound. */
2000 if (last_block >= get_unaligned_le64(&raid_map->volume_blk_cnt) ||
2001 last_block < first_block)
2002 return PQI_RAID_BYPASS_INELIGIBLE;
2003
2004 data_disks_per_row = get_unaligned_le16(&raid_map->data_disks_per_row);
2005 strip_size = get_unaligned_le16(&raid_map->strip_size);
2006 layout_map_count = get_unaligned_le16(&raid_map->layout_map_count);
2007
2008 /* Calculate stripe information for the request. */
2009 blocks_per_row = data_disks_per_row * strip_size;
2010#if BITS_PER_LONG == 32
2011 tmpdiv = first_block;
2012 do_div(tmpdiv, blocks_per_row);
2013 first_row = tmpdiv;
2014 tmpdiv = last_block;
2015 do_div(tmpdiv, blocks_per_row);
2016 last_row = tmpdiv;
2017 first_row_offset = (u32)(first_block - (first_row * blocks_per_row));
2018 last_row_offset = (u32)(last_block - (last_row * blocks_per_row));
2019 tmpdiv = first_row_offset;
2020 do_div(tmpdiv, strip_size);
2021 first_column = tmpdiv;
2022 tmpdiv = last_row_offset;
2023 do_div(tmpdiv, strip_size);
2024 last_column = tmpdiv;
2025#else
2026 first_row = first_block / blocks_per_row;
2027 last_row = last_block / blocks_per_row;
2028 first_row_offset = (u32)(first_block - (first_row * blocks_per_row));
2029 last_row_offset = (u32)(last_block - (last_row * blocks_per_row));
2030 first_column = first_row_offset / strip_size;
2031 last_column = last_row_offset / strip_size;
2032#endif
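	/*
	 * Worked example of the stripe math above (illustrative values, not
	 * taken from an actual RAID map): with strip_size = 128 and
	 * data_disks_per_row = 3, blocks_per_row = 384.  A request whose
	 * first_block is 1000 therefore lands in first_row = 1000 / 384 = 2,
	 * first_row_offset = 1000 - (2 * 384) = 232, and
	 * first_column = 232 / 128 = 1.
	 */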
2033
2034 /* If this isn't a single row/column then give to the controller. */
2035 if (first_row != last_row || first_column != last_column)
2036 return PQI_RAID_BYPASS_INELIGIBLE;
2037
2038 /* Proceeding with driver mapping. */
2039 total_disks_per_row = data_disks_per_row +
2040 get_unaligned_le16(&raid_map->metadata_disks_per_row);
2041 map_row = ((u32)(first_row >> raid_map->parity_rotation_shift)) %
2042 get_unaligned_le16(&raid_map->row_cnt);
2043 map_index = (map_row * total_disks_per_row) + first_column;
2044
2045 /* RAID 1 */
2046 if (device->raid_level == SA_RAID_1) {
2047 if (device->offload_to_mirror)
2048 map_index += data_disks_per_row;
2049 device->offload_to_mirror = !device->offload_to_mirror;
2050 } else if (device->raid_level == SA_RAID_ADM) {
2051 /* RAID ADM */
2052 /*
2053 * Handles N-way mirrors (R1-ADM) and R10 with # of drives
2054 * divisible by 3.
2055 */
2056 offload_to_mirror = device->offload_to_mirror;
2057 if (offload_to_mirror == 0) {
2058 /* use physical disk in the first mirrored group. */
2059 map_index %= data_disks_per_row;
2060 } else {
2061 do {
2062 /*
2063 * Determine mirror group that map_index
2064 * indicates.
2065 */
2066 current_group = map_index / data_disks_per_row;
2067
2068 if (offload_to_mirror != current_group) {
2069 if (current_group <
2070 layout_map_count - 1) {
2071 /*
2072 * Select raid index from
2073 * next group.
2074 */
2075 map_index += data_disks_per_row;
2076 current_group++;
2077 } else {
2078 /*
2079 * Select raid index from first
2080 * group.
2081 */
2082 map_index %= data_disks_per_row;
2083 current_group = 0;
2084 }
2085 }
2086 } while (offload_to_mirror != current_group);
2087 }
2088
2089 /* Set mirror group to use next time. */
2090 offload_to_mirror =
2091 (offload_to_mirror >= layout_map_count - 1) ?
2092 0 : offload_to_mirror + 1;
2093 WARN_ON(offload_to_mirror >= layout_map_count);
2094 device->offload_to_mirror = offload_to_mirror;
2095 /*
2096 * Avoid direct use of device->offload_to_mirror within this
2097 * function since multiple threads might simultaneously
2098	 * increment it beyond the range of device->layout_map_count - 1.
2099 */
2100 } else if ((device->raid_level == SA_RAID_5 ||
2101 device->raid_level == SA_RAID_6) && layout_map_count > 1) {
2102 /* RAID 50/60 */
2103 /* Verify first and last block are in same RAID group */
2104 r5or6_blocks_per_row = strip_size * data_disks_per_row;
2105 stripesize = r5or6_blocks_per_row * layout_map_count;
2106#if BITS_PER_LONG == 32
2107 tmpdiv = first_block;
2108 first_group = do_div(tmpdiv, stripesize);
2109 tmpdiv = first_group;
2110 do_div(tmpdiv, r5or6_blocks_per_row);
2111 first_group = tmpdiv;
2112 tmpdiv = last_block;
2113 last_group = do_div(tmpdiv, stripesize);
2114 tmpdiv = last_group;
2115 do_div(tmpdiv, r5or6_blocks_per_row);
2116 last_group = tmpdiv;
2117#else
2118 first_group = (first_block % stripesize) / r5or6_blocks_per_row;
2119 last_group = (last_block % stripesize) / r5or6_blocks_per_row;
2120#endif
2121 if (first_group != last_group)
2122 return PQI_RAID_BYPASS_INELIGIBLE;
2123
2124 /* Verify request is in a single row of RAID 5/6 */
2125#if BITS_PER_LONG == 32
2126 tmpdiv = first_block;
2127 do_div(tmpdiv, stripesize);
2128 first_row = r5or6_first_row = r0_first_row = tmpdiv;
2129 tmpdiv = last_block;
2130 do_div(tmpdiv, stripesize);
2131 r5or6_last_row = r0_last_row = tmpdiv;
2132#else
2133 first_row = r5or6_first_row = r0_first_row =
2134 first_block / stripesize;
2135 r5or6_last_row = r0_last_row = last_block / stripesize;
2136#endif
2137 if (r5or6_first_row != r5or6_last_row)
2138 return PQI_RAID_BYPASS_INELIGIBLE;
2139
2140 /* Verify request is in a single column */
2141#if BITS_PER_LONG == 32
2142 tmpdiv = first_block;
2143 first_row_offset = do_div(tmpdiv, stripesize);
2144 tmpdiv = first_row_offset;
2145 first_row_offset = (u32)do_div(tmpdiv, r5or6_blocks_per_row);
2146 r5or6_first_row_offset = first_row_offset;
2147 tmpdiv = last_block;
2148 r5or6_last_row_offset = do_div(tmpdiv, stripesize);
2149 tmpdiv = r5or6_last_row_offset;
2150 r5or6_last_row_offset = do_div(tmpdiv, r5or6_blocks_per_row);
2151 tmpdiv = r5or6_first_row_offset;
2152 do_div(tmpdiv, strip_size);
2153 first_column = r5or6_first_column = tmpdiv;
2154 tmpdiv = r5or6_last_row_offset;
2155 do_div(tmpdiv, strip_size);
2156 r5or6_last_column = tmpdiv;
2157#else
2158 first_row_offset = r5or6_first_row_offset =
2159 (u32)((first_block % stripesize) %
2160 r5or6_blocks_per_row);
2161
2162 r5or6_last_row_offset =
2163 (u32)((last_block % stripesize) %
2164 r5or6_blocks_per_row);
2165
2166 first_column = r5or6_first_row_offset / strip_size;
2167 r5or6_first_column = first_column;
2168 r5or6_last_column = r5or6_last_row_offset / strip_size;
2169#endif
2170 if (r5or6_first_column != r5or6_last_column)
2171 return PQI_RAID_BYPASS_INELIGIBLE;
2172
2173 /* Request is eligible */
2174 map_row =
2175 ((u32)(first_row >> raid_map->parity_rotation_shift)) %
2176 get_unaligned_le16(&raid_map->row_cnt);
2177
2178 map_index = (first_group *
2179 (get_unaligned_le16(&raid_map->row_cnt) *
2180 total_disks_per_row)) +
2181 (map_row * total_disks_per_row) + first_column;
2182 }
2183
2184 if (unlikely(map_index >= RAID_MAP_MAX_ENTRIES))
2185 return PQI_RAID_BYPASS_INELIGIBLE;
2186
2187 aio_handle = raid_map->disk_data[map_index].aio_handle;
2188 disk_block = get_unaligned_le64(&raid_map->disk_starting_blk) +
2189 first_row * strip_size +
2190 (first_row_offset - first_column * strip_size);
2191 disk_block_cnt = block_cnt;
2192
2193 /* Handle differing logical/physical block sizes. */
2194 if (raid_map->phys_blk_shift) {
2195 disk_block <<= raid_map->phys_blk_shift;
2196 disk_block_cnt <<= raid_map->phys_blk_shift;
2197 }
2198
2199 if (unlikely(disk_block_cnt > 0xffff))
2200 return PQI_RAID_BYPASS_INELIGIBLE;
2201
2202 /* Build the new CDB for the physical disk I/O. */
2203 if (disk_block > 0xffffffff) {
2204 cdb[0] = is_write ? WRITE_16 : READ_16;
2205 cdb[1] = 0;
2206 put_unaligned_be64(disk_block, &cdb[2]);
2207 put_unaligned_be32(disk_block_cnt, &cdb[10]);
2208 cdb[14] = 0;
2209 cdb[15] = 0;
2210 cdb_length = 16;
2211 } else {
2212 cdb[0] = is_write ? WRITE_10 : READ_10;
2213 cdb[1] = 0;
2214 put_unaligned_be32((u32)disk_block, &cdb[2]);
2215 cdb[6] = 0;
2216 put_unaligned_be16((u16)disk_block_cnt, &cdb[7]);
2217 cdb[9] = 0;
2218 cdb_length = 10;
2219 }
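	/*
	 * Example of the CDB selection above: a disk_block of 0x100000000 no
	 * longer fits in the 32-bit LBA field of READ(10)/WRITE(10), so the
	 * 16-byte CDB is built; any disk_block that fits in 32 bits uses the
	 * 10-byte form.  Requests whose disk_block_cnt exceeds 0xffff were
	 * already rejected just above.
	 */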
2220
2221 if (get_unaligned_le16(&raid_map->flags) &
2222 RAID_MAP_ENCRYPTION_ENABLED) {
2223 pqi_set_encryption_info(&encryption_info, raid_map,
2224 first_block);
2225 encryption_info_ptr = &encryption_info;
2226 } else {
2227 encryption_info_ptr = NULL;
2228 }
2229
2230 return pqi_aio_submit_io(ctrl_info, scmd, aio_handle,
2231 cdb, cdb_length, queue_group, encryption_info_ptr);
2232}
2233
2234#define PQI_STATUS_IDLE 0x0
2235
2236#define PQI_CREATE_ADMIN_QUEUE_PAIR 1
2237#define PQI_DELETE_ADMIN_QUEUE_PAIR 2
2238
2239#define PQI_DEVICE_STATE_POWER_ON_AND_RESET 0x0
2240#define PQI_DEVICE_STATE_STATUS_AVAILABLE 0x1
2241#define PQI_DEVICE_STATE_ALL_REGISTERS_READY 0x2
2242#define PQI_DEVICE_STATE_ADMIN_QUEUE_PAIR_READY 0x3
2243#define PQI_DEVICE_STATE_ERROR 0x4
2244
2245#define PQI_MODE_READY_TIMEOUT_SECS 30
2246#define PQI_MODE_READY_POLL_INTERVAL_MSECS 1
2247
2248static int pqi_wait_for_pqi_mode_ready(struct pqi_ctrl_info *ctrl_info)
2249{
2250 struct pqi_device_registers __iomem *pqi_registers;
2251 unsigned long timeout;
2252 u64 signature;
2253 u8 status;
2254
2255 pqi_registers = ctrl_info->pqi_registers;
2256 timeout = (PQI_MODE_READY_TIMEOUT_SECS * HZ) + jiffies;
2257
2258 while (1) {
2259 signature = readq(&pqi_registers->signature);
2260 if (memcmp(&signature, PQI_DEVICE_SIGNATURE,
2261 sizeof(signature)) == 0)
2262 break;
2263 if (time_after(jiffies, timeout)) {
2264 dev_err(&ctrl_info->pci_dev->dev,
2265 "timed out waiting for PQI signature\n");
2266 return -ETIMEDOUT;
2267 }
2268 msleep(PQI_MODE_READY_POLL_INTERVAL_MSECS);
2269 }
2270
2271 while (1) {
2272 status = readb(&pqi_registers->function_and_status_code);
2273 if (status == PQI_STATUS_IDLE)
2274 break;
2275 if (time_after(jiffies, timeout)) {
2276 dev_err(&ctrl_info->pci_dev->dev,
2277 "timed out waiting for PQI IDLE\n");
2278 return -ETIMEDOUT;
2279 }
2280 msleep(PQI_MODE_READY_POLL_INTERVAL_MSECS);
2281 }
2282
2283 while (1) {
2284 if (readl(&pqi_registers->device_status) ==
2285 PQI_DEVICE_STATE_ALL_REGISTERS_READY)
2286 break;
2287 if (time_after(jiffies, timeout)) {
2288 dev_err(&ctrl_info->pci_dev->dev,
2289 "timed out waiting for PQI all registers ready\n");
2290 return -ETIMEDOUT;
2291 }
2292 msleep(PQI_MODE_READY_POLL_INTERVAL_MSECS);
2293 }
2294
2295 return 0;
2296}
2297
2298static inline void pqi_aio_path_disabled(struct pqi_io_request *io_request)
2299{
2300 struct pqi_scsi_dev *device;
2301
2302 device = io_request->scmd->device->hostdata;
2303 device->offload_enabled = false;
2304}
2305
2306static inline void pqi_take_device_offline(struct scsi_device *sdev)
2307{
2308 struct pqi_ctrl_info *ctrl_info;
2309	struct pqi_scsi_dev *device;
2310
2311 if (scsi_device_online(sdev)) {
2312 scsi_device_set_state(sdev, SDEV_OFFLINE);
2313 ctrl_info = shost_to_hba(sdev->host);
2314 schedule_delayed_work(&ctrl_info->rescan_work, 0);
2315 device = sdev->hostdata;
2316 dev_err(&ctrl_info->pci_dev->dev, "offlined scsi %d:%d:%d:%d\n",
2317 ctrl_info->scsi_host->host_no, device->bus,
2318 device->target, device->lun);
2319 }
2320}
2321
2322static void pqi_process_raid_io_error(struct pqi_io_request *io_request)
2323{
2324 u8 scsi_status;
2325 u8 host_byte;
2326 struct scsi_cmnd *scmd;
2327 struct pqi_raid_error_info *error_info;
2328 size_t sense_data_length;
2329 int residual_count;
2330 int xfer_count;
2331 struct scsi_sense_hdr sshdr;
2332
2333 scmd = io_request->scmd;
2334 if (!scmd)
2335 return;
2336
2337 error_info = io_request->error_info;
2338 scsi_status = error_info->status;
2339 host_byte = DID_OK;
2340
2341 if (error_info->data_out_result == PQI_DATA_IN_OUT_UNDERFLOW) {
2342 xfer_count =
2343 get_unaligned_le32(&error_info->data_out_transferred);
2344 residual_count = scsi_bufflen(scmd) - xfer_count;
2345 scsi_set_resid(scmd, residual_count);
2346 if (xfer_count < scmd->underflow)
2347 host_byte = DID_SOFT_ERROR;
2348 }
2349
2350 sense_data_length = get_unaligned_le16(&error_info->sense_data_length);
2351 if (sense_data_length == 0)
2352 sense_data_length =
2353 get_unaligned_le16(&error_info->response_data_length);
2354 if (sense_data_length) {
2355 if (sense_data_length > sizeof(error_info->data))
2356 sense_data_length = sizeof(error_info->data);
2357
2358 if (scsi_status == SAM_STAT_CHECK_CONDITION &&
2359 scsi_normalize_sense(error_info->data,
2360 sense_data_length, &sshdr) &&
2361 sshdr.sense_key == HARDWARE_ERROR &&
2362 sshdr.asc == 0x3e &&
2363 sshdr.ascq == 0x1) {
2364 pqi_take_device_offline(scmd->device);
2365 host_byte = DID_NO_CONNECT;
2366 }
2367
2368 if (sense_data_length > SCSI_SENSE_BUFFERSIZE)
2369 sense_data_length = SCSI_SENSE_BUFFERSIZE;
2370 memcpy(scmd->sense_buffer, error_info->data,
2371 sense_data_length);
2372 }
2373
2374 scmd->result = scsi_status;
2375 set_host_byte(scmd, host_byte);
2376}
2377
2378static void pqi_process_aio_io_error(struct pqi_io_request *io_request)
2379{
2380 u8 scsi_status;
2381 u8 host_byte;
2382 struct scsi_cmnd *scmd;
2383 struct pqi_aio_error_info *error_info;
2384 size_t sense_data_length;
2385 int residual_count;
2386 int xfer_count;
2387 bool device_offline;
2388
2389 scmd = io_request->scmd;
2390 error_info = io_request->error_info;
2391 host_byte = DID_OK;
2392 sense_data_length = 0;
2393 device_offline = false;
2394
2395 switch (error_info->service_response) {
2396 case PQI_AIO_SERV_RESPONSE_COMPLETE:
2397 scsi_status = error_info->status;
2398 break;
2399 case PQI_AIO_SERV_RESPONSE_FAILURE:
2400 switch (error_info->status) {
2401 case PQI_AIO_STATUS_IO_ABORTED:
2402 scsi_status = SAM_STAT_TASK_ABORTED;
2403 break;
2404 case PQI_AIO_STATUS_UNDERRUN:
2405 scsi_status = SAM_STAT_GOOD;
2406 residual_count = get_unaligned_le32(
2407 &error_info->residual_count);
2408 scsi_set_resid(scmd, residual_count);
2409 xfer_count = scsi_bufflen(scmd) - residual_count;
2410 if (xfer_count < scmd->underflow)
2411 host_byte = DID_SOFT_ERROR;
2412 break;
2413 case PQI_AIO_STATUS_OVERRUN:
2414 scsi_status = SAM_STAT_GOOD;
2415 break;
2416 case PQI_AIO_STATUS_AIO_PATH_DISABLED:
2417 pqi_aio_path_disabled(io_request);
2418 scsi_status = SAM_STAT_GOOD;
2419 io_request->status = -EAGAIN;
2420 break;
2421 case PQI_AIO_STATUS_NO_PATH_TO_DEVICE:
2422 case PQI_AIO_STATUS_INVALID_DEVICE:
2423 device_offline = true;
2424 pqi_take_device_offline(scmd->device);
2425 host_byte = DID_NO_CONNECT;
2426 scsi_status = SAM_STAT_CHECK_CONDITION;
2427 break;
2428 case PQI_AIO_STATUS_IO_ERROR:
2429 default:
2430 scsi_status = SAM_STAT_CHECK_CONDITION;
2431 break;
2432 }
2433 break;
2434 case PQI_AIO_SERV_RESPONSE_TMF_COMPLETE:
2435 case PQI_AIO_SERV_RESPONSE_TMF_SUCCEEDED:
2436 scsi_status = SAM_STAT_GOOD;
2437 break;
2438 case PQI_AIO_SERV_RESPONSE_TMF_REJECTED:
2439 case PQI_AIO_SERV_RESPONSE_TMF_INCORRECT_LUN:
2440 default:
2441 scsi_status = SAM_STAT_CHECK_CONDITION;
2442 break;
2443 }
2444
2445 if (error_info->data_present) {
2446 sense_data_length =
2447 get_unaligned_le16(&error_info->data_length);
2448 if (sense_data_length) {
2449 if (sense_data_length > sizeof(error_info->data))
2450 sense_data_length = sizeof(error_info->data);
2451 if (sense_data_length > SCSI_SENSE_BUFFERSIZE)
2452 sense_data_length = SCSI_SENSE_BUFFERSIZE;
2453 memcpy(scmd->sense_buffer, error_info->data,
2454 sense_data_length);
2455 }
2456 }
2457
2458 if (device_offline && sense_data_length == 0)
2459 scsi_build_sense_buffer(0, scmd->sense_buffer, HARDWARE_ERROR,
2460 0x3e, 0x1);
2461
2462 scmd->result = scsi_status;
2463 set_host_byte(scmd, host_byte);
2464}
2465
2466static void pqi_process_io_error(unsigned int iu_type,
2467 struct pqi_io_request *io_request)
2468{
2469 switch (iu_type) {
2470 case PQI_RESPONSE_IU_RAID_PATH_IO_ERROR:
2471 pqi_process_raid_io_error(io_request);
2472 break;
2473 case PQI_RESPONSE_IU_AIO_PATH_IO_ERROR:
2474 pqi_process_aio_io_error(io_request);
2475 break;
2476 }
2477}
2478
2479static int pqi_interpret_task_management_response(
2480 struct pqi_task_management_response *response)
2481{
2482 int rc;
2483
2484 switch (response->response_code) {
2485 case SOP_TMF_COMPLETE:
2486 case SOP_TMF_FUNCTION_SUCCEEDED:
2487 rc = 0;
2488 break;
2489 default:
2490 rc = -EIO;
2491 break;
2492 }
2493
2494 return rc;
2495}
2496
2497static unsigned int pqi_process_io_intr(struct pqi_ctrl_info *ctrl_info,
2498 struct pqi_queue_group *queue_group)
2499{
2500 unsigned int num_responses;
2501 pqi_index_t oq_pi;
2502 pqi_index_t oq_ci;
2503 struct pqi_io_request *io_request;
2504 struct pqi_io_response *response;
2505 u16 request_id;
2506
2507 num_responses = 0;
2508 oq_ci = queue_group->oq_ci_copy;
2509
2510 while (1) {
2511 oq_pi = *queue_group->oq_pi;
2512 if (oq_pi == oq_ci)
2513 break;
2514
2515 num_responses++;
2516 response = queue_group->oq_element_array +
2517 (oq_ci * PQI_OPERATIONAL_OQ_ELEMENT_LENGTH);
2518
2519 request_id = get_unaligned_le16(&response->request_id);
2520 WARN_ON(request_id >= ctrl_info->max_io_slots);
2521
2522 io_request = &ctrl_info->io_request_pool[request_id];
2523 WARN_ON(atomic_read(&io_request->refcount) == 0);
2524
2525 switch (response->header.iu_type) {
2526 case PQI_RESPONSE_IU_RAID_PATH_IO_SUCCESS:
2527 case PQI_RESPONSE_IU_AIO_PATH_IO_SUCCESS:
2528 case PQI_RESPONSE_IU_GENERAL_MANAGEMENT:
2529 break;
2530 case PQI_RESPONSE_IU_TASK_MANAGEMENT:
2531 io_request->status =
2532 pqi_interpret_task_management_response(
2533 (void *)response);
2534 break;
2535 case PQI_RESPONSE_IU_AIO_PATH_DISABLED:
2536 pqi_aio_path_disabled(io_request);
2537 io_request->status = -EAGAIN;
2538 break;
2539 case PQI_RESPONSE_IU_RAID_PATH_IO_ERROR:
2540 case PQI_RESPONSE_IU_AIO_PATH_IO_ERROR:
2541 io_request->error_info = ctrl_info->error_buffer +
2542 (get_unaligned_le16(&response->error_index) *
2543 PQI_ERROR_BUFFER_ELEMENT_LENGTH);
2544 pqi_process_io_error(response->header.iu_type,
2545 io_request);
2546 break;
2547 default:
2548 dev_err(&ctrl_info->pci_dev->dev,
2549 "unexpected IU type: 0x%x\n",
2550 response->header.iu_type);
2551 WARN_ON(response->header.iu_type);
2552 break;
2553 }
2554
2555 io_request->io_complete_callback(io_request,
2556 io_request->context);
2557
2558 /*
2559 * Note that the I/O request structure CANNOT BE TOUCHED after
2560 * returning from the I/O completion callback!
2561 */
2562
2563 oq_ci = (oq_ci + 1) % ctrl_info->num_elements_per_oq;
2564 }
2565
2566 if (num_responses) {
2567 queue_group->oq_ci_copy = oq_ci;
2568 writel(oq_ci, queue_group->oq_ci);
2569 }
2570
2571 return num_responses;
2572}
2573
2574static inline unsigned int pqi_num_elements_free(unsigned int pi,
2575	unsigned int ci, unsigned int elements_in_queue)
2576{
2577 unsigned int num_elements_used;
2578
2579 if (pi >= ci)
2580 num_elements_used = pi - ci;
2581 else
2582 num_elements_used = elements_in_queue - ci + pi;
2583
2584 return elements_in_queue - num_elements_used - 1;
2585}
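/*
 * Worked example for pqi_num_elements_free() (illustrative values only):
 * in a 16-element queue with pi = 3 and ci = 10 the producer has wrapped,
 * so 16 - 10 + 3 = 9 elements are in use and 16 - 9 - 1 = 6 are free; one
 * slot is always kept unused so that pi == ci unambiguously means "empty".
 *
 *	pqi_num_elements_free(3, 10, 16);	// returns 6
 *	pqi_num_elements_free(10, 3, 16);	// pi ahead of ci: returns 8
 */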
2586
2587#define PQI_EVENT_ACK_TIMEOUT 30
2588
2589static void pqi_start_event_ack(struct pqi_ctrl_info *ctrl_info,
2590 struct pqi_event_acknowledge_request *iu, size_t iu_length)
2591{
2592 pqi_index_t iq_pi;
2593 pqi_index_t iq_ci;
2594 unsigned long flags;
2595 void *next_element;
2596 unsigned long timeout;
2597 struct pqi_queue_group *queue_group;
2598
2599 queue_group = &ctrl_info->queue_groups[PQI_DEFAULT_QUEUE_GROUP];
2600 put_unaligned_le16(queue_group->oq_id, &iu->header.response_queue_id);
2601
2602 timeout = (PQI_EVENT_ACK_TIMEOUT * HZ) + jiffies;
2603
2604 while (1) {
2605 spin_lock_irqsave(&queue_group->submit_lock[RAID_PATH], flags);
2606
2607 iq_pi = queue_group->iq_pi_copy[RAID_PATH];
2608 iq_ci = *queue_group->iq_ci[RAID_PATH];
2609
2610 if (pqi_num_elements_free(iq_pi, iq_ci,
2611 ctrl_info->num_elements_per_iq))
2612 break;
2613
2614 spin_unlock_irqrestore(
2615 &queue_group->submit_lock[RAID_PATH], flags);
2616
2617 if (time_after(jiffies, timeout)) {
2618 dev_err(&ctrl_info->pci_dev->dev,
2619 "sending event acknowledge timed out\n");
2620 return;
2621 }
2622 }
2623
2624 next_element = queue_group->iq_element_array[RAID_PATH] +
2625 (iq_pi * PQI_OPERATIONAL_IQ_ELEMENT_LENGTH);
2626
2627 memcpy(next_element, iu, iu_length);
2628
2629 iq_pi = (iq_pi + 1) % ctrl_info->num_elements_per_iq;
2630
2631 queue_group->iq_pi_copy[RAID_PATH] = iq_pi;
2632
2633 /*
2634 * This write notifies the controller that an IU is available to be
2635 * processed.
2636 */
2637 writel(iq_pi, queue_group->iq_pi[RAID_PATH]);
2638
2639 spin_unlock_irqrestore(&queue_group->submit_lock[RAID_PATH], flags);
2640}
2641
2642static void pqi_acknowledge_event(struct pqi_ctrl_info *ctrl_info,
2643 struct pqi_event *event)
2644{
2645 struct pqi_event_acknowledge_request request;
2646
2647 memset(&request, 0, sizeof(request));
2648
2649 request.header.iu_type = PQI_REQUEST_IU_ACKNOWLEDGE_VENDOR_EVENT;
2650 put_unaligned_le16(sizeof(request) - PQI_REQUEST_HEADER_LENGTH,
2651 &request.header.iu_length);
2652 request.event_type = event->event_type;
2653 request.event_id = event->event_id;
2654 request.additional_event_id = event->additional_event_id;
2655
2656 pqi_start_event_ack(ctrl_info, &request, sizeof(request));
2657}
2658
2659static void pqi_event_worker(struct work_struct *work)
2660{
2661 unsigned int i;
2662 struct pqi_ctrl_info *ctrl_info;
2663 struct pqi_event *pending_event;
2664 bool got_non_heartbeat_event = false;
2665
2666 ctrl_info = container_of(work, struct pqi_ctrl_info, event_work);
2667
2668 pending_event = ctrl_info->pending_events;
2669 for (i = 0; i < PQI_NUM_SUPPORTED_EVENTS; i++) {
2670 if (pending_event->pending) {
2671 pending_event->pending = false;
2672 pqi_acknowledge_event(ctrl_info, pending_event);
2673 if (i != PQI_EVENT_HEARTBEAT)
2674 got_non_heartbeat_event = true;
2675 }
2676 pending_event++;
2677 }
2678
2679 if (got_non_heartbeat_event)
2680 pqi_schedule_rescan_worker(ctrl_info);
2681}
2682
2683static void pqi_take_ctrl_offline(struct pqi_ctrl_info *ctrl_info)
2684{
2685 unsigned int i;
2686 unsigned int path;
2687 struct pqi_queue_group *queue_group;
2688 unsigned long flags;
2689 struct pqi_io_request *io_request;
2690 struct pqi_io_request *next;
2691 struct scsi_cmnd *scmd;
2692
2693 ctrl_info->controller_online = false;
2694 dev_err(&ctrl_info->pci_dev->dev, "controller offline\n");
2695
2696 for (i = 0; i < ctrl_info->num_queue_groups; i++) {
2697 queue_group = &ctrl_info->queue_groups[i];
2698
2699 for (path = 0; path < 2; path++) {
2700 spin_lock_irqsave(
2701 &queue_group->submit_lock[path], flags);
2702
2703 list_for_each_entry_safe(io_request, next,
2704 &queue_group->request_list[path],
2705 request_list_entry) {
2706
2707 scmd = io_request->scmd;
2708 if (scmd) {
2709 set_host_byte(scmd, DID_NO_CONNECT);
2710 pqi_scsi_done(scmd);
2711 }
2712
2713 list_del(&io_request->request_list_entry);
2714 }
2715
2716 spin_unlock_irqrestore(
2717 &queue_group->submit_lock[path], flags);
2718 }
2719 }
2720}
2721
2722#define PQI_HEARTBEAT_TIMER_INTERVAL (5 * HZ)
2723#define PQI_MAX_HEARTBEAT_REQUESTS 5
2724
2725static void pqi_heartbeat_timer_handler(unsigned long data)
2726{
2727 int num_interrupts;
2728 struct pqi_ctrl_info *ctrl_info = (struct pqi_ctrl_info *)data;
2729
2730 num_interrupts = atomic_read(&ctrl_info->num_interrupts);
2731
2732 if (num_interrupts == ctrl_info->previous_num_interrupts) {
2733 ctrl_info->num_heartbeats_requested++;
2734 if (ctrl_info->num_heartbeats_requested >
2735 PQI_MAX_HEARTBEAT_REQUESTS) {
2736 pqi_take_ctrl_offline(ctrl_info);
2737 return;
2738 }
2739 ctrl_info->pending_events[PQI_EVENT_HEARTBEAT].pending = true;
2740 schedule_work(&ctrl_info->event_work);
2741 } else {
2742 ctrl_info->num_heartbeats_requested = 0;
2743 }
2744
2745 ctrl_info->previous_num_interrupts = num_interrupts;
2746 mod_timer(&ctrl_info->heartbeat_timer,
2747 jiffies + PQI_HEARTBEAT_TIMER_INTERVAL);
2748}
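/*
 * Summary of the handler above: every PQI_HEARTBEAT_TIMER_INTERVAL the
 * interrupt count is compared against the previous sample.  If no
 * interrupts arrived, a heartbeat event is marked pending and the event
 * worker is scheduled; once the number of consecutive quiet intervals
 * exceeds PQI_MAX_HEARTBEAT_REQUESTS the controller is taken offline.
 * Any interrupt activity resets the counter.
 */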
2749
2750static void pqi_start_heartbeat_timer(struct pqi_ctrl_info *ctrl_info)
2751{
2752 ctrl_info->previous_num_interrupts =
2753 atomic_read(&ctrl_info->num_interrupts);
2754
2755 init_timer(&ctrl_info->heartbeat_timer);
2756 ctrl_info->heartbeat_timer.expires =
2757 jiffies + PQI_HEARTBEAT_TIMER_INTERVAL;
2758 ctrl_info->heartbeat_timer.data = (unsigned long)ctrl_info;
2759 ctrl_info->heartbeat_timer.function = pqi_heartbeat_timer_handler;
2760 add_timer(&ctrl_info->heartbeat_timer);
2761 ctrl_info->heartbeat_timer_started = true;
2762}
2763
2764static inline void pqi_stop_heartbeat_timer(struct pqi_ctrl_info *ctrl_info)
2765{
2766 if (ctrl_info->heartbeat_timer_started)
2767 del_timer_sync(&ctrl_info->heartbeat_timer);
2768}
2769
2770static int pqi_event_type_to_event_index(unsigned int event_type)
2771{
2772 int index;
2773
2774 switch (event_type) {
2775 case PQI_EVENT_TYPE_HEARTBEAT:
2776 index = PQI_EVENT_HEARTBEAT;
2777 break;
2778 case PQI_EVENT_TYPE_HOTPLUG:
2779 index = PQI_EVENT_HOTPLUG;
2780 break;
2781 case PQI_EVENT_TYPE_HARDWARE:
2782 index = PQI_EVENT_HARDWARE;
2783 break;
2784 case PQI_EVENT_TYPE_PHYSICAL_DEVICE:
2785 index = PQI_EVENT_PHYSICAL_DEVICE;
2786 break;
2787 case PQI_EVENT_TYPE_LOGICAL_DEVICE:
2788 index = PQI_EVENT_LOGICAL_DEVICE;
2789 break;
2790 case PQI_EVENT_TYPE_AIO_STATE_CHANGE:
2791 index = PQI_EVENT_AIO_STATE_CHANGE;
2792 break;
2793 case PQI_EVENT_TYPE_AIO_CONFIG_CHANGE:
2794 index = PQI_EVENT_AIO_CONFIG_CHANGE;
2795 break;
2796 default:
2797 index = -1;
2798 break;
2799 }
2800
2801 return index;
2802}
2803
2804static unsigned int pqi_process_event_intr(struct pqi_ctrl_info *ctrl_info)
2805{
2806 unsigned int num_events;
2807 pqi_index_t oq_pi;
2808 pqi_index_t oq_ci;
2809 struct pqi_event_queue *event_queue;
2810 struct pqi_event_response *response;
2811 struct pqi_event *pending_event;
2812 bool need_delayed_work;
2813 int event_index;
2814
2815 event_queue = &ctrl_info->event_queue;
2816 num_events = 0;
2817 need_delayed_work = false;
2818 oq_ci = event_queue->oq_ci_copy;
2819
2820 while (1) {
2821 oq_pi = *event_queue->oq_pi;
2822 if (oq_pi == oq_ci)
2823 break;
2824
2825 num_events++;
2826 response = event_queue->oq_element_array +
2827 (oq_ci * PQI_EVENT_OQ_ELEMENT_LENGTH);
2828
2829 event_index =
2830 pqi_event_type_to_event_index(response->event_type);
2831
2832 if (event_index >= 0) {
2833 if (response->request_acknowlege) {
2834 pending_event =
2835 &ctrl_info->pending_events[event_index];
2836 pending_event->event_type =
2837 response->event_type;
2838 pending_event->event_id = response->event_id;
2839 pending_event->additional_event_id =
2840 response->additional_event_id;
2841 if (event_index != PQI_EVENT_HEARTBEAT) {
2842 pending_event->pending = true;
2843 need_delayed_work = true;
2844 }
2845 }
2846 }
2847
2848 oq_ci = (oq_ci + 1) % PQI_NUM_EVENT_QUEUE_ELEMENTS;
2849 }
2850
2851 if (num_events) {
2852 event_queue->oq_ci_copy = oq_ci;
2853 writel(oq_ci, event_queue->oq_ci);
2854
2855 if (need_delayed_work)
2856 schedule_work(&ctrl_info->event_work);
2857 }
2858
2859 return num_events;
2860}
2861
2862static irqreturn_t pqi_irq_handler(int irq, void *data)
2863{
2864 struct pqi_ctrl_info *ctrl_info;
2865 struct pqi_queue_group *queue_group;
2866 unsigned int num_responses_handled;
2867
2868 queue_group = data;
2869 ctrl_info = queue_group->ctrl_info;
2870
2871 if (!ctrl_info || !queue_group->oq_ci)
2872 return IRQ_NONE;
2873
2874 num_responses_handled = pqi_process_io_intr(ctrl_info, queue_group);
2875
2876 if (irq == ctrl_info->event_irq)
2877 num_responses_handled += pqi_process_event_intr(ctrl_info);
2878
2879 if (num_responses_handled)
2880 atomic_inc(&ctrl_info->num_interrupts);
2881
2882 pqi_start_io(ctrl_info, queue_group, RAID_PATH, NULL);
2883 pqi_start_io(ctrl_info, queue_group, AIO_PATH, NULL);
2884
2885 return IRQ_HANDLED;
2886}
2887
2888static int pqi_request_irqs(struct pqi_ctrl_info *ctrl_info)
2889{
2890 int i;
2891 int rc;
2892
2893 ctrl_info->event_irq = ctrl_info->msix_vectors[0];
2894
2895 for (i = 0; i < ctrl_info->num_msix_vectors_enabled; i++) {
2896 rc = request_irq(ctrl_info->msix_vectors[i],
2897 pqi_irq_handler, 0,
2898 DRIVER_NAME_SHORT, ctrl_info->intr_data[i]);
2899 if (rc) {
2900 dev_err(&ctrl_info->pci_dev->dev,
2901 "irq %u init failed with error %d\n",
2902 ctrl_info->msix_vectors[i], rc);
2903 return rc;
2904 }
2905 ctrl_info->num_msix_vectors_initialized++;
2906 }
2907
2908 return 0;
2909}
2910
2911static void pqi_free_irqs(struct pqi_ctrl_info *ctrl_info)
2912{
2913 int i;
2914
2915 for (i = 0; i < ctrl_info->num_msix_vectors_initialized; i++)
2916 free_irq(ctrl_info->msix_vectors[i],
2917 ctrl_info->intr_data[i]);
2918}
2919
2920static int pqi_enable_msix_interrupts(struct pqi_ctrl_info *ctrl_info)
2921{
2922 unsigned int i;
2923 int max_vectors;
2924 int num_vectors_enabled;
2925 struct msix_entry msix_entries[PQI_MAX_MSIX_VECTORS];
2926
2927 max_vectors = ctrl_info->num_queue_groups;
2928
2929 for (i = 0; i < max_vectors; i++)
2930 msix_entries[i].entry = i;
2931
2932 num_vectors_enabled = pci_enable_msix_range(ctrl_info->pci_dev,
2933 msix_entries, PQI_MIN_MSIX_VECTORS, max_vectors);
2934
2935 if (num_vectors_enabled < 0) {
2936 dev_err(&ctrl_info->pci_dev->dev,
2937 "MSI-X init failed with error %d\n",
2938 num_vectors_enabled);
2939 return num_vectors_enabled;
2940 }
2941
2942 ctrl_info->num_msix_vectors_enabled = num_vectors_enabled;
2943 for (i = 0; i < num_vectors_enabled; i++) {
2944 ctrl_info->msix_vectors[i] = msix_entries[i].vector;
2945 ctrl_info->intr_data[i] = &ctrl_info->queue_groups[i];
2946 }
2947
2948 return 0;
2949}
2950
2951static void pqi_irq_set_affinity_hint(struct pqi_ctrl_info *ctrl_info)
2952{
2953 int i;
2954 int rc;
2955 int cpu;
2956
2957 cpu = cpumask_first(cpu_online_mask);
2958 for (i = 0; i < ctrl_info->num_msix_vectors_initialized; i++) {
2959 rc = irq_set_affinity_hint(ctrl_info->msix_vectors[i],
2960 get_cpu_mask(cpu));
2961 if (rc)
2962 dev_err(&ctrl_info->pci_dev->dev,
2963 "error %d setting affinity hint for irq vector %u\n",
2964 rc, ctrl_info->msix_vectors[i]);
2965 cpu = cpumask_next(cpu, cpu_online_mask);
2966 }
2967}
2968
2969static void pqi_irq_unset_affinity_hint(struct pqi_ctrl_info *ctrl_info)
2970{
2971 int i;
2972
2973 for (i = 0; i < ctrl_info->num_msix_vectors_initialized; i++)
2974 irq_set_affinity_hint(ctrl_info->msix_vectors[i], NULL);
2975}
2976
2977static int pqi_alloc_operational_queues(struct pqi_ctrl_info *ctrl_info)
2978{
2979 unsigned int i;
2980 size_t alloc_length;
2981 size_t element_array_length_per_iq;
2982 size_t element_array_length_per_oq;
2983 void *element_array;
2984 void *next_queue_index;
2985 void *aligned_pointer;
2986 unsigned int num_inbound_queues;
2987 unsigned int num_outbound_queues;
2988 unsigned int num_queue_indexes;
2989 struct pqi_queue_group *queue_group;
2990
2991 element_array_length_per_iq =
2992 PQI_OPERATIONAL_IQ_ELEMENT_LENGTH *
2993 ctrl_info->num_elements_per_iq;
2994 element_array_length_per_oq =
2995 PQI_OPERATIONAL_OQ_ELEMENT_LENGTH *
2996 ctrl_info->num_elements_per_oq;
2997 num_inbound_queues = ctrl_info->num_queue_groups * 2;
2998 num_outbound_queues = ctrl_info->num_queue_groups;
2999 num_queue_indexes = (ctrl_info->num_queue_groups * 3) + 1;
3000
3001 aligned_pointer = NULL;
3002
3003 for (i = 0; i < num_inbound_queues; i++) {
3004 aligned_pointer = PTR_ALIGN(aligned_pointer,
3005 PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT);
3006 aligned_pointer += element_array_length_per_iq;
3007 }
3008
3009 for (i = 0; i < num_outbound_queues; i++) {
3010 aligned_pointer = PTR_ALIGN(aligned_pointer,
3011 PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT);
3012 aligned_pointer += element_array_length_per_oq;
3013 }
3014
3015 aligned_pointer = PTR_ALIGN(aligned_pointer,
3016 PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT);
3017 aligned_pointer += PQI_NUM_EVENT_QUEUE_ELEMENTS *
3018 PQI_EVENT_OQ_ELEMENT_LENGTH;
3019
3020 for (i = 0; i < num_queue_indexes; i++) {
3021 aligned_pointer = PTR_ALIGN(aligned_pointer,
3022 PQI_OPERATIONAL_INDEX_ALIGNMENT);
3023 aligned_pointer += sizeof(pqi_index_t);
3024 }
3025
3026 alloc_length = (size_t)aligned_pointer +
3027 PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT;
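	/*
	 * The sizing pass above walks a NULL-based pointer through the same
	 * sequence of alignments and element-array lengths that the layout
	 * code below applies to the real allocation, so the final pointer
	 * value is exactly the number of bytes required.  A minimal sketch of
	 * the pattern (the sizes here are hypothetical, not the driver's real
	 * constants):
	 *
	 *	void *p = NULL;
	 *	p = PTR_ALIGN(p, 64);  p += 4096;	// queue A elements
	 *	p = PTR_ALIGN(p, 64);  p += 2048;	// queue B elements
	 *	alloc_length = (size_t)p + 64;		// slack to align the base
	 */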
3028
3029 ctrl_info->queue_memory_base =
3030 dma_zalloc_coherent(&ctrl_info->pci_dev->dev,
3031 alloc_length,
3032 &ctrl_info->queue_memory_base_dma_handle, GFP_KERNEL);
3033
3034 if (!ctrl_info->queue_memory_base) {
3035 dev_err(&ctrl_info->pci_dev->dev,
3036 "failed to allocate memory for PQI admin queues\n");
3037 return -ENOMEM;
3038 }
3039
3040 ctrl_info->queue_memory_length = alloc_length;
3041
3042 element_array = PTR_ALIGN(ctrl_info->queue_memory_base,
3043 PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT);
3044
3045 for (i = 0; i < ctrl_info->num_queue_groups; i++) {
3046 queue_group = &ctrl_info->queue_groups[i];
3047 queue_group->iq_element_array[RAID_PATH] = element_array;
3048 queue_group->iq_element_array_bus_addr[RAID_PATH] =
3049 ctrl_info->queue_memory_base_dma_handle +
3050 (element_array - ctrl_info->queue_memory_base);
3051 element_array += element_array_length_per_iq;
3052 element_array = PTR_ALIGN(element_array,
3053 PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT);
3054 queue_group->iq_element_array[AIO_PATH] = element_array;
3055 queue_group->iq_element_array_bus_addr[AIO_PATH] =
3056 ctrl_info->queue_memory_base_dma_handle +
3057 (element_array - ctrl_info->queue_memory_base);
3058 element_array += element_array_length_per_iq;
3059 element_array = PTR_ALIGN(element_array,
3060 PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT);
3061 }
3062
3063 for (i = 0; i < ctrl_info->num_queue_groups; i++) {
3064 queue_group = &ctrl_info->queue_groups[i];
3065 queue_group->oq_element_array = element_array;
3066 queue_group->oq_element_array_bus_addr =
3067 ctrl_info->queue_memory_base_dma_handle +
3068 (element_array - ctrl_info->queue_memory_base);
3069 element_array += element_array_length_per_oq;
3070 element_array = PTR_ALIGN(element_array,
3071 PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT);
3072 }
3073
3074 ctrl_info->event_queue.oq_element_array = element_array;
3075 ctrl_info->event_queue.oq_element_array_bus_addr =
3076 ctrl_info->queue_memory_base_dma_handle +
3077 (element_array - ctrl_info->queue_memory_base);
3078 element_array += PQI_NUM_EVENT_QUEUE_ELEMENTS *
3079 PQI_EVENT_OQ_ELEMENT_LENGTH;
3080
3081 next_queue_index = PTR_ALIGN(element_array,
3082 PQI_OPERATIONAL_INDEX_ALIGNMENT);
3083
3084 for (i = 0; i < ctrl_info->num_queue_groups; i++) {
3085 queue_group = &ctrl_info->queue_groups[i];
3086 queue_group->iq_ci[RAID_PATH] = next_queue_index;
3087 queue_group->iq_ci_bus_addr[RAID_PATH] =
3088 ctrl_info->queue_memory_base_dma_handle +
3089 (next_queue_index - ctrl_info->queue_memory_base);
3090 next_queue_index += sizeof(pqi_index_t);
3091 next_queue_index = PTR_ALIGN(next_queue_index,
3092 PQI_OPERATIONAL_INDEX_ALIGNMENT);
3093 queue_group->iq_ci[AIO_PATH] = next_queue_index;
3094 queue_group->iq_ci_bus_addr[AIO_PATH] =
3095 ctrl_info->queue_memory_base_dma_handle +
3096 (next_queue_index - ctrl_info->queue_memory_base);
3097 next_queue_index += sizeof(pqi_index_t);
3098 next_queue_index = PTR_ALIGN(next_queue_index,
3099 PQI_OPERATIONAL_INDEX_ALIGNMENT);
3100 queue_group->oq_pi = next_queue_index;
3101 queue_group->oq_pi_bus_addr =
3102 ctrl_info->queue_memory_base_dma_handle +
3103 (next_queue_index - ctrl_info->queue_memory_base);
3104 next_queue_index += sizeof(pqi_index_t);
3105 next_queue_index = PTR_ALIGN(next_queue_index,
3106 PQI_OPERATIONAL_INDEX_ALIGNMENT);
3107 }
3108
3109 ctrl_info->event_queue.oq_pi = next_queue_index;
3110 ctrl_info->event_queue.oq_pi_bus_addr =
3111 ctrl_info->queue_memory_base_dma_handle +
3112 (next_queue_index - ctrl_info->queue_memory_base);
3113
3114 return 0;
3115}
3116
3117static void pqi_init_operational_queues(struct pqi_ctrl_info *ctrl_info)
3118{
3119 unsigned int i;
3120 u16 next_iq_id = PQI_MIN_OPERATIONAL_QUEUE_ID;
3121 u16 next_oq_id = PQI_MIN_OPERATIONAL_QUEUE_ID;
3122
3123 /*
3124 * Initialize the backpointers to the controller structure in
3125 * each operational queue group structure.
3126 */
3127 for (i = 0; i < ctrl_info->num_queue_groups; i++)
3128 ctrl_info->queue_groups[i].ctrl_info = ctrl_info;
3129
3130 /*
3131 * Assign IDs to all operational queues. Note that the IDs
3132 * assigned to operational IQs are independent of the IDs
3133 * assigned to operational OQs.
3134 */
3135 ctrl_info->event_queue.oq_id = next_oq_id++;
3136 for (i = 0; i < ctrl_info->num_queue_groups; i++) {
3137 ctrl_info->queue_groups[i].iq_id[RAID_PATH] = next_iq_id++;
3138 ctrl_info->queue_groups[i].iq_id[AIO_PATH] = next_iq_id++;
3139 ctrl_info->queue_groups[i].oq_id = next_oq_id++;
3140 }
3141
3142 /*
3143 * Assign MSI-X table entry indexes to all queues. Note that the
3144 * interrupt for the event queue is shared with the first queue group.
3145 */
3146 ctrl_info->event_queue.int_msg_num = 0;
3147 for (i = 0; i < ctrl_info->num_queue_groups; i++)
3148 ctrl_info->queue_groups[i].int_msg_num = i;
3149
3150 for (i = 0; i < ctrl_info->num_queue_groups; i++) {
3151 spin_lock_init(&ctrl_info->queue_groups[i].submit_lock[0]);
3152 spin_lock_init(&ctrl_info->queue_groups[i].submit_lock[1]);
3153 INIT_LIST_HEAD(&ctrl_info->queue_groups[i].request_list[0]);
3154 INIT_LIST_HEAD(&ctrl_info->queue_groups[i].request_list[1]);
3155 }
3156}
3157
3158static int pqi_alloc_admin_queues(struct pqi_ctrl_info *ctrl_info)
3159{
3160 size_t alloc_length;
3161 struct pqi_admin_queues_aligned *admin_queues_aligned;
3162 struct pqi_admin_queues *admin_queues;
3163
3164 alloc_length = sizeof(struct pqi_admin_queues_aligned) +
3165 PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT;
3166
3167 ctrl_info->admin_queue_memory_base =
3168 dma_zalloc_coherent(&ctrl_info->pci_dev->dev,
3169 alloc_length,
3170 &ctrl_info->admin_queue_memory_base_dma_handle,
3171 GFP_KERNEL);
3172
3173 if (!ctrl_info->admin_queue_memory_base)
3174 return -ENOMEM;
3175
3176 ctrl_info->admin_queue_memory_length = alloc_length;
3177
3178 admin_queues = &ctrl_info->admin_queues;
3179 admin_queues_aligned = PTR_ALIGN(ctrl_info->admin_queue_memory_base,
3180 PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT);
3181 admin_queues->iq_element_array =
3182 &admin_queues_aligned->iq_element_array;
3183 admin_queues->oq_element_array =
3184 &admin_queues_aligned->oq_element_array;
3185 admin_queues->iq_ci = &admin_queues_aligned->iq_ci;
3186 admin_queues->oq_pi = &admin_queues_aligned->oq_pi;
3187
3188 admin_queues->iq_element_array_bus_addr =
3189 ctrl_info->admin_queue_memory_base_dma_handle +
3190 (admin_queues->iq_element_array -
3191 ctrl_info->admin_queue_memory_base);
3192 admin_queues->oq_element_array_bus_addr =
3193 ctrl_info->admin_queue_memory_base_dma_handle +
3194 (admin_queues->oq_element_array -
3195 ctrl_info->admin_queue_memory_base);
3196 admin_queues->iq_ci_bus_addr =
3197 ctrl_info->admin_queue_memory_base_dma_handle +
3198 ((void *)admin_queues->iq_ci -
3199 ctrl_info->admin_queue_memory_base);
3200 admin_queues->oq_pi_bus_addr =
3201 ctrl_info->admin_queue_memory_base_dma_handle +
3202 ((void *)admin_queues->oq_pi -
3203 ctrl_info->admin_queue_memory_base);
3204
3205 return 0;
3206}
3207
3208#define PQI_ADMIN_QUEUE_CREATE_TIMEOUT_JIFFIES HZ
3209#define PQI_ADMIN_QUEUE_CREATE_POLL_INTERVAL_MSECS 1
3210
3211static int pqi_create_admin_queues(struct pqi_ctrl_info *ctrl_info)
3212{
3213 struct pqi_device_registers __iomem *pqi_registers;
3214 struct pqi_admin_queues *admin_queues;
3215 unsigned long timeout;
3216 u8 status;
3217 u32 reg;
3218
3219 pqi_registers = ctrl_info->pqi_registers;
3220 admin_queues = &ctrl_info->admin_queues;
3221
3222 writeq((u64)admin_queues->iq_element_array_bus_addr,
3223 &pqi_registers->admin_iq_element_array_addr);
3224 writeq((u64)admin_queues->oq_element_array_bus_addr,
3225 &pqi_registers->admin_oq_element_array_addr);
3226 writeq((u64)admin_queues->iq_ci_bus_addr,
3227 &pqi_registers->admin_iq_ci_addr);
3228 writeq((u64)admin_queues->oq_pi_bus_addr,
3229 &pqi_registers->admin_oq_pi_addr);
3230
3231 reg = PQI_ADMIN_IQ_NUM_ELEMENTS |
3232 (PQI_ADMIN_OQ_NUM_ELEMENTS) << 8 |
3233 (admin_queues->int_msg_num << 16);
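	/*
	 * Layout of 'reg' as composed above: bits 7:0 hold
	 * PQI_ADMIN_IQ_NUM_ELEMENTS, bits 15:8 hold
	 * PQI_ADMIN_OQ_NUM_ELEMENTS, and the admin queues' interrupt message
	 * number starts at bit 16.
	 */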
3234 writel(reg, &pqi_registers->admin_iq_num_elements);
3235 writel(PQI_CREATE_ADMIN_QUEUE_PAIR,
3236 &pqi_registers->function_and_status_code);
3237
3238 timeout = PQI_ADMIN_QUEUE_CREATE_TIMEOUT_JIFFIES + jiffies;
3239 while (1) {
3240 status = readb(&pqi_registers->function_and_status_code);
3241 if (status == PQI_STATUS_IDLE)
3242 break;
3243 if (time_after(jiffies, timeout))
3244 return -ETIMEDOUT;
3245 msleep(PQI_ADMIN_QUEUE_CREATE_POLL_INTERVAL_MSECS);
3246 }
3247
3248 /*
3249 * The offset registers are not initialized to the correct
3250 * offsets until *after* the create admin queue pair command
3251 * completes successfully.
3252 */
3253 admin_queues->iq_pi = ctrl_info->iomem_base +
3254 PQI_DEVICE_REGISTERS_OFFSET +
3255 readq(&pqi_registers->admin_iq_pi_offset);
3256 admin_queues->oq_ci = ctrl_info->iomem_base +
3257 PQI_DEVICE_REGISTERS_OFFSET +
3258 readq(&pqi_registers->admin_oq_ci_offset);
3259
3260 return 0;
3261}
3262
3263static void pqi_submit_admin_request(struct pqi_ctrl_info *ctrl_info,
3264 struct pqi_general_admin_request *request)
3265{
3266 struct pqi_admin_queues *admin_queues;
3267 void *next_element;
3268 pqi_index_t iq_pi;
3269
3270 admin_queues = &ctrl_info->admin_queues;
3271 iq_pi = admin_queues->iq_pi_copy;
3272
3273 next_element = admin_queues->iq_element_array +
3274 (iq_pi * PQI_ADMIN_IQ_ELEMENT_LENGTH);
3275
3276 memcpy(next_element, request, sizeof(*request));
3277
3278 iq_pi = (iq_pi + 1) % PQI_ADMIN_IQ_NUM_ELEMENTS;
3279 admin_queues->iq_pi_copy = iq_pi;
3280
3281 /*
3282 * This write notifies the controller that an IU is available to be
3283 * processed.
3284 */
3285 writel(iq_pi, admin_queues->iq_pi);
3286}
3287
3288static int pqi_poll_for_admin_response(struct pqi_ctrl_info *ctrl_info,
3289 struct pqi_general_admin_response *response)
3290{
3291 struct pqi_admin_queues *admin_queues;
3292 pqi_index_t oq_pi;
3293 pqi_index_t oq_ci;
3294 unsigned long timeout;
3295
3296 admin_queues = &ctrl_info->admin_queues;
3297 oq_ci = admin_queues->oq_ci_copy;
3298
3299 timeout = (3 * HZ) + jiffies;
3300
3301 while (1) {
3302 oq_pi = *admin_queues->oq_pi;
3303 if (oq_pi != oq_ci)
3304 break;
3305 if (time_after(jiffies, timeout)) {
3306 dev_err(&ctrl_info->pci_dev->dev,
3307 "timed out waiting for admin response\n");
3308 return -ETIMEDOUT;
3309 }
3310 usleep_range(1000, 2000);
3311 }
3312
3313 memcpy(response, admin_queues->oq_element_array +
3314 (oq_ci * PQI_ADMIN_OQ_ELEMENT_LENGTH), sizeof(*response));
3315
3316 oq_ci = (oq_ci + 1) % PQI_ADMIN_OQ_NUM_ELEMENTS;
3317 admin_queues->oq_ci_copy = oq_ci;
3318 writel(oq_ci, admin_queues->oq_ci);
3319
3320 return 0;
3321}
3322
3323static void pqi_start_io(struct pqi_ctrl_info *ctrl_info,
3324 struct pqi_queue_group *queue_group, enum pqi_io_path path,
3325 struct pqi_io_request *io_request)
3326{
3327 struct pqi_io_request *next;
3328 void *next_element;
3329 pqi_index_t iq_pi;
3330 pqi_index_t iq_ci;
3331 size_t iu_length;
3332 unsigned long flags;
3333 unsigned int num_elements_needed;
3334 unsigned int num_elements_to_end_of_queue;
3335 size_t copy_count;
3336 struct pqi_iu_header *request;
3337
3338 spin_lock_irqsave(&queue_group->submit_lock[path], flags);
3339
3340 if (io_request)
3341 list_add_tail(&io_request->request_list_entry,
3342 &queue_group->request_list[path]);
3343
3344 iq_pi = queue_group->iq_pi_copy[path];
3345
3346 list_for_each_entry_safe(io_request, next,
3347 &queue_group->request_list[path], request_list_entry) {
3348
3349 request = io_request->iu;
3350
3351 iu_length = get_unaligned_le16(&request->iu_length) +
3352 PQI_REQUEST_HEADER_LENGTH;
3353 num_elements_needed =
3354 DIV_ROUND_UP(iu_length,
3355 PQI_OPERATIONAL_IQ_ELEMENT_LENGTH);
3356
3357 iq_ci = *queue_group->iq_ci[path];
3358
3359 if (num_elements_needed > pqi_num_elements_free(iq_pi, iq_ci,
3360 ctrl_info->num_elements_per_iq))
3361 break;
3362
3363 put_unaligned_le16(queue_group->oq_id,
3364 &request->response_queue_id);
3365
3366 next_element = queue_group->iq_element_array[path] +
3367 (iq_pi * PQI_OPERATIONAL_IQ_ELEMENT_LENGTH);
3368
3369 num_elements_to_end_of_queue =
3370 ctrl_info->num_elements_per_iq - iq_pi;
3371
3372 if (num_elements_needed <= num_elements_to_end_of_queue) {
3373 memcpy(next_element, request, iu_length);
3374 } else {
3375 copy_count = num_elements_to_end_of_queue *
3376 PQI_OPERATIONAL_IQ_ELEMENT_LENGTH;
3377 memcpy(next_element, request, copy_count);
3378 memcpy(queue_group->iq_element_array[path],
3379 (u8 *)request + copy_count,
3380 iu_length - copy_count);
3381 }
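		/*
		 * Illustrative wrap-around case (hypothetical numbers): with
		 * num_elements_per_iq = 32, iq_pi = 30 and
		 * num_elements_needed = 4, only 2 elements remain before the
		 * end of the array, so the first 2 elements' worth of the IU
		 * is copied at next_element and the remainder at the start of
		 * the element array; iq_pi then advances to (30 + 4) % 32 = 2.
		 */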
3382
3383 iq_pi = (iq_pi + num_elements_needed) %
3384 ctrl_info->num_elements_per_iq;
3385
3386 list_del(&io_request->request_list_entry);
3387 }
3388
3389 if (iq_pi != queue_group->iq_pi_copy[path]) {
3390 queue_group->iq_pi_copy[path] = iq_pi;
3391 /*
3392 * This write notifies the controller that one or more IUs are
3393 * available to be processed.
3394 */
3395 writel(iq_pi, queue_group->iq_pi[path]);
3396 }
3397
3398 spin_unlock_irqrestore(&queue_group->submit_lock[path], flags);
3399}
3400
3401static void pqi_raid_synchronous_complete(struct pqi_io_request *io_request,
3402 void *context)
3403{
3404 struct completion *waiting = context;
3405
3406 complete(waiting);
3407}
3408
3409static int pqi_submit_raid_request_synchronous_with_io_request(
3410 struct pqi_ctrl_info *ctrl_info, struct pqi_io_request *io_request,
3411 unsigned long timeout_msecs)
3412{
3413 int rc = 0;
3414 DECLARE_COMPLETION_ONSTACK(wait);
3415
3416 io_request->io_complete_callback = pqi_raid_synchronous_complete;
3417 io_request->context = &wait;
3418
3419 pqi_start_io(ctrl_info,
3420 &ctrl_info->queue_groups[PQI_DEFAULT_QUEUE_GROUP], RAID_PATH,
3421 io_request);
3422
3423 if (timeout_msecs == NO_TIMEOUT) {
3424 wait_for_completion_io(&wait);
3425 } else {
3426 if (!wait_for_completion_io_timeout(&wait,
3427 msecs_to_jiffies(timeout_msecs))) {
3428 dev_warn(&ctrl_info->pci_dev->dev,
3429 "command timed out\n");
3430 rc = -ETIMEDOUT;
3431 }
3432 }
3433
3434 return rc;
3435}
3436
3437static int pqi_submit_raid_request_synchronous(struct pqi_ctrl_info *ctrl_info,
3438 struct pqi_iu_header *request, unsigned int flags,
3439 struct pqi_raid_error_info *error_info, unsigned long timeout_msecs)
3440{
3441 int rc;
3442 struct pqi_io_request *io_request;
3443 unsigned long start_jiffies;
3444 unsigned long msecs_blocked;
3445 size_t iu_length;
3446
3447 /*
3448	 * Note that PQI_SYNC_FLAGS_INTERRUPTABLE and a timeout value are
3449	 * mutually exclusive.
3450 */
3451
3452 if (flags & PQI_SYNC_FLAGS_INTERRUPTABLE) {
3453 if (down_interruptible(&ctrl_info->sync_request_sem))
3454 return -ERESTARTSYS;
3455 } else {
3456 if (timeout_msecs == NO_TIMEOUT) {
3457 down(&ctrl_info->sync_request_sem);
3458 } else {
3459 start_jiffies = jiffies;
3460 if (down_timeout(&ctrl_info->sync_request_sem,
3461 msecs_to_jiffies(timeout_msecs)))
3462 return -ETIMEDOUT;
3463 msecs_blocked =
3464 jiffies_to_msecs(jiffies - start_jiffies);
3465 if (msecs_blocked >= timeout_msecs)
3466 return -ETIMEDOUT;
3467 timeout_msecs -= msecs_blocked;
3468 }
3469 }
3470
3471 io_request = pqi_alloc_io_request(ctrl_info);
3472
3473 put_unaligned_le16(io_request->index,
3474 &(((struct pqi_raid_path_request *)request)->request_id));
3475
3476 if (request->iu_type == PQI_REQUEST_IU_RAID_PATH_IO)
3477 ((struct pqi_raid_path_request *)request)->error_index =
3478 ((struct pqi_raid_path_request *)request)->request_id;
3479
3480 iu_length = get_unaligned_le16(&request->iu_length) +
3481 PQI_REQUEST_HEADER_LENGTH;
3482 memcpy(io_request->iu, request, iu_length);
3483
3484 rc = pqi_submit_raid_request_synchronous_with_io_request(ctrl_info,
3485 io_request, timeout_msecs);
3486
3487 if (error_info) {
3488 if (io_request->error_info)
3489 memcpy(error_info, io_request->error_info,
3490 sizeof(*error_info));
3491 else
3492 memset(error_info, 0, sizeof(*error_info));
3493 } else if (rc == 0 && io_request->error_info) {
3494 u8 scsi_status;
3495 struct pqi_raid_error_info *raid_error_info;
3496
3497 raid_error_info = io_request->error_info;
3498 scsi_status = raid_error_info->status;
3499
3500 if (scsi_status == SAM_STAT_CHECK_CONDITION &&
3501 raid_error_info->data_out_result ==
3502 PQI_DATA_IN_OUT_UNDERFLOW)
3503 scsi_status = SAM_STAT_GOOD;
3504
3505 if (scsi_status != SAM_STAT_GOOD)
3506 rc = -EIO;
3507 }
3508
3509 pqi_free_io_request(io_request);
3510
3511 up(&ctrl_info->sync_request_sem);
3512
3513 return rc;
3514}
3515
3516static int pqi_validate_admin_response(
3517 struct pqi_general_admin_response *response, u8 expected_function_code)
3518{
3519 if (response->header.iu_type != PQI_RESPONSE_IU_GENERAL_ADMIN)
3520 return -EINVAL;
3521
3522 if (get_unaligned_le16(&response->header.iu_length) !=
3523 PQI_GENERAL_ADMIN_IU_LENGTH)
3524 return -EINVAL;
3525
3526 if (response->function_code != expected_function_code)
3527 return -EINVAL;
3528
3529 if (response->status != PQI_GENERAL_ADMIN_STATUS_SUCCESS)
3530 return -EINVAL;
3531
3532 return 0;
3533}
3534
3535static int pqi_submit_admin_request_synchronous(
3536 struct pqi_ctrl_info *ctrl_info,
3537 struct pqi_general_admin_request *request,
3538 struct pqi_general_admin_response *response)
3539{
3540 int rc;
3541
3542 pqi_submit_admin_request(ctrl_info, request);
3543
3544 rc = pqi_poll_for_admin_response(ctrl_info, response);
3545
3546 if (rc == 0)
3547 rc = pqi_validate_admin_response(response,
3548 request->function_code);
3549
3550 return rc;
3551}
3552
3553static int pqi_report_device_capability(struct pqi_ctrl_info *ctrl_info)
3554{
3555 int rc;
3556 struct pqi_general_admin_request request;
3557 struct pqi_general_admin_response response;
3558 struct pqi_device_capability *capability;
3559 struct pqi_iu_layer_descriptor *sop_iu_layer_descriptor;
3560
3561 capability = kmalloc(sizeof(*capability), GFP_KERNEL);
3562 if (!capability)
3563 return -ENOMEM;
3564
3565 memset(&request, 0, sizeof(request));
3566
3567 request.header.iu_type = PQI_REQUEST_IU_GENERAL_ADMIN;
3568 put_unaligned_le16(PQI_GENERAL_ADMIN_IU_LENGTH,
3569 &request.header.iu_length);
3570 request.function_code =
3571 PQI_GENERAL_ADMIN_FUNCTION_REPORT_DEVICE_CAPABILITY;
3572 put_unaligned_le32(sizeof(*capability),
3573 &request.data.report_device_capability.buffer_length);
3574
3575 rc = pqi_map_single(ctrl_info->pci_dev,
3576 &request.data.report_device_capability.sg_descriptor,
3577 capability, sizeof(*capability),
3578 PCI_DMA_FROMDEVICE);
3579 if (rc)
3580 goto out;
3581
3582 rc = pqi_submit_admin_request_synchronous(ctrl_info, &request,
3583 &response);
3584
3585 pqi_pci_unmap(ctrl_info->pci_dev,
3586 &request.data.report_device_capability.sg_descriptor, 1,
3587 PCI_DMA_FROMDEVICE);
3588
3589 if (rc)
3590 goto out;
3591
3592 if (response.status != PQI_GENERAL_ADMIN_STATUS_SUCCESS) {
3593 rc = -EIO;
3594 goto out;
3595 }
3596
3597 ctrl_info->max_inbound_queues =
3598 get_unaligned_le16(&capability->max_inbound_queues);
3599 ctrl_info->max_elements_per_iq =
3600 get_unaligned_le16(&capability->max_elements_per_iq);
3601 ctrl_info->max_iq_element_length =
3602 get_unaligned_le16(&capability->max_iq_element_length)
3603 * 16;
3604 ctrl_info->max_outbound_queues =
3605 get_unaligned_le16(&capability->max_outbound_queues);
3606 ctrl_info->max_elements_per_oq =
3607 get_unaligned_le16(&capability->max_elements_per_oq);
3608 ctrl_info->max_oq_element_length =
3609 get_unaligned_le16(&capability->max_oq_element_length)
3610 * 16;
3611
3612 sop_iu_layer_descriptor =
3613 &capability->iu_layer_descriptors[PQI_PROTOCOL_SOP];
3614
3615 ctrl_info->max_inbound_iu_length_per_firmware =
3616 get_unaligned_le16(
3617 &sop_iu_layer_descriptor->max_inbound_iu_length);
3618 ctrl_info->inbound_spanning_supported =
3619 sop_iu_layer_descriptor->inbound_spanning_supported;
3620 ctrl_info->outbound_spanning_supported =
3621 sop_iu_layer_descriptor->outbound_spanning_supported;
3622
3623out:
3624 kfree(capability);
3625
3626 return rc;
3627}
3628
3629static int pqi_validate_device_capability(struct pqi_ctrl_info *ctrl_info)
3630{
3631 if (ctrl_info->max_iq_element_length <
3632 PQI_OPERATIONAL_IQ_ELEMENT_LENGTH) {
3633 dev_err(&ctrl_info->pci_dev->dev,
3634 "max. inbound queue element length of %d is less than the required length of %d\n",
3635 ctrl_info->max_iq_element_length,
3636 PQI_OPERATIONAL_IQ_ELEMENT_LENGTH);
3637 return -EINVAL;
3638 }
3639
3640 if (ctrl_info->max_oq_element_length <
3641 PQI_OPERATIONAL_OQ_ELEMENT_LENGTH) {
3642 dev_err(&ctrl_info->pci_dev->dev,
3643 "max. outbound queue element length of %d is less than the required length of %d\n",
3644 ctrl_info->max_oq_element_length,
3645 PQI_OPERATIONAL_OQ_ELEMENT_LENGTH);
3646 return -EINVAL;
3647 }
3648
3649 if (ctrl_info->max_inbound_iu_length_per_firmware <
3650 PQI_OPERATIONAL_IQ_ELEMENT_LENGTH) {
3651 dev_err(&ctrl_info->pci_dev->dev,
3652 "max. inbound IU length of %u is less than the min. required length of %d\n",
3653 ctrl_info->max_inbound_iu_length_per_firmware,
3654 PQI_OPERATIONAL_IQ_ELEMENT_LENGTH);
3655 return -EINVAL;
3656 }
3657
3658 if (!ctrl_info->inbound_spanning_supported) {
3659 dev_err(&ctrl_info->pci_dev->dev,
3660 "the controller does not support inbound spanning\n");
3661 return -EINVAL;
3662 }
3663
3664 if (ctrl_info->outbound_spanning_supported) {
3665 dev_err(&ctrl_info->pci_dev->dev,
3666 "the controller supports outbound spanning but this driver does not\n");
3667 return -EINVAL;
3668 }
3669
3670 return 0;
3671}
3672
3673static int pqi_delete_operational_queue(struct pqi_ctrl_info *ctrl_info,
3674 bool inbound_queue, u16 queue_id)
3675{
3676 struct pqi_general_admin_request request;
3677 struct pqi_general_admin_response response;
3678
3679 memset(&request, 0, sizeof(request));
3680 request.header.iu_type = PQI_REQUEST_IU_GENERAL_ADMIN;
3681 put_unaligned_le16(PQI_GENERAL_ADMIN_IU_LENGTH,
3682 &request.header.iu_length);
3683 if (inbound_queue)
3684 request.function_code =
3685 PQI_GENERAL_ADMIN_FUNCTION_DELETE_IQ;
3686 else
3687 request.function_code =
3688 PQI_GENERAL_ADMIN_FUNCTION_DELETE_OQ;
3689 put_unaligned_le16(queue_id,
3690 &request.data.delete_operational_queue.queue_id);
3691
3692 return pqi_submit_admin_request_synchronous(ctrl_info, &request,
3693 &response);
3694}
3695
3696static int pqi_create_event_queue(struct pqi_ctrl_info *ctrl_info)
3697{
3698 int rc;
3699 struct pqi_event_queue *event_queue;
3700 struct pqi_general_admin_request request;
3701 struct pqi_general_admin_response response;
3702
3703 event_queue = &ctrl_info->event_queue;
3704
3705 /*
3706 * Create OQ (Outbound Queue - device to host queue) to dedicate
3707 * to events.
3708 */
3709 memset(&request, 0, sizeof(request));
3710 request.header.iu_type = PQI_REQUEST_IU_GENERAL_ADMIN;
3711 put_unaligned_le16(PQI_GENERAL_ADMIN_IU_LENGTH,
3712 &request.header.iu_length);
3713 request.function_code = PQI_GENERAL_ADMIN_FUNCTION_CREATE_OQ;
3714 put_unaligned_le16(event_queue->oq_id,
3715 &request.data.create_operational_oq.queue_id);
3716 put_unaligned_le64((u64)event_queue->oq_element_array_bus_addr,
3717 &request.data.create_operational_oq.element_array_addr);
3718 put_unaligned_le64((u64)event_queue->oq_pi_bus_addr,
3719 &request.data.create_operational_oq.pi_addr);
3720 put_unaligned_le16(PQI_NUM_EVENT_QUEUE_ELEMENTS,
3721 &request.data.create_operational_oq.num_elements);
3722 put_unaligned_le16(PQI_EVENT_OQ_ELEMENT_LENGTH / 16,
3723 &request.data.create_operational_oq.element_length);
3724 request.data.create_operational_oq.queue_protocol = PQI_PROTOCOL_SOP;
3725 put_unaligned_le16(event_queue->int_msg_num,
3726 &request.data.create_operational_oq.int_msg_num);
3727
3728 rc = pqi_submit_admin_request_synchronous(ctrl_info, &request,
3729 &response);
3730 if (rc)
3731 return rc;
3732
3733 event_queue->oq_ci = ctrl_info->iomem_base +
3734 PQI_DEVICE_REGISTERS_OFFSET +
3735 get_unaligned_le64(
3736 &response.data.create_operational_oq.oq_ci_offset);
3737
3738 return 0;
3739}
3740
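/*
 * Orientation note, summarizing the function below rather than any external
 * document: each queue group is brought up with four admin requests in
 * order - CREATE_IQ for the RAID-path inbound queue, CREATE_IQ for the
 * AIO-path inbound queue, CHANGE_IQ_PROPERTY to mark the second IQ as an
 * AIO queue, and CREATE_OQ for the group's shared outbound queue. On
 * failure, the inbound queues created so far are torn down through
 * pqi_delete_operational_queue(). The element_length fields are written as
 * (length / 16), which appears to follow the PQI convention of expressing
 * element sizes in 16-byte units.
 */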
3741static int pqi_create_queue_group(struct pqi_ctrl_info *ctrl_info)
3742{
3743 unsigned int i;
3744 int rc;
3745 struct pqi_queue_group *queue_group;
3746 struct pqi_general_admin_request request;
3747 struct pqi_general_admin_response response;
3748
3749 i = ctrl_info->num_active_queue_groups;
3750 queue_group = &ctrl_info->queue_groups[i];
3751
3752 /*
3753 * Create IQ (Inbound Queue - host to device queue) for
3754 * RAID path.
3755 */
3756 memset(&request, 0, sizeof(request));
3757 request.header.iu_type = PQI_REQUEST_IU_GENERAL_ADMIN;
3758 put_unaligned_le16(PQI_GENERAL_ADMIN_IU_LENGTH,
3759 &request.header.iu_length);
3760 request.function_code = PQI_GENERAL_ADMIN_FUNCTION_CREATE_IQ;
3761 put_unaligned_le16(queue_group->iq_id[RAID_PATH],
3762 &request.data.create_operational_iq.queue_id);
3763 put_unaligned_le64(
3764 (u64)queue_group->iq_element_array_bus_addr[RAID_PATH],
3765 &request.data.create_operational_iq.element_array_addr);
3766 put_unaligned_le64((u64)queue_group->iq_ci_bus_addr[RAID_PATH],
3767 &request.data.create_operational_iq.ci_addr);
3768 put_unaligned_le16(ctrl_info->num_elements_per_iq,
3769 &request.data.create_operational_iq.num_elements);
3770 put_unaligned_le16(PQI_OPERATIONAL_IQ_ELEMENT_LENGTH / 16,
3771 &request.data.create_operational_iq.element_length);
3772 request.data.create_operational_iq.queue_protocol = PQI_PROTOCOL_SOP;
3773
3774 rc = pqi_submit_admin_request_synchronous(ctrl_info, &request,
3775 &response);
3776 if (rc) {
3777 dev_err(&ctrl_info->pci_dev->dev,
3778 "error creating inbound RAID queue\n");
3779 return rc;
3780 }
3781
3782 queue_group->iq_pi[RAID_PATH] = ctrl_info->iomem_base +
3783 PQI_DEVICE_REGISTERS_OFFSET +
3784 get_unaligned_le64(
3785 &response.data.create_operational_iq.iq_pi_offset);
3786
3787 /*
3788 * Create IQ (Inbound Queue - host to device queue) for
3789 * Advanced I/O (AIO) path.
3790 */
3791 memset(&request, 0, sizeof(request));
3792 request.header.iu_type = PQI_REQUEST_IU_GENERAL_ADMIN;
3793 put_unaligned_le16(PQI_GENERAL_ADMIN_IU_LENGTH,
3794 &request.header.iu_length);
3795 request.function_code = PQI_GENERAL_ADMIN_FUNCTION_CREATE_IQ;
3796 put_unaligned_le16(queue_group->iq_id[AIO_PATH],
3797 &request.data.create_operational_iq.queue_id);
3798 put_unaligned_le64((u64)queue_group->
3799 iq_element_array_bus_addr[AIO_PATH],
3800 &request.data.create_operational_iq.element_array_addr);
3801 put_unaligned_le64((u64)queue_group->iq_ci_bus_addr[AIO_PATH],
3802 &request.data.create_operational_iq.ci_addr);
3803 put_unaligned_le16(ctrl_info->num_elements_per_iq,
3804 &request.data.create_operational_iq.num_elements);
3805 put_unaligned_le16(PQI_OPERATIONAL_IQ_ELEMENT_LENGTH / 16,
3806 &request.data.create_operational_iq.element_length);
3807 request.data.create_operational_iq.queue_protocol = PQI_PROTOCOL_SOP;
3808
3809 rc = pqi_submit_admin_request_synchronous(ctrl_info, &request,
3810 &response);
3811 if (rc) {
3812 dev_err(&ctrl_info->pci_dev->dev,
3813 "error creating inbound AIO queue\n");
3814 goto delete_inbound_queue_raid;
3815 }
3816
3817 queue_group->iq_pi[AIO_PATH] = ctrl_info->iomem_base +
3818 PQI_DEVICE_REGISTERS_OFFSET +
3819 get_unaligned_le64(
3820 &response.data.create_operational_iq.iq_pi_offset);
3821
3822 /*
3823 * Designate the 2nd IQ as the AIO path. By default, all IQs are
3824 * assumed to be for RAID path I/O unless we change the queue's
3825 * property.
3826 */
3827 memset(&request, 0, sizeof(request));
3828 request.header.iu_type = PQI_REQUEST_IU_GENERAL_ADMIN;
3829 put_unaligned_le16(PQI_GENERAL_ADMIN_IU_LENGTH,
3830 &request.header.iu_length);
3831 request.function_code = PQI_GENERAL_ADMIN_FUNCTION_CHANGE_IQ_PROPERTY;
3832 put_unaligned_le16(queue_group->iq_id[AIO_PATH],
3833 &request.data.change_operational_iq_properties.queue_id);
3834 put_unaligned_le32(PQI_IQ_PROPERTY_IS_AIO_QUEUE,
3835 &request.data.change_operational_iq_properties.vendor_specific);
3836
3837 rc = pqi_submit_admin_request_synchronous(ctrl_info, &request,
3838 &response);
3839 if (rc) {
3840 dev_err(&ctrl_info->pci_dev->dev,
3841 "error changing queue property\n");
3842 goto delete_inbound_queue_aio;
3843 }
3844
3845 /*
3846 * Create OQ (Outbound Queue - device to host queue).
3847 */
3848 memset(&request, 0, sizeof(request));
3849 request.header.iu_type = PQI_REQUEST_IU_GENERAL_ADMIN;
3850 put_unaligned_le16(PQI_GENERAL_ADMIN_IU_LENGTH,
3851 &request.header.iu_length);
3852 request.function_code = PQI_GENERAL_ADMIN_FUNCTION_CREATE_OQ;
3853 put_unaligned_le16(queue_group->oq_id,
3854 &request.data.create_operational_oq.queue_id);
3855 put_unaligned_le64((u64)queue_group->oq_element_array_bus_addr,
3856 &request.data.create_operational_oq.element_array_addr);
3857 put_unaligned_le64((u64)queue_group->oq_pi_bus_addr,
3858 &request.data.create_operational_oq.pi_addr);
3859 put_unaligned_le16(ctrl_info->num_elements_per_oq,
3860 &request.data.create_operational_oq.num_elements);
3861 put_unaligned_le16(PQI_OPERATIONAL_OQ_ELEMENT_LENGTH / 16,
3862 &request.data.create_operational_oq.element_length);
3863 request.data.create_operational_oq.queue_protocol = PQI_PROTOCOL_SOP;
3864 put_unaligned_le16(queue_group->int_msg_num,
3865 &request.data.create_operational_oq.int_msg_num);
3866
3867 rc = pqi_submit_admin_request_synchronous(ctrl_info, &request,
3868 &response);
3869 if (rc) {
3870 dev_err(&ctrl_info->pci_dev->dev,
3871 "error creating outbound queue\n");
3872 goto delete_inbound_queue_aio;
3873 }
3874
3875 queue_group->oq_ci = ctrl_info->iomem_base +
3876 PQI_DEVICE_REGISTERS_OFFSET +
3877 get_unaligned_le64(
3878 &response.data.create_operational_oq.oq_ci_offset);
3879
3880 ctrl_info->num_active_queue_groups++;
3881
3882 return 0;
3883
3884delete_inbound_queue_aio:
3885 pqi_delete_operational_queue(ctrl_info, true,
3886 queue_group->iq_id[AIO_PATH]);
3887
3888delete_inbound_queue_raid:
3889 pqi_delete_operational_queue(ctrl_info, true,
3890 queue_group->iq_id[RAID_PATH]);
3891
3892 return rc;
3893}
3894
3895static int pqi_create_queues(struct pqi_ctrl_info *ctrl_info)
3896{
3897 int rc;
3898 unsigned int i;
3899
3900 rc = pqi_create_event_queue(ctrl_info);
3901 if (rc) {
3902 dev_err(&ctrl_info->pci_dev->dev,
3903 "error creating event queue\n");
3904 return rc;
3905 }
3906
3907 for (i = 0; i < ctrl_info->num_queue_groups; i++) {
3908 rc = pqi_create_queue_group(ctrl_info);
3909 if (rc) {
3910 dev_err(&ctrl_info->pci_dev->dev,
3911 "error creating queue group number %u/%u\n",
3912 i, ctrl_info->num_queue_groups);
3913 return rc;
3914 }
3915 }
3916
3917 return 0;
3918}
3919
3920#define PQI_REPORT_EVENT_CONFIG_BUFFER_LENGTH \
3921 (offsetof(struct pqi_event_config, descriptors) + \
3922 (PQI_MAX_EVENT_DESCRIPTORS * sizeof(struct pqi_event_descriptor)))
3923
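/*
 * Sketch of the event-configuration handshake implemented below, derived
 * from the code itself: read the controller's event descriptor table with
 * REPORT_VENDOR_EVENT_CONFIG into a DMA-mapped buffer, rewrite every
 * descriptor's oq_id so all events are steered to the dedicated event queue
 * created earlier, then push the modified table back with
 * SET_VENDOR_EVENT_CONFIG.
 */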
3924static int pqi_configure_events(struct pqi_ctrl_info *ctrl_info)
3925{
3926 int rc;
3927 unsigned int i;
3928 struct pqi_event_config *event_config;
3929 struct pqi_general_management_request request;
3930
3931 event_config = kmalloc(PQI_REPORT_EVENT_CONFIG_BUFFER_LENGTH,
3932 GFP_KERNEL);
3933 if (!event_config)
3934 return -ENOMEM;
3935
3936 memset(&request, 0, sizeof(request));
3937
3938 request.header.iu_type = PQI_REQUEST_IU_REPORT_VENDOR_EVENT_CONFIG;
3939 put_unaligned_le16(offsetof(struct pqi_general_management_request,
3940 data.report_event_configuration.sg_descriptors[1]) -
3941 PQI_REQUEST_HEADER_LENGTH, &request.header.iu_length);
3942 put_unaligned_le32(PQI_REPORT_EVENT_CONFIG_BUFFER_LENGTH,
3943 &request.data.report_event_configuration.buffer_length);
3944
3945 rc = pqi_map_single(ctrl_info->pci_dev,
3946 request.data.report_event_configuration.sg_descriptors,
3947 event_config, PQI_REPORT_EVENT_CONFIG_BUFFER_LENGTH,
3948 PCI_DMA_FROMDEVICE);
3949 if (rc)
3950 goto out;
3951
3952 rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header,
3953 0, NULL, NO_TIMEOUT);
3954
3955 pqi_pci_unmap(ctrl_info->pci_dev,
3956 request.data.report_event_configuration.sg_descriptors, 1,
3957 PCI_DMA_FROMDEVICE);
3958
3959 if (rc)
3960 goto out;
3961
3962 for (i = 0; i < event_config->num_event_descriptors; i++)
3963 put_unaligned_le16(ctrl_info->event_queue.oq_id,
3964 &event_config->descriptors[i].oq_id);
3965
3966 memset(&request, 0, sizeof(request));
3967
3968 request.header.iu_type = PQI_REQUEST_IU_SET_VENDOR_EVENT_CONFIG;
3969 put_unaligned_le16(offsetof(struct pqi_general_management_request,
3970 data.report_event_configuration.sg_descriptors[1]) -
3971 PQI_REQUEST_HEADER_LENGTH, &request.header.iu_length);
3972 put_unaligned_le32(PQI_REPORT_EVENT_CONFIG_BUFFER_LENGTH,
3973 &request.data.report_event_configuration.buffer_length);
3974
3975 rc = pqi_map_single(ctrl_info->pci_dev,
3976 request.data.report_event_configuration.sg_descriptors,
3977 event_config, PQI_REPORT_EVENT_CONFIG_BUFFER_LENGTH,
3978 PCI_DMA_TODEVICE);
3979 if (rc)
3980 goto out;
3981
3982 rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header, 0,
3983 NULL, NO_TIMEOUT);
3984
3985 pqi_pci_unmap(ctrl_info->pci_dev,
3986 request.data.report_event_configuration.sg_descriptors, 1,
3987 PCI_DMA_TODEVICE);
3988
3989out:
3990 kfree(event_config);
3991
3992 return rc;
3993}
3994
3995static void pqi_free_all_io_requests(struct pqi_ctrl_info *ctrl_info)
3996{
3997 unsigned int i;
3998 struct device *dev;
3999 size_t sg_chain_buffer_length;
4000 struct pqi_io_request *io_request;
4001
4002 if (!ctrl_info->io_request_pool)
4003 return;
4004
4005 dev = &ctrl_info->pci_dev->dev;
4006 sg_chain_buffer_length = ctrl_info->sg_chain_buffer_length;
4007 io_request = ctrl_info->io_request_pool;
4008
4009 for (i = 0; i < ctrl_info->max_io_slots; i++) {
4010 kfree(io_request->iu);
4011 if (!io_request->sg_chain_buffer)
4012 break;
4013 dma_free_coherent(dev, sg_chain_buffer_length,
4014 io_request->sg_chain_buffer,
4015 io_request->sg_chain_buffer_dma_handle);
4016 io_request++;
4017 }
4018
4019 kfree(ctrl_info->io_request_pool);
4020 ctrl_info->io_request_pool = NULL;
4021}
4022
4023static inline int pqi_alloc_error_buffer(struct pqi_ctrl_info *ctrl_info)
4024{
4025 ctrl_info->error_buffer = dma_zalloc_coherent(&ctrl_info->pci_dev->dev,
4026 ctrl_info->error_buffer_length,
4027 &ctrl_info->error_buffer_dma_handle, GFP_KERNEL);
4028
4029 if (!ctrl_info->error_buffer)
4030 return -ENOMEM;
4031
4032 return 0;
4033}
4034
4035static int pqi_alloc_io_resources(struct pqi_ctrl_info *ctrl_info)
4036{
4037 unsigned int i;
4038 void *sg_chain_buffer;
4039 size_t sg_chain_buffer_length;
4040 dma_addr_t sg_chain_buffer_dma_handle;
4041 struct device *dev;
4042 struct pqi_io_request *io_request;
4043
4044 ctrl_info->io_request_pool = kzalloc(ctrl_info->max_io_slots *
4045 sizeof(ctrl_info->io_request_pool[0]), GFP_KERNEL);
4046
4047 if (!ctrl_info->io_request_pool) {
4048 dev_err(&ctrl_info->pci_dev->dev,
4049 "failed to allocate I/O request pool\n");
4050 goto error;
4051 }
4052
4053 dev = &ctrl_info->pci_dev->dev;
4054 sg_chain_buffer_length = ctrl_info->sg_chain_buffer_length;
4055 io_request = ctrl_info->io_request_pool;
4056
4057 for (i = 0; i < ctrl_info->max_io_slots; i++) {
4058 io_request->iu =
4059 kmalloc(ctrl_info->max_inbound_iu_length, GFP_KERNEL);
4060
4061 if (!io_request->iu) {
4062 dev_err(&ctrl_info->pci_dev->dev,
4063 "failed to allocate IU buffers\n");
4064 goto error;
4065 }
4066
4067 sg_chain_buffer = dma_alloc_coherent(dev,
4068 sg_chain_buffer_length, &sg_chain_buffer_dma_handle,
4069 GFP_KERNEL);
4070
4071 if (!sg_chain_buffer) {
4072 dev_err(&ctrl_info->pci_dev->dev,
4073 "failed to allocate PQI scatter-gather chain buffers\n");
4074 goto error;
4075 }
4076
4077 io_request->index = i;
4078 io_request->sg_chain_buffer = sg_chain_buffer;
4079 io_request->sg_chain_buffer_dma_handle =
4080 sg_chain_buffer_dma_handle;
4081 io_request++;
4082 }
4083
4084 return 0;
4085
4086error:
4087 pqi_free_all_io_requests(ctrl_info);
4088
4089 return -ENOMEM;
4090}
4091
4092/*
4093 * Calculate required resources that are sized based on max. outstanding
4094 * requests and max. transfer size.
4095 */
4096
4097static void pqi_calculate_io_resources(struct pqi_ctrl_info *ctrl_info)
4098{
4099 u32 max_transfer_size;
4100 u32 max_sg_entries;
4101
4102 ctrl_info->scsi_ml_can_queue =
4103 ctrl_info->max_outstanding_requests - PQI_RESERVED_IO_SLOTS;
4104 ctrl_info->max_io_slots = ctrl_info->max_outstanding_requests;
4105
4106 ctrl_info->error_buffer_length =
4107 ctrl_info->max_io_slots * PQI_ERROR_BUFFER_ELEMENT_LENGTH;
4108
4109 max_transfer_size =
4110 min(ctrl_info->max_transfer_size, PQI_MAX_TRANSFER_SIZE);
4111
4112 max_sg_entries = max_transfer_size / PAGE_SIZE;
4113
4114 /* +1 to cover when the buffer is not page-aligned. */
4115 max_sg_entries++;
4116
4117 max_sg_entries = min(ctrl_info->max_sg_entries, max_sg_entries);
4118
4119 max_transfer_size = (max_sg_entries - 1) * PAGE_SIZE;
4120
4121 ctrl_info->sg_chain_buffer_length =
4122 max_sg_entries * sizeof(struct pqi_sg_descriptor);
4123 ctrl_info->sg_tablesize = max_sg_entries;
4124 ctrl_info->max_sectors = max_transfer_size / 512;
4125}
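/*
 * Worked example for the sizing above, using assumed numbers rather than
 * values reported by any particular controller: with PAGE_SIZE = 4 KiB and
 * a 1 MiB max_transfer_size, max_sg_entries becomes (1 MiB / 4 KiB) + 1 =
 * 257 before being capped by the controller's own limit; the usable
 * transfer size is then recomputed as (257 - 1) * 4 KiB = 1 MiB,
 * max_sectors as 1 MiB / 512 = 2048, and each request's chain buffer as
 * 257 * sizeof(struct pqi_sg_descriptor) bytes.
 */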
4126
4127static void pqi_calculate_queue_resources(struct pqi_ctrl_info *ctrl_info)
4128{
4129 int num_cpus;
4130 int max_queue_groups;
4131 int num_queue_groups;
4132 u16 num_elements_per_iq;
4133 u16 num_elements_per_oq;
4134
4135 max_queue_groups = min(ctrl_info->max_inbound_queues / 2,
4136 ctrl_info->max_outbound_queues - 1);
4137 max_queue_groups = min(max_queue_groups, PQI_MAX_QUEUE_GROUPS);
4138
4139 num_cpus = num_online_cpus();
4140 num_queue_groups = min(num_cpus, ctrl_info->max_msix_vectors);
4141 num_queue_groups = min(num_queue_groups, max_queue_groups);
4142
4143 ctrl_info->num_queue_groups = num_queue_groups;
4144
4145 /*
4146 * Make sure that the max. inbound IU length is an even multiple
4147 * of our inbound element length.
4148 */
4149 ctrl_info->max_inbound_iu_length =
4150 (ctrl_info->max_inbound_iu_length_per_firmware /
4151 PQI_OPERATIONAL_IQ_ELEMENT_LENGTH) *
4152 PQI_OPERATIONAL_IQ_ELEMENT_LENGTH;
4153
4154 num_elements_per_iq =
4155 (ctrl_info->max_inbound_iu_length /
4156 PQI_OPERATIONAL_IQ_ELEMENT_LENGTH);
4157
4158 /* Add one because one element in each queue is unusable. */
4159 num_elements_per_iq++;
4160
4161 num_elements_per_iq = min(num_elements_per_iq,
4162 ctrl_info->max_elements_per_iq);
4163
4164 num_elements_per_oq = ((num_elements_per_iq - 1) * 2) + 1;
4165 num_elements_per_oq = min(num_elements_per_oq,
4166 ctrl_info->max_elements_per_oq);
4167
4168 ctrl_info->num_elements_per_iq = num_elements_per_iq;
4169 ctrl_info->num_elements_per_oq = num_elements_per_oq;
4170
4171 ctrl_info->max_sg_per_iu =
4172 ((ctrl_info->max_inbound_iu_length -
4173 PQI_OPERATIONAL_IQ_ELEMENT_LENGTH) /
4174 sizeof(struct pqi_sg_descriptor)) +
4175 PQI_MAX_EMBEDDED_SG_DESCRIPTORS;
4176}
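/*
 * Worked example for the element accounting above, with assumed firmware
 * values: if max_inbound_iu_length_per_firmware were 1100 bytes and
 * PQI_OPERATIONAL_IQ_ELEMENT_LENGTH were 128 bytes, the rounding yields
 * (1100 / 128) * 128 = 1024, the largest whole multiple of the element
 * size. num_elements_per_iq would then be 1024 / 128 + 1 = 9 (the "+1"
 * covering the one unusable element in each circular queue), and
 * num_elements_per_oq would be (9 - 1) * 2 + 1 = 17, with both values
 * still subject to the controller's reported caps.
 */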
4177
4178static inline void pqi_set_sg_descriptor(
4179 struct pqi_sg_descriptor *sg_descriptor, struct scatterlist *sg)
4180{
4181 u64 address = (u64)sg_dma_address(sg);
4182 unsigned int length = sg_dma_len(sg);
4183
4184 put_unaligned_le64(address, &sg_descriptor->address);
4185 put_unaligned_le32(length, &sg_descriptor->length);
4186 put_unaligned_le32(0, &sg_descriptor->flags);
4187}
4188
4189static int pqi_build_raid_sg_list(struct pqi_ctrl_info *ctrl_info,
4190 struct pqi_raid_path_request *request, struct scsi_cmnd *scmd,
4191 struct pqi_io_request *io_request)
4192{
4193 int i;
4194 u16 iu_length;
4195 int sg_count;
4196 bool chained;
4197 unsigned int num_sg_in_iu;
4198 unsigned int max_sg_per_iu;
4199 struct scatterlist *sg;
4200 struct pqi_sg_descriptor *sg_descriptor;
4201
4202 sg_count = scsi_dma_map(scmd);
4203 if (sg_count < 0)
4204 return sg_count;
4205
4206 iu_length = offsetof(struct pqi_raid_path_request, sg_descriptors) -
4207 PQI_REQUEST_HEADER_LENGTH;
4208
4209 if (sg_count == 0)
4210 goto out;
4211
4212 sg = scsi_sglist(scmd);
4213 sg_descriptor = request->sg_descriptors;
4214 max_sg_per_iu = ctrl_info->max_sg_per_iu - 1;
4215 chained = false;
4216 num_sg_in_iu = 0;
4217 i = 0;
4218
4219 while (1) {
4220 pqi_set_sg_descriptor(sg_descriptor, sg);
4221 if (!chained)
4222 num_sg_in_iu++;
4223 i++;
4224 if (i == sg_count)
4225 break;
4226 sg_descriptor++;
4227 if (i == max_sg_per_iu) {
4228 put_unaligned_le64(
4229 (u64)io_request->sg_chain_buffer_dma_handle,
4230 &sg_descriptor->address);
4231 put_unaligned_le32((sg_count - num_sg_in_iu)
4232 * sizeof(*sg_descriptor),
4233 &sg_descriptor->length);
4234 put_unaligned_le32(CISS_SG_CHAIN,
4235 &sg_descriptor->flags);
4236 chained = true;
4237 num_sg_in_iu++;
4238 sg_descriptor = io_request->sg_chain_buffer;
4239 }
4240 sg = sg_next(sg);
4241 }
4242
4243 put_unaligned_le32(CISS_SG_LAST, &sg_descriptor->flags);
4244 request->partial = chained;
4245 iu_length += num_sg_in_iu * sizeof(*sg_descriptor);
4246
4247out:
4248 put_unaligned_le16(iu_length, &request->header.iu_length);
4249
4250 return 0;
4251}
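/*
 * Layout produced by the loop above (a description of this code, not of an
 * external interface document): up to ctrl_info->max_sg_per_iu - 1 data
 * descriptors are embedded directly in the IU; if more are needed, the last
 * embedded slot becomes a CISS_SG_CHAIN descriptor whose address and length
 * point at the preallocated io_request->sg_chain_buffer, and the remaining
 * descriptors are written there. Whichever descriptor ends up last is
 * tagged CISS_SG_LAST, and request->partial records whether chaining
 * occurred.
 */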
4252
4253static int pqi_build_aio_sg_list(struct pqi_ctrl_info *ctrl_info,
4254 struct pqi_aio_path_request *request, struct scsi_cmnd *scmd,
4255 struct pqi_io_request *io_request)
4256{
4257 int i;
4258 u16 iu_length;
4259 int sg_count;
4260 bool chained;
4261 unsigned int num_sg_in_iu;
4262 unsigned int max_sg_per_iu;
4263 struct scatterlist *sg;
4264 struct pqi_sg_descriptor *sg_descriptor;
4265
4266 sg_count = scsi_dma_map(scmd);
4267 if (sg_count < 0)
4268 return sg_count;
4269
4270 iu_length = offsetof(struct pqi_aio_path_request, sg_descriptors) -
4271 PQI_REQUEST_HEADER_LENGTH;
4272 num_sg_in_iu = 0;
4273
4274 if (sg_count == 0)
4275 goto out;
4276
4277 sg = scsi_sglist(scmd);
4278 sg_descriptor = request->sg_descriptors;
4279 max_sg_per_iu = ctrl_info->max_sg_per_iu - 1;
4280 chained = false;
4281 i = 0;
4282
4283 while (1) {
4284 pqi_set_sg_descriptor(sg_descriptor, sg);
4285 if (!chained)
4286 num_sg_in_iu++;
4287 i++;
4288 if (i == sg_count)
4289 break;
4290 sg_descriptor++;
4291 if (i == max_sg_per_iu) {
4292 put_unaligned_le64(
4293 (u64)io_request->sg_chain_buffer_dma_handle,
4294 &sg_descriptor->address);
4295 put_unaligned_le32((sg_count - num_sg_in_iu)
4296 * sizeof(*sg_descriptor),
4297 &sg_descriptor->length);
4298 put_unaligned_le32(CISS_SG_CHAIN,
4299 &sg_descriptor->flags);
4300 chained = true;
4301 num_sg_in_iu++;
4302 sg_descriptor = io_request->sg_chain_buffer;
4303 		}
4304 		sg = sg_next(sg);
4305 }
4306
4307 put_unaligned_le32(CISS_SG_LAST, &sg_descriptor->flags);
4308 request->partial = chained;
4309 	iu_length += num_sg_in_iu * sizeof(*sg_descriptor);
4310
4311out:
4312 put_unaligned_le16(iu_length, &request->header.iu_length);
4313 request->num_sg_descriptors = num_sg_in_iu;
4314
4315 return 0;
4316}
4317
4318static void pqi_raid_io_complete(struct pqi_io_request *io_request,
4319 void *context)
4320{
4321 struct scsi_cmnd *scmd;
4322
4323 scmd = io_request->scmd;
4324 pqi_free_io_request(io_request);
4325 scsi_dma_unmap(scmd);
4326 pqi_scsi_done(scmd);
4327}
4328
4329static int pqi_raid_submit_scsi_cmd(struct pqi_ctrl_info *ctrl_info,
4330 struct pqi_scsi_dev *device, struct scsi_cmnd *scmd,
4331 struct pqi_queue_group *queue_group)
4332{
4333 int rc;
4334 size_t cdb_length;
4335 struct pqi_io_request *io_request;
4336 struct pqi_raid_path_request *request;
4337
4338 io_request = pqi_alloc_io_request(ctrl_info);
4339 io_request->io_complete_callback = pqi_raid_io_complete;
4340 io_request->scmd = scmd;
4341
4342 scmd->host_scribble = (unsigned char *)io_request;
4343
4344 request = io_request->iu;
4345 memset(request, 0,
4346 offsetof(struct pqi_raid_path_request, sg_descriptors));
4347
4348 request->header.iu_type = PQI_REQUEST_IU_RAID_PATH_IO;
4349 put_unaligned_le32(scsi_bufflen(scmd), &request->buffer_length);
4350 request->task_attribute = SOP_TASK_ATTRIBUTE_SIMPLE;
4351 put_unaligned_le16(io_request->index, &request->request_id);
4352 request->error_index = request->request_id;
4353 memcpy(request->lun_number, device->scsi3addr,
4354 sizeof(request->lun_number));
4355
4356 cdb_length = min_t(size_t, scmd->cmd_len, sizeof(request->cdb));
4357 memcpy(request->cdb, scmd->cmnd, cdb_length);
4358
4359 switch (cdb_length) {
4360 case 6:
4361 case 10:
4362 case 12:
4363 case 16:
4364 /* No bytes in the Additional CDB bytes field */
4365 request->additional_cdb_bytes_usage =
4366 SOP_ADDITIONAL_CDB_BYTES_0;
4367 break;
4368 case 20:
4369 /* 4 bytes in the Additional cdb field */
4370 request->additional_cdb_bytes_usage =
4371 SOP_ADDITIONAL_CDB_BYTES_4;
4372 break;
4373 case 24:
4374 /* 8 bytes in the Additional cdb field */
4375 request->additional_cdb_bytes_usage =
4376 SOP_ADDITIONAL_CDB_BYTES_8;
4377 break;
4378 case 28:
4379 /* 12 bytes in the Additional cdb field */
4380 request->additional_cdb_bytes_usage =
4381 SOP_ADDITIONAL_CDB_BYTES_12;
4382 break;
4383 case 32:
4384 default:
4385 /* 16 bytes in the Additional cdb field */
4386 request->additional_cdb_bytes_usage =
4387 SOP_ADDITIONAL_CDB_BYTES_16;
4388 break;
4389 }
4390
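	/*
	 * Note on the mapping below (an interpretation, not stated elsewhere
	 * in this file): the SOP direction flags describe the transfer from
	 * the controller's point of view, so a host write (DMA_TO_DEVICE)
	 * becomes SOP_READ_FLAG - the controller reads the data from host
	 * memory - and a host read (DMA_FROM_DEVICE) becomes SOP_WRITE_FLAG.
	 */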
4391 switch (scmd->sc_data_direction) {
4392 case DMA_TO_DEVICE:
4393 request->data_direction = SOP_READ_FLAG;
4394 break;
4395 case DMA_FROM_DEVICE:
4396 request->data_direction = SOP_WRITE_FLAG;
4397 break;
4398 case DMA_NONE:
4399 request->data_direction = SOP_NO_DIRECTION_FLAG;
4400 break;
4401 case DMA_BIDIRECTIONAL:
4402 request->data_direction = SOP_BIDIRECTIONAL;
4403 break;
4404 default:
4405 dev_err(&ctrl_info->pci_dev->dev,
4406 "unknown data direction: %d\n",
4407 scmd->sc_data_direction);
4408 WARN_ON(scmd->sc_data_direction);
4409 break;
4410 }
4411
4412 rc = pqi_build_raid_sg_list(ctrl_info, request, scmd, io_request);
4413 if (rc) {
4414 pqi_free_io_request(io_request);
4415 return SCSI_MLQUEUE_HOST_BUSY;
4416 }
4417
4418 pqi_start_io(ctrl_info, queue_group, RAID_PATH, io_request);
4419
4420 return 0;
4421}
4422
4423static void pqi_aio_io_complete(struct pqi_io_request *io_request,
4424 void *context)
4425{
4426 struct scsi_cmnd *scmd;
4427
4428 scmd = io_request->scmd;
4429 scsi_dma_unmap(scmd);
4430 if (io_request->status == -EAGAIN)
4431 set_host_byte(scmd, DID_IMM_RETRY);
4432 pqi_free_io_request(io_request);
4433 pqi_scsi_done(scmd);
4434}
4435
4436static inline int pqi_aio_submit_scsi_cmd(struct pqi_ctrl_info *ctrl_info,
4437 struct pqi_scsi_dev *device, struct scsi_cmnd *scmd,
4438 struct pqi_queue_group *queue_group)
4439{
4440 return pqi_aio_submit_io(ctrl_info, scmd, device->aio_handle,
4441 scmd->cmnd, scmd->cmd_len, queue_group, NULL);
4442}
4443
4444static int pqi_aio_submit_io(struct pqi_ctrl_info *ctrl_info,
4445 struct scsi_cmnd *scmd, u32 aio_handle, u8 *cdb,
4446 unsigned int cdb_length, struct pqi_queue_group *queue_group,
4447 struct pqi_encryption_info *encryption_info)
4448{
4449 int rc;
4450 struct pqi_io_request *io_request;
4451 struct pqi_aio_path_request *request;
4452
4453 io_request = pqi_alloc_io_request(ctrl_info);
4454 io_request->io_complete_callback = pqi_aio_io_complete;
4455 io_request->scmd = scmd;
4456
4457 scmd->host_scribble = (unsigned char *)io_request;
4458
4459 request = io_request->iu;
4460 memset(request, 0,
4461 offsetof(struct pqi_raid_path_request, sg_descriptors));
4462
4463 request->header.iu_type = PQI_REQUEST_IU_AIO_PATH_IO;
4464 put_unaligned_le32(aio_handle, &request->nexus_id);
4465 put_unaligned_le32(scsi_bufflen(scmd), &request->buffer_length);
4466 request->task_attribute = SOP_TASK_ATTRIBUTE_SIMPLE;
4467 put_unaligned_le16(io_request->index, &request->request_id);
4468 request->error_index = request->request_id;
4469 if (cdb_length > sizeof(request->cdb))
4470 cdb_length = sizeof(request->cdb);
4471 request->cdb_length = cdb_length;
4472 memcpy(request->cdb, cdb, cdb_length);
4473
4474 switch (scmd->sc_data_direction) {
4475 case DMA_TO_DEVICE:
4476 request->data_direction = SOP_READ_FLAG;
4477 break;
4478 case DMA_FROM_DEVICE:
4479 request->data_direction = SOP_WRITE_FLAG;
4480 break;
4481 case DMA_NONE:
4482 request->data_direction = SOP_NO_DIRECTION_FLAG;
4483 break;
4484 case DMA_BIDIRECTIONAL:
4485 request->data_direction = SOP_BIDIRECTIONAL;
4486 break;
4487 default:
4488 dev_err(&ctrl_info->pci_dev->dev,
4489 "unknown data direction: %d\n",
4490 scmd->sc_data_direction);
4491 WARN_ON(scmd->sc_data_direction);
4492 break;
4493 }
4494
4495 if (encryption_info) {
4496 request->encryption_enable = true;
4497 put_unaligned_le16(encryption_info->data_encryption_key_index,
4498 &request->data_encryption_key_index);
4499 put_unaligned_le32(encryption_info->encrypt_tweak_lower,
4500 &request->encrypt_tweak_lower);
4501 put_unaligned_le32(encryption_info->encrypt_tweak_upper,
4502 &request->encrypt_tweak_upper);
4503 }
4504
4505 rc = pqi_build_aio_sg_list(ctrl_info, request, scmd, io_request);
4506 if (rc) {
4507 pqi_free_io_request(io_request);
4508 return SCSI_MLQUEUE_HOST_BUSY;
4509 }
4510
4511 pqi_start_io(ctrl_info, queue_group, AIO_PATH, io_request);
4512
4513 return 0;
4514}
4515
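/*
 * Dispatch summary for the queuecommand path below, describing this code:
 * the block-layer tag is mapped to a hardware queue index to select a queue
 * group; logical devices try the RAID bypass (AIO) path when offload is
 * enabled and the request is a filesystem request, falling back to the
 * plain RAID path only when the bypass returns something other than
 * success, host-busy, check-condition, or reservation-conflict; physical
 * devices use the AIO path when it is enabled and the RAID path otherwise.
 */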
4516static int pqi_scsi_queue_command(struct Scsi_Host *shost,
4517 	struct scsi_cmnd *scmd)
4518{
4519 int rc;
4520 struct pqi_ctrl_info *ctrl_info;
4521 struct pqi_scsi_dev *device;
4522 u16 hwq;
4523 struct pqi_queue_group *queue_group;
4524 bool raid_bypassed;
4525
4526 device = scmd->device->hostdata;
4527 ctrl_info = shost_to_hba(shost);
4528
4529 if (pqi_ctrl_offline(ctrl_info)) {
4530 set_host_byte(scmd, DID_NO_CONNECT);
4531 pqi_scsi_done(scmd);
4532 return 0;
4533 }
4534
4535 /*
4536 * This is necessary because the SML doesn't zero out this field during
4537 * error recovery.
4538 */
4539 scmd->result = 0;
4540
4541 hwq = blk_mq_unique_tag_to_hwq(blk_mq_unique_tag(scmd->request));
4542 if (hwq >= ctrl_info->num_queue_groups)
4543 hwq = 0;
4544
4545 queue_group = &ctrl_info->queue_groups[hwq];
4546
4547 if (pqi_is_logical_device(device)) {
4548 raid_bypassed = false;
4549 if (device->offload_enabled &&
4550 scmd->request->cmd_type == REQ_TYPE_FS) {
4551 rc = pqi_raid_bypass_submit_scsi_cmd(ctrl_info, device,
4552 scmd, queue_group);
4553 if (rc == 0 ||
4554 rc == SCSI_MLQUEUE_HOST_BUSY ||
4555 rc == SAM_STAT_CHECK_CONDITION ||
4556 rc == SAM_STAT_RESERVATION_CONFLICT)
4557 raid_bypassed = true;
4558 }
4559 if (!raid_bypassed)
4560 rc = pqi_raid_submit_scsi_cmd(ctrl_info, device, scmd,
4561 queue_group);
4562 } else {
4563 if (device->aio_enabled)
4564 rc = pqi_aio_submit_scsi_cmd(ctrl_info, device, scmd,
4565 queue_group);
4566 else
4567 rc = pqi_raid_submit_scsi_cmd(ctrl_info, device, scmd,
4568 queue_group);
4569 }
4570
4571 return rc;
4572}
4573
4574static void pqi_lun_reset_complete(struct pqi_io_request *io_request,
4575 void *context)
4576{
4577 	struct completion *waiting = context;
4578
4579 complete(waiting);
4580}
4581
4582#define PQI_LUN_RESET_TIMEOUT_SECS 10
4583
4584static int pqi_wait_for_lun_reset_completion(struct pqi_ctrl_info *ctrl_info,
4585 struct pqi_scsi_dev *device, struct completion *wait)
4586{
4587 int rc;
4588 unsigned int wait_secs = 0;
4589
4590 while (1) {
4591 if (wait_for_completion_io_timeout(wait,
4592 PQI_LUN_RESET_TIMEOUT_SECS * HZ)) {
4593 rc = 0;
4594 break;
4595 }
4596
4597 pqi_check_ctrl_health(ctrl_info);
4598 if (pqi_ctrl_offline(ctrl_info)) {
4599 rc = -ETIMEDOUT;
4600 break;
4601 }
4602
4603 		wait_secs += PQI_LUN_RESET_TIMEOUT_SECS;
4604
4605 dev_err(&ctrl_info->pci_dev->dev,
4606 "resetting scsi %d:%d:%d:%d - waiting %u seconds\n",
4607 ctrl_info->scsi_host->host_no, device->bus,
4608 device->target, device->lun, wait_secs);
4609 	}
4610
4611 	return rc;
4612}
4613
4614static int pqi_lun_reset(struct pqi_ctrl_info *ctrl_info,
4615 struct pqi_scsi_dev *device)
4616{
4617 int rc;
4618 struct pqi_io_request *io_request;
4619 DECLARE_COMPLETION_ONSTACK(wait);
4620 struct pqi_task_management_request *request;
4621
4622 down(&ctrl_info->lun_reset_sem);
4623
4624 io_request = pqi_alloc_io_request(ctrl_info);
4625 	io_request->io_complete_callback = pqi_lun_reset_complete;
4626 io_request->context = &wait;
4627
4628 request = io_request->iu;
4629 memset(request, 0, sizeof(*request));
4630
4631 request->header.iu_type = PQI_REQUEST_IU_TASK_MANAGEMENT;
4632 put_unaligned_le16(sizeof(*request) - PQI_REQUEST_HEADER_LENGTH,
4633 &request->header.iu_length);
4634 put_unaligned_le16(io_request->index, &request->request_id);
4635 memcpy(request->lun_number, device->scsi3addr,
4636 sizeof(request->lun_number));
4637 request->task_management_function = SOP_TASK_MANAGEMENT_LUN_RESET;
4638
4639 pqi_start_io(ctrl_info,
4640 &ctrl_info->queue_groups[PQI_DEFAULT_QUEUE_GROUP], RAID_PATH,
4641 io_request);
4642
4643 rc = pqi_wait_for_lun_reset_completion(ctrl_info, device, &wait);
4644 if (rc == 0)
4645 		rc = io_request->status;
4646
4647 pqi_free_io_request(io_request);
4648 up(&ctrl_info->lun_reset_sem);
4649
4650 return rc;
4651}
4652
4653/* Performs a reset at the LUN level. */
4654
4655static int pqi_device_reset(struct pqi_ctrl_info *ctrl_info,
4656 struct pqi_scsi_dev *device)
4657{
4658 int rc;
4659
4660 pqi_check_ctrl_health(ctrl_info);
4661 if (pqi_ctrl_offline(ctrl_info))
4662 return FAILED;
4663
4664 	rc = pqi_lun_reset(ctrl_info, device);
4665
4666 	return rc == 0 ? SUCCESS : FAILED;
4667}
4668
4669static int pqi_eh_device_reset_handler(struct scsi_cmnd *scmd)
4670{
4671 int rc;
4672 struct pqi_ctrl_info *ctrl_info;
4673 struct pqi_scsi_dev *device;
4674
4675 ctrl_info = shost_to_hba(scmd->device->host);
4676 device = scmd->device->hostdata;
4677
4678 dev_err(&ctrl_info->pci_dev->dev,
4679 "resetting scsi %d:%d:%d:%d\n",
4680 ctrl_info->scsi_host->host_no,
4681 device->bus, device->target, device->lun);
4682
4683 rc = pqi_device_reset(ctrl_info, device);
4684
4685 dev_err(&ctrl_info->pci_dev->dev,
4686 "reset of scsi %d:%d:%d:%d: %s\n",
4687 ctrl_info->scsi_host->host_no,
4688 device->bus, device->target, device->lun,
4689 rc == SUCCESS ? "SUCCESS" : "FAILED");
4690
4691 return rc;
4692}
4693
4694static int pqi_slave_alloc(struct scsi_device *sdev)
4695{
4696 struct pqi_scsi_dev *device;
4697 unsigned long flags;
4698 struct pqi_ctrl_info *ctrl_info;
4699 struct scsi_target *starget;
4700 struct sas_rphy *rphy;
4701
4702 ctrl_info = shost_to_hba(sdev->host);
4703
4704 spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);
4705
4706 if (sdev_channel(sdev) == PQI_PHYSICAL_DEVICE_BUS) {
4707 starget = scsi_target(sdev);
4708 rphy = target_to_rphy(starget);
4709 device = pqi_find_device_by_sas_rphy(ctrl_info, rphy);
4710 if (device) {
4711 device->target = sdev_id(sdev);
4712 device->lun = sdev->lun;
4713 device->target_lun_valid = true;
4714 }
4715 } else {
4716 device = pqi_find_scsi_dev(ctrl_info, sdev_channel(sdev),
4717 sdev_id(sdev), sdev->lun);
4718 }
4719
4720 if (device && device->expose_device) {
4721 sdev->hostdata = device;
4722 device->sdev = sdev;
4723 if (device->queue_depth) {
4724 device->advertised_queue_depth = device->queue_depth;
4725 scsi_change_queue_depth(sdev,
4726 device->advertised_queue_depth);
4727 }
4728 }
4729
4730 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
4731
4732 return 0;
4733}
4734
4735static int pqi_slave_configure(struct scsi_device *sdev)
4736{
4737 struct pqi_scsi_dev *device;
4738
4739 device = sdev->hostdata;
4740 if (!device->expose_device)
4741 sdev->no_uld_attach = true;
4742
4743 return 0;
4744}
4745
4746static int pqi_getpciinfo_ioctl(struct pqi_ctrl_info *ctrl_info,
4747 void __user *arg)
4748{
4749 struct pci_dev *pci_dev;
4750 u32 subsystem_vendor;
4751 u32 subsystem_device;
4752 cciss_pci_info_struct pciinfo;
4753
4754 if (!arg)
4755 return -EINVAL;
4756
4757 pci_dev = ctrl_info->pci_dev;
4758
4759 pciinfo.domain = pci_domain_nr(pci_dev->bus);
4760 pciinfo.bus = pci_dev->bus->number;
4761 pciinfo.dev_fn = pci_dev->devfn;
4762 subsystem_vendor = pci_dev->subsystem_vendor;
4763 subsystem_device = pci_dev->subsystem_device;
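	/*
	 * board_id packs the PCI subsystem device ID into the upper 16 bits
	 * and the subsystem vendor ID into the lower 16 bits, presumably to
	 * match the layout the legacy cciss ioctl interface exposed.
	 */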
4764 pciinfo.board_id = ((subsystem_device << 16) & 0xffff0000) |
4765 subsystem_vendor;
4766
4767 if (copy_to_user(arg, &pciinfo, sizeof(pciinfo)))
4768 return -EFAULT;
4769
4770 return 0;
4771}
4772
4773static int pqi_getdrivver_ioctl(void __user *arg)
4774{
4775 u32 version;
4776
4777 if (!arg)
4778 return -EINVAL;
4779
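	/*
	 * Illustrative packing example with made-up numbers (the real values
	 * come from the DRIVER_* macros defined earlier in this file): a
	 * hypothetical driver version 1.2.3-400 would encode as
	 * (1 << 28) | (2 << 24) | (3 << 16) | 400 = 0x12030190.
	 */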
4780 version = (DRIVER_MAJOR << 28) | (DRIVER_MINOR << 24) |
4781 (DRIVER_RELEASE << 16) | DRIVER_REVISION;
4782
4783 if (copy_to_user(arg, &version, sizeof(version)))
4784 return -EFAULT;
4785
4786 return 0;
4787}
4788
4789struct ciss_error_info {
4790 u8 scsi_status;
4791 int command_status;
4792 size_t sense_data_length;
4793};
4794
4795static void pqi_error_info_to_ciss(struct pqi_raid_error_info *pqi_error_info,
4796 struct ciss_error_info *ciss_error_info)
4797{
4798 int ciss_cmd_status;
4799 size_t sense_data_length;
4800
4801 switch (pqi_error_info->data_out_result) {
4802 case PQI_DATA_IN_OUT_GOOD:
4803 ciss_cmd_status = CISS_CMD_STATUS_SUCCESS;
4804 break;
4805 case PQI_DATA_IN_OUT_UNDERFLOW:
4806 ciss_cmd_status = CISS_CMD_STATUS_DATA_UNDERRUN;
4807 break;
4808 case PQI_DATA_IN_OUT_BUFFER_OVERFLOW:
4809 ciss_cmd_status = CISS_CMD_STATUS_DATA_OVERRUN;
4810 break;
4811 case PQI_DATA_IN_OUT_PROTOCOL_ERROR:
4812 case PQI_DATA_IN_OUT_BUFFER_ERROR:
4813 case PQI_DATA_IN_OUT_BUFFER_OVERFLOW_DESCRIPTOR_AREA:
4814 case PQI_DATA_IN_OUT_BUFFER_OVERFLOW_BRIDGE:
4815 case PQI_DATA_IN_OUT_ERROR:
4816 ciss_cmd_status = CISS_CMD_STATUS_PROTOCOL_ERROR;
4817 break;
4818 case PQI_DATA_IN_OUT_HARDWARE_ERROR:
4819 case PQI_DATA_IN_OUT_PCIE_FABRIC_ERROR:
4820 case PQI_DATA_IN_OUT_PCIE_COMPLETION_TIMEOUT:
4821 case PQI_DATA_IN_OUT_PCIE_COMPLETER_ABORT_RECEIVED:
4822 case PQI_DATA_IN_OUT_PCIE_UNSUPPORTED_REQUEST_RECEIVED:
4823 case PQI_DATA_IN_OUT_PCIE_ECRC_CHECK_FAILED:
4824 case PQI_DATA_IN_OUT_PCIE_UNSUPPORTED_REQUEST:
4825 case PQI_DATA_IN_OUT_PCIE_ACS_VIOLATION:
4826 case PQI_DATA_IN_OUT_PCIE_TLP_PREFIX_BLOCKED:
4827 case PQI_DATA_IN_OUT_PCIE_POISONED_MEMORY_READ:
4828 ciss_cmd_status = CISS_CMD_STATUS_HARDWARE_ERROR;
4829 break;
4830 case PQI_DATA_IN_OUT_UNSOLICITED_ABORT:
4831 ciss_cmd_status = CISS_CMD_STATUS_UNSOLICITED_ABORT;
4832 break;
4833 case PQI_DATA_IN_OUT_ABORTED:
4834 ciss_cmd_status = CISS_CMD_STATUS_ABORTED;
4835 break;
4836 case PQI_DATA_IN_OUT_TIMEOUT:
4837 ciss_cmd_status = CISS_CMD_STATUS_TIMEOUT;
4838 break;
4839 default:
4840 ciss_cmd_status = CISS_CMD_STATUS_TARGET_STATUS;
4841 break;
4842 }
4843
4844 sense_data_length =
4845 get_unaligned_le16(&pqi_error_info->sense_data_length);
4846 if (sense_data_length == 0)
4847 sense_data_length =
4848 get_unaligned_le16(&pqi_error_info->response_data_length);
4849 if (sense_data_length)
4850 if (sense_data_length > sizeof(pqi_error_info->data))
4851 sense_data_length = sizeof(pqi_error_info->data);
4852
4853 ciss_error_info->scsi_status = pqi_error_info->status;
4854 ciss_error_info->command_status = ciss_cmd_status;
4855 ciss_error_info->sense_data_length = sense_data_length;
4856}
4857
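/*
 * High-level flow of the passthrough path below, paraphrasing the function
 * itself: validate the user's IOCTL_Command_struct, copy in any write data,
 * build a RAID-path IU around the caller's CDB, DMA-map the single data
 * buffer bidirectionally, submit it synchronously with an interruptible
 * wait, translate the PQI error information back into the CISS-style error
 * fields, and copy the result (and any read data) back to user space.
 */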
4858static int pqi_passthru_ioctl(struct pqi_ctrl_info *ctrl_info, void __user *arg)
4859{
4860 int rc;
4861 char *kernel_buffer = NULL;
4862 u16 iu_length;
4863 size_t sense_data_length;
4864 IOCTL_Command_struct iocommand;
4865 struct pqi_raid_path_request request;
4866 struct pqi_raid_error_info pqi_error_info;
4867 struct ciss_error_info ciss_error_info;
4868
4869 if (pqi_ctrl_offline(ctrl_info))
4870 return -ENXIO;
4871 if (!arg)
4872 return -EINVAL;
4873 if (!capable(CAP_SYS_RAWIO))
4874 return -EPERM;
4875 if (copy_from_user(&iocommand, arg, sizeof(iocommand)))
4876 return -EFAULT;
4877 if (iocommand.buf_size < 1 &&
4878 iocommand.Request.Type.Direction != XFER_NONE)
4879 return -EINVAL;
4880 if (iocommand.Request.CDBLen > sizeof(request.cdb))
4881 return -EINVAL;
4882 if (iocommand.Request.Type.Type != TYPE_CMD)
4883 return -EINVAL;
4884
4885 switch (iocommand.Request.Type.Direction) {
4886 case XFER_NONE:
4887 case XFER_WRITE:
4888 case XFER_READ:
4889 break;
4890 default:
4891 return -EINVAL;
4892 }
4893
4894 if (iocommand.buf_size > 0) {
4895 kernel_buffer = kmalloc(iocommand.buf_size, GFP_KERNEL);
4896 if (!kernel_buffer)
4897 return -ENOMEM;
4898 if (iocommand.Request.Type.Direction & XFER_WRITE) {
4899 if (copy_from_user(kernel_buffer, iocommand.buf,
4900 iocommand.buf_size)) {
4901 rc = -EFAULT;
4902 goto out;
4903 }
4904 } else {
4905 memset(kernel_buffer, 0, iocommand.buf_size);
4906 }
4907 }
4908
4909 memset(&request, 0, sizeof(request));
4910
4911 request.header.iu_type = PQI_REQUEST_IU_RAID_PATH_IO;
4912 iu_length = offsetof(struct pqi_raid_path_request, sg_descriptors) -
4913 PQI_REQUEST_HEADER_LENGTH;
4914 memcpy(request.lun_number, iocommand.LUN_info.LunAddrBytes,
4915 sizeof(request.lun_number));
4916 memcpy(request.cdb, iocommand.Request.CDB, iocommand.Request.CDBLen);
4917 request.additional_cdb_bytes_usage = SOP_ADDITIONAL_CDB_BYTES_0;
4918
4919 switch (iocommand.Request.Type.Direction) {
4920 case XFER_NONE:
4921 request.data_direction = SOP_NO_DIRECTION_FLAG;
4922 break;
4923 case XFER_WRITE:
4924 request.data_direction = SOP_WRITE_FLAG;
4925 break;
4926 case XFER_READ:
4927 request.data_direction = SOP_READ_FLAG;
4928 break;
4929 }
4930
4931 request.task_attribute = SOP_TASK_ATTRIBUTE_SIMPLE;
4932
4933 if (iocommand.buf_size > 0) {
4934 put_unaligned_le32(iocommand.buf_size, &request.buffer_length);
4935
4936 rc = pqi_map_single(ctrl_info->pci_dev,
4937 &request.sg_descriptors[0], kernel_buffer,
4938 iocommand.buf_size, PCI_DMA_BIDIRECTIONAL);
4939 if (rc)
4940 goto out;
4941
4942 iu_length += sizeof(request.sg_descriptors[0]);
4943 }
4944
4945 put_unaligned_le16(iu_length, &request.header.iu_length);
4946
4947 rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header,
4948 PQI_SYNC_FLAGS_INTERRUPTABLE, &pqi_error_info, NO_TIMEOUT);
4949
4950 if (iocommand.buf_size > 0)
4951 pqi_pci_unmap(ctrl_info->pci_dev, request.sg_descriptors, 1,
4952 PCI_DMA_BIDIRECTIONAL);
4953
4954 memset(&iocommand.error_info, 0, sizeof(iocommand.error_info));
4955
4956 if (rc == 0) {
4957 pqi_error_info_to_ciss(&pqi_error_info, &ciss_error_info);
4958 iocommand.error_info.ScsiStatus = ciss_error_info.scsi_status;
4959 iocommand.error_info.CommandStatus =
4960 ciss_error_info.command_status;
4961 sense_data_length = ciss_error_info.sense_data_length;
4962 if (sense_data_length) {
4963 if (sense_data_length >
4964 sizeof(iocommand.error_info.SenseInfo))
4965 sense_data_length =
4966 sizeof(iocommand.error_info.SenseInfo);
4967 memcpy(iocommand.error_info.SenseInfo,
4968 pqi_error_info.data, sense_data_length);
4969 iocommand.error_info.SenseLen = sense_data_length;
4970 }
4971 }
4972
4973 if (copy_to_user(arg, &iocommand, sizeof(iocommand))) {
4974 rc = -EFAULT;
4975 goto out;
4976 }
4977
4978 if (rc == 0 && iocommand.buf_size > 0 &&
4979 (iocommand.Request.Type.Direction & XFER_READ)) {
4980 if (copy_to_user(iocommand.buf, kernel_buffer,
4981 iocommand.buf_size)) {
4982 rc = -EFAULT;
4983 }
4984 }
4985
4986out:
4987 kfree(kernel_buffer);
4988
4989 return rc;
4990}
4991
4992static int pqi_ioctl(struct scsi_device *sdev, int cmd, void __user *arg)
4993{
4994 int rc;
4995 struct pqi_ctrl_info *ctrl_info;
4996
4997 ctrl_info = shost_to_hba(sdev->host);
4998
4999 switch (cmd) {
5000 case CCISS_DEREGDISK:
5001 case CCISS_REGNEWDISK:
5002 case CCISS_REGNEWD:
5003 rc = pqi_scan_scsi_devices(ctrl_info);
5004 break;
5005 case CCISS_GETPCIINFO:
5006 rc = pqi_getpciinfo_ioctl(ctrl_info, arg);
5007 break;
5008 case CCISS_GETDRIVVER:
5009 rc = pqi_getdrivver_ioctl(arg);
5010 break;
5011 case CCISS_PASSTHRU:
5012 rc = pqi_passthru_ioctl(ctrl_info, arg);
5013 break;
5014 default:
5015 rc = -EINVAL;
5016 break;
5017 }
5018
5019 return rc;
5020}
5021
5022static ssize_t pqi_version_show(struct device *dev,
5023 struct device_attribute *attr, char *buffer)
5024{
5025 ssize_t count = 0;
5026 struct Scsi_Host *shost;
5027 struct pqi_ctrl_info *ctrl_info;
5028
5029 shost = class_to_shost(dev);
5030 ctrl_info = shost_to_hba(shost);
5031
5032 count += snprintf(buffer + count, PAGE_SIZE - count,
5033 " driver: %s\n", DRIVER_VERSION BUILD_TIMESTAMP);
5034
5035 count += snprintf(buffer + count, PAGE_SIZE - count,
5036 "firmware: %s\n", ctrl_info->firmware_version);
5037
5038 return count;
5039}
5040
5041static ssize_t pqi_host_rescan_store(struct device *dev,
5042 struct device_attribute *attr, const char *buffer, size_t count)
5043{
5044 struct Scsi_Host *shost = class_to_shost(dev);
5045
5046 pqi_scan_start(shost);
5047
5048 return count;
5049}
5050
5051static DEVICE_ATTR(version, S_IRUGO, pqi_version_show, NULL);
5052static DEVICE_ATTR(rescan, S_IWUSR, NULL, pqi_host_rescan_store);
5053
5054static struct device_attribute *pqi_shost_attrs[] = {
5055 &dev_attr_version,
5056 &dev_attr_rescan,
5057 NULL
5058};
5059
5060static ssize_t pqi_sas_address_show(struct device *dev,
5061 struct device_attribute *attr, char *buffer)
5062{
5063 struct pqi_ctrl_info *ctrl_info;
5064 struct scsi_device *sdev;
5065 struct pqi_scsi_dev *device;
5066 unsigned long flags;
5067 u64 sas_address;
5068
5069 sdev = to_scsi_device(dev);
5070 ctrl_info = shost_to_hba(sdev->host);
5071
5072 spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);
5073
5074 device = sdev->hostdata;
5075 if (pqi_is_logical_device(device)) {
5076 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock,
5077 flags);
5078 return -ENODEV;
5079 }
5080 sas_address = device->sas_address;
5081
5082 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
5083
5084 return snprintf(buffer, PAGE_SIZE, "0x%016llx\n", sas_address);
5085}
5086
5087static ssize_t pqi_ssd_smart_path_enabled_show(struct device *dev,
5088 struct device_attribute *attr, char *buffer)
5089{
5090 struct pqi_ctrl_info *ctrl_info;
5091 struct scsi_device *sdev;
5092 struct pqi_scsi_dev *device;
5093 unsigned long flags;
5094
5095 sdev = to_scsi_device(dev);
5096 ctrl_info = shost_to_hba(sdev->host);
5097
5098 spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);
5099
5100 device = sdev->hostdata;
5101 buffer[0] = device->offload_enabled ? '1' : '0';
5102 buffer[1] = '\n';
5103 buffer[2] = '\0';
5104
5105 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
5106
5107 return 2;
5108}
5109
5110static DEVICE_ATTR(sas_address, S_IRUGO, pqi_sas_address_show, NULL);
5111static DEVICE_ATTR(ssd_smart_path_enabled, S_IRUGO,
5112 pqi_ssd_smart_path_enabled_show, NULL);
5113
5114static struct device_attribute *pqi_sdev_attrs[] = {
5115 &dev_attr_sas_address,
5116 &dev_attr_ssd_smart_path_enabled,
5117 NULL
5118};
5119
5120static struct scsi_host_template pqi_driver_template = {
5121 .module = THIS_MODULE,
5122 .name = DRIVER_NAME_SHORT,
5123 .proc_name = DRIVER_NAME_SHORT,
5124 .queuecommand = pqi_scsi_queue_command,
5125 .scan_start = pqi_scan_start,
5126 .scan_finished = pqi_scan_finished,
5127 .this_id = -1,
5128 .use_clustering = ENABLE_CLUSTERING,
5129 .eh_device_reset_handler = pqi_eh_device_reset_handler,
5130 .ioctl = pqi_ioctl,
5131 .slave_alloc = pqi_slave_alloc,
5132 .slave_configure = pqi_slave_configure,
5133 .sdev_attrs = pqi_sdev_attrs,
5134 .shost_attrs = pqi_shost_attrs,
5135};
5136
5137static int pqi_register_scsi(struct pqi_ctrl_info *ctrl_info)
5138{
5139 int rc;
5140 struct Scsi_Host *shost;
5141
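	/*
	 * Note, based on reading this function: sizeof(ctrl_info) is the size
	 * of a pointer, not of struct pqi_ctrl_info - only the ctrl_info
	 * pointer itself is stashed in shost->hostdata[0] below, so a
	 * pointer-sized private area is all that is requested here.
	 */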
5142 shost = scsi_host_alloc(&pqi_driver_template, sizeof(ctrl_info));
5143 if (!shost) {
5144 dev_err(&ctrl_info->pci_dev->dev,
5145 "scsi_host_alloc failed for controller %u\n",
5146 ctrl_info->ctrl_id);
5147 return -ENOMEM;
5148 }
5149
5150 shost->io_port = 0;
5151 shost->n_io_port = 0;
5152 shost->this_id = -1;
5153 shost->max_channel = PQI_MAX_BUS;
5154 shost->max_cmd_len = MAX_COMMAND_SIZE;
5155 shost->max_lun = ~0;
5156 shost->max_id = ~0;
5157 shost->max_sectors = ctrl_info->max_sectors;
5158 shost->can_queue = ctrl_info->scsi_ml_can_queue;
5159 shost->cmd_per_lun = shost->can_queue;
5160 shost->sg_tablesize = ctrl_info->sg_tablesize;
5161 shost->transportt = pqi_sas_transport_template;
5162 shost->irq = ctrl_info->msix_vectors[0];
5163 shost->unique_id = shost->irq;
5164 shost->nr_hw_queues = ctrl_info->num_queue_groups;
5165 shost->hostdata[0] = (unsigned long)ctrl_info;
5166
5167 rc = scsi_add_host(shost, &ctrl_info->pci_dev->dev);
5168 if (rc) {
5169 dev_err(&ctrl_info->pci_dev->dev,
5170 "scsi_add_host failed for controller %u\n",
5171 ctrl_info->ctrl_id);
5172 goto free_host;
5173 }
5174
5175 rc = pqi_add_sas_host(shost, ctrl_info);
5176 if (rc) {
5177 dev_err(&ctrl_info->pci_dev->dev,
5178 "add SAS host failed for controller %u\n",
5179 ctrl_info->ctrl_id);
5180 goto remove_host;
5181 }
5182
5183 ctrl_info->scsi_host = shost;
5184
5185 return 0;
5186
5187remove_host:
5188 scsi_remove_host(shost);
5189free_host:
5190 scsi_host_put(shost);
5191
5192 return rc;
5193}
5194
5195static void pqi_unregister_scsi(struct pqi_ctrl_info *ctrl_info)
5196{
5197 struct Scsi_Host *shost;
5198
5199 pqi_delete_sas_host(ctrl_info);
5200
5201 shost = ctrl_info->scsi_host;
5202 if (!shost)
5203 return;
5204
5205 scsi_remove_host(shost);
5206 scsi_host_put(shost);
5207}
5208
5209#define PQI_RESET_ACTION_RESET 0x1
5210
5211#define PQI_RESET_TYPE_NO_RESET 0x0
5212#define PQI_RESET_TYPE_SOFT_RESET 0x1
5213#define PQI_RESET_TYPE_FIRM_RESET 0x2
5214#define PQI_RESET_TYPE_HARD_RESET 0x3
5215
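/*
 * With the definitions above, the hard reset requested below works out to
 * (PQI_RESET_ACTION_RESET << 5) | PQI_RESET_TYPE_HARD_RESET =
 * (0x1 << 5) | 0x3 = 0x23: the action in the upper bit-field and the reset
 * type in the low-order bits.
 */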
5216static int pqi_reset(struct pqi_ctrl_info *ctrl_info)
5217{
5218 int rc;
5219 u32 reset_params;
5220
5221 reset_params = (PQI_RESET_ACTION_RESET << 5) |
5222 PQI_RESET_TYPE_HARD_RESET;
5223
5224 writel(reset_params,
5225 &ctrl_info->pqi_registers->device_reset);
5226
5227 rc = pqi_wait_for_pqi_mode_ready(ctrl_info);
5228 if (rc)
5229 dev_err(&ctrl_info->pci_dev->dev,
5230 "PQI reset failed\n");
5231
5232 return rc;
5233}
5234
5235static int pqi_get_ctrl_firmware_version(struct pqi_ctrl_info *ctrl_info)
5236{
5237 int rc;
5238 struct bmic_identify_controller *identify;
5239
5240 identify = kmalloc(sizeof(*identify), GFP_KERNEL);
5241 if (!identify)
5242 return -ENOMEM;
5243
5244 rc = pqi_identify_controller(ctrl_info, identify);
5245 if (rc)
5246 goto out;
5247
5248 memcpy(ctrl_info->firmware_version, identify->firmware_version,
5249 sizeof(identify->firmware_version));
5250 ctrl_info->firmware_version[sizeof(identify->firmware_version)] = '\0';
5251 snprintf(ctrl_info->firmware_version +
5252 strlen(ctrl_info->firmware_version),
5253 sizeof(ctrl_info->firmware_version),
5254 "-%u", get_unaligned_le16(&identify->firmware_build_number));
5255
5256out:
5257 kfree(identify);
5258
5259 return rc;
5260}
5261
5262static int pqi_kdump_init(struct pqi_ctrl_info *ctrl_info)
5263{
5264 if (!sis_is_firmware_running(ctrl_info))
5265 return -ENXIO;
5266
5267 if (pqi_get_ctrl_mode(ctrl_info) == PQI_MODE) {
5268 sis_disable_msix(ctrl_info);
5269 if (pqi_reset(ctrl_info) == 0)
5270 sis_reenable_sis_mode(ctrl_info);
5271 }
5272
5273 return 0;
5274}
5275
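/*
 * Road map for the normal bring-up sequence below (the reset_devices/kdump
 * path aside), summarizing the function in source order: wait for SIS
 * readiness, query controller properties and PQI capabilities, size I/O
 * resources and allocate the error buffer, switch the controller from SIS
 * into PQI mode, create the admin queues, validate the reported device
 * capability, enable MSI-X and resize the queue groups to the vectors
 * actually granted, allocate I/O and operational-queue resources, request
 * IRQs, create the event and operational queues, configure event delivery,
 * start the heartbeat timer, register with the SCSI midlayer, and finally
 * record the firmware version, update host wellness, and kick off a device
 * scan.
 */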
5276static int pqi_ctrl_init(struct pqi_ctrl_info *ctrl_info)
5277{
5278 int rc;
5279
5280 if (reset_devices) {
5281 rc = pqi_kdump_init(ctrl_info);
5282 if (rc)
5283 return rc;
5284 }
5285
5286 /*
5287 * When the controller comes out of reset, it is always running
5288 * in legacy SIS mode. This is so that it can be compatible
5289 * with legacy drivers shipped with OSes. So we have to talk
5290 * to it using SIS commands at first. Once we are satisfied
5291 * that the controller supports PQI, we transition it into PQI
5292 * mode.
5293 */
5294
5295 /*
5296 * Wait until the controller is ready to start accepting SIS
5297 * commands.
5298 */
5299 rc = sis_wait_for_ctrl_ready(ctrl_info);
5300 if (rc) {
5301 dev_err(&ctrl_info->pci_dev->dev,
5302 "error initializing SIS interface\n");
5303 return rc;
5304 }
5305
5306 /*
5307 * Get the controller properties. This allows us to determine
5308 * whether or not it supports PQI mode.
5309 */
5310 rc = sis_get_ctrl_properties(ctrl_info);
5311 if (rc) {
5312 dev_err(&ctrl_info->pci_dev->dev,
5313 "error obtaining controller properties\n");
5314 return rc;
5315 }
5316
5317 rc = sis_get_pqi_capabilities(ctrl_info);
5318 if (rc) {
5319 dev_err(&ctrl_info->pci_dev->dev,
5320 "error obtaining controller capabilities\n");
5321 return rc;
5322 }
5323
5324 if (ctrl_info->max_outstanding_requests > PQI_MAX_OUTSTANDING_REQUESTS)
5325 ctrl_info->max_outstanding_requests =
5326 PQI_MAX_OUTSTANDING_REQUESTS;
5327
5328 pqi_calculate_io_resources(ctrl_info);
5329
5330 rc = pqi_alloc_error_buffer(ctrl_info);
5331 if (rc) {
5332 dev_err(&ctrl_info->pci_dev->dev,
5333 "failed to allocate PQI error buffer\n");
5334 return rc;
5335 }
5336
5337 /*
5338 * If the function we are about to call succeeds, the
5339 * controller will transition from legacy SIS mode
5340 * into PQI mode.
5341 */
5342 rc = sis_init_base_struct_addr(ctrl_info);
5343 if (rc) {
5344 dev_err(&ctrl_info->pci_dev->dev,
5345 "error initializing PQI mode\n");
5346 return rc;
5347 }
5348
5349 /* Wait for the controller to complete the SIS -> PQI transition. */
5350 rc = pqi_wait_for_pqi_mode_ready(ctrl_info);
5351 if (rc) {
5352 dev_err(&ctrl_info->pci_dev->dev,
5353 "transition to PQI mode failed\n");
5354 return rc;
5355 }
5356
5357 /* From here on, we are running in PQI mode. */
5358 ctrl_info->pqi_mode_enabled = true;
5359 	pqi_save_ctrl_mode(ctrl_info, PQI_MODE);
5360
5361 rc = pqi_alloc_admin_queues(ctrl_info);
5362 if (rc) {
5363 dev_err(&ctrl_info->pci_dev->dev,
5364 "error allocating admin queues\n");
5365 return rc;
5366 }
5367
5368 rc = pqi_create_admin_queues(ctrl_info);
5369 if (rc) {
5370 dev_err(&ctrl_info->pci_dev->dev,
5371 "error creating admin queues\n");
5372 return rc;
5373 }
5374
5375 rc = pqi_report_device_capability(ctrl_info);
5376 if (rc) {
5377 dev_err(&ctrl_info->pci_dev->dev,
5378 "obtaining device capability failed\n");
5379 return rc;
5380 }
5381
5382 rc = pqi_validate_device_capability(ctrl_info);
5383 if (rc)
5384 return rc;
5385
5386 pqi_calculate_queue_resources(ctrl_info);
5387
5388 rc = pqi_enable_msix_interrupts(ctrl_info);
5389 if (rc)
5390 return rc;
5391
5392 if (ctrl_info->num_msix_vectors_enabled < ctrl_info->num_queue_groups) {
5393 ctrl_info->max_msix_vectors =
5394 ctrl_info->num_msix_vectors_enabled;
5395 pqi_calculate_queue_resources(ctrl_info);
5396 }
5397
5398 rc = pqi_alloc_io_resources(ctrl_info);
5399 if (rc)
5400 return rc;
5401
5402 rc = pqi_alloc_operational_queues(ctrl_info);
5403 if (rc)
5404 return rc;
5405
5406 pqi_init_operational_queues(ctrl_info);
5407
5408 rc = pqi_request_irqs(ctrl_info);
5409 if (rc)
5410 return rc;
5411
5412 pqi_irq_set_affinity_hint(ctrl_info);
5413
5414 rc = pqi_create_queues(ctrl_info);
5415 if (rc)
5416 return rc;
5417
5418 sis_enable_msix(ctrl_info);
5419
5420 rc = pqi_configure_events(ctrl_info);
5421 if (rc) {
5422 dev_err(&ctrl_info->pci_dev->dev,
5423 "error configuring events\n");
5424 return rc;
5425 }
5426
5427 pqi_start_heartbeat_timer(ctrl_info);
5428
5429 ctrl_info->controller_online = true;
5430
5431 /* Register with the SCSI subsystem. */
5432 rc = pqi_register_scsi(ctrl_info);
5433 if (rc)
5434 return rc;
5435
5436 rc = pqi_get_ctrl_firmware_version(ctrl_info);
5437 if (rc) {
5438 dev_err(&ctrl_info->pci_dev->dev,
5439 "error obtaining firmware version\n");
5440 return rc;
5441 }
5442
5443 rc = pqi_write_driver_version_to_host_wellness(ctrl_info);
5444 if (rc) {
5445 dev_err(&ctrl_info->pci_dev->dev,
5446 "error updating host wellness\n");
5447 return rc;
5448 }
5449
5450 pqi_schedule_update_time_worker(ctrl_info);
5451
5452 pqi_scan_scsi_devices(ctrl_info);
5453
5454 return 0;
5455}
5456
5457static int pqi_pci_init(struct pqi_ctrl_info *ctrl_info)
5458{
5459 int rc;
5460 u64 mask;
5461
5462 rc = pci_enable_device(ctrl_info->pci_dev);
5463 if (rc) {
5464 dev_err(&ctrl_info->pci_dev->dev,
5465 "failed to enable PCI device\n");
5466 return rc;
5467 }
5468
5469 if (sizeof(dma_addr_t) > 4)
5470 mask = DMA_BIT_MASK(64);
5471 else
5472 mask = DMA_BIT_MASK(32);
5473
5474 rc = dma_set_mask(&ctrl_info->pci_dev->dev, mask);
5475 if (rc) {
5476 dev_err(&ctrl_info->pci_dev->dev, "failed to set DMA mask\n");
5477 goto disable_device;
5478 }
5479
5480 rc = pci_request_regions(ctrl_info->pci_dev, DRIVER_NAME_SHORT);
5481 if (rc) {
5482 dev_err(&ctrl_info->pci_dev->dev,
5483 "failed to obtain PCI resources\n");
5484 goto disable_device;
5485 }
5486
5487 ctrl_info->iomem_base = ioremap_nocache(pci_resource_start(
5488 ctrl_info->pci_dev, 0),
5489 sizeof(struct pqi_ctrl_registers));
5490 if (!ctrl_info->iomem_base) {
5491 dev_err(&ctrl_info->pci_dev->dev,
5492 "failed to map memory for controller registers\n");
5493 rc = -ENOMEM;
5494 goto release_regions;
5495 }
5496
5497 ctrl_info->registers = ctrl_info->iomem_base;
5498 ctrl_info->pqi_registers = &ctrl_info->registers->pqi_registers;
5499
5500 /* Enable bus mastering. */
5501 pci_set_master(ctrl_info->pci_dev);
5502
5503 pci_set_drvdata(ctrl_info->pci_dev, ctrl_info);
5504
5505 return 0;
5506
5507release_regions:
5508 pci_release_regions(ctrl_info->pci_dev);
5509disable_device:
5510 pci_disable_device(ctrl_info->pci_dev);
5511
5512 return rc;
5513}
5514
5515static void pqi_cleanup_pci_init(struct pqi_ctrl_info *ctrl_info)
5516{
5517 iounmap(ctrl_info->iomem_base);
5518 pci_release_regions(ctrl_info->pci_dev);
5519 pci_disable_device(ctrl_info->pci_dev);
5520 pci_set_drvdata(ctrl_info->pci_dev, NULL);
5521}
5522
5523static struct pqi_ctrl_info *pqi_alloc_ctrl_info(int numa_node)
5524{
5525 struct pqi_ctrl_info *ctrl_info;
5526
5527 ctrl_info = kzalloc_node(sizeof(struct pqi_ctrl_info),
5528 GFP_KERNEL, numa_node);
5529 if (!ctrl_info)
5530 return NULL;
5531
5532 mutex_init(&ctrl_info->scan_mutex);
5533
5534 INIT_LIST_HEAD(&ctrl_info->scsi_device_list);
5535 spin_lock_init(&ctrl_info->scsi_device_list_lock);
5536
5537 INIT_WORK(&ctrl_info->event_work, pqi_event_worker);
5538 atomic_set(&ctrl_info->num_interrupts, 0);
5539
5540 INIT_DELAYED_WORK(&ctrl_info->rescan_work, pqi_rescan_worker);
5541 INIT_DELAYED_WORK(&ctrl_info->update_time_work, pqi_update_time_worker);
5542
5543 sema_init(&ctrl_info->sync_request_sem,
5544 PQI_RESERVED_IO_SLOTS_SYNCHRONOUS_REQUESTS);
5545 sema_init(&ctrl_info->lun_reset_sem, PQI_RESERVED_IO_SLOTS_LUN_RESET);
5546
5547 ctrl_info->ctrl_id = atomic_inc_return(&pqi_controller_count) - 1;
5548 ctrl_info->max_msix_vectors = PQI_MAX_MSIX_VECTORS;
5549
5550 return ctrl_info;
5551}
5552
5553static inline void pqi_free_ctrl_info(struct pqi_ctrl_info *ctrl_info)
5554{
5555 kfree(ctrl_info);
5556}
5557
5558static void pqi_free_interrupts(struct pqi_ctrl_info *ctrl_info)
5559{
5560 pqi_irq_unset_affinity_hint(ctrl_info);
5561 pqi_free_irqs(ctrl_info);
5562 if (ctrl_info->num_msix_vectors_enabled)
5563 pci_disable_msix(ctrl_info->pci_dev);
5564}
5565
5566static void pqi_free_ctrl_resources(struct pqi_ctrl_info *ctrl_info)
5567{
5568 pqi_stop_heartbeat_timer(ctrl_info);
5569 pqi_free_interrupts(ctrl_info);
5570 if (ctrl_info->queue_memory_base)
5571 dma_free_coherent(&ctrl_info->pci_dev->dev,
5572 ctrl_info->queue_memory_length,
5573 ctrl_info->queue_memory_base,
5574 ctrl_info->queue_memory_base_dma_handle);
5575 if (ctrl_info->admin_queue_memory_base)
5576 dma_free_coherent(&ctrl_info->pci_dev->dev,
5577 ctrl_info->admin_queue_memory_length,
5578 ctrl_info->admin_queue_memory_base,
5579 ctrl_info->admin_queue_memory_base_dma_handle);
5580 pqi_free_all_io_requests(ctrl_info);
5581 if (ctrl_info->error_buffer)
5582 dma_free_coherent(&ctrl_info->pci_dev->dev,
5583 ctrl_info->error_buffer_length,
5584 ctrl_info->error_buffer,
5585 ctrl_info->error_buffer_dma_handle);
5586 if (ctrl_info->iomem_base)
5587 pqi_cleanup_pci_init(ctrl_info);
5588 pqi_free_ctrl_info(ctrl_info);
5589}
5590
5591static void pqi_remove_ctrl(struct pqi_ctrl_info *ctrl_info)
5592{
5593 cancel_delayed_work_sync(&ctrl_info->rescan_work);
5594 cancel_delayed_work_sync(&ctrl_info->update_time_work);
5595 pqi_remove_all_scsi_devices(ctrl_info);
5596 pqi_unregister_scsi(ctrl_info);
5597
5598 if (ctrl_info->pqi_mode_enabled) {
5599 sis_disable_msix(ctrl_info);
5600 		if (pqi_reset(ctrl_info) == 0)
5601 sis_reenable_sis_mode(ctrl_info);
5602 }
5603 pqi_free_ctrl_resources(ctrl_info);
5604}
5605
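/*
 * Log a human-readable description of the matched controller, preferring
 * the description stored in the PCI ID table entry and falling back to a
 * branded name chosen by subsystem vendor.
 */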
5606static void pqi_print_ctrl_info(struct pci_dev *pdev,
5607 const struct pci_device_id *id)
5608{
5609 char *ctrl_description;
5610
5611 if (id->driver_data) {
5612 ctrl_description = (char *)id->driver_data;
5613 } else {
5614 switch (id->subvendor) {
5615 case PCI_VENDOR_ID_HP:
5616 ctrl_description = hpe_branded_controller;
5617 break;
5618 case PCI_VENDOR_ID_ADAPTEC2:
5619 default:
5620 ctrl_description = microsemi_branded_controller;
5621 break;
5622 }
5623 }
5624
5625 dev_info(&pdev->dev, "%s found\n", ctrl_description);
5626}
5627
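/*
 * PCI probe entry point.  After logging which controller was matched, it
 * honors the disable_device_id_wildcards module parameter, pins the
 * device to a NUMA node (defaulting to node 0 when none is reported),
 * allocates the controller context, and performs PCI and controller
 * initialization.  Any failure funnels through pqi_remove_ctrl() so that
 * partially acquired resources are released.
 */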
5628static int pqi_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
5629{
5630 int rc;
5631 int node;
5632 struct pqi_ctrl_info *ctrl_info;
5633
5634 pqi_print_ctrl_info(pdev, id);
5635
5636 if (pqi_disable_device_id_wildcards &&
5637 id->subvendor == PCI_ANY_ID &&
5638 id->subdevice == PCI_ANY_ID) {
5639 dev_warn(&pdev->dev,
5640 "controller not probed because device ID wildcards are disabled\n");
5641 return -ENODEV;
5642 }
5643
5644 if (id->subvendor == PCI_ANY_ID || id->subdevice == PCI_ANY_ID)
5645 dev_warn(&pdev->dev,
5646 "controller device ID matched using wildcards\n");
5647
5648 node = dev_to_node(&pdev->dev);
5649 if (node == NUMA_NO_NODE)
5650 set_dev_node(&pdev->dev, 0);
5651
5652 ctrl_info = pqi_alloc_ctrl_info(node);
5653 if (!ctrl_info) {
5654 dev_err(&pdev->dev,
5655 "failed to allocate controller info block\n");
5656 return -ENOMEM;
5657 }
5658
5659 ctrl_info->pci_dev = pdev;
5660
5661 rc = pqi_pci_init(ctrl_info);
5662 if (rc)
5663 goto error;
5664
5665 rc = pqi_ctrl_init(ctrl_info);
5666 if (rc)
5667 goto error;
5668
5669 return 0;
5670
5671error:
5672 pqi_remove_ctrl(ctrl_info);
5673
5674 return rc;
5675}
5676
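/*
 * PCI remove callback: fetch the controller context stored in drvdata
 * during PCI initialization and run the common teardown path.
 */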
5677static void pqi_pci_remove(struct pci_dev *pdev)
5678{
5679 struct pqi_ctrl_info *ctrl_info;
5680
5681 ctrl_info = pci_get_drvdata(pdev);
5682 if (!ctrl_info)
5683 return;
5684
5685 pqi_remove_ctrl(ctrl_info);
5686}
5687
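/*
 * Shutdown hook invoked on reboot/power-off: flush the controller's
 * battery-backed write cache to physical storage so no dirty data is
 * lost, and warn if the flush cannot be issued.
 */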
5688static void pqi_shutdown(struct pci_dev *pdev)
5689{
5690 int rc;
5691 struct pqi_ctrl_info *ctrl_info;
5692
5693 ctrl_info = pci_get_drvdata(pdev);
5694 if (!ctrl_info)
5695 goto error;
5696
5697 /*
5698 * Write all data in the controller's battery-backed cache to
5699 * storage.
5700 */
5701 rc = pqi_flush_cache(ctrl_info);
5702 if (rc == 0)
5703 return;
5704
5705error:
5706 dev_warn(&pdev->dev,
5707 "unable to flush controller cache\n");
5708}
5709
5710/* Define the PCI IDs for the controllers that we support. */
5711static const struct pci_device_id pqi_pci_id_table[] = {
5712 {
5713 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
5714 PCI_VENDOR_ID_ADAPTEC2, 0x0110)
5715 },
5716 {
5717 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
5718 PCI_VENDOR_ID_HP, 0x0600)
5719 },
5720 {
5721 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
5722 PCI_VENDOR_ID_HP, 0x0601)
5723 },
5724 {
5725 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
5726 PCI_VENDOR_ID_HP, 0x0602)
5727 },
5728 {
5729 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
5730 PCI_VENDOR_ID_HP, 0x0603)
5731 },
5732 {
5733 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
5734 PCI_VENDOR_ID_HP, 0x0650)
5735 },
5736 {
5737 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
5738 PCI_VENDOR_ID_HP, 0x0651)
5739 },
5740 {
5741 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
5742 PCI_VENDOR_ID_HP, 0x0652)
5743 },
5744 {
5745 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
5746 PCI_VENDOR_ID_HP, 0x0653)
5747 },
5748 {
5749 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
5750 PCI_VENDOR_ID_HP, 0x0654)
5751 },
5752 {
5753 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
5754 PCI_VENDOR_ID_HP, 0x0655)
5755 },
5756 {
5757 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
5758 PCI_VENDOR_ID_HP, 0x0700)
5759 },
5760 {
5761 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
5762 PCI_VENDOR_ID_HP, 0x0701)
5763 },
5764 {
5765 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
5766 PCI_VENDOR_ID_ADAPTEC2, 0x0800)
5767 },
5768 {
5769 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
5770 PCI_VENDOR_ID_ADAPTEC2, 0x0801)
5771 },
5772 {
5773 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
5774 PCI_VENDOR_ID_ADAPTEC2, 0x0802)
5775 },
5776 {
5777 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
5778 PCI_VENDOR_ID_ADAPTEC2, 0x0803)
5779 },
5780 {
5781 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
5782 PCI_VENDOR_ID_ADAPTEC2, 0x0804)
5783 },
5784 {
5785 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
5786 PCI_VENDOR_ID_ADAPTEC2, 0x0805)
5787 },
5788 {
5789 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
5790 PCI_VENDOR_ID_ADAPTEC2, 0x0900)
5791 },
5792 {
5793 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
5794 PCI_VENDOR_ID_ADAPTEC2, 0x0901)
5795 },
5796 {
5797 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
5798 PCI_VENDOR_ID_ADAPTEC2, 0x0902)
5799 },
5800 {
5801 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
5802 PCI_VENDOR_ID_ADAPTEC2, 0x0903)
5803 },
5804 {
5805 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
5806 PCI_VENDOR_ID_ADAPTEC2, 0x0904)
5807 },
5808 {
5809 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
5810 PCI_VENDOR_ID_ADAPTEC2, 0x0905)
5811 },
5812 {
5813 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
5814 PCI_VENDOR_ID_ADAPTEC2, 0x0906)
5815 },
5816 {
5817 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
5818 PCI_VENDOR_ID_HP, 0x1001)
5819 },
5820 {
5821 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
5822 PCI_VENDOR_ID_HP, 0x1100)
5823 },
5824 {
5825 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
5826 PCI_VENDOR_ID_HP, 0x1101)
5827 },
5828 {
5829 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
5830 PCI_VENDOR_ID_HP, 0x1102)
5831 },
5832 {
5833 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
5834 PCI_VENDOR_ID_HP, 0x1150)
5835 },
5836 {
5837 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
5838 PCI_ANY_ID, PCI_ANY_ID)
5839 },
5840 { 0 }
5841};
5842
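/*
 * Export the ID table so the module alias information lets userspace
 * autoload this driver when a matching controller is discovered.  Each
 * PCI_DEVICE_SUB() entry matches on vendor, device, subvendor, and
 * subdevice ID; the final entry wildcards the subsystem IDs for the same
 * vendor/device pair and is rejected in pqi_pci_probe() when wildcard
 * matching is disabled via the module parameter.
 */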
5843MODULE_DEVICE_TABLE(pci, pqi_pci_id_table);
5844
5845static struct pci_driver pqi_pci_driver = {
5846 .name = DRIVER_NAME_SHORT,
5847 .id_table = pqi_pci_id_table,
5848 .probe = pqi_pci_probe,
5849 .remove = pqi_pci_remove,
5850 .shutdown = pqi_shutdown,
5851};
5852
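/*
 * Module init attaches the SAS transport template before registering the
 * PCI driver and releases it again if registration fails; module exit
 * reverses the order.
 */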
5853static int __init pqi_init(void)
5854{
5855 int rc;
5856
5857 pr_info(DRIVER_NAME "\n");
5858
5859 pqi_sas_transport_template =
5860 sas_attach_transport(&pqi_sas_transport_functions);
5861 if (!pqi_sas_transport_template)
5862 return -ENODEV;
5863
5864 rc = pci_register_driver(&pqi_pci_driver);
5865 if (rc)
5866 sas_release_transport(pqi_sas_transport_template);
5867
5868 return rc;
5869}
5870
5871static void __exit pqi_cleanup(void)
5872{
5873 pci_unregister_driver(&pqi_pci_driver);
5874 sas_release_transport(pqi_sas_transport_template);
5875}
5876
5877module_init(pqi_init);
5878module_exit(pqi_cleanup);
5879
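/*
 * verify_structures() is never called at run time; it exists only so the
 * BUILD_BUG_ON() checks below break the build if the layout of any
 * hardware-visible structure (register maps, IU and error-info formats)
 * drifts from the byte offsets the controller firmware expects.
 */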
5880static void __attribute__((unused)) verify_structures(void)
5881{
5882 BUILD_BUG_ON(offsetof(struct pqi_ctrl_registers,
5883 sis_host_to_ctrl_doorbell) != 0x20);
5884 BUILD_BUG_ON(offsetof(struct pqi_ctrl_registers,
5885 sis_interrupt_mask) != 0x34);
5886 BUILD_BUG_ON(offsetof(struct pqi_ctrl_registers,
5887 sis_ctrl_to_host_doorbell) != 0x9c);
5888 BUILD_BUG_ON(offsetof(struct pqi_ctrl_registers,
5889 sis_ctrl_to_host_doorbell_clear) != 0xa0);
5890 BUILD_BUG_ON(offsetof(struct pqi_ctrl_registers,
5891 sis_driver_scratch) != 0xb0);
5892 BUILD_BUG_ON(offsetof(struct pqi_ctrl_registers,
5893 sis_firmware_status) != 0xbc);
5894 BUILD_BUG_ON(offsetof(struct pqi_ctrl_registers,
5895 sis_mailbox) != 0x1000);
5896 BUILD_BUG_ON(offsetof(struct pqi_ctrl_registers,
5897 pqi_registers) != 0x4000);
5898
5899 BUILD_BUG_ON(offsetof(struct pqi_iu_header,
5900 iu_type) != 0x0);
5901 BUILD_BUG_ON(offsetof(struct pqi_iu_header,
5902 iu_length) != 0x2);
5903 BUILD_BUG_ON(offsetof(struct pqi_iu_header,
5904 response_queue_id) != 0x4);
5905 BUILD_BUG_ON(offsetof(struct pqi_iu_header,
5906 work_area) != 0x6);
5907 BUILD_BUG_ON(sizeof(struct pqi_iu_header) != 0x8);
5908
5909 BUILD_BUG_ON(offsetof(struct pqi_aio_error_info,
5910 status) != 0x0);
5911 BUILD_BUG_ON(offsetof(struct pqi_aio_error_info,
5912 service_response) != 0x1);
5913 BUILD_BUG_ON(offsetof(struct pqi_aio_error_info,
5914 data_present) != 0x2);
5915 BUILD_BUG_ON(offsetof(struct pqi_aio_error_info,
5916 reserved) != 0x3);
5917 BUILD_BUG_ON(offsetof(struct pqi_aio_error_info,
5918 residual_count) != 0x4);
5919 BUILD_BUG_ON(offsetof(struct pqi_aio_error_info,
5920 data_length) != 0x8);
5921 BUILD_BUG_ON(offsetof(struct pqi_aio_error_info,
5922 reserved1) != 0xa);
5923 BUILD_BUG_ON(offsetof(struct pqi_aio_error_info,
5924 data) != 0xc);
5925 BUILD_BUG_ON(sizeof(struct pqi_aio_error_info) != 0x10c);
5926
5927 BUILD_BUG_ON(offsetof(struct pqi_raid_error_info,
5928 data_in_result) != 0x0);
5929 BUILD_BUG_ON(offsetof(struct pqi_raid_error_info,
5930 data_out_result) != 0x1);
5931 BUILD_BUG_ON(offsetof(struct pqi_raid_error_info,
5932 reserved) != 0x2);
5933 BUILD_BUG_ON(offsetof(struct pqi_raid_error_info,
5934 status) != 0x5);
5935 BUILD_BUG_ON(offsetof(struct pqi_raid_error_info,
5936 status_qualifier) != 0x6);
5937 BUILD_BUG_ON(offsetof(struct pqi_raid_error_info,
5938 sense_data_length) != 0x8);
5939 BUILD_BUG_ON(offsetof(struct pqi_raid_error_info,
5940 response_data_length) != 0xa);
5941 BUILD_BUG_ON(offsetof(struct pqi_raid_error_info,
5942 data_in_transferred) != 0xc);
5943 BUILD_BUG_ON(offsetof(struct pqi_raid_error_info,
5944 data_out_transferred) != 0x10);
5945 BUILD_BUG_ON(offsetof(struct pqi_raid_error_info,
5946 data) != 0x14);
5947 BUILD_BUG_ON(sizeof(struct pqi_raid_error_info) != 0x114);
5948
5949 BUILD_BUG_ON(offsetof(struct pqi_device_registers,
5950 signature) != 0x0);
5951 BUILD_BUG_ON(offsetof(struct pqi_device_registers,
5952 function_and_status_code) != 0x8);
5953 BUILD_BUG_ON(offsetof(struct pqi_device_registers,
5954 max_admin_iq_elements) != 0x10);
5955 BUILD_BUG_ON(offsetof(struct pqi_device_registers,
5956 max_admin_oq_elements) != 0x11);
5957 BUILD_BUG_ON(offsetof(struct pqi_device_registers,
5958 admin_iq_element_length) != 0x12);
5959 BUILD_BUG_ON(offsetof(struct pqi_device_registers,
5960 admin_oq_element_length) != 0x13);
5961 BUILD_BUG_ON(offsetof(struct pqi_device_registers,
5962 max_reset_timeout) != 0x14);
5963 BUILD_BUG_ON(offsetof(struct pqi_device_registers,
5964 legacy_intx_status) != 0x18);
5965 BUILD_BUG_ON(offsetof(struct pqi_device_registers,
5966 legacy_intx_mask_set) != 0x1c);
5967 BUILD_BUG_ON(offsetof(struct pqi_device_registers,
5968 legacy_intx_mask_clear) != 0x20);
5969 BUILD_BUG_ON(offsetof(struct pqi_device_registers,
5970 device_status) != 0x40);
5971 BUILD_BUG_ON(offsetof(struct pqi_device_registers,
5972 admin_iq_pi_offset) != 0x48);
5973 BUILD_BUG_ON(offsetof(struct pqi_device_registers,
5974 admin_oq_ci_offset) != 0x50);
5975 BUILD_BUG_ON(offsetof(struct pqi_device_registers,
5976 admin_iq_element_array_addr) != 0x58);
5977 BUILD_BUG_ON(offsetof(struct pqi_device_registers,
5978 admin_oq_element_array_addr) != 0x60);
5979 BUILD_BUG_ON(offsetof(struct pqi_device_registers,
5980 admin_iq_ci_addr) != 0x68);
5981 BUILD_BUG_ON(offsetof(struct pqi_device_registers,
5982 admin_oq_pi_addr) != 0x70);
5983 BUILD_BUG_ON(offsetof(struct pqi_device_registers,
5984 admin_iq_num_elements) != 0x78);
5985 BUILD_BUG_ON(offsetof(struct pqi_device_registers,
5986 admin_oq_num_elements) != 0x79);
5987 BUILD_BUG_ON(offsetof(struct pqi_device_registers,
5988 admin_queue_int_msg_num) != 0x7a);
5989 BUILD_BUG_ON(offsetof(struct pqi_device_registers,
5990 device_error) != 0x80);
5991 BUILD_BUG_ON(offsetof(struct pqi_device_registers,
5992 error_details) != 0x88);
5993 BUILD_BUG_ON(offsetof(struct pqi_device_registers,
5994 device_reset) != 0x90);
5995 BUILD_BUG_ON(offsetof(struct pqi_device_registers,
5996 power_action) != 0x94);
5997 BUILD_BUG_ON(sizeof(struct pqi_device_registers) != 0x100);
5998
5999 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
6000 header.iu_type) != 0);
6001 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
6002 header.iu_length) != 2);
6003 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
6004 header.work_area) != 6);
6005 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
6006 request_id) != 8);
6007 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
6008 function_code) != 10);
6009 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
6010 data.report_device_capability.buffer_length) != 44);
6011 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
6012 data.report_device_capability.sg_descriptor) != 48);
6013 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
6014 data.create_operational_iq.queue_id) != 12);
6015 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
6016 data.create_operational_iq.element_array_addr) != 16);
6017 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
6018 data.create_operational_iq.ci_addr) != 24);
6019 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
6020 data.create_operational_iq.num_elements) != 32);
6021 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
6022 data.create_operational_iq.element_length) != 34);
6023 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
6024 data.create_operational_iq.queue_protocol) != 36);
6025 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
6026 data.create_operational_oq.queue_id) != 12);
6027 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
6028 data.create_operational_oq.element_array_addr) != 16);
6029 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
6030 data.create_operational_oq.pi_addr) != 24);
6031 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
6032 data.create_operational_oq.num_elements) != 32);
6033 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
6034 data.create_operational_oq.element_length) != 34);
6035 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
6036 data.create_operational_oq.queue_protocol) != 36);
6037 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
6038 data.create_operational_oq.int_msg_num) != 40);
6039 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
6040 data.create_operational_oq.coalescing_count) != 42);
6041 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
6042 data.create_operational_oq.min_coalescing_time) != 44);
6043 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
6044 data.create_operational_oq.max_coalescing_time) != 48);
6045 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
6046 data.delete_operational_queue.queue_id) != 12);
6047 BUILD_BUG_ON(sizeof(struct pqi_general_admin_request) != 64);
6048 BUILD_BUG_ON(FIELD_SIZEOF(struct pqi_general_admin_request,
6049 data.create_operational_iq) != 64 - 11);
6050 BUILD_BUG_ON(FIELD_SIZEOF(struct pqi_general_admin_request,
6051 data.create_operational_oq) != 64 - 11);
6052 BUILD_BUG_ON(FIELD_SIZEOF(struct pqi_general_admin_request,
6053 data.delete_operational_queue) != 64 - 11);
6054
6055 BUILD_BUG_ON(offsetof(struct pqi_general_admin_response,
6056 header.iu_type) != 0);
6057 BUILD_BUG_ON(offsetof(struct pqi_general_admin_response,
6058 header.iu_length) != 2);
6059 BUILD_BUG_ON(offsetof(struct pqi_general_admin_response,
6060 header.work_area) != 6);
6061 BUILD_BUG_ON(offsetof(struct pqi_general_admin_response,
6062 request_id) != 8);
6063 BUILD_BUG_ON(offsetof(struct pqi_general_admin_response,
6064 function_code) != 10);
6065 BUILD_BUG_ON(offsetof(struct pqi_general_admin_response,
6066 status) != 11);
6067 BUILD_BUG_ON(offsetof(struct pqi_general_admin_response,
6068 data.create_operational_iq.status_descriptor) != 12);
6069 BUILD_BUG_ON(offsetof(struct pqi_general_admin_response,
6070 data.create_operational_iq.iq_pi_offset) != 16);
6071 BUILD_BUG_ON(offsetof(struct pqi_general_admin_response,
6072 data.create_operational_oq.status_descriptor) != 12);
6073 BUILD_BUG_ON(offsetof(struct pqi_general_admin_response,
6074 data.create_operational_oq.oq_ci_offset) != 16);
6075 BUILD_BUG_ON(sizeof(struct pqi_general_admin_response) != 64);
6076
6077 BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
6078 header.iu_type) != 0);
6079 BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
6080 header.iu_length) != 2);
6081 BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
6082 header.response_queue_id) != 4);
6083 BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
6084 header.work_area) != 6);
6085 BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
6086 request_id) != 8);
6087 BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
6088 nexus_id) != 10);
6089 BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
6090 buffer_length) != 12);
6091 BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
6092 lun_number) != 16);
6093 BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
6094 protocol_specific) != 24);
6095 BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
6096 error_index) != 27);
6097 BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
6098 cdb) != 32);
6099 BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
6100 sg_descriptors) != 64);
6101 BUILD_BUG_ON(sizeof(struct pqi_raid_path_request) !=
6102 PQI_OPERATIONAL_IQ_ELEMENT_LENGTH);
6103
6104 BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
6105 header.iu_type) != 0);
6106 BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
6107 header.iu_length) != 2);
6108 BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
6109 header.response_queue_id) != 4);
6110 BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
6111 header.work_area) != 6);
6112 BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
6113 request_id) != 8);
6114 BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
6115 nexus_id) != 12);
6116 BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
6117 buffer_length) != 16);
6118 BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
6119 data_encryption_key_index) != 22);
6120 BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
6121 encrypt_tweak_lower) != 24);
6122 BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
6123 encrypt_tweak_upper) != 28);
6124 BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
6125 cdb) != 32);
6126 BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
6127 error_index) != 48);
6128 BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
6129 num_sg_descriptors) != 50);
6130 BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
6131 cdb_length) != 51);
6132 BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
6133 lun_number) != 52);
6134 BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
6135 sg_descriptors) != 64);
6136 BUILD_BUG_ON(sizeof(struct pqi_aio_path_request) !=
6137 PQI_OPERATIONAL_IQ_ELEMENT_LENGTH);
6138
6139 BUILD_BUG_ON(offsetof(struct pqi_io_response,
6140 header.iu_type) != 0);
6141 BUILD_BUG_ON(offsetof(struct pqi_io_response,
6142 header.iu_length) != 2);
6143 BUILD_BUG_ON(offsetof(struct pqi_io_response,
6144 request_id) != 8);
6145 BUILD_BUG_ON(offsetof(struct pqi_io_response,
6146 error_index) != 10);
6147
6148 BUILD_BUG_ON(offsetof(struct pqi_general_management_request,
6149 header.iu_type) != 0);
6150 BUILD_BUG_ON(offsetof(struct pqi_general_management_request,
6151 header.iu_length) != 2);
6152 BUILD_BUG_ON(offsetof(struct pqi_general_management_request,
6153 header.response_queue_id) != 4);
6154 BUILD_BUG_ON(offsetof(struct pqi_general_management_request,
6155 request_id) != 8);
6156 BUILD_BUG_ON(offsetof(struct pqi_general_management_request,
6157 data.report_event_configuration.buffer_length) != 12);
6158 BUILD_BUG_ON(offsetof(struct pqi_general_management_request,
6159 data.report_event_configuration.sg_descriptors) != 16);
6160 BUILD_BUG_ON(offsetof(struct pqi_general_management_request,
6161 data.set_event_configuration.global_event_oq_id) != 10);
6162 BUILD_BUG_ON(offsetof(struct pqi_general_management_request,
6163 data.set_event_configuration.buffer_length) != 12);
6164 BUILD_BUG_ON(offsetof(struct pqi_general_management_request,
6165 data.set_event_configuration.sg_descriptors) != 16);
6166
6167 BUILD_BUG_ON(offsetof(struct pqi_iu_layer_descriptor,
6168 max_inbound_iu_length) != 6);
6169 BUILD_BUG_ON(offsetof(struct pqi_iu_layer_descriptor,
6170 max_outbound_iu_length) != 14);
6171 BUILD_BUG_ON(sizeof(struct pqi_iu_layer_descriptor) != 16);
6172
6173 BUILD_BUG_ON(offsetof(struct pqi_device_capability,
6174 data_length) != 0);
6175 BUILD_BUG_ON(offsetof(struct pqi_device_capability,
6176 iq_arbitration_priority_support_bitmask) != 8);
6177 BUILD_BUG_ON(offsetof(struct pqi_device_capability,
6178 maximum_aw_a) != 9);
6179 BUILD_BUG_ON(offsetof(struct pqi_device_capability,
6180 maximum_aw_b) != 10);
6181 BUILD_BUG_ON(offsetof(struct pqi_device_capability,
6182 maximum_aw_c) != 11);
6183 BUILD_BUG_ON(offsetof(struct pqi_device_capability,
6184 max_inbound_queues) != 16);
6185 BUILD_BUG_ON(offsetof(struct pqi_device_capability,
6186 max_elements_per_iq) != 18);
6187 BUILD_BUG_ON(offsetof(struct pqi_device_capability,
6188 max_iq_element_length) != 24);
6189 BUILD_BUG_ON(offsetof(struct pqi_device_capability,
6190 min_iq_element_length) != 26);
6191 BUILD_BUG_ON(offsetof(struct pqi_device_capability,
6192 max_outbound_queues) != 30);
6193 BUILD_BUG_ON(offsetof(struct pqi_device_capability,
6194 max_elements_per_oq) != 32);
6195 BUILD_BUG_ON(offsetof(struct pqi_device_capability,
6196 intr_coalescing_time_granularity) != 34);
6197 BUILD_BUG_ON(offsetof(struct pqi_device_capability,
6198 max_oq_element_length) != 36);
6199 BUILD_BUG_ON(offsetof(struct pqi_device_capability,
6200 min_oq_element_length) != 38);
6201 BUILD_BUG_ON(offsetof(struct pqi_device_capability,
6202 iu_layer_descriptors) != 64);
6203 BUILD_BUG_ON(sizeof(struct pqi_device_capability) != 576);
6204
6205 BUILD_BUG_ON(offsetof(struct pqi_event_descriptor,
6206 event_type) != 0);
6207 BUILD_BUG_ON(offsetof(struct pqi_event_descriptor,
6208 oq_id) != 2);
6209 BUILD_BUG_ON(sizeof(struct pqi_event_descriptor) != 4);
6210
6211 BUILD_BUG_ON(offsetof(struct pqi_event_config,
6212 num_event_descriptors) != 2);
6213 BUILD_BUG_ON(offsetof(struct pqi_event_config,
6214 descriptors) != 4);
6215
6216 BUILD_BUG_ON(offsetof(struct pqi_event_response,
6217 header.iu_type) != 0);
6218 BUILD_BUG_ON(offsetof(struct pqi_event_response,
6219 header.iu_length) != 2);
6220 BUILD_BUG_ON(offsetof(struct pqi_event_response,
6221 event_type) != 8);
6222 BUILD_BUG_ON(offsetof(struct pqi_event_response,
6223 event_id) != 10);
6224 BUILD_BUG_ON(offsetof(struct pqi_event_response,
6225 additional_event_id) != 12);
6226 BUILD_BUG_ON(offsetof(struct pqi_event_response,
6227 data) != 16);
6228 BUILD_BUG_ON(sizeof(struct pqi_event_response) != 32);
6229
6230 BUILD_BUG_ON(offsetof(struct pqi_event_acknowledge_request,
6231 header.iu_type) != 0);
6232 BUILD_BUG_ON(offsetof(struct pqi_event_acknowledge_request,
6233 header.iu_length) != 2);
6234 BUILD_BUG_ON(offsetof(struct pqi_event_acknowledge_request,
6235 event_type) != 8);
6236 BUILD_BUG_ON(offsetof(struct pqi_event_acknowledge_request,
6237 event_id) != 10);
6238 BUILD_BUG_ON(offsetof(struct pqi_event_acknowledge_request,
6239 additional_event_id) != 12);
6240 BUILD_BUG_ON(sizeof(struct pqi_event_acknowledge_request) != 16);
6241
6242 BUILD_BUG_ON(offsetof(struct pqi_task_management_request,
6243 header.iu_type) != 0);
6244 BUILD_BUG_ON(offsetof(struct pqi_task_management_request,
6245 header.iu_length) != 2);
6246 BUILD_BUG_ON(offsetof(struct pqi_task_management_request,
6247 request_id) != 8);
6248 BUILD_BUG_ON(offsetof(struct pqi_task_management_request,
6249 nexus_id) != 10);
6250 BUILD_BUG_ON(offsetof(struct pqi_task_management_request,
6251 lun_number) != 16);
6252 BUILD_BUG_ON(offsetof(struct pqi_task_management_request,
6253 protocol_specific) != 24);
6254 BUILD_BUG_ON(offsetof(struct pqi_task_management_request,
6255 outbound_queue_id_to_manage) != 26);
6256 BUILD_BUG_ON(offsetof(struct pqi_task_management_request,
6257 request_id_to_manage) != 28);
6258 BUILD_BUG_ON(offsetof(struct pqi_task_management_request,
6259 task_management_function) != 30);
6260 BUILD_BUG_ON(sizeof(struct pqi_task_management_request) != 32);
6261
6262 BUILD_BUG_ON(offsetof(struct pqi_task_management_response,
6263 header.iu_type) != 0);
6264 BUILD_BUG_ON(offsetof(struct pqi_task_management_response,
6265 header.iu_length) != 2);
6266 BUILD_BUG_ON(offsetof(struct pqi_task_management_response,
6267 request_id) != 8);
6268 BUILD_BUG_ON(offsetof(struct pqi_task_management_response,
6269 nexus_id) != 10);
6270 BUILD_BUG_ON(offsetof(struct pqi_task_management_response,
6271 additional_response_info) != 12);
6272 BUILD_BUG_ON(offsetof(struct pqi_task_management_response,
6273 response_code) != 15);
6274 BUILD_BUG_ON(sizeof(struct pqi_task_management_response) != 16);
6275
6276 BUILD_BUG_ON(offsetof(struct bmic_identify_controller,
6277 configured_logical_drive_count) != 0);
6278 BUILD_BUG_ON(offsetof(struct bmic_identify_controller,
6279 configuration_signature) != 1);
6280 BUILD_BUG_ON(offsetof(struct bmic_identify_controller,
6281 firmware_version) != 5);
6282 BUILD_BUG_ON(offsetof(struct bmic_identify_controller,
6283 extended_logical_unit_count) != 154);
6284 BUILD_BUG_ON(offsetof(struct bmic_identify_controller,
6285 firmware_build_number) != 190);
6286 BUILD_BUG_ON(offsetof(struct bmic_identify_controller,
6287 controller_mode) != 292);
6288
6289 BUILD_BUG_ON(PQI_ADMIN_IQ_NUM_ELEMENTS > 255);
6290 BUILD_BUG_ON(PQI_ADMIN_OQ_NUM_ELEMENTS > 255);
6291 BUILD_BUG_ON(PQI_ADMIN_IQ_ELEMENT_LENGTH %
6292 PQI_QUEUE_ELEMENT_LENGTH_ALIGNMENT != 0);
6293 BUILD_BUG_ON(PQI_ADMIN_OQ_ELEMENT_LENGTH %
6294 PQI_QUEUE_ELEMENT_LENGTH_ALIGNMENT != 0);
6295 BUILD_BUG_ON(PQI_OPERATIONAL_IQ_ELEMENT_LENGTH > 1048560);
6296 BUILD_BUG_ON(PQI_OPERATIONAL_IQ_ELEMENT_LENGTH %
6297 PQI_QUEUE_ELEMENT_LENGTH_ALIGNMENT != 0);
6298 BUILD_BUG_ON(PQI_OPERATIONAL_OQ_ELEMENT_LENGTH > 1048560);
6299 BUILD_BUG_ON(PQI_OPERATIONAL_OQ_ELEMENT_LENGTH %
6300 PQI_QUEUE_ELEMENT_LENGTH_ALIGNMENT != 0);
6301
6302 BUILD_BUG_ON(PQI_RESERVED_IO_SLOTS >= PQI_MAX_OUTSTANDING_REQUESTS);
6303}