1/*
2 * driver for Microsemi PQI-based storage controllers
3 * Copyright (c) 2016 Microsemi Corporation
4 * Copyright (c) 2016 PMC-Sierra, Inc.
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; version 2 of the License.
9 *
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
13 * NON INFRINGEMENT. See the GNU General Public License for more details.
14 *
15 * Questions/Comments/Bugfixes to esc.storagedev@microsemi.com
16 *
17 */
18
19#include <linux/module.h>
20#include <linux/kernel.h>
21#include <linux/pci.h>
22#include <linux/delay.h>
23#include <linux/interrupt.h>
24#include <linux/sched.h>
25#include <linux/rtc.h>
26#include <linux/bcd.h>
27#include <linux/cciss_ioctl.h>
28#include <linux/blk-mq-pci.h>
29#include <scsi/scsi_host.h>
30#include <scsi/scsi_cmnd.h>
31#include <scsi/scsi_device.h>
32#include <scsi/scsi_eh.h>
33#include <scsi/scsi_transport_sas.h>
34#include <asm/unaligned.h>
35#include "smartpqi.h"
36#include "smartpqi_sis.h"
37
38#if !defined(BUILD_TIMESTAMP)
39#define BUILD_TIMESTAMP
40#endif
41
42#define DRIVER_VERSION "0.9.13-370"
43#define DRIVER_MAJOR 0
44#define DRIVER_MINOR 9
45#define DRIVER_RELEASE 13
46#define DRIVER_REVISION 370
47
48#define DRIVER_NAME "Microsemi PQI Driver (v" DRIVER_VERSION ")"
49#define DRIVER_NAME_SHORT "smartpqi"
50
51MODULE_AUTHOR("Microsemi");
52MODULE_DESCRIPTION("Driver for Microsemi Smart Family Controller version "
53 DRIVER_VERSION);
54MODULE_SUPPORTED_DEVICE("Microsemi Smart Family Controllers");
55MODULE_VERSION(DRIVER_VERSION);
56MODULE_LICENSE("GPL");
57
58#define PQI_ENABLE_MULTI_QUEUE_SUPPORT 0
59
60static char *hpe_branded_controller = "HPE Smart Array Controller";
61static char *microsemi_branded_controller = "Microsemi Smart Family Controller";
62
63static void pqi_take_ctrl_offline(struct pqi_ctrl_info *ctrl_info);
64static int pqi_scan_scsi_devices(struct pqi_ctrl_info *ctrl_info);
65static void pqi_scan_start(struct Scsi_Host *shost);
66static void pqi_start_io(struct pqi_ctrl_info *ctrl_info,
67 struct pqi_queue_group *queue_group, enum pqi_io_path path,
68 struct pqi_io_request *io_request);
69static int pqi_submit_raid_request_synchronous(struct pqi_ctrl_info *ctrl_info,
70 struct pqi_iu_header *request, unsigned int flags,
71 struct pqi_raid_error_info *error_info, unsigned long timeout_msecs);
72static int pqi_aio_submit_io(struct pqi_ctrl_info *ctrl_info,
73 struct scsi_cmnd *scmd, u32 aio_handle, u8 *cdb,
74 unsigned int cdb_length, struct pqi_queue_group *queue_group,
75 struct pqi_encryption_info *encryption_info);
76
77/* for flags argument to pqi_submit_raid_request_synchronous() */
78#define PQI_SYNC_FLAGS_INTERRUPTABLE 0x1
79
80static struct scsi_transport_template *pqi_sas_transport_template;
81
82static atomic_t pqi_controller_count = ATOMIC_INIT(0);
83
84static int pqi_disable_device_id_wildcards;
85module_param_named(disable_device_id_wildcards,
86 pqi_disable_device_id_wildcards, int, S_IRUGO | S_IWUSR);
87MODULE_PARM_DESC(disable_device_id_wildcards,
88 "Disable device ID wildcards.");
89
90static char *raid_levels[] = {
91 "RAID-0",
92 "RAID-4",
93 "RAID-1(1+0)",
94 "RAID-5",
95 "RAID-5+1",
96 "RAID-ADG",
97 "RAID-1(ADM)",
98};
99
100static char *pqi_raid_level_to_string(u8 raid_level)
101{
102 if (raid_level < ARRAY_SIZE(raid_levels))
103 return raid_levels[raid_level];
104
105 return "";
106}
107
108#define SA_RAID_0 0
109#define SA_RAID_4 1
110#define SA_RAID_1 2 /* also used for RAID 10 */
111#define SA_RAID_5 3 /* also used for RAID 50 */
112#define SA_RAID_51 4
113#define SA_RAID_6 5 /* also used for RAID 60 */
114#define SA_RAID_ADM 6 /* also used for RAID 1+0 ADM */
115#define SA_RAID_MAX SA_RAID_ADM
116#define SA_RAID_UNKNOWN 0xff
117
118static inline void pqi_scsi_done(struct scsi_cmnd *scmd)
119{
120 scmd->scsi_done(scmd);
121}
122
123static inline bool pqi_scsi3addr_equal(u8 *scsi3addr1, u8 *scsi3addr2)
124{
125 return memcmp(scsi3addr1, scsi3addr2, 8) == 0;
126}
127
128static inline struct pqi_ctrl_info *shost_to_hba(struct Scsi_Host *shost)
129{
130 void *hostdata = shost_priv(shost);
131
132 return *((struct pqi_ctrl_info **)hostdata);
133}
134
135static inline bool pqi_is_logical_device(struct pqi_scsi_dev *device)
136{
137 return !device->is_physical_device;
138}
139
140static inline bool pqi_ctrl_offline(struct pqi_ctrl_info *ctrl_info)
141{
142 return !ctrl_info->controller_online;
143}
144
145static inline void pqi_check_ctrl_health(struct pqi_ctrl_info *ctrl_info)
146{
147 if (ctrl_info->controller_online)
148 if (!sis_is_firmware_running(ctrl_info))
149 pqi_take_ctrl_offline(ctrl_info);
150}
151
152static inline bool pqi_is_hba_lunid(u8 *scsi3addr)
153{
154 return pqi_scsi3addr_equal(scsi3addr, RAID_CTLR_LUNID);
155}
156
157static inline enum pqi_ctrl_mode pqi_get_ctrl_mode(
158 struct pqi_ctrl_info *ctrl_info)
159{
160 return sis_read_driver_scratch(ctrl_info);
161}
162
163static inline void pqi_save_ctrl_mode(struct pqi_ctrl_info *ctrl_info,
164 enum pqi_ctrl_mode mode)
165{
166 sis_write_driver_scratch(ctrl_info, mode);
167}
168
169#define PQI_RESCAN_WORK_INTERVAL (10 * HZ)
170
171static inline void pqi_schedule_rescan_worker(struct pqi_ctrl_info *ctrl_info)
172{
173 schedule_delayed_work(&ctrl_info->rescan_work,
174 PQI_RESCAN_WORK_INTERVAL);
175}
176
177static int pqi_map_single(struct pci_dev *pci_dev,
178 struct pqi_sg_descriptor *sg_descriptor, void *buffer,
179 size_t buffer_length, int data_direction)
180{
181 dma_addr_t bus_address;
182
183 if (!buffer || buffer_length == 0 || data_direction == PCI_DMA_NONE)
184 return 0;
185
186 bus_address = pci_map_single(pci_dev, buffer, buffer_length,
187 data_direction);
188 if (pci_dma_mapping_error(pci_dev, bus_address))
189 return -ENOMEM;
190
191 put_unaligned_le64((u64)bus_address, &sg_descriptor->address);
192 put_unaligned_le32(buffer_length, &sg_descriptor->length);
193 put_unaligned_le32(CISS_SG_LAST, &sg_descriptor->flags);
194
195 return 0;
196}
197
198static void pqi_pci_unmap(struct pci_dev *pci_dev,
199 struct pqi_sg_descriptor *descriptors, int num_descriptors,
200 int data_direction)
201{
202 int i;
203
204 if (data_direction == PCI_DMA_NONE)
205 return;
206
207 for (i = 0; i < num_descriptors; i++)
208 pci_unmap_single(pci_dev,
209 (dma_addr_t)get_unaligned_le64(&descriptors[i].address),
210 get_unaligned_le32(&descriptors[i].length),
211 data_direction);
212}
213
214static int pqi_build_raid_path_request(struct pqi_ctrl_info *ctrl_info,
215 struct pqi_raid_path_request *request, u8 cmd,
216 u8 *scsi3addr, void *buffer, size_t buffer_length,
217 u16 vpd_page, int *pci_direction)
218{
219 u8 *cdb;
220 int pci_dir;
221
222 memset(request, 0, sizeof(*request));
223
224 request->header.iu_type = PQI_REQUEST_IU_RAID_PATH_IO;
225 put_unaligned_le16(offsetof(struct pqi_raid_path_request,
226 sg_descriptors[1]) - PQI_REQUEST_HEADER_LENGTH,
227 &request->header.iu_length);
228 put_unaligned_le32(buffer_length, &request->buffer_length);
229 memcpy(request->lun_number, scsi3addr, sizeof(request->lun_number));
230 request->task_attribute = SOP_TASK_ATTRIBUTE_SIMPLE;
231 request->additional_cdb_bytes_usage = SOP_ADDITIONAL_CDB_BYTES_0;
232
233 cdb = request->cdb;
234
235 switch (cmd) {
236 case INQUIRY:
237 request->data_direction = SOP_READ_FLAG;
238 cdb[0] = INQUIRY;
239 if (vpd_page & VPD_PAGE) {
240 cdb[1] = 0x1;
241 cdb[2] = (u8)vpd_page;
242 }
243 cdb[4] = (u8)buffer_length;
244 break;
245 case CISS_REPORT_LOG:
246 case CISS_REPORT_PHYS:
247 request->data_direction = SOP_READ_FLAG;
248 cdb[0] = cmd;
249 if (cmd == CISS_REPORT_PHYS)
250 cdb[1] = CISS_REPORT_PHYS_EXTENDED;
251 else
252 cdb[1] = CISS_REPORT_LOG_EXTENDED;
253 put_unaligned_be32(buffer_length, &cdb[6]);
254 break;
255 case CISS_GET_RAID_MAP:
256 request->data_direction = SOP_READ_FLAG;
257 cdb[0] = CISS_READ;
258 cdb[1] = CISS_GET_RAID_MAP;
259 put_unaligned_be32(buffer_length, &cdb[6]);
260 break;
261 case SA_CACHE_FLUSH:
262 request->data_direction = SOP_WRITE_FLAG;
263 cdb[0] = BMIC_WRITE;
264 cdb[6] = BMIC_CACHE_FLUSH;
265 put_unaligned_be16(buffer_length, &cdb[7]);
266 break;
267 case BMIC_IDENTIFY_CONTROLLER:
268 case BMIC_IDENTIFY_PHYSICAL_DEVICE:
269 request->data_direction = SOP_READ_FLAG;
270 cdb[0] = BMIC_READ;
271 cdb[6] = cmd;
272 put_unaligned_be16(buffer_length, &cdb[7]);
273 break;
274 case BMIC_WRITE_HOST_WELLNESS:
275 request->data_direction = SOP_WRITE_FLAG;
276 cdb[0] = BMIC_WRITE;
277 cdb[6] = cmd;
278 put_unaligned_be16(buffer_length, &cdb[7]);
279 break;
280 default:
281 dev_err(&ctrl_info->pci_dev->dev, "unknown command 0x%x\n",
282 cmd);
283 WARN_ON(cmd);
284 break;
285 }
286
287 switch (request->data_direction) {
288 case SOP_READ_FLAG:
289 pci_dir = PCI_DMA_FROMDEVICE;
290 break;
291 case SOP_WRITE_FLAG:
292 pci_dir = PCI_DMA_TODEVICE;
293 break;
294 case SOP_NO_DIRECTION_FLAG:
295 pci_dir = PCI_DMA_NONE;
296 break;
297 default:
298 pci_dir = PCI_DMA_BIDIRECTIONAL;
299 break;
300 }
301
302 *pci_direction = pci_dir;
303
304 return pqi_map_single(ctrl_info->pci_dev, &request->sg_descriptors[0],
305 buffer, buffer_length, pci_dir);
306}
307
308static struct pqi_io_request *pqi_alloc_io_request(
309 struct pqi_ctrl_info *ctrl_info)
310{
311 struct pqi_io_request *io_request;
312 u16 i = ctrl_info->next_io_request_slot; /* benignly racy */
313
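/*
 * Scan the pool starting at the hint slot; an atomic_inc_return() of 1
 * means the slot was free and is now claimed, otherwise undo the
 * increment and try the next slot.
 */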
314 while (1) {
315 io_request = &ctrl_info->io_request_pool[i];
316 if (atomic_inc_return(&io_request->refcount) == 1)
317 break;
318 atomic_dec(&io_request->refcount);
319 i = (i + 1) % ctrl_info->max_io_slots;
320 }
321
322 /* benignly racy */
323 ctrl_info->next_io_request_slot = (i + 1) % ctrl_info->max_io_slots;
324
325 io_request->scmd = NULL;
326 io_request->status = 0;
327 io_request->error_info = NULL;
328
329 return io_request;
330}
331
332static void pqi_free_io_request(struct pqi_io_request *io_request)
333{
334 atomic_dec(&io_request->refcount);
335}
336
337static int pqi_identify_controller(struct pqi_ctrl_info *ctrl_info,
338 struct bmic_identify_controller *buffer)
339{
340 int rc;
341 int pci_direction;
342 struct pqi_raid_path_request request;
343
344 rc = pqi_build_raid_path_request(ctrl_info, &request,
345 BMIC_IDENTIFY_CONTROLLER, RAID_CTLR_LUNID, buffer,
346 sizeof(*buffer), 0, &pci_direction);
347 if (rc)
348 return rc;
349
350 rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header, 0,
351 NULL, NO_TIMEOUT);
352
353 pqi_pci_unmap(ctrl_info->pci_dev, request.sg_descriptors, 1,
354 pci_direction);
355
356 return rc;
357}
358
359static int pqi_scsi_inquiry(struct pqi_ctrl_info *ctrl_info,
360 u8 *scsi3addr, u16 vpd_page, void *buffer, size_t buffer_length)
361{
362 int rc;
363 int pci_direction;
364 struct pqi_raid_path_request request;
365
366 rc = pqi_build_raid_path_request(ctrl_info, &request,
367 INQUIRY, scsi3addr, buffer, buffer_length, vpd_page,
368 &pci_direction);
369 if (rc)
370 return rc;
371
372 rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header, 0,
373 NULL, NO_TIMEOUT);
374
375 pqi_pci_unmap(ctrl_info->pci_dev, request.sg_descriptors, 1,
376 pci_direction);
377
378 return rc;
379}
380
381static int pqi_identify_physical_device(struct pqi_ctrl_info *ctrl_info,
382 struct pqi_scsi_dev *device,
383 struct bmic_identify_physical_device *buffer,
384 size_t buffer_length)
385{
386 int rc;
387 int pci_direction;
388 u16 bmic_device_index;
389 struct pqi_raid_path_request request;
390
391 rc = pqi_build_raid_path_request(ctrl_info, &request,
392 BMIC_IDENTIFY_PHYSICAL_DEVICE, RAID_CTLR_LUNID, buffer,
393 buffer_length, 0, &pci_direction);
394 if (rc)
395 return rc;
396
397 bmic_device_index = CISS_GET_DRIVE_NUMBER(device->scsi3addr);
398 request.cdb[2] = (u8)bmic_device_index;
399 request.cdb[9] = (u8)(bmic_device_index >> 8);
400
401 rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header,
402 0, NULL, NO_TIMEOUT);
403
404 pqi_pci_unmap(ctrl_info->pci_dev, request.sg_descriptors, 1,
405 pci_direction);
406
407 return rc;
408}
409
410#define SA_CACHE_FLUSH_BUFFER_LENGTH 4
411
412static int pqi_flush_cache(struct pqi_ctrl_info *ctrl_info)
413{
414 int rc;
415 struct pqi_raid_path_request request;
416 int pci_direction;
417 u8 *buffer;
418
419 /*
420 * Don't bother trying to flush the cache if the controller is
421 * locked up.
422 */
423 if (pqi_ctrl_offline(ctrl_info))
424 return -ENXIO;
425
426 buffer = kzalloc(SA_CACHE_FLUSH_BUFFER_LENGTH, GFP_KERNEL);
427 if (!buffer)
428 return -ENOMEM;
429
430 rc = pqi_build_raid_path_request(ctrl_info, &request,
431 SA_CACHE_FLUSH, RAID_CTLR_LUNID, buffer,
432 SA_CACHE_FLUSH_BUFFER_LENGTH, 0, &pci_direction);
433 if (rc)
434 goto out;
435
436 rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header,
437 0, NULL, NO_TIMEOUT);
438
439 pqi_pci_unmap(ctrl_info->pci_dev, request.sg_descriptors, 1,
440 pci_direction);
441
442out:
443 kfree(buffer);
444
445 return rc;
446}
447
448static int pqi_write_host_wellness(struct pqi_ctrl_info *ctrl_info,
449 void *buffer, size_t buffer_length)
450{
451 int rc;
452 struct pqi_raid_path_request request;
453 int pci_direction;
454
455 rc = pqi_build_raid_path_request(ctrl_info, &request,
456 BMIC_WRITE_HOST_WELLNESS, RAID_CTLR_LUNID, buffer,
457 buffer_length, 0, &pci_direction);
458 if (rc)
459 return rc;
460
461 rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header,
462 0, NULL, NO_TIMEOUT);
463
464 pqi_pci_unmap(ctrl_info->pci_dev, request.sg_descriptors, 1,
465 pci_direction);
466
467 return rc;
468}
469
470#pragma pack(1)
471
472struct bmic_host_wellness_driver_version {
473 u8 start_tag[4];
474 u8 driver_version_tag[2];
475 __le16 driver_version_length;
476 char driver_version[32];
477 u8 end_tag[2];
478};
479
480#pragma pack()
481
482static int pqi_write_driver_version_to_host_wellness(
483 struct pqi_ctrl_info *ctrl_info)
484{
485 int rc;
486 struct bmic_host_wellness_driver_version *buffer;
487 size_t buffer_length;
488
489 buffer_length = sizeof(*buffer);
490
491 buffer = kmalloc(buffer_length, GFP_KERNEL);
492 if (!buffer)
493 return -ENOMEM;
494
495 buffer->start_tag[0] = '<';
496 buffer->start_tag[1] = 'H';
497 buffer->start_tag[2] = 'W';
498 buffer->start_tag[3] = '>';
499 buffer->driver_version_tag[0] = 'D';
500 buffer->driver_version_tag[1] = 'V';
501 put_unaligned_le16(sizeof(buffer->driver_version),
502 &buffer->driver_version_length);
503 strncpy(buffer->driver_version, DRIVER_VERSION,
504 sizeof(buffer->driver_version) - 1);
505 buffer->driver_version[sizeof(buffer->driver_version) - 1] = '\0';
506 buffer->end_tag[0] = 'Z';
507 buffer->end_tag[1] = 'Z';
508
509 rc = pqi_write_host_wellness(ctrl_info, buffer, buffer_length);
510
511 kfree(buffer);
512
513 return rc;
514}
515
516#pragma pack(1)
517
518struct bmic_host_wellness_time {
519 u8 start_tag[4];
520 u8 time_tag[2];
521 __le16 time_length;
522 u8 time[8];
523 u8 dont_write_tag[2];
524 u8 end_tag[2];
525};
526
527#pragma pack()
528
529static int pqi_write_current_time_to_host_wellness(
530 struct pqi_ctrl_info *ctrl_info)
531{
532 int rc;
533 struct bmic_host_wellness_time *buffer;
534 size_t buffer_length;
535 time64_t local_time;
536 unsigned int year;
537 struct timeval time;
538 struct rtc_time tm;
539
540 buffer_length = sizeof(*buffer);
541
542 buffer = kmalloc(buffer_length, GFP_KERNEL);
543 if (!buffer)
544 return -ENOMEM;
545
546 buffer->start_tag[0] = '<';
547 buffer->start_tag[1] = 'H';
548 buffer->start_tag[2] = 'W';
549 buffer->start_tag[3] = '>';
550 buffer->time_tag[0] = 'T';
551 buffer->time_tag[1] = 'D';
552 put_unaligned_le16(sizeof(buffer->time),
553 &buffer->time_length);
554
555 do_gettimeofday(&time);
556 local_time = time.tv_sec - (sys_tz.tz_minuteswest * 60);
557 rtc_time64_to_tm(local_time, &tm);
558 year = tm.tm_year + 1900;
559
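/* Encode the timestamp as BCD: hour, minute, second, reserved, month, day, century, year. */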
560 buffer->time[0] = bin2bcd(tm.tm_hour);
561 buffer->time[1] = bin2bcd(tm.tm_min);
562 buffer->time[2] = bin2bcd(tm.tm_sec);
563 buffer->time[3] = 0;
564 buffer->time[4] = bin2bcd(tm.tm_mon + 1);
565 buffer->time[5] = bin2bcd(tm.tm_mday);
566 buffer->time[6] = bin2bcd(year / 100);
567 buffer->time[7] = bin2bcd(year % 100);
568
569 buffer->dont_write_tag[0] = 'D';
570 buffer->dont_write_tag[1] = 'W';
571 buffer->end_tag[0] = 'Z';
572 buffer->end_tag[1] = 'Z';
573
574 rc = pqi_write_host_wellness(ctrl_info, buffer, buffer_length);
575
576 kfree(buffer);
577
578 return rc;
579}
580
581#define PQI_UPDATE_TIME_WORK_INTERVAL (24UL * 60 * 60 * HZ)
582
583static void pqi_update_time_worker(struct work_struct *work)
584{
585 int rc;
586 struct pqi_ctrl_info *ctrl_info;
587
588 ctrl_info = container_of(to_delayed_work(work), struct pqi_ctrl_info,
589 update_time_work);
590
591 rc = pqi_write_current_time_to_host_wellness(ctrl_info);
592 if (rc)
593 dev_warn(&ctrl_info->pci_dev->dev,
594 "error updating time on controller\n");
595
596 schedule_delayed_work(&ctrl_info->update_time_work,
597 PQI_UPDATE_TIME_WORK_INTERVAL);
598}
599
600static inline void pqi_schedule_update_time_worker(
601 struct pqi_ctrl_info *ctrl_info)
602{
603 schedule_delayed_work(&ctrl_info->update_time_work, 0);
604}
605
606static int pqi_report_luns(struct pqi_ctrl_info *ctrl_info, u8 cmd,
607 void *buffer, size_t buffer_length)
608{
609 int rc;
610 int pci_direction;
611 struct pqi_raid_path_request request;
612
613 rc = pqi_build_raid_path_request(ctrl_info, &request,
614 cmd, RAID_CTLR_LUNID, buffer, buffer_length, 0, &pci_direction);
615 if (rc)
616 return rc;
617
618 rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header, 0,
619 NULL, NO_TIMEOUT);
620
621 pqi_pci_unmap(ctrl_info->pci_dev, request.sg_descriptors, 1,
622 pci_direction);
623
624 return rc;
625}
626
627static int pqi_report_phys_logical_luns(struct pqi_ctrl_info *ctrl_info, u8 cmd,
628 void **buffer)
629{
630 int rc;
631 size_t lun_list_length;
632 size_t lun_data_length;
633 size_t new_lun_list_length;
634 void *lun_data = NULL;
635 struct report_lun_header *report_lun_header;
636
637 report_lun_header = kmalloc(sizeof(*report_lun_header), GFP_KERNEL);
638 if (!report_lun_header) {
639 rc = -ENOMEM;
640 goto out;
641 }
642
643 rc = pqi_report_luns(ctrl_info, cmd, report_lun_header,
644 sizeof(*report_lun_header));
645 if (rc)
646 goto out;
647
648 lun_list_length = get_unaligned_be32(&report_lun_header->list_length);
649
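/*
 * Size the data buffer from the list length reported by the initial
 * header-only request, then reissue the command; if the LUN list grew
 * in the meantime, retry with the larger size from the second response.
 */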
650again:
651 lun_data_length = sizeof(struct report_lun_header) + lun_list_length;
652
653 lun_data = kmalloc(lun_data_length, GFP_KERNEL);
654 if (!lun_data) {
655 rc = -ENOMEM;
656 goto out;
657 }
658
659 if (lun_list_length == 0) {
660 memcpy(lun_data, report_lun_header, sizeof(*report_lun_header));
661 goto out;
662 }
663
664 rc = pqi_report_luns(ctrl_info, cmd, lun_data, lun_data_length);
665 if (rc)
666 goto out;
667
668 new_lun_list_length = get_unaligned_be32(
669 &((struct report_lun_header *)lun_data)->list_length);
670
671 if (new_lun_list_length > lun_list_length) {
672 lun_list_length = new_lun_list_length;
673 kfree(lun_data);
674 goto again;
675 }
676
677out:
678 kfree(report_lun_header);
679
680 if (rc) {
681 kfree(lun_data);
682 lun_data = NULL;
683 }
684
685 *buffer = lun_data;
686
687 return rc;
688}
689
690static inline int pqi_report_phys_luns(struct pqi_ctrl_info *ctrl_info,
691 void **buffer)
692{
693 return pqi_report_phys_logical_luns(ctrl_info, CISS_REPORT_PHYS,
694 buffer);
695}
696
697static inline int pqi_report_logical_luns(struct pqi_ctrl_info *ctrl_info,
698 void **buffer)
699{
700 return pqi_report_phys_logical_luns(ctrl_info, CISS_REPORT_LOG, buffer);
701}
702
703static int pqi_get_device_lists(struct pqi_ctrl_info *ctrl_info,
704 struct report_phys_lun_extended **physdev_list,
705 struct report_log_lun_extended **logdev_list)
706{
707 int rc;
708 size_t logdev_list_length;
709 size_t logdev_data_length;
710 struct report_log_lun_extended *internal_logdev_list;
711 struct report_log_lun_extended *logdev_data;
712 struct report_lun_header report_lun_header;
713
714 rc = pqi_report_phys_luns(ctrl_info, (void **)physdev_list);
715 if (rc)
716 dev_err(&ctrl_info->pci_dev->dev,
717 "report physical LUNs failed\n");
718
719 rc = pqi_report_logical_luns(ctrl_info, (void **)logdev_list);
720 if (rc)
721 dev_err(&ctrl_info->pci_dev->dev,
722 "report logical LUNs failed\n");
723
724 /*
725 * Tack the controller itself onto the end of the logical device list.
726 */
727
728 logdev_data = *logdev_list;
729
730 if (logdev_data) {
731 logdev_list_length =
732 get_unaligned_be32(&logdev_data->header.list_length);
733 } else {
734 memset(&report_lun_header, 0, sizeof(report_lun_header));
735 logdev_data =
736 (struct report_log_lun_extended *)&report_lun_header;
737 logdev_list_length = 0;
738 }
739
740 logdev_data_length = sizeof(struct report_lun_header) +
741 logdev_list_length;
742
743 internal_logdev_list = kmalloc(logdev_data_length +
744 sizeof(struct report_log_lun_extended), GFP_KERNEL);
745 if (!internal_logdev_list) {
746 kfree(*logdev_list);
747 *logdev_list = NULL;
748 return -ENOMEM;
749 }
750
751 memcpy(internal_logdev_list, logdev_data, logdev_data_length);
752 memset((u8 *)internal_logdev_list + logdev_data_length, 0,
753 sizeof(struct report_log_lun_extended_entry));
754 put_unaligned_be32(logdev_list_length +
755 sizeof(struct report_log_lun_extended_entry),
756 &internal_logdev_list->header.list_length);
757
758 kfree(*logdev_list);
759 *logdev_list = internal_logdev_list;
760
761 return 0;
762}
763
764static inline void pqi_set_bus_target_lun(struct pqi_scsi_dev *device,
765 int bus, int target, int lun)
766{
767 device->bus = bus;
768 device->target = target;
769 device->lun = lun;
770}
771
772static void pqi_assign_bus_target_lun(struct pqi_scsi_dev *device)
773{
774 u8 *scsi3addr;
775 u32 lunid;
776
777 scsi3addr = device->scsi3addr;
778 lunid = get_unaligned_le32(scsi3addr);
779
780 if (pqi_is_hba_lunid(scsi3addr)) {
781 /* The specified device is the controller. */
782 pqi_set_bus_target_lun(device, PQI_HBA_BUS, 0, lunid & 0x3fff);
783 device->target_lun_valid = true;
784 return;
785 }
786
787 if (pqi_is_logical_device(device)) {
788 pqi_set_bus_target_lun(device, PQI_RAID_VOLUME_BUS, 0,
789 lunid & 0x3fff);
790 device->target_lun_valid = true;
791 return;
792 }
793
794 /*
795 * Defer target and LUN assignment for non-controller physical devices
796 * because the SAS transport layer will make these assignments later.
797 */
798 pqi_set_bus_target_lun(device, PQI_PHYSICAL_DEVICE_BUS, 0, 0);
799}
800
801static void pqi_get_raid_level(struct pqi_ctrl_info *ctrl_info,
802 struct pqi_scsi_dev *device)
803{
804 int rc;
805 u8 raid_level;
806 u8 *buffer;
807
808 raid_level = SA_RAID_UNKNOWN;
809
810 buffer = kmalloc(64, GFP_KERNEL);
811 if (buffer) {
812 rc = pqi_scsi_inquiry(ctrl_info, device->scsi3addr,
813 VPD_PAGE | CISS_VPD_LV_DEVICE_GEOMETRY, buffer, 64);
814 if (rc == 0) {
815 raid_level = buffer[8];
816 if (raid_level > SA_RAID_MAX)
817 raid_level = SA_RAID_UNKNOWN;
818 }
819 kfree(buffer);
820 }
821
822 device->raid_level = raid_level;
823}
824
825static int pqi_validate_raid_map(struct pqi_ctrl_info *ctrl_info,
826 struct pqi_scsi_dev *device, struct raid_map *raid_map)
827{
828 char *err_msg;
829 u32 raid_map_size;
830 u32 r5or6_blocks_per_row;
831 unsigned int num_phys_disks;
832 unsigned int num_raid_map_entries;
833
834 raid_map_size = get_unaligned_le32(&raid_map->structure_size);
835
836 if (raid_map_size < offsetof(struct raid_map, disk_data)) {
837 err_msg = "RAID map too small";
838 goto bad_raid_map;
839 }
840
841 if (raid_map_size > sizeof(*raid_map)) {
842 err_msg = "RAID map too large";
843 goto bad_raid_map;
844 }
845
846 num_phys_disks = get_unaligned_le16(&raid_map->layout_map_count) *
847 (get_unaligned_le16(&raid_map->data_disks_per_row) +
848 get_unaligned_le16(&raid_map->metadata_disks_per_row));
849 num_raid_map_entries = num_phys_disks *
850 get_unaligned_le16(&raid_map->row_cnt);
851
852 if (num_raid_map_entries > RAID_MAP_MAX_ENTRIES) {
853 err_msg = "invalid number of map entries in RAID map";
854 goto bad_raid_map;
855 }
856
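/*
 * layout_map_count is the number of data copies for mirrored layouts
 * (2 for RAID-1, 3 for RAID-1(ADM)) and the number of parity groups
 * for RAID-50/60.
 */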
857 if (device->raid_level == SA_RAID_1) {
858 if (get_unaligned_le16(&raid_map->layout_map_count) != 2) {
859 err_msg = "invalid RAID-1 map";
860 goto bad_raid_map;
861 }
862 } else if (device->raid_level == SA_RAID_ADM) {
863 if (get_unaligned_le16(&raid_map->layout_map_count) != 3) {
864 err_msg = "invalid RAID-1(ADM) map";
865 goto bad_raid_map;
866 }
867 } else if ((device->raid_level == SA_RAID_5 ||
868 device->raid_level == SA_RAID_6) &&
869 get_unaligned_le16(&raid_map->layout_map_count) > 1) {
870 /* RAID 50/60 */
871 r5or6_blocks_per_row =
872 get_unaligned_le16(&raid_map->strip_size) *
873 get_unaligned_le16(&raid_map->data_disks_per_row);
874 if (r5or6_blocks_per_row == 0) {
875 err_msg = "invalid RAID-5 or RAID-6 map";
876 goto bad_raid_map;
877 }
878 }
879
880 return 0;
881
882bad_raid_map:
883 dev_warn(&ctrl_info->pci_dev->dev, "%s\n", err_msg);
884
885 return -EINVAL;
886}
887
888static int pqi_get_raid_map(struct pqi_ctrl_info *ctrl_info,
889 struct pqi_scsi_dev *device)
890{
891 int rc;
892 int pci_direction;
893 struct pqi_raid_path_request request;
894 struct raid_map *raid_map;
895
896 raid_map = kmalloc(sizeof(*raid_map), GFP_KERNEL);
897 if (!raid_map)
898 return -ENOMEM;
899
900 rc = pqi_build_raid_path_request(ctrl_info, &request,
901 CISS_GET_RAID_MAP, device->scsi3addr, raid_map,
902 sizeof(*raid_map), 0, &pci_direction);
903 if (rc)
904 goto error;
905
906 rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header, 0,
907 NULL, NO_TIMEOUT);
908
909 pqi_pci_unmap(ctrl_info->pci_dev, request.sg_descriptors, 1,
910 pci_direction);
911
912 if (rc)
913 goto error;
914
915 rc = pqi_validate_raid_map(ctrl_info, device, raid_map);
916 if (rc)
917 goto error;
918
919 device->raid_map = raid_map;
920
921 return 0;
922
923error:
924 kfree(raid_map);
925
926 return rc;
927}
928
929static void pqi_get_offload_status(struct pqi_ctrl_info *ctrl_info,
930 struct pqi_scsi_dev *device)
931{
932 int rc;
933 u8 *buffer;
934 u8 offload_status;
935
936 buffer = kmalloc(64, GFP_KERNEL);
937 if (!buffer)
938 return;
939
940 rc = pqi_scsi_inquiry(ctrl_info, device->scsi3addr,
941 VPD_PAGE | CISS_VPD_LV_OFFLOAD_STATUS, buffer, 64);
942 if (rc)
943 goto out;
944
945#define OFFLOAD_STATUS_BYTE 4
946#define OFFLOAD_CONFIGURED_BIT 0x1
947#define OFFLOAD_ENABLED_BIT 0x2
948
949 offload_status = buffer[OFFLOAD_STATUS_BYTE];
950 device->offload_configured =
951 !!(offload_status & OFFLOAD_CONFIGURED_BIT);
952 if (device->offload_configured) {
953 device->offload_enabled_pending =
954 !!(offload_status & OFFLOAD_ENABLED_BIT);
955 if (pqi_get_raid_map(ctrl_info, device))
956 device->offload_enabled_pending = false;
957 }
958
959out:
960 kfree(buffer);
961}
962
963/*
964 * Use vendor-specific VPD to determine online/offline status of a volume.
965 */
966
967static void pqi_get_volume_status(struct pqi_ctrl_info *ctrl_info,
968 struct pqi_scsi_dev *device)
969{
970 int rc;
971 size_t page_length;
972 u8 volume_status = CISS_LV_STATUS_UNAVAILABLE;
973 bool volume_offline = true;
974 u32 volume_flags;
975 struct ciss_vpd_logical_volume_status *vpd;
976
977 vpd = kmalloc(sizeof(*vpd), GFP_KERNEL);
978 if (!vpd)
979 goto no_buffer;
980
981 rc = pqi_scsi_inquiry(ctrl_info, device->scsi3addr,
982 VPD_PAGE | CISS_VPD_LV_STATUS, vpd, sizeof(*vpd));
983 if (rc)
984 goto out;
985
986 page_length = offsetof(struct ciss_vpd_logical_volume_status,
987 volume_status) + vpd->page_length;
988 if (page_length < sizeof(*vpd))
989 goto out;
990
991 volume_status = vpd->volume_status;
992 volume_flags = get_unaligned_be32(&vpd->flags);
993 volume_offline = (volume_flags & CISS_LV_FLAGS_NO_HOST_IO) != 0;
994
995out:
996 kfree(vpd);
997no_buffer:
998 device->volume_status = volume_status;
999 device->volume_offline = volume_offline;
1000}
1001
1002static int pqi_get_device_info(struct pqi_ctrl_info *ctrl_info,
1003 struct pqi_scsi_dev *device)
1004{
1005 int rc;
1006 u8 *buffer;
1007
1008 buffer = kmalloc(64, GFP_KERNEL);
1009 if (!buffer)
1010 return -ENOMEM;
1011
1012 /* Send an inquiry to the device to see what it is. */
1013 rc = pqi_scsi_inquiry(ctrl_info, device->scsi3addr, 0, buffer, 64);
1014 if (rc)
1015 goto out;
1016
1017 scsi_sanitize_inquiry_string(&buffer[8], 8);
1018 scsi_sanitize_inquiry_string(&buffer[16], 16);
1019
1020 device->devtype = buffer[0] & 0x1f;
1021 memcpy(device->vendor, &buffer[8],
1022 sizeof(device->vendor));
1023 memcpy(device->model, &buffer[16],
1024 sizeof(device->model));
1025
1026 if (pqi_is_logical_device(device) && device->devtype == TYPE_DISK) {
1027 pqi_get_raid_level(ctrl_info, device);
1028 pqi_get_offload_status(ctrl_info, device);
1029 pqi_get_volume_status(ctrl_info, device);
1030 }
1031
1032out:
1033 kfree(buffer);
1034
1035 return rc;
1036}
1037
1038static void pqi_get_physical_disk_info(struct pqi_ctrl_info *ctrl_info,
1039 struct pqi_scsi_dev *device,
1040 struct bmic_identify_physical_device *id_phys)
1041{
1042 int rc;
1043
1044 memset(id_phys, 0, sizeof(*id_phys));
1045
1046 rc = pqi_identify_physical_device(ctrl_info, device,
1047 id_phys, sizeof(*id_phys));
1048 if (rc) {
1049 device->queue_depth = PQI_PHYSICAL_DISK_DEFAULT_MAX_QUEUE_DEPTH;
1050 return;
1051 }
1052
1053 device->queue_depth =
1054 get_unaligned_le16(&id_phys->current_queue_depth_limit);
1055 device->device_type = id_phys->device_type;
1056 device->active_path_index = id_phys->active_path_number;
1057 device->path_map = id_phys->redundant_path_present_map;
1058 memcpy(&device->box,
1059 &id_phys->alternate_paths_phys_box_on_port,
1060 sizeof(device->box));
1061 memcpy(&device->phys_connector,
1062 &id_phys->alternate_paths_phys_connector,
1063 sizeof(device->phys_connector));
1064 device->bay = id_phys->phys_bay_in_box;
1065}
1066
1067static void pqi_show_volume_status(struct pqi_ctrl_info *ctrl_info,
1068 struct pqi_scsi_dev *device)
1069{
1070 char *status;
1071 static const char unknown_state_str[] =
1072 "Volume is in an unknown state (%u)";
1073 char unknown_state_buffer[sizeof(unknown_state_str) + 10];
1074
1075 switch (device->volume_status) {
1076 case CISS_LV_OK:
1077 status = "Volume online";
1078 break;
1079 case CISS_LV_FAILED:
1080 status = "Volume failed";
1081 break;
1082 case CISS_LV_NOT_CONFIGURED:
1083 status = "Volume not configured";
1084 break;
1085 case CISS_LV_DEGRADED:
1086 status = "Volume degraded";
1087 break;
1088 case CISS_LV_READY_FOR_RECOVERY:
1089 status = "Volume ready for recovery operation";
1090 break;
1091 case CISS_LV_UNDERGOING_RECOVERY:
1092 status = "Volume undergoing recovery";
1093 break;
1094 case CISS_LV_WRONG_PHYSICAL_DRIVE_REPLACED:
1095 status = "Wrong physical drive was replaced";
1096 break;
1097 case CISS_LV_PHYSICAL_DRIVE_CONNECTION_PROBLEM:
1098 status = "A physical drive not properly connected";
1099 break;
1100 case CISS_LV_HARDWARE_OVERHEATING:
1101 status = "Hardware is overheating";
1102 break;
1103 case CISS_LV_HARDWARE_HAS_OVERHEATED:
1104 status = "Hardware has overheated";
1105 break;
1106 case CISS_LV_UNDERGOING_EXPANSION:
1107 status = "Volume undergoing expansion";
1108 break;
1109 case CISS_LV_NOT_AVAILABLE:
1110 status = "Volume waiting for transforming volume";
1111 break;
1112 case CISS_LV_QUEUED_FOR_EXPANSION:
1113 status = "Volume queued for expansion";
1114 break;
1115 case CISS_LV_DISABLED_SCSI_ID_CONFLICT:
1116 status = "Volume disabled due to SCSI ID conflict";
1117 break;
1118 case CISS_LV_EJECTED:
1119 status = "Volume has been ejected";
1120 break;
1121 case CISS_LV_UNDERGOING_ERASE:
1122 status = "Volume undergoing background erase";
1123 break;
1124 case CISS_LV_READY_FOR_PREDICTIVE_SPARE_REBUILD:
1125 status = "Volume ready for predictive spare rebuild";
1126 break;
1127 case CISS_LV_UNDERGOING_RPI:
1128 status = "Volume undergoing rapid parity initialization";
1129 break;
1130 case CISS_LV_PENDING_RPI:
1131 status = "Volume queued for rapid parity initialization";
1132 break;
1133 case CISS_LV_ENCRYPTED_NO_KEY:
1134 status = "Encrypted volume inaccessible - key not present";
1135 break;
1136 case CISS_LV_UNDERGOING_ENCRYPTION:
1137 status = "Volume undergoing encryption process";
1138 break;
1139 case CISS_LV_UNDERGOING_ENCRYPTION_REKEYING:
1140 status = "Volume undergoing encryption re-keying process";
1141 break;
1142 case CISS_LV_ENCRYPTED_IN_NON_ENCRYPTED_CONTROLLER:
1143 status =
1144 "Encrypted volume inaccessible - disabled on ctrl";
1145 break;
1146 case CISS_LV_PENDING_ENCRYPTION:
1147 status = "Volume pending migration to encrypted state";
1148 break;
1149 case CISS_LV_PENDING_ENCRYPTION_REKEYING:
1150 status = "Volume pending encryption rekeying";
1151 break;
1152 case CISS_LV_NOT_SUPPORTED:
1153 status = "Volume not supported on this controller";
1154 break;
1155 case CISS_LV_STATUS_UNAVAILABLE:
1156 status = "Volume status not available";
1157 break;
1158 default:
1159 snprintf(unknown_state_buffer, sizeof(unknown_state_buffer),
1160 unknown_state_str, device->volume_status);
1161 status = unknown_state_buffer;
1162 break;
1163 }
1164
1165 dev_info(&ctrl_info->pci_dev->dev,
1166 "scsi %d:%d:%d:%d %s\n",
1167 ctrl_info->scsi_host->host_no,
1168 device->bus, device->target, device->lun, status);
1169}
1170
1171static struct pqi_scsi_dev *pqi_find_disk_by_aio_handle(
1172 struct pqi_ctrl_info *ctrl_info, u32 aio_handle)
1173{
1174 struct pqi_scsi_dev *device;
1175
1176 list_for_each_entry(device, &ctrl_info->scsi_device_list,
1177 scsi_device_list_entry) {
1178 if (device->devtype != TYPE_DISK && device->devtype != TYPE_ZBC)
1179 continue;
1180 if (pqi_is_logical_device(device))
1181 continue;
1182 if (device->aio_handle == aio_handle)
1183 return device;
1184 }
1185
1186 return NULL;
1187}
1188
1189static void pqi_update_logical_drive_queue_depth(
1190 struct pqi_ctrl_info *ctrl_info, struct pqi_scsi_dev *logical_drive)
1191{
1192 unsigned int i;
1193 struct raid_map *raid_map;
1194 struct raid_map_disk_data *disk_data;
1195 struct pqi_scsi_dev *phys_disk;
1196 unsigned int num_phys_disks;
1197 unsigned int num_raid_map_entries;
1198 unsigned int queue_depth;
1199
1200 logical_drive->queue_depth = PQI_LOGICAL_DRIVE_DEFAULT_MAX_QUEUE_DEPTH;
1201
1202 raid_map = logical_drive->raid_map;
1203 if (!raid_map)
1204 return;
1205
1206 disk_data = raid_map->disk_data;
1207 num_phys_disks = get_unaligned_le16(&raid_map->layout_map_count) *
1208 (get_unaligned_le16(&raid_map->data_disks_per_row) +
1209 get_unaligned_le16(&raid_map->metadata_disks_per_row));
1210 num_raid_map_entries = num_phys_disks *
1211 get_unaligned_le16(&raid_map->row_cnt);
1212
1213 queue_depth = 0;
1214 for (i = 0; i < num_raid_map_entries; i++) {
1215 phys_disk = pqi_find_disk_by_aio_handle(ctrl_info,
1216 disk_data[i].aio_handle);
1217
1218 if (!phys_disk) {
1219 dev_warn(&ctrl_info->pci_dev->dev,
1220 "failed to find physical disk for logical drive %016llx\n",
1221 get_unaligned_be64(logical_drive->scsi3addr));
1222 logical_drive->offload_enabled = false;
1223 logical_drive->offload_enabled_pending = false;
1224 kfree(raid_map);
1225 logical_drive->raid_map = NULL;
1226 return;
1227 }
1228
1229 queue_depth += phys_disk->queue_depth;
1230 }
1231
1232 logical_drive->queue_depth = queue_depth;
1233}
1234
1235static void pqi_update_all_logical_drive_queue_depths(
1236 struct pqi_ctrl_info *ctrl_info)
1237{
1238 struct pqi_scsi_dev *device;
1239
1240 list_for_each_entry(device, &ctrl_info->scsi_device_list,
1241 scsi_device_list_entry) {
1242 if (device->devtype != TYPE_DISK && device->devtype != TYPE_ZBC)
1243 continue;
1244 if (!pqi_is_logical_device(device))
1245 continue;
1246 pqi_update_logical_drive_queue_depth(ctrl_info, device);
1247 }
1248}
1249
1250static void pqi_rescan_worker(struct work_struct *work)
1251{
1252 struct pqi_ctrl_info *ctrl_info;
1253
1254 ctrl_info = container_of(to_delayed_work(work), struct pqi_ctrl_info,
1255 rescan_work);
1256
1257 pqi_scan_scsi_devices(ctrl_info);
1258}
1259
1260static int pqi_add_device(struct pqi_ctrl_info *ctrl_info,
1261 struct pqi_scsi_dev *device)
1262{
1263 int rc;
1264
1265 if (pqi_is_logical_device(device))
1266 rc = scsi_add_device(ctrl_info->scsi_host, device->bus,
1267 device->target, device->lun);
1268 else
1269 rc = pqi_add_sas_device(ctrl_info->sas_host, device);
1270
1271 return rc;
1272}
1273
1274static inline void pqi_remove_device(struct pqi_ctrl_info *ctrl_info,
1275 struct pqi_scsi_dev *device)
1276{
1277 if (pqi_is_logical_device(device))
1278 scsi_remove_device(device->sdev);
1279 else
1280 pqi_remove_sas_device(device);
1281}
1282
1283/* Assumes the SCSI device list lock is held. */
1284
1285static struct pqi_scsi_dev *pqi_find_scsi_dev(struct pqi_ctrl_info *ctrl_info,
1286 int bus, int target, int lun)
1287{
1288 struct pqi_scsi_dev *device;
1289
1290 list_for_each_entry(device, &ctrl_info->scsi_device_list,
1291 scsi_device_list_entry)
1292 if (device->bus == bus && device->target == target &&
1293 device->lun == lun)
1294 return device;
1295
1296 return NULL;
1297}
1298
1299static inline bool pqi_device_equal(struct pqi_scsi_dev *dev1,
1300 struct pqi_scsi_dev *dev2)
1301{
1302 if (dev1->is_physical_device != dev2->is_physical_device)
1303 return false;
1304
1305 if (dev1->is_physical_device)
1306 return dev1->wwid == dev2->wwid;
1307
1308 return memcmp(dev1->volume_id, dev2->volume_id,
1309 sizeof(dev1->volume_id)) == 0;
1310}
1311
1312enum pqi_find_result {
1313 DEVICE_NOT_FOUND,
1314 DEVICE_CHANGED,
1315 DEVICE_SAME,
1316};
1317
1318static enum pqi_find_result pqi_scsi_find_entry(struct pqi_ctrl_info *ctrl_info,
1319 struct pqi_scsi_dev *device_to_find,
1320 struct pqi_scsi_dev **matching_device)
1321{
1322 struct pqi_scsi_dev *device;
1323
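/*
 * Match on SCSI3 address first; if the address matches but the device
 * identity (WWID/volume ID) differs, or the volume has gone offline,
 * report DEVICE_CHANGED so the caller replaces the entry.
 */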
1324 list_for_each_entry(device, &ctrl_info->scsi_device_list,
1325 scsi_device_list_entry) {
1326 if (pqi_scsi3addr_equal(device_to_find->scsi3addr,
1327 device->scsi3addr)) {
1328 *matching_device = device;
1329 if (pqi_device_equal(device_to_find, device)) {
1330 if (device_to_find->volume_offline)
1331 return DEVICE_CHANGED;
1332 return DEVICE_SAME;
1333 }
1334 return DEVICE_CHANGED;
1335 }
1336 }
1337
1338 return DEVICE_NOT_FOUND;
1339}
1340
1341static void pqi_dev_info(struct pqi_ctrl_info *ctrl_info,
1342 char *action, struct pqi_scsi_dev *device)
1343{
1344 dev_info(&ctrl_info->pci_dev->dev,
1345 "%s scsi %d:%d:%d:%d: %s %.8s %.16s %-12s SSDSmartPathCap%c En%c Exp%c qd=%d\n",
1346 action,
1347 ctrl_info->scsi_host->host_no,
1348 device->bus,
1349 device->target,
1350 device->lun,
1351 scsi_device_type(device->devtype),
1352 device->vendor,
1353 device->model,
1354 pqi_raid_level_to_string(device->raid_level),
1355 device->offload_configured ? '+' : '-',
1356 device->offload_enabled_pending ? '+' : '-',
1357 device->expose_device ? '+' : '-',
1358 device->queue_depth);
1359}
1360
1361/* Assumes the SCSI device list lock is held. */
1362
1363static void pqi_scsi_update_device(struct pqi_scsi_dev *existing_device,
1364 struct pqi_scsi_dev *new_device)
1365{
1366 existing_device->devtype = new_device->devtype;
1367 existing_device->device_type = new_device->device_type;
1368 existing_device->bus = new_device->bus;
1369 if (new_device->target_lun_valid) {
1370 existing_device->target = new_device->target;
1371 existing_device->lun = new_device->lun;
1372 existing_device->target_lun_valid = true;
1373 }
1374
1375 /* By definition, the scsi3addr and wwid fields are already the same. */
1376
1377 existing_device->is_physical_device = new_device->is_physical_device;
1378 existing_device->expose_device = new_device->expose_device;
1379 existing_device->no_uld_attach = new_device->no_uld_attach;
1380 existing_device->aio_enabled = new_device->aio_enabled;
1381 memcpy(existing_device->vendor, new_device->vendor,
1382 sizeof(existing_device->vendor));
1383 memcpy(existing_device->model, new_device->model,
1384 sizeof(existing_device->model));
1385 existing_device->sas_address = new_device->sas_address;
1386 existing_device->raid_level = new_device->raid_level;
1387 existing_device->queue_depth = new_device->queue_depth;
1388 existing_device->aio_handle = new_device->aio_handle;
1389 existing_device->volume_status = new_device->volume_status;
1390 existing_device->active_path_index = new_device->active_path_index;
1391 existing_device->path_map = new_device->path_map;
1392 existing_device->bay = new_device->bay;
1393 memcpy(existing_device->box, new_device->box,
1394 sizeof(existing_device->box));
1395 memcpy(existing_device->phys_connector, new_device->phys_connector,
1396 sizeof(existing_device->phys_connector));
1397 existing_device->offload_configured = new_device->offload_configured;
1398 existing_device->offload_enabled = false;
1399 existing_device->offload_enabled_pending =
1400 new_device->offload_enabled_pending;
1401 existing_device->offload_to_mirror = 0;
1402 kfree(existing_device->raid_map);
1403 existing_device->raid_map = new_device->raid_map;
1404
1405 /* To prevent this from being freed later. */
1406 new_device->raid_map = NULL;
1407}
1408
1409static inline void pqi_free_device(struct pqi_scsi_dev *device)
1410{
1411 if (device) {
1412 kfree(device->raid_map);
1413 kfree(device);
1414 }
1415}
1416
1417/*
1418 * Called when exposing a new device to the OS fails in order to re-adjust
1419 * our internal SCSI device list to match the SCSI ML's view.
1420 */
1421
1422static inline void pqi_fixup_botched_add(struct pqi_ctrl_info *ctrl_info,
1423 struct pqi_scsi_dev *device)
1424{
1425 unsigned long flags;
1426
1427 spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);
1428 list_del(&device->scsi_device_list_entry);
1429 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
1430
1431 /* Allow the device structure to be freed later. */
1432 device->keep_device = false;
1433}
1434
1435static void pqi_update_device_list(struct pqi_ctrl_info *ctrl_info,
1436 struct pqi_scsi_dev *new_device_list[], unsigned int num_new_devices)
1437{
1438 int rc;
1439 unsigned int i;
1440 unsigned long flags;
1441 enum pqi_find_result find_result;
1442 struct pqi_scsi_dev *device;
1443 struct pqi_scsi_dev *next;
1444 struct pqi_scsi_dev *matching_device;
1445 struct list_head add_list;
1446 struct list_head delete_list;
1447
1448 INIT_LIST_HEAD(&add_list);
1449 INIT_LIST_HEAD(&delete_list);
1450
1451 /*
1452 * The idea here is to do as little work as possible while holding the
1453 * spinlock. That's why we go to great pains to defer anything other
1454 * than updating the internal device list until after we release the
1455 * spinlock.
1456 */
1457
1458 spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);
1459
1460 /* Assume that all devices in the existing list have gone away. */
1461 list_for_each_entry(device, &ctrl_info->scsi_device_list,
1462 scsi_device_list_entry)
1463 device->device_gone = true;
1464
1465 for (i = 0; i < num_new_devices; i++) {
1466 device = new_device_list[i];
1467
1468 find_result = pqi_scsi_find_entry(ctrl_info, device,
1469 &matching_device);
1470
1471 switch (find_result) {
1472 case DEVICE_SAME:
1473 /*
1474 * The newly found device is already in the existing
1475 * device list.
1476 */
1477 device->new_device = false;
1478 matching_device->device_gone = false;
1479 pqi_scsi_update_device(matching_device, device);
1480 break;
1481 case DEVICE_NOT_FOUND:
1482 /*
1483 * The newly found device is NOT in the existing device
1484 * list.
1485 */
1486 device->new_device = true;
1487 break;
1488 case DEVICE_CHANGED:
1489 /*
1490 * The original device has gone away and we need to add
1491 * the new device.
1492 */
1493 device->new_device = true;
1494 break;
1495 default:
1496 WARN_ON(find_result);
1497 break;
1498 }
1499 }
1500
1501 /* Process all devices that have gone away. */
1502 list_for_each_entry_safe(device, next, &ctrl_info->scsi_device_list,
1503 scsi_device_list_entry) {
1504 if (device->device_gone) {
1505 list_del(&device->scsi_device_list_entry);
1506 list_add_tail(&device->delete_list_entry, &delete_list);
1507 }
1508 }
1509
1510 /* Process all new devices. */
1511 for (i = 0; i < num_new_devices; i++) {
1512 device = new_device_list[i];
1513 if (!device->new_device)
1514 continue;
1515 if (device->volume_offline)
1516 continue;
1517 list_add_tail(&device->scsi_device_list_entry,
1518 &ctrl_info->scsi_device_list);
1519 list_add_tail(&device->add_list_entry, &add_list);
1520 /* To prevent this device structure from being freed later. */
1521 device->keep_device = true;
1522 }
1523
1524 pqi_update_all_logical_drive_queue_depths(ctrl_info);
1525
1526 list_for_each_entry(device, &ctrl_info->scsi_device_list,
1527 scsi_device_list_entry)
1528 device->offload_enabled =
1529 device->offload_enabled_pending;
1530
1531 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
1532
1533 /* Remove all devices that have gone away. */
1534 list_for_each_entry_safe(device, next, &delete_list,
1535 delete_list_entry) {
1536 if (device->sdev)
1537 pqi_remove_device(ctrl_info, device);
1538 if (device->volume_offline) {
1539 pqi_dev_info(ctrl_info, "offline", device);
1540 pqi_show_volume_status(ctrl_info, device);
1541 } else {
1542 pqi_dev_info(ctrl_info, "removed", device);
1543 }
1544 list_del(&device->delete_list_entry);
1545 pqi_free_device(device);
1546 }
1547
1548 /*
1549 * Notify the SCSI ML if the queue depth of any existing device has
1550 * changed.
1551 */
1552 list_for_each_entry(device, &ctrl_info->scsi_device_list,
1553 scsi_device_list_entry) {
1554 if (device->sdev && device->queue_depth !=
1555 device->advertised_queue_depth) {
1556 device->advertised_queue_depth = device->queue_depth;
1557 scsi_change_queue_depth(device->sdev,
1558 device->advertised_queue_depth);
1559 }
1560 }
1561
1562 /* Expose any new devices. */
1563 list_for_each_entry_safe(device, next, &add_list, add_list_entry) {
1564 if (device->expose_device && !device->sdev) {
1565 rc = pqi_add_device(ctrl_info, device);
1566 if (rc) {
1567 dev_warn(&ctrl_info->pci_dev->dev,
1568 "scsi %d:%d:%d:%d addition failed, device not added\n",
1569 ctrl_info->scsi_host->host_no,
1570 device->bus, device->target,
1571 device->lun);
1572 pqi_fixup_botched_add(ctrl_info, device);
1573 continue;
1574 }
1575 }
1576 pqi_dev_info(ctrl_info, "added", device);
1577 }
1578}
1579
1580static bool pqi_is_supported_device(struct pqi_scsi_dev *device)
1581{
1582 bool is_supported = false;
1583
1584 switch (device->devtype) {
1585 case TYPE_DISK:
1586 case TYPE_ZBC:
1587 case TYPE_TAPE:
1588 case TYPE_MEDIUM_CHANGER:
1589 case TYPE_ENCLOSURE:
1590 is_supported = true;
1591 break;
1592 case TYPE_RAID:
1593 /*
1594 * Only support the HBA controller itself as a RAID
1595 * controller. If it's a RAID controller other than
1596 * the HBA itself (an external RAID controller, MSA500
1597 * or similar), we don't support it.
1598 */
1599 if (pqi_is_hba_lunid(device->scsi3addr))
1600 is_supported = true;
1601 break;
1602 }
1603
1604 return is_supported;
1605}
1606
1607static inline bool pqi_skip_device(u8 *scsi3addr,
1608 struct report_phys_lun_extended_entry *phys_lun_ext_entry)
1609{
1610 u8 device_flags;
1611
1612 if (!MASKED_DEVICE(scsi3addr))
1613 return false;
1614
1615 /* The device is masked. */
1616
1617 device_flags = phys_lun_ext_entry->device_flags;
1618
1619 if (device_flags & REPORT_PHYS_LUN_DEV_FLAG_NON_DISK) {
1620 /*
1621 * It's a non-disk device. We ignore all devices of this type
1622 * when they're masked.
1623 */
1624 return true;
1625 }
1626
1627 return false;
1628}
1629
1630static inline bool pqi_expose_device(struct pqi_scsi_dev *device)
1631{
1632 /* Expose all devices except for physical devices that are masked. */
1633 if (device->is_physical_device && MASKED_DEVICE(device->scsi3addr))
1634 return false;
1635
1636 return true;
1637}
1638
1639static int pqi_update_scsi_devices(struct pqi_ctrl_info *ctrl_info)
1640{
1641 int i;
1642 int rc;
1643 struct list_head new_device_list_head;
1644 struct report_phys_lun_extended *physdev_list = NULL;
1645 struct report_log_lun_extended *logdev_list = NULL;
1646 struct report_phys_lun_extended_entry *phys_lun_ext_entry;
1647 struct report_log_lun_extended_entry *log_lun_ext_entry;
1648 struct bmic_identify_physical_device *id_phys = NULL;
1649 u32 num_physicals;
1650 u32 num_logicals;
1651 struct pqi_scsi_dev **new_device_list = NULL;
1652 struct pqi_scsi_dev *device;
1653 struct pqi_scsi_dev *next;
1654 unsigned int num_new_devices;
1655 unsigned int num_valid_devices;
1656 bool is_physical_device;
1657 u8 *scsi3addr;
1658 static char *out_of_memory_msg =
1659 "out of memory, device discovery stopped";
1660
1661 INIT_LIST_HEAD(&new_device_list_head);
1662
1663 rc = pqi_get_device_lists(ctrl_info, &physdev_list, &logdev_list);
1664 if (rc)
1665 goto out;
1666
1667 if (physdev_list)
1668 num_physicals =
1669 get_unaligned_be32(&physdev_list->header.list_length)
1670 / sizeof(physdev_list->lun_entries[0]);
1671 else
1672 num_physicals = 0;
1673
1674 if (logdev_list)
1675 num_logicals =
1676 get_unaligned_be32(&logdev_list->header.list_length)
1677 / sizeof(logdev_list->lun_entries[0]);
1678 else
1679 num_logicals = 0;
1680
1681 if (num_physicals) {
1682 /*
1683 * We need this buffer for calls to pqi_get_physical_disk_info()
1684 * below. We allocate it here instead of inside
1685 * pqi_get_physical_disk_info() because it's a fairly large
1686 * buffer.
1687 */
1688 id_phys = kmalloc(sizeof(*id_phys), GFP_KERNEL);
1689 if (!id_phys) {
1690 dev_warn(&ctrl_info->pci_dev->dev, "%s\n",
1691 out_of_memory_msg);
1692 rc = -ENOMEM;
1693 goto out;
1694 }
1695 }
1696
1697 num_new_devices = num_physicals + num_logicals;
1698
1699 new_device_list = kmalloc(sizeof(*new_device_list) *
1700 num_new_devices, GFP_KERNEL);
1701 if (!new_device_list) {
1702 dev_warn(&ctrl_info->pci_dev->dev, "%s\n", out_of_memory_msg);
1703 rc = -ENOMEM;
1704 goto out;
1705 }
1706
1707 for (i = 0; i < num_new_devices; i++) {
1708 device = kzalloc(sizeof(*device), GFP_KERNEL);
1709 if (!device) {
1710 dev_warn(&ctrl_info->pci_dev->dev, "%s\n",
1711 out_of_memory_msg);
1712 rc = -ENOMEM;
1713 goto out;
1714 }
1715 list_add_tail(&device->new_device_list_entry,
1716 &new_device_list_head);
1717 }
1718
1719 device = NULL;
1720 num_valid_devices = 0;
1721
1722 for (i = 0; i < num_new_devices; i++) {
1723
1724 if (i < num_physicals) {
1725 is_physical_device = true;
1726 phys_lun_ext_entry = &physdev_list->lun_entries[i];
1727 log_lun_ext_entry = NULL;
1728 scsi3addr = phys_lun_ext_entry->lunid;
1729 } else {
1730 is_physical_device = false;
1731 phys_lun_ext_entry = NULL;
1732 log_lun_ext_entry =
1733 &logdev_list->lun_entries[i - num_physicals];
1734 scsi3addr = log_lun_ext_entry->lunid;
1735 }
1736
1737 if (is_physical_device &&
1738 pqi_skip_device(scsi3addr, phys_lun_ext_entry))
1739 continue;
1740
1741 if (device)
1742 device = list_next_entry(device, new_device_list_entry);
1743 else
1744 device = list_first_entry(&new_device_list_head,
1745 struct pqi_scsi_dev, new_device_list_entry);
1746
1747 memcpy(device->scsi3addr, scsi3addr, sizeof(device->scsi3addr));
1748 device->is_physical_device = is_physical_device;
1749 device->raid_level = SA_RAID_UNKNOWN;
1750
1751 /* Gather information about the device. */
1752 rc = pqi_get_device_info(ctrl_info, device);
1753 if (rc == -ENOMEM) {
1754 dev_warn(&ctrl_info->pci_dev->dev, "%s\n",
1755 out_of_memory_msg);
1756 goto out;
1757 }
1758 if (rc) {
1759 dev_warn(&ctrl_info->pci_dev->dev,
1760 "obtaining device info failed, skipping device %016llx\n",
1761 get_unaligned_be64(device->scsi3addr));
1762 rc = 0;
1763 continue;
1764 }
1765
1766 if (!pqi_is_supported_device(device))
1767 continue;
1768
1769 pqi_assign_bus_target_lun(device);
1770
1771 device->expose_device = pqi_expose_device(device);
1772
1773 if (device->is_physical_device) {
1774 device->wwid = phys_lun_ext_entry->wwid;
1775 if ((phys_lun_ext_entry->device_flags &
1776 REPORT_PHYS_LUN_DEV_FLAG_AIO_ENABLED) &&
1777 phys_lun_ext_entry->aio_handle)
1778 device->aio_enabled = true;
1779 } else {
1780 memcpy(device->volume_id, log_lun_ext_entry->volume_id,
1781 sizeof(device->volume_id));
1782 }
1783
1784 switch (device->devtype) {
1785 case TYPE_DISK:
1786 case TYPE_ZBC:
1787 case TYPE_ENCLOSURE:
1788 if (device->is_physical_device) {
1789 device->sas_address =
1790 get_unaligned_be64(&device->wwid);
1791 if (device->devtype == TYPE_DISK ||
1792 device->devtype == TYPE_ZBC) {
1793 device->aio_handle =
1794 phys_lun_ext_entry->aio_handle;
1795 pqi_get_physical_disk_info(ctrl_info,
1796 device, id_phys);
1797 }
1798 }
1799 break;
1800 }
1801
1802 new_device_list[num_valid_devices++] = device;
1803 }
1804
1805 pqi_update_device_list(ctrl_info, new_device_list, num_valid_devices);
1806
1807out:
1808 list_for_each_entry_safe(device, next, &new_device_list_head,
1809 new_device_list_entry) {
1810 if (device->keep_device)
1811 continue;
1812 list_del(&device->new_device_list_entry);
1813 pqi_free_device(device);
1814 }
1815
1816 kfree(new_device_list);
1817 kfree(physdev_list);
1818 kfree(logdev_list);
1819 kfree(id_phys);
1820
1821 return rc;
1822}
1823
1824static void pqi_remove_all_scsi_devices(struct pqi_ctrl_info *ctrl_info)
1825{
1826 unsigned long flags;
1827 struct pqi_scsi_dev *device;
1828 struct pqi_scsi_dev *next;
1829
1830 spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);
1831
1832 list_for_each_entry_safe(device, next, &ctrl_info->scsi_device_list,
1833 scsi_device_list_entry) {
1834 if (device->sdev)
1835 pqi_remove_device(ctrl_info, device);
1836 list_del(&device->scsi_device_list_entry);
1837 pqi_free_device(device);
1838 }
1839
1840 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
1841}
1842
1843static int pqi_scan_scsi_devices(struct pqi_ctrl_info *ctrl_info)
1844{
1845 int rc;
1846
1847 if (pqi_ctrl_offline(ctrl_info))
1848 return -ENXIO;
1849
1850 mutex_lock(&ctrl_info->scan_mutex);
1851
1852 rc = pqi_update_scsi_devices(ctrl_info);
1853 if (rc)
1854 pqi_schedule_rescan_worker(ctrl_info);
1855
1856 mutex_unlock(&ctrl_info->scan_mutex);
1857
1858 return rc;
1859}
1860
1861static void pqi_scan_start(struct Scsi_Host *shost)
1862{
1863 pqi_scan_scsi_devices(shost_to_hba(shost));
1864}
1865
1866/* Returns TRUE if scan is finished. */
1867
1868static int pqi_scan_finished(struct Scsi_Host *shost,
1869 unsigned long elapsed_time)
1870{
1871 struct pqi_ctrl_info *ctrl_info;
1872
1873 ctrl_info = shost_priv(shost);
1874
1875 return !mutex_is_locked(&ctrl_info->scan_mutex);
1876}
1877
1878static inline void pqi_set_encryption_info(
1879 struct pqi_encryption_info *encryption_info, struct raid_map *raid_map,
1880 u64 first_block)
1881{
1882 u32 volume_blk_size;
1883
1884 /*
1885 * Set the encryption tweak values based on logical block address.
1886 * If the block size is 512, the tweak value is equal to the LBA.
1887 * For other block sizes, tweak value is (LBA * block size) / 512.
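 * For example, with a 4096-byte block size, LBA 100 yields a tweak of
 * (100 * 4096) / 512 = 800.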
1888 */
1889 volume_blk_size = get_unaligned_le32(&raid_map->volume_blk_size);
1890 if (volume_blk_size != 512)
1891 first_block = (first_block * volume_blk_size) / 512;
1892
1893 encryption_info->data_encryption_key_index =
1894 get_unaligned_le16(&raid_map->data_encryption_key_index);
1895 encryption_info->encrypt_tweak_lower = lower_32_bits(first_block);
1896 encryption_info->encrypt_tweak_upper = upper_32_bits(first_block);
1897}
1898
1899/*
1900 * Attempt to perform offload RAID mapping for a logical volume I/O.
1901 */
1902
1903#define PQI_RAID_BYPASS_INELIGIBLE 1
1904
1905static int pqi_raid_bypass_submit_scsi_cmd(struct pqi_ctrl_info *ctrl_info,
1906 struct pqi_scsi_dev *device, struct scsi_cmnd *scmd,
1907 struct pqi_queue_group *queue_group)
1908{
1909 struct raid_map *raid_map;
1910 bool is_write = false;
1911 u32 map_index;
1912 u64 first_block;
1913 u64 last_block;
1914 u32 block_cnt;
1915 u32 blocks_per_row;
1916 u64 first_row;
1917 u64 last_row;
1918 u32 first_row_offset;
1919 u32 last_row_offset;
1920 u32 first_column;
1921 u32 last_column;
1922 u64 r0_first_row;
1923 u64 r0_last_row;
1924 u32 r5or6_blocks_per_row;
1925 u64 r5or6_first_row;
1926 u64 r5or6_last_row;
1927 u32 r5or6_first_row_offset;
1928 u32 r5or6_last_row_offset;
1929 u32 r5or6_first_column;
1930 u32 r5or6_last_column;
1931 u16 data_disks_per_row;
1932 u32 total_disks_per_row;
1933 u16 layout_map_count;
1934 u32 stripesize;
1935 u16 strip_size;
1936 u32 first_group;
1937 u32 last_group;
1938 u32 current_group;
1939 u32 map_row;
1940 u32 aio_handle;
1941 u64 disk_block;
1942 u32 disk_block_cnt;
1943 u8 cdb[16];
1944 u8 cdb_length;
1945 int offload_to_mirror;
1946 struct pqi_encryption_info *encryption_info_ptr;
1947 struct pqi_encryption_info encryption_info;
1948#if BITS_PER_LONG == 32
1949 u64 tmpdiv;
1950#endif
1951
1952 /* Check for valid opcode, get LBA and block count. */
1953 switch (scmd->cmnd[0]) {
1954 case WRITE_6:
1955 is_write = true;
1956 /* fall through */
1957 case READ_6:
1958 first_block = (u64)(((scmd->cmnd[1] & 0x1f) << 16) |
1959 (scmd->cmnd[2] << 8) | scmd->cmnd[3]);
1960 block_cnt = (u32)scmd->cmnd[4];
1961 if (block_cnt == 0)
1962 block_cnt = 256;
1963 break;
1964 case WRITE_10:
1965 is_write = true;
1966 /* fall through */
1967 case READ_10:
1968 first_block = (u64)get_unaligned_be32(&scmd->cmnd[2]);
1969 block_cnt = (u32)get_unaligned_be16(&scmd->cmnd[7]);
1970 break;
1971 case WRITE_12:
1972 is_write = true;
1973 /* fall through */
1974 case READ_12:
1975 first_block = (u64)get_unaligned_be32(&scmd->cmnd[2]);
1976 block_cnt = get_unaligned_be32(&scmd->cmnd[6]);
1977 break;
1978 case WRITE_16:
1979 is_write = true;
1980 /* fall through */
1981 case READ_16:
1982 first_block = get_unaligned_be64(&scmd->cmnd[2]);
1983 block_cnt = get_unaligned_be32(&scmd->cmnd[10]);
1984 break;
1985 default:
1986 /* Process via normal I/O path. */
1987 return PQI_RAID_BYPASS_INELIGIBLE;
1988 }
1989
1990 /* Check for write to non-RAID-0. */
1991 if (is_write && device->raid_level != SA_RAID_0)
1992 return PQI_RAID_BYPASS_INELIGIBLE;
1993
1994 if (unlikely(block_cnt == 0))
1995 return PQI_RAID_BYPASS_INELIGIBLE;
1996
1997 last_block = first_block + block_cnt - 1;
1998 raid_map = device->raid_map;
1999
2000 /* Check for invalid block or wraparound. */
2001 if (last_block >= get_unaligned_le64(&raid_map->volume_blk_cnt) ||
2002 last_block < first_block)
2003 return PQI_RAID_BYPASS_INELIGIBLE;
2004
2005 data_disks_per_row = get_unaligned_le16(&raid_map->data_disks_per_row);
2006 strip_size = get_unaligned_le16(&raid_map->strip_size);
2007 layout_map_count = get_unaligned_le16(&raid_map->layout_map_count);
2008
2009 /* Calculate stripe information for the request. */
2010 blocks_per_row = data_disks_per_row * strip_size;
2011#if BITS_PER_LONG == 32
2012 tmpdiv = first_block;
2013 do_div(tmpdiv, blocks_per_row);
2014 first_row = tmpdiv;
2015 tmpdiv = last_block;
2016 do_div(tmpdiv, blocks_per_row);
2017 last_row = tmpdiv;
2018 first_row_offset = (u32)(first_block - (first_row * blocks_per_row));
2019 last_row_offset = (u32)(last_block - (last_row * blocks_per_row));
2020 tmpdiv = first_row_offset;
2021 do_div(tmpdiv, strip_size);
2022 first_column = tmpdiv;
2023 tmpdiv = last_row_offset;
2024 do_div(tmpdiv, strip_size);
2025 last_column = tmpdiv;
2026#else
2027 first_row = first_block / blocks_per_row;
2028 last_row = last_block / blocks_per_row;
2029 first_row_offset = (u32)(first_block - (first_row * blocks_per_row));
2030 last_row_offset = (u32)(last_block - (last_row * blocks_per_row));
2031 first_column = first_row_offset / strip_size;
2032 last_column = last_row_offset / strip_size;
2033#endif
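	/*
	 * Illustrative example: with 4 data disks and a 128-block strip,
	 * blocks_per_row = 512.  An I/O starting at block 600 falls in
	 * row 1 (600 / 512) at row offset 88, i.e. column 0 (88 / 128).
	 */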
2034
2035 /* If this isn't a single row/column then give to the controller. */
2036 if (first_row != last_row || first_column != last_column)
2037 return PQI_RAID_BYPASS_INELIGIBLE;
2038
2039 /* Proceeding with driver mapping. */
2040 total_disks_per_row = data_disks_per_row +
2041 get_unaligned_le16(&raid_map->metadata_disks_per_row);
2042 map_row = ((u32)(first_row >> raid_map->parity_rotation_shift)) %
2043 get_unaligned_le16(&raid_map->row_cnt);
2044 map_index = (map_row * total_disks_per_row) + first_column;
2045
2046 /* RAID 1 */
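	/*
	 * Bypass requests alternate between the primary drive and its
	 * mirror so that reads (writes never reach this point for RAID 1)
	 * are spread across both copies of the data.
	 */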
2047 if (device->raid_level == SA_RAID_1) {
2048 if (device->offload_to_mirror)
2049 map_index += data_disks_per_row;
2050 device->offload_to_mirror = !device->offload_to_mirror;
2051 } else if (device->raid_level == SA_RAID_ADM) {
2052 /* RAID ADM */
2053 /*
2054 * Handles N-way mirrors (R1-ADM) and R10 with # of drives
2055 * divisible by 3.
2056 */
2057 offload_to_mirror = device->offload_to_mirror;
2058 if (offload_to_mirror == 0) {
2059 /* use physical disk in the first mirrored group. */
2060 map_index %= data_disks_per_row;
2061 } else {
2062 do {
2063 /*
2064 * Determine mirror group that map_index
2065 * indicates.
2066 */
2067 current_group = map_index / data_disks_per_row;
2068
2069 if (offload_to_mirror != current_group) {
2070 if (current_group <
2071 layout_map_count - 1) {
2072 /*
2073 * Select raid index from
2074 * next group.
2075 */
2076 map_index += data_disks_per_row;
2077 current_group++;
2078 } else {
2079 /*
2080 * Select raid index from first
2081 * group.
2082 */
2083 map_index %= data_disks_per_row;
2084 current_group = 0;
2085 }
2086 }
2087 } while (offload_to_mirror != current_group);
2088 }
2089
2090 /* Set mirror group to use next time. */
2091 offload_to_mirror =
2092 (offload_to_mirror >= layout_map_count - 1) ?
2093 0 : offload_to_mirror + 1;
2094 WARN_ON(offload_to_mirror >= layout_map_count);
2095 device->offload_to_mirror = offload_to_mirror;
2096 /*
2097 * Avoid direct use of device->offload_to_mirror within this
2098 * function since multiple threads might simultaneously
 2099 * increment it beyond the range of device->layout_map_count - 1.
2100 */
2101 } else if ((device->raid_level == SA_RAID_5 ||
2102 device->raid_level == SA_RAID_6) && layout_map_count > 1) {
2103 /* RAID 50/60 */
2104 /* Verify first and last block are in same RAID group */
2105 r5or6_blocks_per_row = strip_size * data_disks_per_row;
2106 stripesize = r5or6_blocks_per_row * layout_map_count;
2107#if BITS_PER_LONG == 32
2108 tmpdiv = first_block;
2109 first_group = do_div(tmpdiv, stripesize);
2110 tmpdiv = first_group;
2111 do_div(tmpdiv, r5or6_blocks_per_row);
2112 first_group = tmpdiv;
2113 tmpdiv = last_block;
2114 last_group = do_div(tmpdiv, stripesize);
2115 tmpdiv = last_group;
2116 do_div(tmpdiv, r5or6_blocks_per_row);
2117 last_group = tmpdiv;
2118#else
2119 first_group = (first_block % stripesize) / r5or6_blocks_per_row;
2120 last_group = (last_block % stripesize) / r5or6_blocks_per_row;
2121#endif
2122 if (first_group != last_group)
2123 return PQI_RAID_BYPASS_INELIGIBLE;
2124
2125 /* Verify request is in a single row of RAID 5/6 */
2126#if BITS_PER_LONG == 32
2127 tmpdiv = first_block;
2128 do_div(tmpdiv, stripesize);
2129 first_row = r5or6_first_row = r0_first_row = tmpdiv;
2130 tmpdiv = last_block;
2131 do_div(tmpdiv, stripesize);
2132 r5or6_last_row = r0_last_row = tmpdiv;
2133#else
2134 first_row = r5or6_first_row = r0_first_row =
2135 first_block / stripesize;
2136 r5or6_last_row = r0_last_row = last_block / stripesize;
2137#endif
2138 if (r5or6_first_row != r5or6_last_row)
2139 return PQI_RAID_BYPASS_INELIGIBLE;
2140
2141 /* Verify request is in a single column */
2142#if BITS_PER_LONG == 32
2143 tmpdiv = first_block;
2144 first_row_offset = do_div(tmpdiv, stripesize);
2145 tmpdiv = first_row_offset;
2146 first_row_offset = (u32)do_div(tmpdiv, r5or6_blocks_per_row);
2147 r5or6_first_row_offset = first_row_offset;
2148 tmpdiv = last_block;
2149 r5or6_last_row_offset = do_div(tmpdiv, stripesize);
2150 tmpdiv = r5or6_last_row_offset;
2151 r5or6_last_row_offset = do_div(tmpdiv, r5or6_blocks_per_row);
2152 tmpdiv = r5or6_first_row_offset;
2153 do_div(tmpdiv, strip_size);
2154 first_column = r5or6_first_column = tmpdiv;
2155 tmpdiv = r5or6_last_row_offset;
2156 do_div(tmpdiv, strip_size);
2157 r5or6_last_column = tmpdiv;
2158#else
2159 first_row_offset = r5or6_first_row_offset =
2160 (u32)((first_block % stripesize) %
2161 r5or6_blocks_per_row);
2162
2163 r5or6_last_row_offset =
2164 (u32)((last_block % stripesize) %
2165 r5or6_blocks_per_row);
2166
2167 first_column = r5or6_first_row_offset / strip_size;
2168 r5or6_first_column = first_column;
2169 r5or6_last_column = r5or6_last_row_offset / strip_size;
2170#endif
2171 if (r5or6_first_column != r5or6_last_column)
2172 return PQI_RAID_BYPASS_INELIGIBLE;
2173
2174 /* Request is eligible */
2175 map_row =
2176 ((u32)(first_row >> raid_map->parity_rotation_shift)) %
2177 get_unaligned_le16(&raid_map->row_cnt);
2178
2179 map_index = (first_group *
2180 (get_unaligned_le16(&raid_map->row_cnt) *
2181 total_disks_per_row)) +
2182 (map_row * total_disks_per_row) + first_column;
2183 }
2184
2185 if (unlikely(map_index >= RAID_MAP_MAX_ENTRIES))
2186 return PQI_RAID_BYPASS_INELIGIBLE;
2187
2188 aio_handle = raid_map->disk_data[map_index].aio_handle;
2189 disk_block = get_unaligned_le64(&raid_map->disk_starting_blk) +
2190 first_row * strip_size +
2191 (first_row_offset - first_column * strip_size);
2192 disk_block_cnt = block_cnt;
2193
2194 /* Handle differing logical/physical block sizes. */
2195 if (raid_map->phys_blk_shift) {
2196 disk_block <<= raid_map->phys_blk_shift;
2197 disk_block_cnt <<= raid_map->phys_blk_shift;
2198 }
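	/*
	 * Illustrative example: a phys_blk_shift of 3 means each volume
	 * block maps to 8 physical disk blocks, so both the starting
	 * block and the block count are scaled up by a factor of 8.
	 */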
2199
2200 if (unlikely(disk_block_cnt > 0xffff))
2201 return PQI_RAID_BYPASS_INELIGIBLE;
2202
2203 /* Build the new CDB for the physical disk I/O. */
2204 if (disk_block > 0xffffffff) {
2205 cdb[0] = is_write ? WRITE_16 : READ_16;
2206 cdb[1] = 0;
2207 put_unaligned_be64(disk_block, &cdb[2]);
2208 put_unaligned_be32(disk_block_cnt, &cdb[10]);
2209 cdb[14] = 0;
2210 cdb[15] = 0;
2211 cdb_length = 16;
2212 } else {
2213 cdb[0] = is_write ? WRITE_10 : READ_10;
2214 cdb[1] = 0;
2215 put_unaligned_be32((u32)disk_block, &cdb[2]);
2216 cdb[6] = 0;
2217 put_unaligned_be16((u16)disk_block_cnt, &cdb[7]);
2218 cdb[9] = 0;
2219 cdb_length = 10;
2220 }
2221
2222 if (get_unaligned_le16(&raid_map->flags) &
2223 RAID_MAP_ENCRYPTION_ENABLED) {
2224 pqi_set_encryption_info(&encryption_info, raid_map,
2225 first_block);
2226 encryption_info_ptr = &encryption_info;
2227 } else {
2228 encryption_info_ptr = NULL;
2229 }
2230
2231 return pqi_aio_submit_io(ctrl_info, scmd, aio_handle,
2232 cdb, cdb_length, queue_group, encryption_info_ptr);
2233}
2234
2235#define PQI_STATUS_IDLE 0x0
2236
2237#define PQI_CREATE_ADMIN_QUEUE_PAIR 1
2238#define PQI_DELETE_ADMIN_QUEUE_PAIR 2
2239
2240#define PQI_DEVICE_STATE_POWER_ON_AND_RESET 0x0
2241#define PQI_DEVICE_STATE_STATUS_AVAILABLE 0x1
2242#define PQI_DEVICE_STATE_ALL_REGISTERS_READY 0x2
2243#define PQI_DEVICE_STATE_ADMIN_QUEUE_PAIR_READY 0x3
2244#define PQI_DEVICE_STATE_ERROR 0x4
2245
2246#define PQI_MODE_READY_TIMEOUT_SECS 30
2247#define PQI_MODE_READY_POLL_INTERVAL_MSECS 1
2248
2249static int pqi_wait_for_pqi_mode_ready(struct pqi_ctrl_info *ctrl_info)
2250{
2251 struct pqi_device_registers __iomem *pqi_registers;
2252 unsigned long timeout;
2253 u64 signature;
2254 u8 status;
2255
2256 pqi_registers = ctrl_info->pqi_registers;
2257 timeout = (PQI_MODE_READY_TIMEOUT_SECS * HZ) + jiffies;
2258
2259 while (1) {
2260 signature = readq(&pqi_registers->signature);
2261 if (memcmp(&signature, PQI_DEVICE_SIGNATURE,
2262 sizeof(signature)) == 0)
2263 break;
2264 if (time_after(jiffies, timeout)) {
2265 dev_err(&ctrl_info->pci_dev->dev,
2266 "timed out waiting for PQI signature\n");
2267 return -ETIMEDOUT;
2268 }
2269 msleep(PQI_MODE_READY_POLL_INTERVAL_MSECS);
2270 }
2271
2272 while (1) {
2273 status = readb(&pqi_registers->function_and_status_code);
2274 if (status == PQI_STATUS_IDLE)
2275 break;
2276 if (time_after(jiffies, timeout)) {
2277 dev_err(&ctrl_info->pci_dev->dev,
2278 "timed out waiting for PQI IDLE\n");
2279 return -ETIMEDOUT;
2280 }
2281 msleep(PQI_MODE_READY_POLL_INTERVAL_MSECS);
2282 }
2283
2284 while (1) {
2285 if (readl(&pqi_registers->device_status) ==
2286 PQI_DEVICE_STATE_ALL_REGISTERS_READY)
2287 break;
2288 if (time_after(jiffies, timeout)) {
2289 dev_err(&ctrl_info->pci_dev->dev,
2290 "timed out waiting for PQI all registers ready\n");
2291 return -ETIMEDOUT;
2292 }
2293 msleep(PQI_MODE_READY_POLL_INTERVAL_MSECS);
2294 }
2295
2296 return 0;
2297}
2298
2299static inline void pqi_aio_path_disabled(struct pqi_io_request *io_request)
2300{
2301 struct pqi_scsi_dev *device;
2302
2303 device = io_request->scmd->device->hostdata;
2304 device->offload_enabled = false;
2305}
2306
2307static inline void pqi_take_device_offline(struct scsi_device *sdev)
2308{
2309 struct pqi_ctrl_info *ctrl_info;
e58081a7 2310 struct pqi_scsi_dev *device;
2311
2312 if (scsi_device_online(sdev)) {
2313 scsi_device_set_state(sdev, SDEV_OFFLINE);
2314 ctrl_info = shost_to_hba(sdev->host);
2315 schedule_delayed_work(&ctrl_info->rescan_work, 0);
2316 device = sdev->hostdata;
2317 dev_err(&ctrl_info->pci_dev->dev, "offlined scsi %d:%d:%d:%d\n",
2318 ctrl_info->scsi_host->host_no, device->bus,
2319 device->target, device->lun);
2320 }
2321}
2322
2323static void pqi_process_raid_io_error(struct pqi_io_request *io_request)
2324{
2325 u8 scsi_status;
2326 u8 host_byte;
2327 struct scsi_cmnd *scmd;
2328 struct pqi_raid_error_info *error_info;
2329 size_t sense_data_length;
2330 int residual_count;
2331 int xfer_count;
2332 struct scsi_sense_hdr sshdr;
2333
2334 scmd = io_request->scmd;
2335 if (!scmd)
2336 return;
2337
2338 error_info = io_request->error_info;
2339 scsi_status = error_info->status;
2340 host_byte = DID_OK;
2341
2342 if (error_info->data_out_result == PQI_DATA_IN_OUT_UNDERFLOW) {
2343 xfer_count =
2344 get_unaligned_le32(&error_info->data_out_transferred);
2345 residual_count = scsi_bufflen(scmd) - xfer_count;
2346 scsi_set_resid(scmd, residual_count);
2347 if (xfer_count < scmd->underflow)
2348 host_byte = DID_SOFT_ERROR;
2349 }
2350
2351 sense_data_length = get_unaligned_le16(&error_info->sense_data_length);
2352 if (sense_data_length == 0)
2353 sense_data_length =
2354 get_unaligned_le16(&error_info->response_data_length);
2355 if (sense_data_length) {
2356 if (sense_data_length > sizeof(error_info->data))
2357 sense_data_length = sizeof(error_info->data);
2358
2359 if (scsi_status == SAM_STAT_CHECK_CONDITION &&
2360 scsi_normalize_sense(error_info->data,
2361 sense_data_length, &sshdr) &&
2362 sshdr.sense_key == HARDWARE_ERROR &&
2363 sshdr.asc == 0x3e &&
2364 sshdr.ascq == 0x1) {
2365 pqi_take_device_offline(scmd->device);
2366 host_byte = DID_NO_CONNECT;
2367 }
2368
2369 if (sense_data_length > SCSI_SENSE_BUFFERSIZE)
2370 sense_data_length = SCSI_SENSE_BUFFERSIZE;
2371 memcpy(scmd->sense_buffer, error_info->data,
2372 sense_data_length);
2373 }
2374
2375 scmd->result = scsi_status;
2376 set_host_byte(scmd, host_byte);
2377}
2378
2379static void pqi_process_aio_io_error(struct pqi_io_request *io_request)
2380{
2381 u8 scsi_status;
2382 u8 host_byte;
2383 struct scsi_cmnd *scmd;
2384 struct pqi_aio_error_info *error_info;
2385 size_t sense_data_length;
2386 int residual_count;
2387 int xfer_count;
2388 bool device_offline;
2389
2390 scmd = io_request->scmd;
2391 error_info = io_request->error_info;
2392 host_byte = DID_OK;
2393 sense_data_length = 0;
2394 device_offline = false;
2395
2396 switch (error_info->service_response) {
2397 case PQI_AIO_SERV_RESPONSE_COMPLETE:
2398 scsi_status = error_info->status;
2399 break;
2400 case PQI_AIO_SERV_RESPONSE_FAILURE:
2401 switch (error_info->status) {
2402 case PQI_AIO_STATUS_IO_ABORTED:
2403 scsi_status = SAM_STAT_TASK_ABORTED;
2404 break;
2405 case PQI_AIO_STATUS_UNDERRUN:
2406 scsi_status = SAM_STAT_GOOD;
2407 residual_count = get_unaligned_le32(
2408 &error_info->residual_count);
2409 scsi_set_resid(scmd, residual_count);
2410 xfer_count = scsi_bufflen(scmd) - residual_count;
2411 if (xfer_count < scmd->underflow)
2412 host_byte = DID_SOFT_ERROR;
2413 break;
2414 case PQI_AIO_STATUS_OVERRUN:
2415 scsi_status = SAM_STAT_GOOD;
2416 break;
2417 case PQI_AIO_STATUS_AIO_PATH_DISABLED:
2418 pqi_aio_path_disabled(io_request);
2419 scsi_status = SAM_STAT_GOOD;
2420 io_request->status = -EAGAIN;
2421 break;
2422 case PQI_AIO_STATUS_NO_PATH_TO_DEVICE:
2423 case PQI_AIO_STATUS_INVALID_DEVICE:
2424 device_offline = true;
2425 pqi_take_device_offline(scmd->device);
2426 host_byte = DID_NO_CONNECT;
2427 scsi_status = SAM_STAT_CHECK_CONDITION;
2428 break;
2429 case PQI_AIO_STATUS_IO_ERROR:
2430 default:
2431 scsi_status = SAM_STAT_CHECK_CONDITION;
2432 break;
2433 }
2434 break;
2435 case PQI_AIO_SERV_RESPONSE_TMF_COMPLETE:
2436 case PQI_AIO_SERV_RESPONSE_TMF_SUCCEEDED:
2437 scsi_status = SAM_STAT_GOOD;
2438 break;
2439 case PQI_AIO_SERV_RESPONSE_TMF_REJECTED:
2440 case PQI_AIO_SERV_RESPONSE_TMF_INCORRECT_LUN:
2441 default:
2442 scsi_status = SAM_STAT_CHECK_CONDITION;
2443 break;
2444 }
2445
2446 if (error_info->data_present) {
2447 sense_data_length =
2448 get_unaligned_le16(&error_info->data_length);
2449 if (sense_data_length) {
2450 if (sense_data_length > sizeof(error_info->data))
2451 sense_data_length = sizeof(error_info->data);
2452 if (sense_data_length > SCSI_SENSE_BUFFERSIZE)
2453 sense_data_length = SCSI_SENSE_BUFFERSIZE;
2454 memcpy(scmd->sense_buffer, error_info->data,
2455 sense_data_length);
2456 }
2457 }
2458
2459 if (device_offline && sense_data_length == 0)
2460 scsi_build_sense_buffer(0, scmd->sense_buffer, HARDWARE_ERROR,
2461 0x3e, 0x1);
2462
2463 scmd->result = scsi_status;
2464 set_host_byte(scmd, host_byte);
2465}
2466
2467static void pqi_process_io_error(unsigned int iu_type,
2468 struct pqi_io_request *io_request)
2469{
2470 switch (iu_type) {
2471 case PQI_RESPONSE_IU_RAID_PATH_IO_ERROR:
2472 pqi_process_raid_io_error(io_request);
2473 break;
2474 case PQI_RESPONSE_IU_AIO_PATH_IO_ERROR:
2475 pqi_process_aio_io_error(io_request);
2476 break;
2477 }
2478}
2479
2480static int pqi_interpret_task_management_response(
2481 struct pqi_task_management_response *response)
2482{
2483 int rc;
2484
2485 switch (response->response_code) {
2486 case SOP_TMF_COMPLETE:
2487 case SOP_TMF_FUNCTION_SUCCEEDED:
2488 rc = 0;
2489 break;
2490 default:
2491 rc = -EIO;
2492 break;
2493 }
2494
2495 return rc;
2496}
2497
2498static unsigned int pqi_process_io_intr(struct pqi_ctrl_info *ctrl_info,
2499 struct pqi_queue_group *queue_group)
2500{
2501 unsigned int num_responses;
2502 pqi_index_t oq_pi;
2503 pqi_index_t oq_ci;
2504 struct pqi_io_request *io_request;
2505 struct pqi_io_response *response;
2506 u16 request_id;
2507
2508 num_responses = 0;
2509 oq_ci = queue_group->oq_ci_copy;
2510
2511 while (1) {
2512 oq_pi = *queue_group->oq_pi;
2513 if (oq_pi == oq_ci)
2514 break;
2515
2516 num_responses++;
2517 response = queue_group->oq_element_array +
2518 (oq_ci * PQI_OPERATIONAL_OQ_ELEMENT_LENGTH);
2519
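		/*
		 * The request_id echoed back in every response is the index
		 * of the originating entry in io_request_pool; that is how a
		 * completion is matched to the request that produced it.
		 */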
2520 request_id = get_unaligned_le16(&response->request_id);
2521 WARN_ON(request_id >= ctrl_info->max_io_slots);
2522
2523 io_request = &ctrl_info->io_request_pool[request_id];
2524 WARN_ON(atomic_read(&io_request->refcount) == 0);
2525
2526 switch (response->header.iu_type) {
2527 case PQI_RESPONSE_IU_RAID_PATH_IO_SUCCESS:
2528 case PQI_RESPONSE_IU_AIO_PATH_IO_SUCCESS:
2529 case PQI_RESPONSE_IU_GENERAL_MANAGEMENT:
2530 break;
2531 case PQI_RESPONSE_IU_TASK_MANAGEMENT:
2532 io_request->status =
2533 pqi_interpret_task_management_response(
2534 (void *)response);
2535 break;
2536 case PQI_RESPONSE_IU_AIO_PATH_DISABLED:
2537 pqi_aio_path_disabled(io_request);
2538 io_request->status = -EAGAIN;
2539 break;
2540 case PQI_RESPONSE_IU_RAID_PATH_IO_ERROR:
2541 case PQI_RESPONSE_IU_AIO_PATH_IO_ERROR:
2542 io_request->error_info = ctrl_info->error_buffer +
2543 (get_unaligned_le16(&response->error_index) *
2544 PQI_ERROR_BUFFER_ELEMENT_LENGTH);
2545 pqi_process_io_error(response->header.iu_type,
2546 io_request);
2547 break;
2548 default:
2549 dev_err(&ctrl_info->pci_dev->dev,
2550 "unexpected IU type: 0x%x\n",
2551 response->header.iu_type);
2552 WARN_ON(response->header.iu_type);
2553 break;
2554 }
2555
2556 io_request->io_complete_callback(io_request,
2557 io_request->context);
2558
2559 /*
2560 * Note that the I/O request structure CANNOT BE TOUCHED after
2561 * returning from the I/O completion callback!
2562 */
2563
2564 oq_ci = (oq_ci + 1) % ctrl_info->num_elements_per_oq;
2565 }
2566
2567 if (num_responses) {
2568 queue_group->oq_ci_copy = oq_ci;
2569 writel(oq_ci, queue_group->oq_ci);
2570 }
2571
2572 return num_responses;
2573}
2574
2575static inline unsigned int pqi_num_elements_free(unsigned int pi,
df7a1fcf 2576 unsigned int ci, unsigned int elements_in_queue)
2577{
2578 unsigned int num_elements_used;
2579
2580 if (pi >= ci)
2581 num_elements_used = pi - ci;
2582 else
2583 num_elements_used = elements_in_queue - ci + pi;
2584
2585 return elements_in_queue - num_elements_used - 1;
2586}
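/*
 * Illustrative example: with 8 elements in the queue, pi = 2 and ci = 6,
 * the queue holds 8 - 6 + 2 = 4 elements, so 8 - 4 - 1 = 3 are free.  One
 * slot is always left unused so that a completely full queue can be
 * distinguished from an empty one (pi == ci always means empty).
 */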
2587
2588#define PQI_EVENT_ACK_TIMEOUT 30
2589
2590static void pqi_start_event_ack(struct pqi_ctrl_info *ctrl_info,
2591 struct pqi_event_acknowledge_request *iu, size_t iu_length)
2592{
2593 pqi_index_t iq_pi;
2594 pqi_index_t iq_ci;
2595 unsigned long flags;
2596 void *next_element;
2597 unsigned long timeout;
2598 struct pqi_queue_group *queue_group;
2599
2600 queue_group = &ctrl_info->queue_groups[PQI_DEFAULT_QUEUE_GROUP];
2601 put_unaligned_le16(queue_group->oq_id, &iu->header.response_queue_id);
2602
2603 timeout = (PQI_EVENT_ACK_TIMEOUT * HZ) + jiffies;
2604
2605 while (1) {
2606 spin_lock_irqsave(&queue_group->submit_lock[RAID_PATH], flags);
2607
2608 iq_pi = queue_group->iq_pi_copy[RAID_PATH];
2609 iq_ci = *queue_group->iq_ci[RAID_PATH];
2610
2611 if (pqi_num_elements_free(iq_pi, iq_ci,
2612 ctrl_info->num_elements_per_iq))
2613 break;
2614
2615 spin_unlock_irqrestore(
2616 &queue_group->submit_lock[RAID_PATH], flags);
2617
2618 if (time_after(jiffies, timeout)) {
2619 dev_err(&ctrl_info->pci_dev->dev,
2620 "sending event acknowledge timed out\n");
2621 return;
2622 }
2623 }
2624
2625 next_element = queue_group->iq_element_array[RAID_PATH] +
2626 (iq_pi * PQI_OPERATIONAL_IQ_ELEMENT_LENGTH);
2627
2628 memcpy(next_element, iu, iu_length);
2629
2630 iq_pi = (iq_pi + 1) % ctrl_info->num_elements_per_iq;
2631
2632 queue_group->iq_pi_copy[RAID_PATH] = iq_pi;
2633
2634 /*
2635 * This write notifies the controller that an IU is available to be
2636 * processed.
2637 */
2638 writel(iq_pi, queue_group->iq_pi[RAID_PATH]);
2639
2640 spin_unlock_irqrestore(&queue_group->submit_lock[RAID_PATH], flags);
2641}
2642
2643static void pqi_acknowledge_event(struct pqi_ctrl_info *ctrl_info,
2644 struct pqi_event *event)
2645{
2646 struct pqi_event_acknowledge_request request;
2647
2648 memset(&request, 0, sizeof(request));
2649
2650 request.header.iu_type = PQI_REQUEST_IU_ACKNOWLEDGE_VENDOR_EVENT;
2651 put_unaligned_le16(sizeof(request) - PQI_REQUEST_HEADER_LENGTH,
2652 &request.header.iu_length);
2653 request.event_type = event->event_type;
2654 request.event_id = event->event_id;
2655 request.additional_event_id = event->additional_event_id;
2656
2657 pqi_start_event_ack(ctrl_info, &request, sizeof(request));
2658}
2659
2660static void pqi_event_worker(struct work_struct *work)
2661{
2662 unsigned int i;
2663 struct pqi_ctrl_info *ctrl_info;
2664 struct pqi_event *pending_event;
2665 bool got_non_heartbeat_event = false;
2666
2667 ctrl_info = container_of(work, struct pqi_ctrl_info, event_work);
2668
2669 pending_event = ctrl_info->pending_events;
2670 for (i = 0; i < PQI_NUM_SUPPORTED_EVENTS; i++) {
2671 if (pending_event->pending) {
2672 pending_event->pending = false;
2673 pqi_acknowledge_event(ctrl_info, pending_event);
2674 if (i != PQI_EVENT_HEARTBEAT)
2675 got_non_heartbeat_event = true;
2676 }
2677 pending_event++;
2678 }
2679
2680 if (got_non_heartbeat_event)
2681 pqi_schedule_rescan_worker(ctrl_info);
2682}
2683
2684static void pqi_take_ctrl_offline(struct pqi_ctrl_info *ctrl_info)
2685{
2686 unsigned int i;
2687 unsigned int path;
2688 struct pqi_queue_group *queue_group;
2689 unsigned long flags;
2690 struct pqi_io_request *io_request;
2691 struct pqi_io_request *next;
2692 struct scsi_cmnd *scmd;
2693
2694 ctrl_info->controller_online = false;
2695 dev_err(&ctrl_info->pci_dev->dev, "controller offline\n");
2696
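	/*
	 * Fail every I/O still sitting on the per-queue-group submission
	 * lists with DID_NO_CONNECT so the SCSI midlayer sees them
	 * complete; an offline controller will never consume them.
	 */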
2697 for (i = 0; i < ctrl_info->num_queue_groups; i++) {
2698 queue_group = &ctrl_info->queue_groups[i];
2699
2700 for (path = 0; path < 2; path++) {
2701 spin_lock_irqsave(
2702 &queue_group->submit_lock[path], flags);
2703
2704 list_for_each_entry_safe(io_request, next,
2705 &queue_group->request_list[path],
2706 request_list_entry) {
2707
2708 scmd = io_request->scmd;
2709 if (scmd) {
2710 set_host_byte(scmd, DID_NO_CONNECT);
2711 pqi_scsi_done(scmd);
2712 }
2713
2714 list_del(&io_request->request_list_entry);
2715 }
2716
2717 spin_unlock_irqrestore(
2718 &queue_group->submit_lock[path], flags);
2719 }
2720 }
2721}
2722
2723#define PQI_HEARTBEAT_TIMER_INTERVAL (5 * HZ)
2724#define PQI_MAX_HEARTBEAT_REQUESTS 5
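/*
 * The heartbeat timer fires every 5 seconds.  If no new interrupts have
 * arrived since the previous check, a heartbeat event acknowledgement is
 * queued to prod the controller; after more than PQI_MAX_HEARTBEAT_REQUESTS
 * consecutive silent intervals (roughly 30 seconds) the controller is
 * assumed to be hung and is taken offline.
 */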
2725
2726static void pqi_heartbeat_timer_handler(unsigned long data)
2727{
2728 int num_interrupts;
2729 struct pqi_ctrl_info *ctrl_info = (struct pqi_ctrl_info *)data;
2730
2731 num_interrupts = atomic_read(&ctrl_info->num_interrupts);
2732
2733 if (num_interrupts == ctrl_info->previous_num_interrupts) {
2734 ctrl_info->num_heartbeats_requested++;
2735 if (ctrl_info->num_heartbeats_requested >
2736 PQI_MAX_HEARTBEAT_REQUESTS) {
2737 pqi_take_ctrl_offline(ctrl_info);
2738 return;
2739 }
2740 ctrl_info->pending_events[PQI_EVENT_HEARTBEAT].pending = true;
2741 schedule_work(&ctrl_info->event_work);
2742 } else {
2743 ctrl_info->num_heartbeats_requested = 0;
2744 }
2745
2746 ctrl_info->previous_num_interrupts = num_interrupts;
2747 mod_timer(&ctrl_info->heartbeat_timer,
2748 jiffies + PQI_HEARTBEAT_TIMER_INTERVAL);
2749}
2750
2751static void pqi_start_heartbeat_timer(struct pqi_ctrl_info *ctrl_info)
2752{
2753 ctrl_info->previous_num_interrupts =
2754 atomic_read(&ctrl_info->num_interrupts);
2755
2756 init_timer(&ctrl_info->heartbeat_timer);
2757 ctrl_info->heartbeat_timer.expires =
2758 jiffies + PQI_HEARTBEAT_TIMER_INTERVAL;
2759 ctrl_info->heartbeat_timer.data = (unsigned long)ctrl_info;
2760 ctrl_info->heartbeat_timer.function = pqi_heartbeat_timer_handler;
2761 add_timer(&ctrl_info->heartbeat_timer);
2762 ctrl_info->heartbeat_timer_started = true;
2763}
2764
2765static inline void pqi_stop_heartbeat_timer(struct pqi_ctrl_info *ctrl_info)
2766{
2767 if (ctrl_info->heartbeat_timer_started)
2768 del_timer_sync(&ctrl_info->heartbeat_timer);
2769}
2770
2771static int pqi_event_type_to_event_index(unsigned int event_type)
2772{
2773 int index;
2774
2775 switch (event_type) {
2776 case PQI_EVENT_TYPE_HEARTBEAT:
2777 index = PQI_EVENT_HEARTBEAT;
2778 break;
2779 case PQI_EVENT_TYPE_HOTPLUG:
2780 index = PQI_EVENT_HOTPLUG;
2781 break;
2782 case PQI_EVENT_TYPE_HARDWARE:
2783 index = PQI_EVENT_HARDWARE;
2784 break;
2785 case PQI_EVENT_TYPE_PHYSICAL_DEVICE:
2786 index = PQI_EVENT_PHYSICAL_DEVICE;
2787 break;
2788 case PQI_EVENT_TYPE_LOGICAL_DEVICE:
2789 index = PQI_EVENT_LOGICAL_DEVICE;
2790 break;
2791 case PQI_EVENT_TYPE_AIO_STATE_CHANGE:
2792 index = PQI_EVENT_AIO_STATE_CHANGE;
2793 break;
2794 case PQI_EVENT_TYPE_AIO_CONFIG_CHANGE:
2795 index = PQI_EVENT_AIO_CONFIG_CHANGE;
2796 break;
2797 default:
2798 index = -1;
2799 break;
2800 }
2801
2802 return index;
2803}
2804
2805static unsigned int pqi_process_event_intr(struct pqi_ctrl_info *ctrl_info)
2806{
2807 unsigned int num_events;
2808 pqi_index_t oq_pi;
2809 pqi_index_t oq_ci;
2810 struct pqi_event_queue *event_queue;
2811 struct pqi_event_response *response;
2812 struct pqi_event *pending_event;
2813 bool need_delayed_work;
2814 int event_index;
2815
2816 event_queue = &ctrl_info->event_queue;
2817 num_events = 0;
2818 need_delayed_work = false;
2819 oq_ci = event_queue->oq_ci_copy;
2820
2821 while (1) {
2822 oq_pi = *event_queue->oq_pi;
2823 if (oq_pi == oq_ci)
2824 break;
2825
2826 num_events++;
2827 response = event_queue->oq_element_array +
2828 (oq_ci * PQI_EVENT_OQ_ELEMENT_LENGTH);
2829
2830 event_index =
2831 pqi_event_type_to_event_index(response->event_type);
2832
2833 if (event_index >= 0) {
2834 if (response->request_acknowlege) {
2835 pending_event =
2836 &ctrl_info->pending_events[event_index];
2837 pending_event->event_type =
2838 response->event_type;
2839 pending_event->event_id = response->event_id;
2840 pending_event->additional_event_id =
2841 response->additional_event_id;
2842 if (event_index != PQI_EVENT_HEARTBEAT) {
2843 pending_event->pending = true;
2844 need_delayed_work = true;
2845 }
2846 }
2847 }
2848
2849 oq_ci = (oq_ci + 1) % PQI_NUM_EVENT_QUEUE_ELEMENTS;
2850 }
2851
2852 if (num_events) {
2853 event_queue->oq_ci_copy = oq_ci;
2854 writel(oq_ci, event_queue->oq_ci);
2855
2856 if (need_delayed_work)
2857 schedule_work(&ctrl_info->event_work);
2858 }
2859
2860 return num_events;
2861}
2862
2863static irqreturn_t pqi_irq_handler(int irq, void *data)
2864{
2865 struct pqi_ctrl_info *ctrl_info;
2866 struct pqi_queue_group *queue_group;
2867 unsigned int num_responses_handled;
2868
2869 queue_group = data;
2870 ctrl_info = queue_group->ctrl_info;
2871
2872 if (!ctrl_info || !queue_group->oq_ci)
2873 return IRQ_NONE;
2874
2875 num_responses_handled = pqi_process_io_intr(ctrl_info, queue_group);
2876
2877 if (irq == ctrl_info->event_irq)
2878 num_responses_handled += pqi_process_event_intr(ctrl_info);
2879
2880 if (num_responses_handled)
2881 atomic_inc(&ctrl_info->num_interrupts);
2882
2883 pqi_start_io(ctrl_info, queue_group, RAID_PATH, NULL);
2884 pqi_start_io(ctrl_info, queue_group, AIO_PATH, NULL);
2885
2886 return IRQ_HANDLED;
2887}
2888
2889static int pqi_request_irqs(struct pqi_ctrl_info *ctrl_info)
2890{
52198226 2891 struct pci_dev *pdev = ctrl_info->pci_dev;
2892 int i;
2893 int rc;
2894
52198226 2895 ctrl_info->event_irq = pci_irq_vector(pdev, 0);
2896
2897 for (i = 0; i < ctrl_info->num_msix_vectors_enabled; i++) {
2898 rc = request_irq(pci_irq_vector(pdev, i), pqi_irq_handler, 0,
2899 DRIVER_NAME_SHORT, &ctrl_info->queue_groups[i]);
6c223761 2900 if (rc) {
52198226 2901 dev_err(&pdev->dev,
6c223761 2902 "irq %u init failed with error %d\n",
52198226 2903 pci_irq_vector(pdev, i), rc);
2904 return rc;
2905 }
2906 ctrl_info->num_msix_vectors_initialized++;
2907 }
2908
2909 return 0;
2910}
2911
2912static int pqi_enable_msix_interrupts(struct pqi_ctrl_info *ctrl_info)
2913{
52198226 2914 int ret;
6c223761 2915
2916 ret = pci_alloc_irq_vectors(ctrl_info->pci_dev,
2917 PQI_MIN_MSIX_VECTORS, ctrl_info->num_queue_groups,
2918 PCI_IRQ_MSIX | PCI_IRQ_AFFINITY);
2919 if (ret < 0) {
6c223761 2920 dev_err(&ctrl_info->pci_dev->dev,
2921 "MSI-X init failed with error %d\n", ret);
2922 return ret;
2923 }
2924
52198226 2925 ctrl_info->num_msix_vectors_enabled = ret;
2926 return 0;
2927}
2928
2929static int pqi_alloc_operational_queues(struct pqi_ctrl_info *ctrl_info)
2930{
2931 unsigned int i;
2932 size_t alloc_length;
2933 size_t element_array_length_per_iq;
2934 size_t element_array_length_per_oq;
2935 void *element_array;
2936 void *next_queue_index;
2937 void *aligned_pointer;
2938 unsigned int num_inbound_queues;
2939 unsigned int num_outbound_queues;
2940 unsigned int num_queue_indexes;
2941 struct pqi_queue_group *queue_group;
2942
2943 element_array_length_per_iq =
2944 PQI_OPERATIONAL_IQ_ELEMENT_LENGTH *
2945 ctrl_info->num_elements_per_iq;
2946 element_array_length_per_oq =
2947 PQI_OPERATIONAL_OQ_ELEMENT_LENGTH *
2948 ctrl_info->num_elements_per_oq;
2949 num_inbound_queues = ctrl_info->num_queue_groups * 2;
2950 num_outbound_queues = ctrl_info->num_queue_groups;
2951 num_queue_indexes = (ctrl_info->num_queue_groups * 3) + 1;
2952
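	/*
	 * First pass: walk a NULL-based pointer through every element
	 * array and queue index, honoring each structure's alignment, to
	 * compute the total amount of DMA-coherent memory required.  The
	 * second pass below carves the real allocation up the same way.
	 */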
2953 aligned_pointer = NULL;
2954
2955 for (i = 0; i < num_inbound_queues; i++) {
2956 aligned_pointer = PTR_ALIGN(aligned_pointer,
2957 PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT);
2958 aligned_pointer += element_array_length_per_iq;
2959 }
2960
2961 for (i = 0; i < num_outbound_queues; i++) {
2962 aligned_pointer = PTR_ALIGN(aligned_pointer,
2963 PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT);
2964 aligned_pointer += element_array_length_per_oq;
2965 }
2966
2967 aligned_pointer = PTR_ALIGN(aligned_pointer,
2968 PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT);
2969 aligned_pointer += PQI_NUM_EVENT_QUEUE_ELEMENTS *
2970 PQI_EVENT_OQ_ELEMENT_LENGTH;
2971
2972 for (i = 0; i < num_queue_indexes; i++) {
2973 aligned_pointer = PTR_ALIGN(aligned_pointer,
2974 PQI_OPERATIONAL_INDEX_ALIGNMENT);
2975 aligned_pointer += sizeof(pqi_index_t);
2976 }
2977
2978 alloc_length = (size_t)aligned_pointer +
2979 PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT;
2980
2981 ctrl_info->queue_memory_base =
2982 dma_zalloc_coherent(&ctrl_info->pci_dev->dev,
2983 alloc_length,
2984 &ctrl_info->queue_memory_base_dma_handle, GFP_KERNEL);
2985
2986 if (!ctrl_info->queue_memory_base) {
2987 dev_err(&ctrl_info->pci_dev->dev,
2988 "failed to allocate memory for PQI admin queues\n");
2989 return -ENOMEM;
2990 }
2991
2992 ctrl_info->queue_memory_length = alloc_length;
2993
2994 element_array = PTR_ALIGN(ctrl_info->queue_memory_base,
2995 PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT);
2996
2997 for (i = 0; i < ctrl_info->num_queue_groups; i++) {
2998 queue_group = &ctrl_info->queue_groups[i];
2999 queue_group->iq_element_array[RAID_PATH] = element_array;
3000 queue_group->iq_element_array_bus_addr[RAID_PATH] =
3001 ctrl_info->queue_memory_base_dma_handle +
3002 (element_array - ctrl_info->queue_memory_base);
3003 element_array += element_array_length_per_iq;
3004 element_array = PTR_ALIGN(element_array,
3005 PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT);
3006 queue_group->iq_element_array[AIO_PATH] = element_array;
3007 queue_group->iq_element_array_bus_addr[AIO_PATH] =
3008 ctrl_info->queue_memory_base_dma_handle +
3009 (element_array - ctrl_info->queue_memory_base);
3010 element_array += element_array_length_per_iq;
3011 element_array = PTR_ALIGN(element_array,
3012 PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT);
3013 }
3014
3015 for (i = 0; i < ctrl_info->num_queue_groups; i++) {
3016 queue_group = &ctrl_info->queue_groups[i];
3017 queue_group->oq_element_array = element_array;
3018 queue_group->oq_element_array_bus_addr =
3019 ctrl_info->queue_memory_base_dma_handle +
3020 (element_array - ctrl_info->queue_memory_base);
3021 element_array += element_array_length_per_oq;
3022 element_array = PTR_ALIGN(element_array,
3023 PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT);
3024 }
3025
3026 ctrl_info->event_queue.oq_element_array = element_array;
3027 ctrl_info->event_queue.oq_element_array_bus_addr =
3028 ctrl_info->queue_memory_base_dma_handle +
3029 (element_array - ctrl_info->queue_memory_base);
3030 element_array += PQI_NUM_EVENT_QUEUE_ELEMENTS *
3031 PQI_EVENT_OQ_ELEMENT_LENGTH;
3032
3033 next_queue_index = PTR_ALIGN(element_array,
3034 PQI_OPERATIONAL_INDEX_ALIGNMENT);
3035
3036 for (i = 0; i < ctrl_info->num_queue_groups; i++) {
3037 queue_group = &ctrl_info->queue_groups[i];
3038 queue_group->iq_ci[RAID_PATH] = next_queue_index;
3039 queue_group->iq_ci_bus_addr[RAID_PATH] =
3040 ctrl_info->queue_memory_base_dma_handle +
3041 (next_queue_index - ctrl_info->queue_memory_base);
3042 next_queue_index += sizeof(pqi_index_t);
3043 next_queue_index = PTR_ALIGN(next_queue_index,
3044 PQI_OPERATIONAL_INDEX_ALIGNMENT);
3045 queue_group->iq_ci[AIO_PATH] = next_queue_index;
3046 queue_group->iq_ci_bus_addr[AIO_PATH] =
3047 ctrl_info->queue_memory_base_dma_handle +
3048 (next_queue_index - ctrl_info->queue_memory_base);
3049 next_queue_index += sizeof(pqi_index_t);
3050 next_queue_index = PTR_ALIGN(next_queue_index,
3051 PQI_OPERATIONAL_INDEX_ALIGNMENT);
3052 queue_group->oq_pi = next_queue_index;
3053 queue_group->oq_pi_bus_addr =
3054 ctrl_info->queue_memory_base_dma_handle +
3055 (next_queue_index - ctrl_info->queue_memory_base);
3056 next_queue_index += sizeof(pqi_index_t);
3057 next_queue_index = PTR_ALIGN(next_queue_index,
3058 PQI_OPERATIONAL_INDEX_ALIGNMENT);
3059 }
3060
3061 ctrl_info->event_queue.oq_pi = next_queue_index;
3062 ctrl_info->event_queue.oq_pi_bus_addr =
3063 ctrl_info->queue_memory_base_dma_handle +
3064 (next_queue_index - ctrl_info->queue_memory_base);
3065
3066 return 0;
3067}
3068
3069static void pqi_init_operational_queues(struct pqi_ctrl_info *ctrl_info)
3070{
3071 unsigned int i;
3072 u16 next_iq_id = PQI_MIN_OPERATIONAL_QUEUE_ID;
3073 u16 next_oq_id = PQI_MIN_OPERATIONAL_QUEUE_ID;
3074
3075 /*
3076 * Initialize the backpointers to the controller structure in
3077 * each operational queue group structure.
3078 */
3079 for (i = 0; i < ctrl_info->num_queue_groups; i++)
3080 ctrl_info->queue_groups[i].ctrl_info = ctrl_info;
3081
3082 /*
3083 * Assign IDs to all operational queues. Note that the IDs
3084 * assigned to operational IQs are independent of the IDs
3085 * assigned to operational OQs.
3086 */
3087 ctrl_info->event_queue.oq_id = next_oq_id++;
3088 for (i = 0; i < ctrl_info->num_queue_groups; i++) {
3089 ctrl_info->queue_groups[i].iq_id[RAID_PATH] = next_iq_id++;
3090 ctrl_info->queue_groups[i].iq_id[AIO_PATH] = next_iq_id++;
3091 ctrl_info->queue_groups[i].oq_id = next_oq_id++;
3092 }
3093
3094 /*
3095 * Assign MSI-X table entry indexes to all queues. Note that the
3096 * interrupt for the event queue is shared with the first queue group.
3097 */
3098 ctrl_info->event_queue.int_msg_num = 0;
3099 for (i = 0; i < ctrl_info->num_queue_groups; i++)
3100 ctrl_info->queue_groups[i].int_msg_num = i;
3101
3102 for (i = 0; i < ctrl_info->num_queue_groups; i++) {
3103 spin_lock_init(&ctrl_info->queue_groups[i].submit_lock[0]);
3104 spin_lock_init(&ctrl_info->queue_groups[i].submit_lock[1]);
3105 INIT_LIST_HEAD(&ctrl_info->queue_groups[i].request_list[0]);
3106 INIT_LIST_HEAD(&ctrl_info->queue_groups[i].request_list[1]);
3107 }
3108}
3109
3110static int pqi_alloc_admin_queues(struct pqi_ctrl_info *ctrl_info)
3111{
3112 size_t alloc_length;
3113 struct pqi_admin_queues_aligned *admin_queues_aligned;
3114 struct pqi_admin_queues *admin_queues;
3115
3116 alloc_length = sizeof(struct pqi_admin_queues_aligned) +
3117 PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT;
3118
3119 ctrl_info->admin_queue_memory_base =
3120 dma_zalloc_coherent(&ctrl_info->pci_dev->dev,
3121 alloc_length,
3122 &ctrl_info->admin_queue_memory_base_dma_handle,
3123 GFP_KERNEL);
3124
3125 if (!ctrl_info->admin_queue_memory_base)
3126 return -ENOMEM;
3127
3128 ctrl_info->admin_queue_memory_length = alloc_length;
3129
3130 admin_queues = &ctrl_info->admin_queues;
3131 admin_queues_aligned = PTR_ALIGN(ctrl_info->admin_queue_memory_base,
3132 PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT);
3133 admin_queues->iq_element_array =
3134 &admin_queues_aligned->iq_element_array;
3135 admin_queues->oq_element_array =
3136 &admin_queues_aligned->oq_element_array;
3137 admin_queues->iq_ci = &admin_queues_aligned->iq_ci;
3138 admin_queues->oq_pi = &admin_queues_aligned->oq_pi;
3139
3140 admin_queues->iq_element_array_bus_addr =
3141 ctrl_info->admin_queue_memory_base_dma_handle +
3142 (admin_queues->iq_element_array -
3143 ctrl_info->admin_queue_memory_base);
3144 admin_queues->oq_element_array_bus_addr =
3145 ctrl_info->admin_queue_memory_base_dma_handle +
3146 (admin_queues->oq_element_array -
3147 ctrl_info->admin_queue_memory_base);
3148 admin_queues->iq_ci_bus_addr =
3149 ctrl_info->admin_queue_memory_base_dma_handle +
3150 ((void *)admin_queues->iq_ci -
3151 ctrl_info->admin_queue_memory_base);
3152 admin_queues->oq_pi_bus_addr =
3153 ctrl_info->admin_queue_memory_base_dma_handle +
3154 ((void *)admin_queues->oq_pi -
3155 ctrl_info->admin_queue_memory_base);
3156
3157 return 0;
3158}
3159
3160#define PQI_ADMIN_QUEUE_CREATE_TIMEOUT_JIFFIES HZ
3161#define PQI_ADMIN_QUEUE_CREATE_POLL_INTERVAL_MSECS 1
3162
3163static int pqi_create_admin_queues(struct pqi_ctrl_info *ctrl_info)
3164{
3165 struct pqi_device_registers __iomem *pqi_registers;
3166 struct pqi_admin_queues *admin_queues;
3167 unsigned long timeout;
3168 u8 status;
3169 u32 reg;
3170
3171 pqi_registers = ctrl_info->pqi_registers;
3172 admin_queues = &ctrl_info->admin_queues;
3173
3174 writeq((u64)admin_queues->iq_element_array_bus_addr,
3175 &pqi_registers->admin_iq_element_array_addr);
3176 writeq((u64)admin_queues->oq_element_array_bus_addr,
3177 &pqi_registers->admin_oq_element_array_addr);
3178 writeq((u64)admin_queues->iq_ci_bus_addr,
3179 &pqi_registers->admin_iq_ci_addr);
3180 writeq((u64)admin_queues->oq_pi_bus_addr,
3181 &pqi_registers->admin_oq_pi_addr);
3182
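	/*
	 * The admin queue parameters are packed into a single register
	 * write: the IQ element count in the low bits, the OQ element
	 * count shifted left by 8, and the MSI-X message number for admin
	 * completions shifted left by 16.  Writing the "create admin queue
	 * pair" function code below then starts the operation, which is
	 * polled for completion.
	 */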
3183 reg = PQI_ADMIN_IQ_NUM_ELEMENTS |
3184 (PQI_ADMIN_OQ_NUM_ELEMENTS) << 8 |
3185 (admin_queues->int_msg_num << 16);
3186 writel(reg, &pqi_registers->admin_iq_num_elements);
3187 writel(PQI_CREATE_ADMIN_QUEUE_PAIR,
3188 &pqi_registers->function_and_status_code);
3189
3190 timeout = PQI_ADMIN_QUEUE_CREATE_TIMEOUT_JIFFIES + jiffies;
3191 while (1) {
3192 status = readb(&pqi_registers->function_and_status_code);
3193 if (status == PQI_STATUS_IDLE)
3194 break;
3195 if (time_after(jiffies, timeout))
3196 return -ETIMEDOUT;
3197 msleep(PQI_ADMIN_QUEUE_CREATE_POLL_INTERVAL_MSECS);
3198 }
3199
3200 /*
3201 * The offset registers are not initialized to the correct
3202 * offsets until *after* the create admin queue pair command
3203 * completes successfully.
3204 */
3205 admin_queues->iq_pi = ctrl_info->iomem_base +
3206 PQI_DEVICE_REGISTERS_OFFSET +
3207 readq(&pqi_registers->admin_iq_pi_offset);
3208 admin_queues->oq_ci = ctrl_info->iomem_base +
3209 PQI_DEVICE_REGISTERS_OFFSET +
3210 readq(&pqi_registers->admin_oq_ci_offset);
3211
3212 return 0;
3213}
3214
3215static void pqi_submit_admin_request(struct pqi_ctrl_info *ctrl_info,
3216 struct pqi_general_admin_request *request)
3217{
3218 struct pqi_admin_queues *admin_queues;
3219 void *next_element;
3220 pqi_index_t iq_pi;
3221
3222 admin_queues = &ctrl_info->admin_queues;
3223 iq_pi = admin_queues->iq_pi_copy;
3224
3225 next_element = admin_queues->iq_element_array +
3226 (iq_pi * PQI_ADMIN_IQ_ELEMENT_LENGTH);
3227
3228 memcpy(next_element, request, sizeof(*request));
3229
3230 iq_pi = (iq_pi + 1) % PQI_ADMIN_IQ_NUM_ELEMENTS;
3231 admin_queues->iq_pi_copy = iq_pi;
3232
3233 /*
3234 * This write notifies the controller that an IU is available to be
3235 * processed.
3236 */
3237 writel(iq_pi, admin_queues->iq_pi);
3238}
3239
3240static int pqi_poll_for_admin_response(struct pqi_ctrl_info *ctrl_info,
3241 struct pqi_general_admin_response *response)
3242{
3243 struct pqi_admin_queues *admin_queues;
3244 pqi_index_t oq_pi;
3245 pqi_index_t oq_ci;
3246 unsigned long timeout;
3247
3248 admin_queues = &ctrl_info->admin_queues;
3249 oq_ci = admin_queues->oq_ci_copy;
3250
3251 timeout = (3 * HZ) + jiffies;
3252
3253 while (1) {
3254 oq_pi = *admin_queues->oq_pi;
3255 if (oq_pi != oq_ci)
3256 break;
3257 if (time_after(jiffies, timeout)) {
3258 dev_err(&ctrl_info->pci_dev->dev,
3259 "timed out waiting for admin response\n");
3260 return -ETIMEDOUT;
3261 }
3262 usleep_range(1000, 2000);
3263 }
3264
3265 memcpy(response, admin_queues->oq_element_array +
3266 (oq_ci * PQI_ADMIN_OQ_ELEMENT_LENGTH), sizeof(*response));
3267
3268 oq_ci = (oq_ci + 1) % PQI_ADMIN_OQ_NUM_ELEMENTS;
3269 admin_queues->oq_ci_copy = oq_ci;
3270 writel(oq_ci, admin_queues->oq_ci);
3271
3272 return 0;
3273}
3274
3275static void pqi_start_io(struct pqi_ctrl_info *ctrl_info,
3276 struct pqi_queue_group *queue_group, enum pqi_io_path path,
3277 struct pqi_io_request *io_request)
3278{
3279 struct pqi_io_request *next;
3280 void *next_element;
3281 pqi_index_t iq_pi;
3282 pqi_index_t iq_ci;
3283 size_t iu_length;
3284 unsigned long flags;
3285 unsigned int num_elements_needed;
3286 unsigned int num_elements_to_end_of_queue;
3287 size_t copy_count;
3288 struct pqi_iu_header *request;
3289
3290 spin_lock_irqsave(&queue_group->submit_lock[path], flags);
3291
3292 if (io_request)
3293 list_add_tail(&io_request->request_list_entry,
3294 &queue_group->request_list[path]);
3295
3296 iq_pi = queue_group->iq_pi_copy[path];
3297
3298 list_for_each_entry_safe(io_request, next,
3299 &queue_group->request_list[path], request_list_entry) {
3300
3301 request = io_request->iu;
3302
3303 iu_length = get_unaligned_le16(&request->iu_length) +
3304 PQI_REQUEST_HEADER_LENGTH;
3305 num_elements_needed =
3306 DIV_ROUND_UP(iu_length,
3307 PQI_OPERATIONAL_IQ_ELEMENT_LENGTH);
3308
3309 iq_ci = *queue_group->iq_ci[path];
3310
3311 if (num_elements_needed > pqi_num_elements_free(iq_pi, iq_ci,
3312 ctrl_info->num_elements_per_iq))
3313 break;
3314
3315 put_unaligned_le16(queue_group->oq_id,
3316 &request->response_queue_id);
3317
3318 next_element = queue_group->iq_element_array[path] +
3319 (iq_pi * PQI_OPERATIONAL_IQ_ELEMENT_LENGTH);
3320
3321 num_elements_to_end_of_queue =
3322 ctrl_info->num_elements_per_iq - iq_pi;
3323
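		/*
		 * If the IU fits in the elements remaining before the end of
		 * the circular array it is copied in one piece; otherwise the
		 * copy is split so that the first part fills the tail of the
		 * array and the remainder wraps back to the start (inbound
		 * spanning).
		 */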
3324 if (num_elements_needed <= num_elements_to_end_of_queue) {
3325 memcpy(next_element, request, iu_length);
3326 } else {
3327 copy_count = num_elements_to_end_of_queue *
3328 PQI_OPERATIONAL_IQ_ELEMENT_LENGTH;
3329 memcpy(next_element, request, copy_count);
3330 memcpy(queue_group->iq_element_array[path],
3331 (u8 *)request + copy_count,
3332 iu_length - copy_count);
3333 }
3334
3335 iq_pi = (iq_pi + num_elements_needed) %
3336 ctrl_info->num_elements_per_iq;
3337
3338 list_del(&io_request->request_list_entry);
3339 }
3340
3341 if (iq_pi != queue_group->iq_pi_copy[path]) {
3342 queue_group->iq_pi_copy[path] = iq_pi;
3343 /*
3344 * This write notifies the controller that one or more IUs are
3345 * available to be processed.
3346 */
3347 writel(iq_pi, queue_group->iq_pi[path]);
3348 }
3349
3350 spin_unlock_irqrestore(&queue_group->submit_lock[path], flags);
3351}
3352
3353static void pqi_raid_synchronous_complete(struct pqi_io_request *io_request,
3354 void *context)
3355{
3356 struct completion *waiting = context;
3357
3358 complete(waiting);
3359}
3360
3361static int pqi_submit_raid_request_synchronous_with_io_request(
3362 struct pqi_ctrl_info *ctrl_info, struct pqi_io_request *io_request,
3363 unsigned long timeout_msecs)
3364{
3365 int rc = 0;
3366 DECLARE_COMPLETION_ONSTACK(wait);
3367
3368 io_request->io_complete_callback = pqi_raid_synchronous_complete;
3369 io_request->context = &wait;
3370
3371 pqi_start_io(ctrl_info,
3372 &ctrl_info->queue_groups[PQI_DEFAULT_QUEUE_GROUP], RAID_PATH,
3373 io_request);
3374
3375 if (timeout_msecs == NO_TIMEOUT) {
3376 wait_for_completion_io(&wait);
3377 } else {
3378 if (!wait_for_completion_io_timeout(&wait,
3379 msecs_to_jiffies(timeout_msecs))) {
3380 dev_warn(&ctrl_info->pci_dev->dev,
3381 "command timed out\n");
3382 rc = -ETIMEDOUT;
3383 }
3384 }
3385
3386 return rc;
3387}
3388
3389static int pqi_submit_raid_request_synchronous(struct pqi_ctrl_info *ctrl_info,
3390 struct pqi_iu_header *request, unsigned int flags,
3391 struct pqi_raid_error_info *error_info, unsigned long timeout_msecs)
3392{
3393 int rc;
3394 struct pqi_io_request *io_request;
3395 unsigned long start_jiffies;
3396 unsigned long msecs_blocked;
3397 size_t iu_length;
3398
3399 /*
3400 * Note that specifying PQI_SYNC_FLAGS_INTERRUPTABLE and a timeout value
3401 * are mutually exclusive.
3402 */
3403
3404 if (flags & PQI_SYNC_FLAGS_INTERRUPTABLE) {
3405 if (down_interruptible(&ctrl_info->sync_request_sem))
3406 return -ERESTARTSYS;
3407 } else {
3408 if (timeout_msecs == NO_TIMEOUT) {
3409 down(&ctrl_info->sync_request_sem);
3410 } else {
3411 start_jiffies = jiffies;
3412 if (down_timeout(&ctrl_info->sync_request_sem,
3413 msecs_to_jiffies(timeout_msecs)))
3414 return -ETIMEDOUT;
3415 msecs_blocked =
3416 jiffies_to_msecs(jiffies - start_jiffies);
3417 if (msecs_blocked >= timeout_msecs)
3418 return -ETIMEDOUT;
3419 timeout_msecs -= msecs_blocked;
3420 }
3421 }
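	/*
	 * In the timed path above, any time spent blocked on
	 * sync_request_sem is subtracted from timeout_msecs so the total
	 * wall-clock timeout observed by the caller stays accurate.
	 */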
3422
3423 io_request = pqi_alloc_io_request(ctrl_info);
3424
3425 put_unaligned_le16(io_request->index,
3426 &(((struct pqi_raid_path_request *)request)->request_id));
3427
3428 if (request->iu_type == PQI_REQUEST_IU_RAID_PATH_IO)
3429 ((struct pqi_raid_path_request *)request)->error_index =
3430 ((struct pqi_raid_path_request *)request)->request_id;
3431
3432 iu_length = get_unaligned_le16(&request->iu_length) +
3433 PQI_REQUEST_HEADER_LENGTH;
3434 memcpy(io_request->iu, request, iu_length);
3435
3436 rc = pqi_submit_raid_request_synchronous_with_io_request(ctrl_info,
3437 io_request, timeout_msecs);
3438
3439 if (error_info) {
3440 if (io_request->error_info)
3441 memcpy(error_info, io_request->error_info,
3442 sizeof(*error_info));
3443 else
3444 memset(error_info, 0, sizeof(*error_info));
3445 } else if (rc == 0 && io_request->error_info) {
3446 u8 scsi_status;
3447 struct pqi_raid_error_info *raid_error_info;
3448
3449 raid_error_info = io_request->error_info;
3450 scsi_status = raid_error_info->status;
3451
3452 if (scsi_status == SAM_STAT_CHECK_CONDITION &&
3453 raid_error_info->data_out_result ==
3454 PQI_DATA_IN_OUT_UNDERFLOW)
3455 scsi_status = SAM_STAT_GOOD;
3456
3457 if (scsi_status != SAM_STAT_GOOD)
3458 rc = -EIO;
3459 }
3460
3461 pqi_free_io_request(io_request);
3462
3463 up(&ctrl_info->sync_request_sem);
3464
3465 return rc;
3466}
3467
3468static int pqi_validate_admin_response(
3469 struct pqi_general_admin_response *response, u8 expected_function_code)
3470{
3471 if (response->header.iu_type != PQI_RESPONSE_IU_GENERAL_ADMIN)
3472 return -EINVAL;
3473
3474 if (get_unaligned_le16(&response->header.iu_length) !=
3475 PQI_GENERAL_ADMIN_IU_LENGTH)
3476 return -EINVAL;
3477
3478 if (response->function_code != expected_function_code)
3479 return -EINVAL;
3480
3481 if (response->status != PQI_GENERAL_ADMIN_STATUS_SUCCESS)
3482 return -EINVAL;
3483
3484 return 0;
3485}
3486
3487static int pqi_submit_admin_request_synchronous(
3488 struct pqi_ctrl_info *ctrl_info,
3489 struct pqi_general_admin_request *request,
3490 struct pqi_general_admin_response *response)
3491{
3492 int rc;
3493
3494 pqi_submit_admin_request(ctrl_info, request);
3495
3496 rc = pqi_poll_for_admin_response(ctrl_info, response);
3497
3498 if (rc == 0)
3499 rc = pqi_validate_admin_response(response,
3500 request->function_code);
3501
3502 return rc;
3503}
3504
3505static int pqi_report_device_capability(struct pqi_ctrl_info *ctrl_info)
3506{
3507 int rc;
3508 struct pqi_general_admin_request request;
3509 struct pqi_general_admin_response response;
3510 struct pqi_device_capability *capability;
3511 struct pqi_iu_layer_descriptor *sop_iu_layer_descriptor;
3512
3513 capability = kmalloc(sizeof(*capability), GFP_KERNEL);
3514 if (!capability)
3515 return -ENOMEM;
3516
3517 memset(&request, 0, sizeof(request));
3518
3519 request.header.iu_type = PQI_REQUEST_IU_GENERAL_ADMIN;
3520 put_unaligned_le16(PQI_GENERAL_ADMIN_IU_LENGTH,
3521 &request.header.iu_length);
3522 request.function_code =
3523 PQI_GENERAL_ADMIN_FUNCTION_REPORT_DEVICE_CAPABILITY;
3524 put_unaligned_le32(sizeof(*capability),
3525 &request.data.report_device_capability.buffer_length);
3526
3527 rc = pqi_map_single(ctrl_info->pci_dev,
3528 &request.data.report_device_capability.sg_descriptor,
3529 capability, sizeof(*capability),
3530 PCI_DMA_FROMDEVICE);
3531 if (rc)
3532 goto out;
3533
3534 rc = pqi_submit_admin_request_synchronous(ctrl_info, &request,
3535 &response);
3536
3537 pqi_pci_unmap(ctrl_info->pci_dev,
3538 &request.data.report_device_capability.sg_descriptor, 1,
3539 PCI_DMA_FROMDEVICE);
3540
3541 if (rc)
3542 goto out;
3543
3544 if (response.status != PQI_GENERAL_ADMIN_STATUS_SUCCESS) {
3545 rc = -EIO;
3546 goto out;
3547 }
3548
3549 ctrl_info->max_inbound_queues =
3550 get_unaligned_le16(&capability->max_inbound_queues);
3551 ctrl_info->max_elements_per_iq =
3552 get_unaligned_le16(&capability->max_elements_per_iq);
3553 ctrl_info->max_iq_element_length =
3554 get_unaligned_le16(&capability->max_iq_element_length)
3555 * 16;
3556 ctrl_info->max_outbound_queues =
3557 get_unaligned_le16(&capability->max_outbound_queues);
3558 ctrl_info->max_elements_per_oq =
3559 get_unaligned_le16(&capability->max_elements_per_oq);
3560 ctrl_info->max_oq_element_length =
3561 get_unaligned_le16(&capability->max_oq_element_length)
3562 * 16;
3563
3564 sop_iu_layer_descriptor =
3565 &capability->iu_layer_descriptors[PQI_PROTOCOL_SOP];
3566
3567 ctrl_info->max_inbound_iu_length_per_firmware =
3568 get_unaligned_le16(
3569 &sop_iu_layer_descriptor->max_inbound_iu_length);
3570 ctrl_info->inbound_spanning_supported =
3571 sop_iu_layer_descriptor->inbound_spanning_supported;
3572 ctrl_info->outbound_spanning_supported =
3573 sop_iu_layer_descriptor->outbound_spanning_supported;
3574
3575out:
3576 kfree(capability);
3577
3578 return rc;
3579}
3580
3581static int pqi_validate_device_capability(struct pqi_ctrl_info *ctrl_info)
3582{
3583 if (ctrl_info->max_iq_element_length <
3584 PQI_OPERATIONAL_IQ_ELEMENT_LENGTH) {
3585 dev_err(&ctrl_info->pci_dev->dev,
3586 "max. inbound queue element length of %d is less than the required length of %d\n",
3587 ctrl_info->max_iq_element_length,
3588 PQI_OPERATIONAL_IQ_ELEMENT_LENGTH);
3589 return -EINVAL;
3590 }
3591
3592 if (ctrl_info->max_oq_element_length <
3593 PQI_OPERATIONAL_OQ_ELEMENT_LENGTH) {
3594 dev_err(&ctrl_info->pci_dev->dev,
3595 "max. outbound queue element length of %d is less than the required length of %d\n",
3596 ctrl_info->max_oq_element_length,
3597 PQI_OPERATIONAL_OQ_ELEMENT_LENGTH);
3598 return -EINVAL;
3599 }
3600
3601 if (ctrl_info->max_inbound_iu_length_per_firmware <
3602 PQI_OPERATIONAL_IQ_ELEMENT_LENGTH) {
3603 dev_err(&ctrl_info->pci_dev->dev,
3604 "max. inbound IU length of %u is less than the min. required length of %d\n",
3605 ctrl_info->max_inbound_iu_length_per_firmware,
3606 PQI_OPERATIONAL_IQ_ELEMENT_LENGTH);
3607 return -EINVAL;
3608 }
3609
3610 if (!ctrl_info->inbound_spanning_supported) {
3611 dev_err(&ctrl_info->pci_dev->dev,
3612 "the controller does not support inbound spanning\n");
3613 return -EINVAL;
3614 }
3615
3616 if (ctrl_info->outbound_spanning_supported) {
3617 dev_err(&ctrl_info->pci_dev->dev,
3618 "the controller supports outbound spanning but this driver does not\n");
3619 return -EINVAL;
3620 }
3621
3622 return 0;
3623}
3624
3625static int pqi_delete_operational_queue(struct pqi_ctrl_info *ctrl_info,
3626 bool inbound_queue, u16 queue_id)
3627{
3628 struct pqi_general_admin_request request;
3629 struct pqi_general_admin_response response;
3630
3631 memset(&request, 0, sizeof(request));
3632 request.header.iu_type = PQI_REQUEST_IU_GENERAL_ADMIN;
3633 put_unaligned_le16(PQI_GENERAL_ADMIN_IU_LENGTH,
3634 &request.header.iu_length);
3635 if (inbound_queue)
3636 request.function_code =
3637 PQI_GENERAL_ADMIN_FUNCTION_DELETE_IQ;
3638 else
3639 request.function_code =
3640 PQI_GENERAL_ADMIN_FUNCTION_DELETE_OQ;
3641 put_unaligned_le16(queue_id,
3642 &request.data.delete_operational_queue.queue_id);
3643
3644 return pqi_submit_admin_request_synchronous(ctrl_info, &request,
3645 &response);
3646}
3647
3648static int pqi_create_event_queue(struct pqi_ctrl_info *ctrl_info)
3649{
3650 int rc;
3651 struct pqi_event_queue *event_queue;
3652 struct pqi_general_admin_request request;
3653 struct pqi_general_admin_response response;
3654
3655 event_queue = &ctrl_info->event_queue;
3656
3657 /*
 3658 * Create OQ (Outbound Queue - device to host queue) dedicated
 3659 * to event notifications.
3660 */
3661 memset(&request, 0, sizeof(request));
3662 request.header.iu_type = PQI_REQUEST_IU_GENERAL_ADMIN;
3663 put_unaligned_le16(PQI_GENERAL_ADMIN_IU_LENGTH,
3664 &request.header.iu_length);
3665 request.function_code = PQI_GENERAL_ADMIN_FUNCTION_CREATE_OQ;
3666 put_unaligned_le16(event_queue->oq_id,
3667 &request.data.create_operational_oq.queue_id);
3668 put_unaligned_le64((u64)event_queue->oq_element_array_bus_addr,
3669 &request.data.create_operational_oq.element_array_addr);
3670 put_unaligned_le64((u64)event_queue->oq_pi_bus_addr,
3671 &request.data.create_operational_oq.pi_addr);
3672 put_unaligned_le16(PQI_NUM_EVENT_QUEUE_ELEMENTS,
3673 &request.data.create_operational_oq.num_elements);
3674 put_unaligned_le16(PQI_EVENT_OQ_ELEMENT_LENGTH / 16,
3675 &request.data.create_operational_oq.element_length);
3676 request.data.create_operational_oq.queue_protocol = PQI_PROTOCOL_SOP;
3677 put_unaligned_le16(event_queue->int_msg_num,
3678 &request.data.create_operational_oq.int_msg_num);
3679
3680 rc = pqi_submit_admin_request_synchronous(ctrl_info, &request,
3681 &response);
3682 if (rc)
3683 return rc;
3684
3685 event_queue->oq_ci = ctrl_info->iomem_base +
3686 PQI_DEVICE_REGISTERS_OFFSET +
3687 get_unaligned_le64(
3688 &response.data.create_operational_oq.oq_ci_offset);
3689
3690 return 0;
3691}
3692
3693static int pqi_create_queue_group(struct pqi_ctrl_info *ctrl_info)
3694{
3695 unsigned int i;
3696 int rc;
3697 struct pqi_queue_group *queue_group;
3698 struct pqi_general_admin_request request;
3699 struct pqi_general_admin_response response;
3700
3701 i = ctrl_info->num_active_queue_groups;
3702 queue_group = &ctrl_info->queue_groups[i];
3703
3704 /*
3705 * Create IQ (Inbound Queue - host to device queue) for
3706 * RAID path.
3707 */
3708 memset(&request, 0, sizeof(request));
3709 request.header.iu_type = PQI_REQUEST_IU_GENERAL_ADMIN;
3710 put_unaligned_le16(PQI_GENERAL_ADMIN_IU_LENGTH,
3711 &request.header.iu_length);
3712 request.function_code = PQI_GENERAL_ADMIN_FUNCTION_CREATE_IQ;
3713 put_unaligned_le16(queue_group->iq_id[RAID_PATH],
3714 &request.data.create_operational_iq.queue_id);
3715 put_unaligned_le64(
3716 (u64)queue_group->iq_element_array_bus_addr[RAID_PATH],
3717 &request.data.create_operational_iq.element_array_addr);
3718 put_unaligned_le64((u64)queue_group->iq_ci_bus_addr[RAID_PATH],
3719 &request.data.create_operational_iq.ci_addr);
3720 put_unaligned_le16(ctrl_info->num_elements_per_iq,
3721 &request.data.create_operational_iq.num_elements);
3722 put_unaligned_le16(PQI_OPERATIONAL_IQ_ELEMENT_LENGTH / 16,
3723 &request.data.create_operational_iq.element_length);
3724 request.data.create_operational_iq.queue_protocol = PQI_PROTOCOL_SOP;
3725
3726 rc = pqi_submit_admin_request_synchronous(ctrl_info, &request,
3727 &response);
3728 if (rc) {
3729 dev_err(&ctrl_info->pci_dev->dev,
3730 "error creating inbound RAID queue\n");
3731 return rc;
3732 }
3733
3734 queue_group->iq_pi[RAID_PATH] = ctrl_info->iomem_base +
3735 PQI_DEVICE_REGISTERS_OFFSET +
3736 get_unaligned_le64(
3737 &response.data.create_operational_iq.iq_pi_offset);
3738
3739 /*
3740 * Create IQ (Inbound Queue - host to device queue) for
3741 * Advanced I/O (AIO) path.
3742 */
3743 memset(&request, 0, sizeof(request));
3744 request.header.iu_type = PQI_REQUEST_IU_GENERAL_ADMIN;
3745 put_unaligned_le16(PQI_GENERAL_ADMIN_IU_LENGTH,
3746 &request.header.iu_length);
3747 request.function_code = PQI_GENERAL_ADMIN_FUNCTION_CREATE_IQ;
3748 put_unaligned_le16(queue_group->iq_id[AIO_PATH],
3749 &request.data.create_operational_iq.queue_id);
3750 put_unaligned_le64((u64)queue_group->
3751 iq_element_array_bus_addr[AIO_PATH],
3752 &request.data.create_operational_iq.element_array_addr);
3753 put_unaligned_le64((u64)queue_group->iq_ci_bus_addr[AIO_PATH],
3754 &request.data.create_operational_iq.ci_addr);
3755 put_unaligned_le16(ctrl_info->num_elements_per_iq,
3756 &request.data.create_operational_iq.num_elements);
3757 put_unaligned_le16(PQI_OPERATIONAL_IQ_ELEMENT_LENGTH / 16,
3758 &request.data.create_operational_iq.element_length);
3759 request.data.create_operational_iq.queue_protocol = PQI_PROTOCOL_SOP;
3760
3761 rc = pqi_submit_admin_request_synchronous(ctrl_info, &request,
3762 &response);
3763 if (rc) {
3764 dev_err(&ctrl_info->pci_dev->dev,
3765 "error creating inbound AIO queue\n");
3766 goto delete_inbound_queue_raid;
3767 }
3768
3769 queue_group->iq_pi[AIO_PATH] = ctrl_info->iomem_base +
3770 PQI_DEVICE_REGISTERS_OFFSET +
3771 get_unaligned_le64(
3772 &response.data.create_operational_iq.iq_pi_offset);
3773
3774 /*
3775 * Designate the 2nd IQ as the AIO path. By default, all IQs are
3776 * assumed to be for RAID path I/O unless we change the queue's
3777 * property.
3778 */
3779 memset(&request, 0, sizeof(request));
3780 request.header.iu_type = PQI_REQUEST_IU_GENERAL_ADMIN;
3781 put_unaligned_le16(PQI_GENERAL_ADMIN_IU_LENGTH,
3782 &request.header.iu_length);
3783 request.function_code = PQI_GENERAL_ADMIN_FUNCTION_CHANGE_IQ_PROPERTY;
3784 put_unaligned_le16(queue_group->iq_id[AIO_PATH],
3785 &request.data.change_operational_iq_properties.queue_id);
3786 put_unaligned_le32(PQI_IQ_PROPERTY_IS_AIO_QUEUE,
3787 &request.data.change_operational_iq_properties.vendor_specific);
3788
3789 rc = pqi_submit_admin_request_synchronous(ctrl_info, &request,
3790 &response);
3791 if (rc) {
3792 dev_err(&ctrl_info->pci_dev->dev,
3793 "error changing queue property\n");
3794 goto delete_inbound_queue_aio;
3795 }
3796
3797 /*
3798 * Create OQ (Outbound Queue - device to host queue).
3799 */
3800 memset(&request, 0, sizeof(request));
3801 request.header.iu_type = PQI_REQUEST_IU_GENERAL_ADMIN;
3802 put_unaligned_le16(PQI_GENERAL_ADMIN_IU_LENGTH,
3803 &request.header.iu_length);
3804 request.function_code = PQI_GENERAL_ADMIN_FUNCTION_CREATE_OQ;
3805 put_unaligned_le16(queue_group->oq_id,
3806 &request.data.create_operational_oq.queue_id);
3807 put_unaligned_le64((u64)queue_group->oq_element_array_bus_addr,
3808 &request.data.create_operational_oq.element_array_addr);
3809 put_unaligned_le64((u64)queue_group->oq_pi_bus_addr,
3810 &request.data.create_operational_oq.pi_addr);
3811 put_unaligned_le16(ctrl_info->num_elements_per_oq,
3812 &request.data.create_operational_oq.num_elements);
3813 put_unaligned_le16(PQI_OPERATIONAL_OQ_ELEMENT_LENGTH / 16,
3814 &request.data.create_operational_oq.element_length);
3815 request.data.create_operational_oq.queue_protocol = PQI_PROTOCOL_SOP;
3816 put_unaligned_le16(queue_group->int_msg_num,
3817 &request.data.create_operational_oq.int_msg_num);
3818
3819 rc = pqi_submit_admin_request_synchronous(ctrl_info, &request,
3820 &response);
3821 if (rc) {
3822 dev_err(&ctrl_info->pci_dev->dev,
3823 "error creating outbound queue\n");
3824 goto delete_inbound_queue_aio;
3825 }
3826
3827 queue_group->oq_ci = ctrl_info->iomem_base +
3828 PQI_DEVICE_REGISTERS_OFFSET +
3829 get_unaligned_le64(
3830 &response.data.create_operational_oq.oq_ci_offset);
3831
3832 ctrl_info->num_active_queue_groups++;
3833
3834 return 0;
3835
3836delete_inbound_queue_aio:
3837 pqi_delete_operational_queue(ctrl_info, true,
3838 queue_group->iq_id[AIO_PATH]);
3839
3840delete_inbound_queue_raid:
3841 pqi_delete_operational_queue(ctrl_info, true,
3842 queue_group->iq_id[RAID_PATH]);
3843
3844 return rc;
3845}
3846
3847static int pqi_create_queues(struct pqi_ctrl_info *ctrl_info)
3848{
3849 int rc;
3850 unsigned int i;
3851
3852 rc = pqi_create_event_queue(ctrl_info);
3853 if (rc) {
3854 dev_err(&ctrl_info->pci_dev->dev,
3855 "error creating event queue\n");
3856 return rc;
3857 }
3858
3859 for (i = 0; i < ctrl_info->num_queue_groups; i++) {
3860 rc = pqi_create_queue_group(ctrl_info);
3861 if (rc) {
3862 dev_err(&ctrl_info->pci_dev->dev,
3863 "error creating queue group number %u/%u\n",
3864 i, ctrl_info->num_queue_groups);
3865 return rc;
3866 }
3867 }
3868
3869 return 0;
3870}
3871
3872#define PQI_REPORT_EVENT_CONFIG_BUFFER_LENGTH \
3873 (offsetof(struct pqi_event_config, descriptors) + \
3874 (PQI_MAX_EVENT_DESCRIPTORS * sizeof(struct pqi_event_descriptor)))
3875
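/*
 * Read back the controller's current event configuration, point every
 * event descriptor at the dedicated event queue created above, and then
 * write the modified configuration back to the controller.
 */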
3876static int pqi_configure_events(struct pqi_ctrl_info *ctrl_info)
3877{
3878 int rc;
3879 unsigned int i;
3880 struct pqi_event_config *event_config;
3881 struct pqi_general_management_request request;
3882
3883 event_config = kmalloc(PQI_REPORT_EVENT_CONFIG_BUFFER_LENGTH,
3884 GFP_KERNEL);
3885 if (!event_config)
3886 return -ENOMEM;
3887
3888 memset(&request, 0, sizeof(request));
3889
3890 request.header.iu_type = PQI_REQUEST_IU_REPORT_VENDOR_EVENT_CONFIG;
3891 put_unaligned_le16(offsetof(struct pqi_general_management_request,
3892 data.report_event_configuration.sg_descriptors[1]) -
3893 PQI_REQUEST_HEADER_LENGTH, &request.header.iu_length);
3894 put_unaligned_le32(PQI_REPORT_EVENT_CONFIG_BUFFER_LENGTH,
3895 &request.data.report_event_configuration.buffer_length);
3896
3897 rc = pqi_map_single(ctrl_info->pci_dev,
3898 request.data.report_event_configuration.sg_descriptors,
3899 event_config, PQI_REPORT_EVENT_CONFIG_BUFFER_LENGTH,
3900 PCI_DMA_FROMDEVICE);
3901 if (rc)
3902 goto out;
3903
3904 rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header,
3905 0, NULL, NO_TIMEOUT);
3906
3907 pqi_pci_unmap(ctrl_info->pci_dev,
3908 request.data.report_event_configuration.sg_descriptors, 1,
3909 PCI_DMA_FROMDEVICE);
3910
3911 if (rc)
3912 goto out;
3913
3914 for (i = 0; i < event_config->num_event_descriptors; i++)
3915 put_unaligned_le16(ctrl_info->event_queue.oq_id,
3916 &event_config->descriptors[i].oq_id);
3917
3918 memset(&request, 0, sizeof(request));
3919
3920 request.header.iu_type = PQI_REQUEST_IU_SET_VENDOR_EVENT_CONFIG;
3921 put_unaligned_le16(offsetof(struct pqi_general_management_request,
3922 data.report_event_configuration.sg_descriptors[1]) -
3923 PQI_REQUEST_HEADER_LENGTH, &request.header.iu_length);
3924 put_unaligned_le32(PQI_REPORT_EVENT_CONFIG_BUFFER_LENGTH,
3925 &request.data.report_event_configuration.buffer_length);
3926
3927 rc = pqi_map_single(ctrl_info->pci_dev,
3928 request.data.report_event_configuration.sg_descriptors,
3929 event_config, PQI_REPORT_EVENT_CONFIG_BUFFER_LENGTH,
3930 PCI_DMA_TODEVICE);
3931 if (rc)
3932 goto out;
3933
3934 rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header, 0,
3935 NULL, NO_TIMEOUT);
3936
3937 pqi_pci_unmap(ctrl_info->pci_dev,
3938 request.data.report_event_configuration.sg_descriptors, 1,
3939 PCI_DMA_TODEVICE);
3940
3941out:
3942 kfree(event_config);
3943
3944 return rc;
3945}
3946
3947static void pqi_free_all_io_requests(struct pqi_ctrl_info *ctrl_info)
3948{
3949 unsigned int i;
3950 struct device *dev;
3951 size_t sg_chain_buffer_length;
3952 struct pqi_io_request *io_request;
3953
3954 if (!ctrl_info->io_request_pool)
3955 return;
3956
3957 dev = &ctrl_info->pci_dev->dev;
3958 sg_chain_buffer_length = ctrl_info->sg_chain_buffer_length;
3959 io_request = ctrl_info->io_request_pool;
3960
3961 for (i = 0; i < ctrl_info->max_io_slots; i++) {
3962 kfree(io_request->iu);
3963 if (!io_request->sg_chain_buffer)
3964 break;
3965 dma_free_coherent(dev, sg_chain_buffer_length,
3966 io_request->sg_chain_buffer,
3967 io_request->sg_chain_buffer_dma_handle);
3968 io_request++;
3969 }
3970
3971 kfree(ctrl_info->io_request_pool);
3972 ctrl_info->io_request_pool = NULL;
3973}
3974
3975static inline int pqi_alloc_error_buffer(struct pqi_ctrl_info *ctrl_info)
3976{
3977 ctrl_info->error_buffer = dma_zalloc_coherent(&ctrl_info->pci_dev->dev,
3978 ctrl_info->error_buffer_length,
3979 &ctrl_info->error_buffer_dma_handle, GFP_KERNEL);
3980
3981 if (!ctrl_info->error_buffer)
3982 return -ENOMEM;
3983
3984 return 0;
3985}
3986
3987static int pqi_alloc_io_resources(struct pqi_ctrl_info *ctrl_info)
3988{
3989 unsigned int i;
3990 void *sg_chain_buffer;
3991 size_t sg_chain_buffer_length;
3992 dma_addr_t sg_chain_buffer_dma_handle;
3993 struct device *dev;
3994 struct pqi_io_request *io_request;
3995
3996 ctrl_info->io_request_pool = kzalloc(ctrl_info->max_io_slots *
3997 sizeof(ctrl_info->io_request_pool[0]), GFP_KERNEL);
3998
3999 if (!ctrl_info->io_request_pool) {
4000 dev_err(&ctrl_info->pci_dev->dev,
4001 "failed to allocate I/O request pool\n");
4002 goto error;
4003 }
4004
4005 dev = &ctrl_info->pci_dev->dev;
4006 sg_chain_buffer_length = ctrl_info->sg_chain_buffer_length;
4007 io_request = ctrl_info->io_request_pool;
4008
4009 for (i = 0; i < ctrl_info->max_io_slots; i++) {
4010 io_request->iu =
4011 kmalloc(ctrl_info->max_inbound_iu_length, GFP_KERNEL);
4012
4013 if (!io_request->iu) {
4014 dev_err(&ctrl_info->pci_dev->dev,
4015 "failed to allocate IU buffers\n");
4016 goto error;
4017 }
4018
4019 sg_chain_buffer = dma_alloc_coherent(dev,
4020 sg_chain_buffer_length, &sg_chain_buffer_dma_handle,
4021 GFP_KERNEL);
4022
4023 if (!sg_chain_buffer) {
4024 dev_err(&ctrl_info->pci_dev->dev,
4025 "failed to allocate PQI scatter-gather chain buffers\n");
4026 goto error;
4027 }
4028
4029 io_request->index = i;
4030 io_request->sg_chain_buffer = sg_chain_buffer;
4031 io_request->sg_chain_buffer_dma_handle =
4032 sg_chain_buffer_dma_handle;
4033 io_request++;
4034 }
4035
4036 return 0;
4037
4038error:
4039 pqi_free_all_io_requests(ctrl_info);
4040
4041 return -ENOMEM;
4042}
4043
4044/*
4045 * Calculate required resources that are sized based on max. outstanding
4046 * requests and max. transfer size.
4047 */
4048
4049static void pqi_calculate_io_resources(struct pqi_ctrl_info *ctrl_info)
4050{
4051 u32 max_transfer_size;
4052 u32 max_sg_entries;
4053
4054 ctrl_info->scsi_ml_can_queue =
4055 ctrl_info->max_outstanding_requests - PQI_RESERVED_IO_SLOTS;
4056 ctrl_info->max_io_slots = ctrl_info->max_outstanding_requests;
4057
4058 ctrl_info->error_buffer_length =
4059 ctrl_info->max_io_slots * PQI_ERROR_BUFFER_ELEMENT_LENGTH;
4060
4061 max_transfer_size =
4062 min(ctrl_info->max_transfer_size, PQI_MAX_TRANSFER_SIZE);
4063
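	/*
	 * Illustrative sizing (example values, not controller limits): a
	 * 1 MiB max transfer with 4 KiB pages needs 256 SG entries, plus
	 * one more below in case the buffer is not page-aligned.  If the
	 * controller advertises fewer SG entries, the max transfer size
	 * is recalculated from that smaller count instead.
	 */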
4064 max_sg_entries = max_transfer_size / PAGE_SIZE;
4065
4066 /* +1 to cover when the buffer is not page-aligned. */
4067 max_sg_entries++;
4068
4069 max_sg_entries = min(ctrl_info->max_sg_entries, max_sg_entries);
4070
4071 max_transfer_size = (max_sg_entries - 1) * PAGE_SIZE;
4072
4073 ctrl_info->sg_chain_buffer_length =
4074 max_sg_entries * sizeof(struct pqi_sg_descriptor);
4075 ctrl_info->sg_tablesize = max_sg_entries;
4076 ctrl_info->max_sectors = max_transfer_size / 512;
4077}
4078
4079static void pqi_calculate_queue_resources(struct pqi_ctrl_info *ctrl_info)
4080{
4081 int num_cpus;
4082 int max_queue_groups;
4083 int num_queue_groups;
4084 u16 num_elements_per_iq;
4085 u16 num_elements_per_oq;
4086
4087 max_queue_groups = min(ctrl_info->max_inbound_queues / 2,
4088 ctrl_info->max_outbound_queues - 1);
4089 max_queue_groups = min(max_queue_groups, PQI_MAX_QUEUE_GROUPS);
4090
4091 num_cpus = num_online_cpus();
4092 num_queue_groups = min(num_cpus, ctrl_info->max_msix_vectors);
4093 num_queue_groups = min(num_queue_groups, max_queue_groups);
4094
4095 ctrl_info->num_queue_groups = num_queue_groups;
4096
4097 /*
4098 * Make sure that the max. inbound IU length is an even multiple
4099 * of our inbound element length.
4100 */
4101 ctrl_info->max_inbound_iu_length =
4102 (ctrl_info->max_inbound_iu_length_per_firmware /
4103 PQI_OPERATIONAL_IQ_ELEMENT_LENGTH) *
4104 PQI_OPERATIONAL_IQ_ELEMENT_LENGTH;
4105
4106 num_elements_per_iq =
4107 (ctrl_info->max_inbound_iu_length /
4108 PQI_OPERATIONAL_IQ_ELEMENT_LENGTH);
4109
4110 /* Add one because one element in each queue is unusable. */
4111 num_elements_per_iq++;
4112
4113 num_elements_per_iq = min(num_elements_per_iq,
4114 ctrl_info->max_elements_per_iq);
4115
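	/*
	 * Each queue group pairs two inbound queues (RAID and AIO paths)
	 * with a single outbound queue, so size the OQ to hold a response
	 * for every usable element of both IQs plus its own one unusable
	 * element.
	 */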
4116 num_elements_per_oq = ((num_elements_per_iq - 1) * 2) + 1;
4117 num_elements_per_oq = min(num_elements_per_oq,
4118 ctrl_info->max_elements_per_oq);
4119
4120 ctrl_info->num_elements_per_iq = num_elements_per_iq;
4121 ctrl_info->num_elements_per_oq = num_elements_per_oq;
4122
4123 ctrl_info->max_sg_per_iu =
4124 ((ctrl_info->max_inbound_iu_length -
4125 PQI_OPERATIONAL_IQ_ELEMENT_LENGTH) /
4126 sizeof(struct pqi_sg_descriptor)) +
4127 PQI_MAX_EMBEDDED_SG_DESCRIPTORS;
4128}
4129
4130static inline void pqi_set_sg_descriptor(
4131 struct pqi_sg_descriptor *sg_descriptor, struct scatterlist *sg)
4132{
4133 u64 address = (u64)sg_dma_address(sg);
4134 unsigned int length = sg_dma_len(sg);
4135
4136 put_unaligned_le64(address, &sg_descriptor->address);
4137 put_unaligned_le32(length, &sg_descriptor->length);
4138 put_unaligned_le32(0, &sg_descriptor->flags);
4139}
4140
4141static int pqi_build_raid_sg_list(struct pqi_ctrl_info *ctrl_info,
4142 struct pqi_raid_path_request *request, struct scsi_cmnd *scmd,
4143 struct pqi_io_request *io_request)
4144{
4145 int i;
4146 u16 iu_length;
4147 int sg_count;
4148 bool chained;
4149 unsigned int num_sg_in_iu;
4150 unsigned int max_sg_per_iu;
4151 struct scatterlist *sg;
4152 struct pqi_sg_descriptor *sg_descriptor;
4153
4154 sg_count = scsi_dma_map(scmd);
4155 if (sg_count < 0)
4156 return sg_count;
4157
4158 iu_length = offsetof(struct pqi_raid_path_request, sg_descriptors) -
4159 PQI_REQUEST_HEADER_LENGTH;
4160
4161 if (sg_count == 0)
4162 goto out;
4163
4164 sg = scsi_sglist(scmd);
4165 sg_descriptor = request->sg_descriptors;
4166 max_sg_per_iu = ctrl_info->max_sg_per_iu - 1;
4167 chained = false;
4168 num_sg_in_iu = 0;
4169 i = 0;
4170
4171 while (1) {
4172 pqi_set_sg_descriptor(sg_descriptor, sg);
4173 if (!chained)
4174 num_sg_in_iu++;
4175 i++;
4176 if (i == sg_count)
4177 break;
4178 sg_descriptor++;
4179 if (i == max_sg_per_iu) {
4180 put_unaligned_le64(
4181 (u64)io_request->sg_chain_buffer_dma_handle,
4182 &sg_descriptor->address);
4183 put_unaligned_le32((sg_count - num_sg_in_iu)
4184 * sizeof(*sg_descriptor),
4185 &sg_descriptor->length);
4186 put_unaligned_le32(CISS_SG_CHAIN,
4187 &sg_descriptor->flags);
4188 chained = true;
4189 num_sg_in_iu++;
4190 sg_descriptor = io_request->sg_chain_buffer;
4191 }
4192 sg = sg_next(sg);
4193 }
4194
4195 put_unaligned_le32(CISS_SG_LAST, &sg_descriptor->flags);
4196 request->partial = chained;
4197 iu_length += num_sg_in_iu * sizeof(*sg_descriptor);
4198
4199out:
4200 put_unaligned_le16(iu_length, &request->header.iu_length);
4201
4202 return 0;
4203}
4204
4205static int pqi_build_aio_sg_list(struct pqi_ctrl_info *ctrl_info,
4206 struct pqi_aio_path_request *request, struct scsi_cmnd *scmd,
4207 struct pqi_io_request *io_request)
4208{
4209 int i;
4210 u16 iu_length;
4211 int sg_count;
4212 bool chained;
4213 unsigned int num_sg_in_iu;
4214 unsigned int max_sg_per_iu;
4215 struct scatterlist *sg;
4216 struct pqi_sg_descriptor *sg_descriptor;
4217
4218 sg_count = scsi_dma_map(scmd);
4219 if (sg_count < 0)
4220 return sg_count;
4221
4222 iu_length = offsetof(struct pqi_aio_path_request, sg_descriptors) -
4223 PQI_REQUEST_HEADER_LENGTH;
4224 num_sg_in_iu = 0;
4225
4226 if (sg_count == 0)
4227 goto out;
4228
4229 sg = scsi_sglist(scmd);
4230 sg_descriptor = request->sg_descriptors;
4231 max_sg_per_iu = ctrl_info->max_sg_per_iu - 1;
4232 chained = false;
4233 i = 0;
4234
4235 while (1) {
4236 pqi_set_sg_descriptor(sg_descriptor, sg);
4237 if (!chained)
4238 num_sg_in_iu++;
4239 i++;
4240 if (i == sg_count)
4241 break;
4242 sg_descriptor++;
4243 if (i == max_sg_per_iu) {
4244 put_unaligned_le64(
4245 (u64)io_request->sg_chain_buffer_dma_handle,
4246 &sg_descriptor->address);
4247 put_unaligned_le32((sg_count - num_sg_in_iu)
4248 * sizeof(*sg_descriptor),
4249 &sg_descriptor->length);
4250 put_unaligned_le32(CISS_SG_CHAIN,
4251 &sg_descriptor->flags);
4252 chained = true;
4253 num_sg_in_iu++;
4254 sg_descriptor = io_request->sg_chain_buffer;
4255 }
4256 sg = sg_next(sg);
4257 }
4258
4259 put_unaligned_le32(CISS_SG_LAST, &sg_descriptor->flags);
4260 request->partial = chained;
4261 iu_length += num_sg_in_iu * sizeof(*sg_descriptor);
4262
4263out:
4264 put_unaligned_le16(iu_length, &request->header.iu_length);
4265 request->num_sg_descriptors = num_sg_in_iu;
4266
4267 return 0;
4268}
4269
4270static void pqi_raid_io_complete(struct pqi_io_request *io_request,
4271 void *context)
4272{
4273 struct scsi_cmnd *scmd;
4274
4275 scmd = io_request->scmd;
4276 pqi_free_io_request(io_request);
4277 scsi_dma_unmap(scmd);
4278 pqi_scsi_done(scmd);
4279}
4280
4281static int pqi_raid_submit_scsi_cmd(struct pqi_ctrl_info *ctrl_info,
4282 struct pqi_scsi_dev *device, struct scsi_cmnd *scmd,
4283 struct pqi_queue_group *queue_group)
4284{
4285 int rc;
4286 size_t cdb_length;
4287 struct pqi_io_request *io_request;
4288 struct pqi_raid_path_request *request;
4289
4290 io_request = pqi_alloc_io_request(ctrl_info);
4291 io_request->io_complete_callback = pqi_raid_io_complete;
4292 io_request->scmd = scmd;
4293
4294 scmd->host_scribble = (unsigned char *)io_request;
4295
4296 request = io_request->iu;
4297 memset(request, 0,
4298 offsetof(struct pqi_raid_path_request, sg_descriptors));
4299
4300 request->header.iu_type = PQI_REQUEST_IU_RAID_PATH_IO;
4301 put_unaligned_le32(scsi_bufflen(scmd), &request->buffer_length);
4302 request->task_attribute = SOP_TASK_ATTRIBUTE_SIMPLE;
4303 put_unaligned_le16(io_request->index, &request->request_id);
4304 request->error_index = request->request_id;
4305 memcpy(request->lun_number, device->scsi3addr,
4306 sizeof(request->lun_number));
4307
4308 cdb_length = min_t(size_t, scmd->cmd_len, sizeof(request->cdb));
4309 memcpy(request->cdb, scmd->cmnd, cdb_length);
4310
4311 switch (cdb_length) {
4312 case 6:
4313 case 10:
4314 case 12:
4315 case 16:
4316 /* No bytes in the Additional CDB bytes field */
4317 request->additional_cdb_bytes_usage =
4318 SOP_ADDITIONAL_CDB_BYTES_0;
4319 break;
4320 case 20:
4321 /* 4 bytes in the Additional cdb field */
4322 request->additional_cdb_bytes_usage =
4323 SOP_ADDITIONAL_CDB_BYTES_4;
4324 break;
4325 case 24:
4326 /* 8 bytes in the Additional cdb field */
4327 request->additional_cdb_bytes_usage =
4328 SOP_ADDITIONAL_CDB_BYTES_8;
4329 break;
4330 case 28:
4331 /* 12 bytes in the Additional cdb field */
4332 request->additional_cdb_bytes_usage =
4333 SOP_ADDITIONAL_CDB_BYTES_12;
4334 break;
4335 case 32:
4336 default:
4337 /* 16 bytes in the Additional cdb field */
4338 request->additional_cdb_bytes_usage =
4339 SOP_ADDITIONAL_CDB_BYTES_16;
4340 break;
4341 }
4342
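	/*
	 * Note: the SOP data-direction flags appear to be named from the
	 * controller's point of view, so a host write (DMA_TO_DEVICE)
	 * maps to SOP_READ_FLAG and a host read to SOP_WRITE_FLAG.
	 */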
4343 switch (scmd->sc_data_direction) {
4344 case DMA_TO_DEVICE:
4345 request->data_direction = SOP_READ_FLAG;
4346 break;
4347 case DMA_FROM_DEVICE:
4348 request->data_direction = SOP_WRITE_FLAG;
4349 break;
4350 case DMA_NONE:
4351 request->data_direction = SOP_NO_DIRECTION_FLAG;
4352 break;
4353 case DMA_BIDIRECTIONAL:
4354 request->data_direction = SOP_BIDIRECTIONAL;
4355 break;
4356 default:
4357 dev_err(&ctrl_info->pci_dev->dev,
4358 "unknown data direction: %d\n",
4359 scmd->sc_data_direction);
4360 WARN_ON(scmd->sc_data_direction);
4361 break;
4362 }
4363
4364 rc = pqi_build_raid_sg_list(ctrl_info, request, scmd, io_request);
4365 if (rc) {
4366 pqi_free_io_request(io_request);
4367 return SCSI_MLQUEUE_HOST_BUSY;
4368 }
4369
4370 pqi_start_io(ctrl_info, queue_group, RAID_PATH, io_request);
4371
4372 return 0;
4373}
4374
4375static void pqi_aio_io_complete(struct pqi_io_request *io_request,
4376 void *context)
4377{
4378 struct scsi_cmnd *scmd;
4379
4380 scmd = io_request->scmd;
4381 scsi_dma_unmap(scmd);
4382 if (io_request->status == -EAGAIN)
4383 set_host_byte(scmd, DID_IMM_RETRY);
4384 pqi_free_io_request(io_request);
4385 pqi_scsi_done(scmd);
4386}
4387
4388static inline int pqi_aio_submit_scsi_cmd(struct pqi_ctrl_info *ctrl_info,
4389 struct pqi_scsi_dev *device, struct scsi_cmnd *scmd,
4390 struct pqi_queue_group *queue_group)
4391{
4392 return pqi_aio_submit_io(ctrl_info, scmd, device->aio_handle,
4393 scmd->cmnd, scmd->cmd_len, queue_group, NULL);
4394}
4395
4396static int pqi_aio_submit_io(struct pqi_ctrl_info *ctrl_info,
4397 struct scsi_cmnd *scmd, u32 aio_handle, u8 *cdb,
4398 unsigned int cdb_length, struct pqi_queue_group *queue_group,
4399 struct pqi_encryption_info *encryption_info)
4400{
4401 int rc;
4402 struct pqi_io_request *io_request;
4403 struct pqi_aio_path_request *request;
4404
4405 io_request = pqi_alloc_io_request(ctrl_info);
4406 io_request->io_complete_callback = pqi_aio_io_complete;
4407 io_request->scmd = scmd;
4408
4409 scmd->host_scribble = (unsigned char *)io_request;
4410
4411 request = io_request->iu;
4412 memset(request, 0,
4413 offsetof(struct pqi_raid_path_request, sg_descriptors));
4414
4415 request->header.iu_type = PQI_REQUEST_IU_AIO_PATH_IO;
4416 put_unaligned_le32(aio_handle, &request->nexus_id);
4417 put_unaligned_le32(scsi_bufflen(scmd), &request->buffer_length);
4418 request->task_attribute = SOP_TASK_ATTRIBUTE_SIMPLE;
4419 put_unaligned_le16(io_request->index, &request->request_id);
4420 request->error_index = request->request_id;
4421 if (cdb_length > sizeof(request->cdb))
4422 cdb_length = sizeof(request->cdb);
4423 request->cdb_length = cdb_length;
4424 memcpy(request->cdb, cdb, cdb_length);
4425
4426 switch (scmd->sc_data_direction) {
4427 case DMA_TO_DEVICE:
4428 request->data_direction = SOP_READ_FLAG;
4429 break;
4430 case DMA_FROM_DEVICE:
4431 request->data_direction = SOP_WRITE_FLAG;
4432 break;
4433 case DMA_NONE:
4434 request->data_direction = SOP_NO_DIRECTION_FLAG;
4435 break;
4436 case DMA_BIDIRECTIONAL:
4437 request->data_direction = SOP_BIDIRECTIONAL;
4438 break;
4439 default:
4440 dev_err(&ctrl_info->pci_dev->dev,
4441 "unknown data direction: %d\n",
4442 scmd->sc_data_direction);
4443 WARN_ON(scmd->sc_data_direction);
4444 break;
4445 }
4446
4447 if (encryption_info) {
4448 request->encryption_enable = true;
4449 put_unaligned_le16(encryption_info->data_encryption_key_index,
4450 &request->data_encryption_key_index);
4451 put_unaligned_le32(encryption_info->encrypt_tweak_lower,
4452 &request->encrypt_tweak_lower);
4453 put_unaligned_le32(encryption_info->encrypt_tweak_upper,
4454 &request->encrypt_tweak_upper);
4455 }
4456
4457 rc = pqi_build_aio_sg_list(ctrl_info, request, scmd, io_request);
4458 if (rc) {
4459 pqi_free_io_request(io_request);
4460 return SCSI_MLQUEUE_HOST_BUSY;
4461 }
4462
4463 pqi_start_io(ctrl_info, queue_group, AIO_PATH, io_request);
4464
4465 return 0;
4466}
4467
4468static int pqi_scsi_queue_command(struct Scsi_Host *shost,
4469 struct scsi_cmnd *scmd)
4470{
4471 int rc;
4472 struct pqi_ctrl_info *ctrl_info;
4473 struct pqi_scsi_dev *device;
4474 u16 hwq;
4475 struct pqi_queue_group *queue_group;
4476 bool raid_bypassed;
4477
4478 device = scmd->device->hostdata;
4479 ctrl_info = shost_to_hba(shost);
4480
4481 if (pqi_ctrl_offline(ctrl_info)) {
4482 set_host_byte(scmd, DID_NO_CONNECT);
4483 pqi_scsi_done(scmd);
4484 return 0;
4485 }
4486
4487 /*
4488 * This is necessary because the SCSI midlayer (SML) doesn't zero out
4489 * this field during error recovery.
4490 */
4491 scmd->result = 0;
4492
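	/*
	 * The blk-mq unique tag encodes the hardware queue index in its
	 * upper 16 bits; use it to pick the matching PQI queue group.
	 */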
4493 hwq = blk_mq_unique_tag_to_hwq(blk_mq_unique_tag(scmd->request));
4494 if (hwq >= ctrl_info->num_queue_groups)
4495 hwq = 0;
4496
4497 queue_group = &ctrl_info->queue_groups[hwq];
4498
4499 if (pqi_is_logical_device(device)) {
4500 raid_bypassed = false;
4501 if (device->offload_enabled &&
4502 !blk_rq_is_passthrough(scmd->request)) {
4503 rc = pqi_raid_bypass_submit_scsi_cmd(ctrl_info, device,
4504 scmd, queue_group);
4505 if (rc == 0 ||
4506 rc == SCSI_MLQUEUE_HOST_BUSY ||
4507 rc == SAM_STAT_CHECK_CONDITION ||
4508 rc == SAM_STAT_RESERVATION_CONFLICT)
4509 raid_bypassed = true;
4510 }
4511 if (!raid_bypassed)
4512 rc = pqi_raid_submit_scsi_cmd(ctrl_info, device, scmd,
4513 queue_group);
4514 } else {
4515 if (device->aio_enabled)
4516 rc = pqi_aio_submit_scsi_cmd(ctrl_info, device, scmd,
4517 queue_group);
4518 else
4519 rc = pqi_raid_submit_scsi_cmd(ctrl_info, device, scmd,
4520 queue_group);
4521 }
4522
4523 return rc;
4524}
4525
4526static void pqi_lun_reset_complete(struct pqi_io_request *io_request,
4527 void *context)
4528{
4529 struct completion *waiting = context;
4530
4531 complete(waiting);
4532}
4533
4534#define PQI_LUN_RESET_TIMEOUT_SECS 10
4535
4536static int pqi_wait_for_lun_reset_completion(struct pqi_ctrl_info *ctrl_info,
4537 struct pqi_scsi_dev *device, struct completion *wait)
4538{
4539 int rc;
4540 unsigned int wait_secs = 0;
4541
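	/*
	 * Wait in PQI_LUN_RESET_TIMEOUT_SECS slices, logging progress
	 * after each slice; give up only if the controller goes offline.
	 */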
4542 while (1) {
4543 if (wait_for_completion_io_timeout(wait,
4544 PQI_LUN_RESET_TIMEOUT_SECS * HZ)) {
4545 rc = 0;
4546 break;
4547 }
4548
4549 pqi_check_ctrl_health(ctrl_info);
4550 if (pqi_ctrl_offline(ctrl_info)) {
4551 rc = -ETIMEDOUT;
4552 break;
4553 }
4554
4555 wait_secs += PQI_LUN_RESET_TIMEOUT_SECS;
4556
4557 dev_err(&ctrl_info->pci_dev->dev,
4558 "resetting scsi %d:%d:%d:%d - waiting %u seconds\n",
4559 ctrl_info->scsi_host->host_no, device->bus,
4560 device->target, device->lun, wait_secs);
4561 }
4562
4563 return rc;
4564}
4565
4566static int pqi_lun_reset(struct pqi_ctrl_info *ctrl_info,
4567 struct pqi_scsi_dev *device)
4568{
4569 int rc;
4570 struct pqi_io_request *io_request;
4571 DECLARE_COMPLETION_ONSTACK(wait);
4572 struct pqi_task_management_request *request;
4573
4574 down(&ctrl_info->lun_reset_sem);
4575
4576 io_request = pqi_alloc_io_request(ctrl_info);
4577 io_request->io_complete_callback = pqi_lun_reset_complete;
4578 io_request->context = &wait;
4579
4580 request = io_request->iu;
4581 memset(request, 0, sizeof(*request));
4582
4583 request->header.iu_type = PQI_REQUEST_IU_TASK_MANAGEMENT;
4584 put_unaligned_le16(sizeof(*request) - PQI_REQUEST_HEADER_LENGTH,
4585 &request->header.iu_length);
4586 put_unaligned_le16(io_request->index, &request->request_id);
4587 memcpy(request->lun_number, device->scsi3addr,
4588 sizeof(request->lun_number));
4589 request->task_management_function = SOP_TASK_MANAGEMENT_LUN_RESET;
4590
4591 pqi_start_io(ctrl_info,
4592 &ctrl_info->queue_groups[PQI_DEFAULT_QUEUE_GROUP], RAID_PATH,
4593 io_request);
4594
4595 rc = pqi_wait_for_lun_reset_completion(ctrl_info, device, &wait);
4596 if (rc == 0)
4597 rc = io_request->status;
4598
4599 pqi_free_io_request(io_request);
4600 up(&ctrl_info->lun_reset_sem);
4601
4602 return rc;
4603}
4604
4605/* Performs a reset at the LUN level. */
4606
4607static int pqi_device_reset(struct pqi_ctrl_info *ctrl_info,
4608 struct pqi_scsi_dev *device)
4609{
4610 int rc;
4611
4612 pqi_check_ctrl_health(ctrl_info);
4613 if (pqi_ctrl_offline(ctrl_info))
4614 return FAILED;
4615
4616 rc = pqi_lun_reset(ctrl_info, device);
4617
4618 return rc == 0 ? SUCCESS : FAILED;
4619}
4620
4621static int pqi_eh_device_reset_handler(struct scsi_cmnd *scmd)
4622{
4623 int rc;
4624 struct pqi_ctrl_info *ctrl_info;
4625 struct pqi_scsi_dev *device;
4626
4627 ctrl_info = shost_to_hba(scmd->device->host);
4628 device = scmd->device->hostdata;
4629
4630 dev_err(&ctrl_info->pci_dev->dev,
4631 "resetting scsi %d:%d:%d:%d\n",
4632 ctrl_info->scsi_host->host_no,
4633 device->bus, device->target, device->lun);
4634
4635 rc = pqi_device_reset(ctrl_info, device);
4636
4637 dev_err(&ctrl_info->pci_dev->dev,
4638 "reset of scsi %d:%d:%d:%d: %s\n",
4639 ctrl_info->scsi_host->host_no,
4640 device->bus, device->target, device->lun,
4641 rc == SUCCESS ? "SUCCESS" : "FAILED");
4642
4643 return rc;
4644}
4645
4646static int pqi_slave_alloc(struct scsi_device *sdev)
4647{
4648 struct pqi_scsi_dev *device;
4649 unsigned long flags;
4650 struct pqi_ctrl_info *ctrl_info;
4651 struct scsi_target *starget;
4652 struct sas_rphy *rphy;
4653
4654 ctrl_info = shost_to_hba(sdev->host);
4655
4656 spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);
4657
4658 if (sdev_channel(sdev) == PQI_PHYSICAL_DEVICE_BUS) {
4659 starget = scsi_target(sdev);
4660 rphy = target_to_rphy(starget);
4661 device = pqi_find_device_by_sas_rphy(ctrl_info, rphy);
4662 if (device) {
4663 device->target = sdev_id(sdev);
4664 device->lun = sdev->lun;
4665 device->target_lun_valid = true;
4666 }
4667 } else {
4668 device = pqi_find_scsi_dev(ctrl_info, sdev_channel(sdev),
4669 sdev_id(sdev), sdev->lun);
4670 }
4671
4672 if (device && device->expose_device) {
4673 sdev->hostdata = device;
4674 device->sdev = sdev;
4675 if (device->queue_depth) {
4676 device->advertised_queue_depth = device->queue_depth;
4677 scsi_change_queue_depth(sdev,
4678 device->advertised_queue_depth);
4679 }
4680 }
4681
4682 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
4683
4684 return 0;
4685}
4686
4687static int pqi_slave_configure(struct scsi_device *sdev)
4688{
4689 struct pqi_scsi_dev *device;
4690
4691 device = sdev->hostdata;
4692 if (!device->expose_device)
4693 sdev->no_uld_attach = true;
4694
4695 return 0;
4696}
4697
4698static int pqi_map_queues(struct Scsi_Host *shost)
4699{
4700 struct pqi_ctrl_info *ctrl_info = shost_to_hba(shost);
4701
4702 return blk_mq_pci_map_queues(&shost->tag_set, ctrl_info->pci_dev);
4703}
4704
4705static int pqi_getpciinfo_ioctl(struct pqi_ctrl_info *ctrl_info,
4706 void __user *arg)
4707{
4708 struct pci_dev *pci_dev;
4709 u32 subsystem_vendor;
4710 u32 subsystem_device;
4711 cciss_pci_info_struct pciinfo;
4712
4713 if (!arg)
4714 return -EINVAL;
4715
4716 pci_dev = ctrl_info->pci_dev;
4717
4718 pciinfo.domain = pci_domain_nr(pci_dev->bus);
4719 pciinfo.bus = pci_dev->bus->number;
4720 pciinfo.dev_fn = pci_dev->devfn;
4721 subsystem_vendor = pci_dev->subsystem_vendor;
4722 subsystem_device = pci_dev->subsystem_device;
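	/*
	 * board_id packs the PCI subsystem device ID into the upper 16
	 * bits and the subsystem vendor ID into the lower 16 bits, the
	 * layout the cciss ioctl interface expects.
	 */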
4723 pciinfo.board_id = ((subsystem_device << 16) & 0xffff0000) |
4724 subsystem_vendor;
4725
4726 if (copy_to_user(arg, &pciinfo, sizeof(pciinfo)))
4727 return -EFAULT;
4728
4729 return 0;
4730}
4731
4732static int pqi_getdrivver_ioctl(void __user *arg)
4733{
4734 u32 version;
4735
4736 if (!arg)
4737 return -EINVAL;
4738
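	/*
	 * Pack the driver version as 4 bits of major, 4 bits of minor,
	 * 8 bits of release and 16 bits of revision, i.e. version
	 * M.m.r-R becomes (M << 28) | (m << 24) | (r << 16) | R.
	 */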
4739 version = (DRIVER_MAJOR << 28) | (DRIVER_MINOR << 24) |
4740 (DRIVER_RELEASE << 16) | DRIVER_REVISION;
4741
4742 if (copy_to_user(arg, &version, sizeof(version)))
4743 return -EFAULT;
4744
4745 return 0;
4746}
4747
4748struct ciss_error_info {
4749 u8 scsi_status;
4750 int command_status;
4751 size_t sense_data_length;
4752};
4753
4754static void pqi_error_info_to_ciss(struct pqi_raid_error_info *pqi_error_info,
4755 struct ciss_error_info *ciss_error_info)
4756{
4757 int ciss_cmd_status;
4758 size_t sense_data_length;
4759
4760 switch (pqi_error_info->data_out_result) {
4761 case PQI_DATA_IN_OUT_GOOD:
4762 ciss_cmd_status = CISS_CMD_STATUS_SUCCESS;
4763 break;
4764 case PQI_DATA_IN_OUT_UNDERFLOW:
4765 ciss_cmd_status = CISS_CMD_STATUS_DATA_UNDERRUN;
4766 break;
4767 case PQI_DATA_IN_OUT_BUFFER_OVERFLOW:
4768 ciss_cmd_status = CISS_CMD_STATUS_DATA_OVERRUN;
4769 break;
4770 case PQI_DATA_IN_OUT_PROTOCOL_ERROR:
4771 case PQI_DATA_IN_OUT_BUFFER_ERROR:
4772 case PQI_DATA_IN_OUT_BUFFER_OVERFLOW_DESCRIPTOR_AREA:
4773 case PQI_DATA_IN_OUT_BUFFER_OVERFLOW_BRIDGE:
4774 case PQI_DATA_IN_OUT_ERROR:
4775 ciss_cmd_status = CISS_CMD_STATUS_PROTOCOL_ERROR;
4776 break;
4777 case PQI_DATA_IN_OUT_HARDWARE_ERROR:
4778 case PQI_DATA_IN_OUT_PCIE_FABRIC_ERROR:
4779 case PQI_DATA_IN_OUT_PCIE_COMPLETION_TIMEOUT:
4780 case PQI_DATA_IN_OUT_PCIE_COMPLETER_ABORT_RECEIVED:
4781 case PQI_DATA_IN_OUT_PCIE_UNSUPPORTED_REQUEST_RECEIVED:
4782 case PQI_DATA_IN_OUT_PCIE_ECRC_CHECK_FAILED:
4783 case PQI_DATA_IN_OUT_PCIE_UNSUPPORTED_REQUEST:
4784 case PQI_DATA_IN_OUT_PCIE_ACS_VIOLATION:
4785 case PQI_DATA_IN_OUT_PCIE_TLP_PREFIX_BLOCKED:
4786 case PQI_DATA_IN_OUT_PCIE_POISONED_MEMORY_READ:
4787 ciss_cmd_status = CISS_CMD_STATUS_HARDWARE_ERROR;
4788 break;
4789 case PQI_DATA_IN_OUT_UNSOLICITED_ABORT:
4790 ciss_cmd_status = CISS_CMD_STATUS_UNSOLICITED_ABORT;
4791 break;
4792 case PQI_DATA_IN_OUT_ABORTED:
4793 ciss_cmd_status = CISS_CMD_STATUS_ABORTED;
4794 break;
4795 case PQI_DATA_IN_OUT_TIMEOUT:
4796 ciss_cmd_status = CISS_CMD_STATUS_TIMEOUT;
4797 break;
4798 default:
4799 ciss_cmd_status = CISS_CMD_STATUS_TARGET_STATUS;
4800 break;
4801 }
4802
4803 sense_data_length =
4804 get_unaligned_le16(&pqi_error_info->sense_data_length);
4805 if (sense_data_length == 0)
4806 sense_data_length =
4807 get_unaligned_le16(&pqi_error_info->response_data_length);
4808 if (sense_data_length)
4809 if (sense_data_length > sizeof(pqi_error_info->data))
4810 sense_data_length = sizeof(pqi_error_info->data);
4811
4812 ciss_error_info->scsi_status = pqi_error_info->status;
4813 ciss_error_info->command_status = ciss_cmd_status;
4814 ciss_error_info->sense_data_length = sense_data_length;
4815}
4816
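/*
 * CCISS_PASSTHRU: copy in the caller's command, optionally stage the data
 * phase through a kernel bounce buffer, submit the request synchronously
 * down the RAID path, translate any PQI error info into the cciss format,
 * and copy the results (and any read data) back to user space.
 */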
4817static int pqi_passthru_ioctl(struct pqi_ctrl_info *ctrl_info, void __user *arg)
4818{
4819 int rc;
4820 char *kernel_buffer = NULL;
4821 u16 iu_length;
4822 size_t sense_data_length;
4823 IOCTL_Command_struct iocommand;
4824 struct pqi_raid_path_request request;
4825 struct pqi_raid_error_info pqi_error_info;
4826 struct ciss_error_info ciss_error_info;
4827
4828 if (pqi_ctrl_offline(ctrl_info))
4829 return -ENXIO;
4830 if (!arg)
4831 return -EINVAL;
4832 if (!capable(CAP_SYS_RAWIO))
4833 return -EPERM;
4834 if (copy_from_user(&iocommand, arg, sizeof(iocommand)))
4835 return -EFAULT;
4836 if (iocommand.buf_size < 1 &&
4837 iocommand.Request.Type.Direction != XFER_NONE)
4838 return -EINVAL;
4839 if (iocommand.Request.CDBLen > sizeof(request.cdb))
4840 return -EINVAL;
4841 if (iocommand.Request.Type.Type != TYPE_CMD)
4842 return -EINVAL;
4843
4844 switch (iocommand.Request.Type.Direction) {
4845 case XFER_NONE:
4846 case XFER_WRITE:
4847 case XFER_READ:
4848 break;
4849 default:
4850 return -EINVAL;
4851 }
4852
4853 if (iocommand.buf_size > 0) {
4854 kernel_buffer = kmalloc(iocommand.buf_size, GFP_KERNEL);
4855 if (!kernel_buffer)
4856 return -ENOMEM;
4857 if (iocommand.Request.Type.Direction & XFER_WRITE) {
4858 if (copy_from_user(kernel_buffer, iocommand.buf,
4859 iocommand.buf_size)) {
4860 rc = -EFAULT;
4861 goto out;
4862 }
4863 } else {
4864 memset(kernel_buffer, 0, iocommand.buf_size);
4865 }
4866 }
4867
4868 memset(&request, 0, sizeof(request));
4869
4870 request.header.iu_type = PQI_REQUEST_IU_RAID_PATH_IO;
4871 iu_length = offsetof(struct pqi_raid_path_request, sg_descriptors) -
4872 PQI_REQUEST_HEADER_LENGTH;
4873 memcpy(request.lun_number, iocommand.LUN_info.LunAddrBytes,
4874 sizeof(request.lun_number));
4875 memcpy(request.cdb, iocommand.Request.CDB, iocommand.Request.CDBLen);
4876 request.additional_cdb_bytes_usage = SOP_ADDITIONAL_CDB_BYTES_0;
4877
4878 switch (iocommand.Request.Type.Direction) {
4879 case XFER_NONE:
4880 request.data_direction = SOP_NO_DIRECTION_FLAG;
4881 break;
4882 case XFER_WRITE:
4883 request.data_direction = SOP_WRITE_FLAG;
4884 break;
4885 case XFER_READ:
4886 request.data_direction = SOP_READ_FLAG;
4887 break;
4888 }
4889
4890 request.task_attribute = SOP_TASK_ATTRIBUTE_SIMPLE;
4891
4892 if (iocommand.buf_size > 0) {
4893 put_unaligned_le32(iocommand.buf_size, &request.buffer_length);
4894
4895 rc = pqi_map_single(ctrl_info->pci_dev,
4896 &request.sg_descriptors[0], kernel_buffer,
4897 iocommand.buf_size, PCI_DMA_BIDIRECTIONAL);
4898 if (rc)
4899 goto out;
4900
4901 iu_length += sizeof(request.sg_descriptors[0]);
4902 }
4903
4904 put_unaligned_le16(iu_length, &request.header.iu_length);
4905
4906 rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header,
4907 PQI_SYNC_FLAGS_INTERRUPTABLE, &pqi_error_info, NO_TIMEOUT);
4908
4909 if (iocommand.buf_size > 0)
4910 pqi_pci_unmap(ctrl_info->pci_dev, request.sg_descriptors, 1,
4911 PCI_DMA_BIDIRECTIONAL);
4912
4913 memset(&iocommand.error_info, 0, sizeof(iocommand.error_info));
4914
4915 if (rc == 0) {
4916 pqi_error_info_to_ciss(&pqi_error_info, &ciss_error_info);
4917 iocommand.error_info.ScsiStatus = ciss_error_info.scsi_status;
4918 iocommand.error_info.CommandStatus =
4919 ciss_error_info.command_status;
4920 sense_data_length = ciss_error_info.sense_data_length;
4921 if (sense_data_length) {
4922 if (sense_data_length >
4923 sizeof(iocommand.error_info.SenseInfo))
4924 sense_data_length =
4925 sizeof(iocommand.error_info.SenseInfo);
4926 memcpy(iocommand.error_info.SenseInfo,
4927 pqi_error_info.data, sense_data_length);
4928 iocommand.error_info.SenseLen = sense_data_length;
4929 }
4930 }
4931
4932 if (copy_to_user(arg, &iocommand, sizeof(iocommand))) {
4933 rc = -EFAULT;
4934 goto out;
4935 }
4936
4937 if (rc == 0 && iocommand.buf_size > 0 &&
4938 (iocommand.Request.Type.Direction & XFER_READ)) {
4939 if (copy_to_user(iocommand.buf, kernel_buffer,
4940 iocommand.buf_size)) {
4941 rc = -EFAULT;
4942 }
4943 }
4944
4945out:
4946 kfree(kernel_buffer);
4947
4948 return rc;
4949}
4950
4951static int pqi_ioctl(struct scsi_device *sdev, int cmd, void __user *arg)
4952{
4953 int rc;
4954 struct pqi_ctrl_info *ctrl_info;
4955
4956 ctrl_info = shost_to_hba(sdev->host);
4957
4958 switch (cmd) {
4959 case CCISS_DEREGDISK:
4960 case CCISS_REGNEWDISK:
4961 case CCISS_REGNEWD:
4962 rc = pqi_scan_scsi_devices(ctrl_info);
4963 break;
4964 case CCISS_GETPCIINFO:
4965 rc = pqi_getpciinfo_ioctl(ctrl_info, arg);
4966 break;
4967 case CCISS_GETDRIVVER:
4968 rc = pqi_getdrivver_ioctl(arg);
4969 break;
4970 case CCISS_PASSTHRU:
4971 rc = pqi_passthru_ioctl(ctrl_info, arg);
4972 break;
4973 default:
4974 rc = -EINVAL;
4975 break;
4976 }
4977
4978 return rc;
4979}
4980
4981static ssize_t pqi_version_show(struct device *dev,
4982 struct device_attribute *attr, char *buffer)
4983{
4984 ssize_t count = 0;
4985 struct Scsi_Host *shost;
4986 struct pqi_ctrl_info *ctrl_info;
4987
4988 shost = class_to_shost(dev);
4989 ctrl_info = shost_to_hba(shost);
4990
4991 count += snprintf(buffer + count, PAGE_SIZE - count,
4992 " driver: %s\n", DRIVER_VERSION BUILD_TIMESTAMP);
4993
4994 count += snprintf(buffer + count, PAGE_SIZE - count,
4995 "firmware: %s\n", ctrl_info->firmware_version);
4996
4997 return count;
4998}
4999
5000static ssize_t pqi_host_rescan_store(struct device *dev,
5001 struct device_attribute *attr, const char *buffer, size_t count)
5002{
5003 struct Scsi_Host *shost = class_to_shost(dev);
5004
5005 pqi_scan_start(shost);
5006
5007 return count;
5008}
5009
5010static DEVICE_ATTR(version, S_IRUGO, pqi_version_show, NULL);
5011static DEVICE_ATTR(rescan, S_IWUSR, NULL, pqi_host_rescan_store);
5012
5013static struct device_attribute *pqi_shost_attrs[] = {
5014 &dev_attr_version,
5015 &dev_attr_rescan,
5016 NULL
5017};
5018
5019static ssize_t pqi_sas_address_show(struct device *dev,
5020 struct device_attribute *attr, char *buffer)
5021{
5022 struct pqi_ctrl_info *ctrl_info;
5023 struct scsi_device *sdev;
5024 struct pqi_scsi_dev *device;
5025 unsigned long flags;
5026 u64 sas_address;
5027
5028 sdev = to_scsi_device(dev);
5029 ctrl_info = shost_to_hba(sdev->host);
5030
5031 spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);
5032
5033 device = sdev->hostdata;
5034 if (pqi_is_logical_device(device)) {
5035 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock,
5036 flags);
5037 return -ENODEV;
5038 }
5039 sas_address = device->sas_address;
5040
5041 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
5042
5043 return snprintf(buffer, PAGE_SIZE, "0x%016llx\n", sas_address);
5044}
5045
5046static ssize_t pqi_ssd_smart_path_enabled_show(struct device *dev,
5047 struct device_attribute *attr, char *buffer)
5048{
5049 struct pqi_ctrl_info *ctrl_info;
5050 struct scsi_device *sdev;
5051 struct pqi_scsi_dev *device;
5052 unsigned long flags;
5053
5054 sdev = to_scsi_device(dev);
5055 ctrl_info = shost_to_hba(sdev->host);
5056
5057 spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);
5058
5059 device = sdev->hostdata;
5060 buffer[0] = device->offload_enabled ? '1' : '0';
5061 buffer[1] = '\n';
5062 buffer[2] = '\0';
5063
5064 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
5065
5066 return 2;
5067}
5068
5069static DEVICE_ATTR(sas_address, S_IRUGO, pqi_sas_address_show, NULL);
5070static DEVICE_ATTR(ssd_smart_path_enabled, S_IRUGO,
5071 pqi_ssd_smart_path_enabled_show, NULL);
5072
5073static struct device_attribute *pqi_sdev_attrs[] = {
5074 &dev_attr_sas_address,
5075 &dev_attr_ssd_smart_path_enabled,
5076 NULL
5077};
5078
5079static struct scsi_host_template pqi_driver_template = {
5080 .module = THIS_MODULE,
5081 .name = DRIVER_NAME_SHORT,
5082 .proc_name = DRIVER_NAME_SHORT,
5083 .queuecommand = pqi_scsi_queue_command,
5084 .scan_start = pqi_scan_start,
5085 .scan_finished = pqi_scan_finished,
5086 .this_id = -1,
5087 .use_clustering = ENABLE_CLUSTERING,
5088 .eh_device_reset_handler = pqi_eh_device_reset_handler,
5089 .ioctl = pqi_ioctl,
5090 .slave_alloc = pqi_slave_alloc,
5091 .slave_configure = pqi_slave_configure,
5092 .map_queues = pqi_map_queues,
5093 .sdev_attrs = pqi_sdev_attrs,
5094 .shost_attrs = pqi_shost_attrs,
5095};
5096
5097static int pqi_register_scsi(struct pqi_ctrl_info *ctrl_info)
5098{
5099 int rc;
5100 struct Scsi_Host *shost;
5101
5102 shost = scsi_host_alloc(&pqi_driver_template, sizeof(ctrl_info));
5103 if (!shost) {
5104 dev_err(&ctrl_info->pci_dev->dev,
5105 "scsi_host_alloc failed for controller %u\n",
5106 ctrl_info->ctrl_id);
5107 return -ENOMEM;
5108 }
5109
5110 shost->io_port = 0;
5111 shost->n_io_port = 0;
5112 shost->this_id = -1;
5113 shost->max_channel = PQI_MAX_BUS;
5114 shost->max_cmd_len = MAX_COMMAND_SIZE;
5115 shost->max_lun = ~0;
5116 shost->max_id = ~0;
5117 shost->max_sectors = ctrl_info->max_sectors;
5118 shost->can_queue = ctrl_info->scsi_ml_can_queue;
5119 shost->cmd_per_lun = shost->can_queue;
5120 shost->sg_tablesize = ctrl_info->sg_tablesize;
5121 shost->transportt = pqi_sas_transport_template;
5122 shost->irq = pci_irq_vector(ctrl_info->pci_dev, 0);
5123 shost->unique_id = shost->irq;
5124 shost->nr_hw_queues = ctrl_info->num_queue_groups;
5125 shost->hostdata[0] = (unsigned long)ctrl_info;
5126
5127 rc = scsi_add_host(shost, &ctrl_info->pci_dev->dev);
5128 if (rc) {
5129 dev_err(&ctrl_info->pci_dev->dev,
5130 "scsi_add_host failed for controller %u\n",
5131 ctrl_info->ctrl_id);
5132 goto free_host;
5133 }
5134
5135 rc = pqi_add_sas_host(shost, ctrl_info);
5136 if (rc) {
5137 dev_err(&ctrl_info->pci_dev->dev,
5138 "add SAS host failed for controller %u\n",
5139 ctrl_info->ctrl_id);
5140 goto remove_host;
5141 }
5142
5143 ctrl_info->scsi_host = shost;
5144
5145 return 0;
5146
5147remove_host:
5148 scsi_remove_host(shost);
5149free_host:
5150 scsi_host_put(shost);
5151
5152 return rc;
5153}
5154
5155static void pqi_unregister_scsi(struct pqi_ctrl_info *ctrl_info)
5156{
5157 struct Scsi_Host *shost;
5158
5159 pqi_delete_sas_host(ctrl_info);
5160
5161 shost = ctrl_info->scsi_host;
5162 if (!shost)
5163 return;
5164
5165 scsi_remove_host(shost);
5166 scsi_host_put(shost);
5167}
5168
5169#define PQI_RESET_ACTION_RESET 0x1
5170
5171#define PQI_RESET_TYPE_NO_RESET 0x0
5172#define PQI_RESET_TYPE_SOFT_RESET 0x1
5173#define PQI_RESET_TYPE_FIRM_RESET 0x2
5174#define PQI_RESET_TYPE_HARD_RESET 0x3
5175
5176static int pqi_reset(struct pqi_ctrl_info *ctrl_info)
5177{
5178 int rc;
5179 u32 reset_params;
5180
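	/*
	 * The device reset register combines the reset action (shifted up
	 * by 5 bits) with the reset type in the low bits; requesting a
	 * hard reset therefore writes (0x1 << 5) | 0x3 = 0x23.
	 */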
5181 reset_params = (PQI_RESET_ACTION_RESET << 5) |
5182 PQI_RESET_TYPE_HARD_RESET;
5183
5184 writel(reset_params,
5185 &ctrl_info->pqi_registers->device_reset);
5186
5187 rc = pqi_wait_for_pqi_mode_ready(ctrl_info);
5188 if (rc)
5189 dev_err(&ctrl_info->pci_dev->dev,
5190 "PQI reset failed\n");
5191
5192 return rc;
5193}
5194
5195static int pqi_get_ctrl_firmware_version(struct pqi_ctrl_info *ctrl_info)
5196{
5197 int rc;
5198 struct bmic_identify_controller *identify;
5199
5200 identify = kmalloc(sizeof(*identify), GFP_KERNEL);
5201 if (!identify)
5202 return -ENOMEM;
5203
5204 rc = pqi_identify_controller(ctrl_info, identify);
5205 if (rc)
5206 goto out;
5207
5208 memcpy(ctrl_info->firmware_version, identify->firmware_version,
5209 sizeof(identify->firmware_version));
5210 ctrl_info->firmware_version[sizeof(identify->firmware_version)] = '\0';
5211 snprintf(ctrl_info->firmware_version +
5212 strlen(ctrl_info->firmware_version),
5213 sizeof(ctrl_info->firmware_version),
5214 "-%u", get_unaligned_le16(&identify->firmware_build_number));
5215
5216out:
5217 kfree(identify);
5218
5219 return rc;
5220}
5221
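/*
 * Called when the driver is loaded with reset_devices set (for example in
 * a kdump/crash kernel): if the previous kernel left the controller in PQI
 * mode, reset it and drop it back to SIS mode so that normal initialization
 * can start from a known state.
 */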
5222static int pqi_kdump_init(struct pqi_ctrl_info *ctrl_info)
5223{
5224 if (!sis_is_firmware_running(ctrl_info))
5225 return -ENXIO;
5226
5227 if (pqi_get_ctrl_mode(ctrl_info) == PQI_MODE) {
5228 sis_disable_msix(ctrl_info);
5229 if (pqi_reset(ctrl_info) == 0)
5230 sis_reenable_sis_mode(ctrl_info);
5231 }
5232
5233 return 0;
5234}
5235
5236static int pqi_ctrl_init(struct pqi_ctrl_info *ctrl_info)
5237{
5238 int rc;
5239
5240 if (reset_devices) {
5241 rc = pqi_kdump_init(ctrl_info);
5242 if (rc)
5243 return rc;
5244 }
5245
5246 /*
5247 * When the controller comes out of reset, it is always running
5248 * in legacy SIS mode. This is so that it can be compatible
5249 * with legacy drivers shipped with OSes. So we have to talk
5250 * to it using SIS commands at first. Once we are satisfied
5251 * that the controller supports PQI, we transition it into PQI
5252 * mode.
5253 */
5254
5255 /*
5256 * Wait until the controller is ready to start accepting SIS
5257 * commands.
5258 */
5259 rc = sis_wait_for_ctrl_ready(ctrl_info);
5260 if (rc) {
5261 dev_err(&ctrl_info->pci_dev->dev,
5262 "error initializing SIS interface\n");
5263 return rc;
5264 }
5265
5266 /*
5267 * Get the controller properties. This allows us to determine
5268 * whether or not it supports PQI mode.
5269 */
5270 rc = sis_get_ctrl_properties(ctrl_info);
5271 if (rc) {
5272 dev_err(&ctrl_info->pci_dev->dev,
5273 "error obtaining controller properties\n");
5274 return rc;
5275 }
5276
5277 rc = sis_get_pqi_capabilities(ctrl_info);
5278 if (rc) {
5279 dev_err(&ctrl_info->pci_dev->dev,
5280 "error obtaining controller capabilities\n");
5281 return rc;
5282 }
5283
5284 if (ctrl_info->max_outstanding_requests > PQI_MAX_OUTSTANDING_REQUESTS)
5285 ctrl_info->max_outstanding_requests =
5286 PQI_MAX_OUTSTANDING_REQUESTS;
5287
5288 pqi_calculate_io_resources(ctrl_info);
5289
5290 rc = pqi_alloc_error_buffer(ctrl_info);
5291 if (rc) {
5292 dev_err(&ctrl_info->pci_dev->dev,
5293 "failed to allocate PQI error buffer\n");
5294 return rc;
5295 }
5296
5297 /*
5298 * If the function we are about to call succeeds, the
5299 * controller will transition from legacy SIS mode
5300 * into PQI mode.
5301 */
5302 rc = sis_init_base_struct_addr(ctrl_info);
5303 if (rc) {
5304 dev_err(&ctrl_info->pci_dev->dev,
5305 "error initializing PQI mode\n");
5306 return rc;
5307 }
5308
5309 /* Wait for the controller to complete the SIS -> PQI transition. */
5310 rc = pqi_wait_for_pqi_mode_ready(ctrl_info);
5311 if (rc) {
5312 dev_err(&ctrl_info->pci_dev->dev,
5313 "transition to PQI mode failed\n");
5314 return rc;
5315 }
5316
5317 /* From here on, we are running in PQI mode. */
5318 ctrl_info->pqi_mode_enabled = true;
5319 pqi_save_ctrl_mode(ctrl_info, PQI_MODE);
5320
5321 rc = pqi_alloc_admin_queues(ctrl_info);
5322 if (rc) {
5323 dev_err(&ctrl_info->pci_dev->dev,
5324 "error allocating admin queues\n");
5325 return rc;
5326 }
5327
5328 rc = pqi_create_admin_queues(ctrl_info);
5329 if (rc) {
5330 dev_err(&ctrl_info->pci_dev->dev,
5331 "error creating admin queues\n");
5332 return rc;
5333 }
5334
5335 rc = pqi_report_device_capability(ctrl_info);
5336 if (rc) {
5337 dev_err(&ctrl_info->pci_dev->dev,
5338 "obtaining device capability failed\n");
5339 return rc;
5340 }
5341
5342 rc = pqi_validate_device_capability(ctrl_info);
5343 if (rc)
5344 return rc;
5345
5346 pqi_calculate_queue_resources(ctrl_info);
5347
5348 rc = pqi_enable_msix_interrupts(ctrl_info);
5349 if (rc)
5350 return rc;
5351
5352 if (ctrl_info->num_msix_vectors_enabled < ctrl_info->num_queue_groups) {
5353 ctrl_info->max_msix_vectors =
5354 ctrl_info->num_msix_vectors_enabled;
5355 pqi_calculate_queue_resources(ctrl_info);
5356 }
5357
5358 rc = pqi_alloc_io_resources(ctrl_info);
5359 if (rc)
5360 return rc;
5361
5362 rc = pqi_alloc_operational_queues(ctrl_info);
5363 if (rc)
5364 return rc;
5365
5366 pqi_init_operational_queues(ctrl_info);
5367
5368 rc = pqi_request_irqs(ctrl_info);
5369 if (rc)
5370 return rc;
5371
5372 rc = pqi_create_queues(ctrl_info);
5373 if (rc)
5374 return rc;
5375
5376 sis_enable_msix(ctrl_info);
5377
5378 rc = pqi_configure_events(ctrl_info);
5379 if (rc) {
5380 dev_err(&ctrl_info->pci_dev->dev,
5381 "error configuring events\n");
5382 return rc;
5383 }
5384
5385 pqi_start_heartbeat_timer(ctrl_info);
5386
5387 ctrl_info->controller_online = true;
5388
5389 /* Register with the SCSI subsystem. */
5390 rc = pqi_register_scsi(ctrl_info);
5391 if (rc)
5392 return rc;
5393
5394 rc = pqi_get_ctrl_firmware_version(ctrl_info);
5395 if (rc) {
5396 dev_err(&ctrl_info->pci_dev->dev,
5397 "error obtaining firmware version\n");
5398 return rc;
5399 }
5400
5401 rc = pqi_write_driver_version_to_host_wellness(ctrl_info);
5402 if (rc) {
5403 dev_err(&ctrl_info->pci_dev->dev,
5404 "error updating host wellness\n");
5405 return rc;
5406 }
5407
5408 pqi_schedule_update_time_worker(ctrl_info);
5409
5410 pqi_scan_scsi_devices(ctrl_info);
5411
5412 return 0;
5413}
5414
5415static int pqi_pci_init(struct pqi_ctrl_info *ctrl_info)
5416{
5417 int rc;
5418 u64 mask;
5419
5420 rc = pci_enable_device(ctrl_info->pci_dev);
5421 if (rc) {
5422 dev_err(&ctrl_info->pci_dev->dev,
5423 "failed to enable PCI device\n");
5424 return rc;
5425 }
5426
5427 if (sizeof(dma_addr_t) > 4)
5428 mask = DMA_BIT_MASK(64);
5429 else
5430 mask = DMA_BIT_MASK(32);
5431
5432 rc = dma_set_mask(&ctrl_info->pci_dev->dev, mask);
5433 if (rc) {
5434 dev_err(&ctrl_info->pci_dev->dev, "failed to set DMA mask\n");
5435 goto disable_device;
5436 }
5437
5438 rc = pci_request_regions(ctrl_info->pci_dev, DRIVER_NAME_SHORT);
5439 if (rc) {
5440 dev_err(&ctrl_info->pci_dev->dev,
5441 "failed to obtain PCI resources\n");
5442 goto disable_device;
5443 }
5444
5445 ctrl_info->iomem_base = ioremap_nocache(pci_resource_start(
5446 ctrl_info->pci_dev, 0),
5447 sizeof(struct pqi_ctrl_registers));
5448 if (!ctrl_info->iomem_base) {
5449 dev_err(&ctrl_info->pci_dev->dev,
5450 "failed to map memory for controller registers\n");
5451 rc = -ENOMEM;
5452 goto release_regions;
5453 }
5454
5455 ctrl_info->registers = ctrl_info->iomem_base;
5456 ctrl_info->pqi_registers = &ctrl_info->registers->pqi_registers;
5457
5458 /* Enable bus mastering. */
5459 pci_set_master(ctrl_info->pci_dev);
5460
5461 pci_set_drvdata(ctrl_info->pci_dev, ctrl_info);
5462
5463 return 0;
5464
5465release_regions:
5466 pci_release_regions(ctrl_info->pci_dev);
5467disable_device:
5468 pci_disable_device(ctrl_info->pci_dev);
5469
5470 return rc;
5471}
5472
5473static void pqi_cleanup_pci_init(struct pqi_ctrl_info *ctrl_info)
5474{
5475 iounmap(ctrl_info->iomem_base);
5476 pci_release_regions(ctrl_info->pci_dev);
5477 pci_disable_device(ctrl_info->pci_dev);
5478 pci_set_drvdata(ctrl_info->pci_dev, NULL);
5479}
5480
5481static struct pqi_ctrl_info *pqi_alloc_ctrl_info(int numa_node)
5482{
5483 struct pqi_ctrl_info *ctrl_info;
5484
5485 ctrl_info = kzalloc_node(sizeof(struct pqi_ctrl_info),
5486 GFP_KERNEL, numa_node);
5487 if (!ctrl_info)
5488 return NULL;
5489
5490 mutex_init(&ctrl_info->scan_mutex);
5491
5492 INIT_LIST_HEAD(&ctrl_info->scsi_device_list);
5493 spin_lock_init(&ctrl_info->scsi_device_list_lock);
5494
5495 INIT_WORK(&ctrl_info->event_work, pqi_event_worker);
5496 atomic_set(&ctrl_info->num_interrupts, 0);
5497
5498 INIT_DELAYED_WORK(&ctrl_info->rescan_work, pqi_rescan_worker);
5499 INIT_DELAYED_WORK(&ctrl_info->update_time_work, pqi_update_time_worker);
5500
5501 sema_init(&ctrl_info->sync_request_sem,
5502 PQI_RESERVED_IO_SLOTS_SYNCHRONOUS_REQUESTS);
5503 sema_init(&ctrl_info->lun_reset_sem, PQI_RESERVED_IO_SLOTS_LUN_RESET);
5504
5505 ctrl_info->ctrl_id = atomic_inc_return(&pqi_controller_count) - 1;
5506 ctrl_info->max_msix_vectors = PQI_MAX_MSIX_VECTORS;
5507
5508 return ctrl_info;
5509}
5510
5511static inline void pqi_free_ctrl_info(struct pqi_ctrl_info *ctrl_info)
5512{
5513 kfree(ctrl_info);
5514}
5515
5516static void pqi_free_interrupts(struct pqi_ctrl_info *ctrl_info)
5517{
5518 int i;
5519
5520 for (i = 0; i < ctrl_info->num_msix_vectors_initialized; i++) {
5521 free_irq(pci_irq_vector(ctrl_info->pci_dev, i),
5522 &ctrl_info->queue_groups[i]);
5523 }
5524
5525 pci_free_irq_vectors(ctrl_info->pci_dev);
5526}
5527
5528static void pqi_free_ctrl_resources(struct pqi_ctrl_info *ctrl_info)
5529{
5530 pqi_stop_heartbeat_timer(ctrl_info);
5531 pqi_free_interrupts(ctrl_info);
5532 if (ctrl_info->queue_memory_base)
5533 dma_free_coherent(&ctrl_info->pci_dev->dev,
5534 ctrl_info->queue_memory_length,
5535 ctrl_info->queue_memory_base,
5536 ctrl_info->queue_memory_base_dma_handle);
5537 if (ctrl_info->admin_queue_memory_base)
5538 dma_free_coherent(&ctrl_info->pci_dev->dev,
5539 ctrl_info->admin_queue_memory_length,
5540 ctrl_info->admin_queue_memory_base,
5541 ctrl_info->admin_queue_memory_base_dma_handle);
5542 pqi_free_all_io_requests(ctrl_info);
5543 if (ctrl_info->error_buffer)
5544 dma_free_coherent(&ctrl_info->pci_dev->dev,
5545 ctrl_info->error_buffer_length,
5546 ctrl_info->error_buffer,
5547 ctrl_info->error_buffer_dma_handle);
5548 if (ctrl_info->iomem_base)
5549 pqi_cleanup_pci_init(ctrl_info);
5550 pqi_free_ctrl_info(ctrl_info);
5551}
5552
5553static void pqi_remove_ctrl(struct pqi_ctrl_info *ctrl_info)
5554{
5555 cancel_delayed_work_sync(&ctrl_info->rescan_work);
5556 cancel_delayed_work_sync(&ctrl_info->update_time_work);
5557 pqi_remove_all_scsi_devices(ctrl_info);
5558 pqi_unregister_scsi(ctrl_info);
5559
5560 if (ctrl_info->pqi_mode_enabled) {
5561 sis_disable_msix(ctrl_info);
5562 if (pqi_reset(ctrl_info) == 0)
5563 sis_reenable_sis_mode(ctrl_info);
5564 }
5565 pqi_free_ctrl_resources(ctrl_info);
5566}
5567
5568static void pqi_print_ctrl_info(struct pci_dev *pdev,
5569 const struct pci_device_id *id)
5570{
5571 char *ctrl_description;
5572
5573 if (id->driver_data) {
5574 ctrl_description = (char *)id->driver_data;
5575 } else {
5576 switch (id->subvendor) {
5577 case PCI_VENDOR_ID_HP:
5578 ctrl_description = hpe_branded_controller;
5579 break;
5580 case PCI_VENDOR_ID_ADAPTEC2:
5581 default:
5582 ctrl_description = microsemi_branded_controller;
5583 break;
5584 }
5585 }
5586
5587 dev_info(&pdev->dev, "%s found\n", ctrl_description);
5588}
5589
5590static int pqi_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
5591{
5592 int rc;
5593 int node;
5594 struct pqi_ctrl_info *ctrl_info;
5595
5596 pqi_print_ctrl_info(pdev, id);
5597
5598 if (pqi_disable_device_id_wildcards &&
5599 id->subvendor == PCI_ANY_ID &&
5600 id->subdevice == PCI_ANY_ID) {
5601 dev_warn(&pdev->dev,
5602 "controller not probed because device ID wildcards are disabled\n");
5603 return -ENODEV;
5604 }
5605
5606 if (id->subvendor == PCI_ANY_ID || id->subdevice == PCI_ANY_ID)
5607 dev_warn(&pdev->dev,
5608 "controller device ID matched using wildcards\n");
5609
5610 node = dev_to_node(&pdev->dev);
5611 if (node == NUMA_NO_NODE)
5612 set_dev_node(&pdev->dev, 0);
5613
5614 ctrl_info = pqi_alloc_ctrl_info(node);
5615 if (!ctrl_info) {
5616 dev_err(&pdev->dev,
5617 "failed to allocate controller info block\n");
5618 return -ENOMEM;
5619 }
5620
5621 ctrl_info->pci_dev = pdev;
5622
5623 rc = pqi_pci_init(ctrl_info);
5624 if (rc)
5625 goto error;
5626
5627 rc = pqi_ctrl_init(ctrl_info);
5628 if (rc)
5629 goto error;
5630
5631 return 0;
5632
5633error:
5634 pqi_remove_ctrl(ctrl_info);
5635
5636 return rc;
5637}
5638
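/* PCI remove entry point: tear down the controller bound to this PCI device. */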
5639static void pqi_pci_remove(struct pci_dev *pdev)
5640{
5641 struct pqi_ctrl_info *ctrl_info;
5642
5643 ctrl_info = pci_get_drvdata(pdev);
5644 if (!ctrl_info)
5645 return;
5646
5647 pqi_remove_ctrl(ctrl_info);
5648}
5649
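/*
 * PCI shutdown callback (reboot/power-off path): flush the controller's
 * write cache so no dirty data is lost.
 */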
5650static void pqi_shutdown(struct pci_dev *pdev)
5651{
5652 int rc;
5653 struct pqi_ctrl_info *ctrl_info;
5654
5655 ctrl_info = pci_get_drvdata(pdev);
5656 if (!ctrl_info)
5657 goto error;
5658
5659 /*
5660 * Write all data in the controller's battery-backed cache to
5661 * storage.
5662 */
5663 rc = pqi_flush_cache(ctrl_info);
5664 if (rc == 0)
5665 return;
5666
5667error:
5668 dev_warn(&pdev->dev,
5669 "unable to flush controller cache\n");
5670}
5671
5672/* Define the PCI IDs for the controllers that we support. */
5673static const struct pci_device_id pqi_pci_id_table[] = {
5674 {
5675 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
5676 PCI_VENDOR_ID_ADAPTEC2, 0x0110)
5677 },
5678 {
5679 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
5680 PCI_VENDOR_ID_HP, 0x0600)
5681 },
5682 {
5683 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
5684 PCI_VENDOR_ID_HP, 0x0601)
5685 },
5686 {
5687 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
5688 PCI_VENDOR_ID_HP, 0x0602)
5689 },
5690 {
5691 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
5692 PCI_VENDOR_ID_HP, 0x0603)
5693 },
5694 {
5695 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
5696 PCI_VENDOR_ID_HP, 0x0650)
5697 },
5698 {
5699 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
5700 PCI_VENDOR_ID_HP, 0x0651)
5701 },
5702 {
5703 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
5704 PCI_VENDOR_ID_HP, 0x0652)
5705 },
5706 {
5707 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
5708 PCI_VENDOR_ID_HP, 0x0653)
5709 },
5710 {
5711 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
5712 PCI_VENDOR_ID_HP, 0x0654)
5713 },
5714 {
5715 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
5716 PCI_VENDOR_ID_HP, 0x0655)
5717 },
5718 {
5719 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
5720 PCI_VENDOR_ID_HP, 0x0700)
5721 },
5722 {
5723 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
5724 PCI_VENDOR_ID_HP, 0x0701)
5725 },
5726 {
5727 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
5728 PCI_VENDOR_ID_ADAPTEC2, 0x0800)
5729 },
5730 {
5731 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
5732 PCI_VENDOR_ID_ADAPTEC2, 0x0801)
5733 },
5734 {
5735 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
5736 PCI_VENDOR_ID_ADAPTEC2, 0x0802)
5737 },
5738 {
5739 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
5740 PCI_VENDOR_ID_ADAPTEC2, 0x0803)
5741 },
5742 {
5743 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
5744 PCI_VENDOR_ID_ADAPTEC2, 0x0804)
5745 },
5746 {
5747 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
5748 PCI_VENDOR_ID_ADAPTEC2, 0x0805)
5749 },
5750 {
5751 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
5752 PCI_VENDOR_ID_ADAPTEC2, 0x0900)
5753 },
5754 {
5755 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
5756 PCI_VENDOR_ID_ADAPTEC2, 0x0901)
5757 },
5758 {
5759 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
5760 PCI_VENDOR_ID_ADAPTEC2, 0x0902)
5761 },
5762 {
5763 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
5764 PCI_VENDOR_ID_ADAPTEC2, 0x0903)
5765 },
5766 {
5767 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
5768 PCI_VENDOR_ID_ADAPTEC2, 0x0904)
5769 },
5770 {
5771 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
5772 PCI_VENDOR_ID_ADAPTEC2, 0x0905)
5773 },
5774 {
5775 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
5776 PCI_VENDOR_ID_ADAPTEC2, 0x0906)
5777 },
5778 {
5779 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
5780 PCI_VENDOR_ID_HP, 0x1001)
5781 },
5782 {
5783 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
5784 PCI_VENDOR_ID_HP, 0x1100)
5785 },
5786 {
5787 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
5788 PCI_VENDOR_ID_HP, 0x1101)
5789 },
5790 {
5791 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
5792 PCI_VENDOR_ID_HP, 0x1102)
5793 },
5794 {
5795 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
5796 PCI_VENDOR_ID_HP, 0x1150)
5797 },
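	/* Catch-all entry: claim any subsystem ID on the 0x028f PQI device. */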
5798 {
5799 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
5800 PCI_ANY_ID, PCI_ANY_ID)
5801 },
5802 { 0 }
5803};
5804
5805MODULE_DEVICE_TABLE(pci, pqi_pci_id_table);
5806
5807static struct pci_driver pqi_pci_driver = {
5808 .name = DRIVER_NAME_SHORT,
5809 .id_table = pqi_pci_id_table,
5810 .probe = pqi_pci_probe,
5811 .remove = pqi_pci_remove,
5812 .shutdown = pqi_shutdown,
5813};
5814
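/*
 * Module init: register the SAS transport template first, then the PCI
 * driver; unwind the transport registration if PCI registration fails.
 */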
5815static int __init pqi_init(void)
5816{
5817 int rc;
5818
5819 pr_info(DRIVER_NAME "\n");
5820
5821 pqi_sas_transport_template =
5822 sas_attach_transport(&pqi_sas_transport_functions);
5823 if (!pqi_sas_transport_template)
5824 return -ENODEV;
5825
5826 rc = pci_register_driver(&pqi_pci_driver);
5827 if (rc)
5828 sas_release_transport(pqi_sas_transport_template);
5829
5830 return rc;
5831}
5832
5833static void __exit pqi_cleanup(void)
5834{
5835 pci_unregister_driver(&pqi_pci_driver);
5836 sas_release_transport(pqi_sas_transport_template);
5837}
5838
5839module_init(pqi_init);
5840module_exit(pqi_cleanup);
5841
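/*
 * Compile-time sanity checks: this function is never called; it exists
 * only so that BUILD_BUG_ON() verifies the structure layouts (register
 * maps, IUs, responses) match the offsets and sizes expected by the
 * controller interface. Any drift breaks the build instead of the I/O path.
 */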
5842static void __attribute__((unused)) verify_structures(void)
5843{
5844 BUILD_BUG_ON(offsetof(struct pqi_ctrl_registers,
5845 sis_host_to_ctrl_doorbell) != 0x20);
5846 BUILD_BUG_ON(offsetof(struct pqi_ctrl_registers,
5847 sis_interrupt_mask) != 0x34);
5848 BUILD_BUG_ON(offsetof(struct pqi_ctrl_registers,
5849 sis_ctrl_to_host_doorbell) != 0x9c);
5850 BUILD_BUG_ON(offsetof(struct pqi_ctrl_registers,
5851 sis_ctrl_to_host_doorbell_clear) != 0xa0);
5852 BUILD_BUG_ON(offsetof(struct pqi_ctrl_registers,
5853 sis_driver_scratch) != 0xb0);
5854 BUILD_BUG_ON(offsetof(struct pqi_ctrl_registers,
5855 sis_firmware_status) != 0xbc);
5856 BUILD_BUG_ON(offsetof(struct pqi_ctrl_registers,
5857 sis_mailbox) != 0x1000);
5858 BUILD_BUG_ON(offsetof(struct pqi_ctrl_registers,
5859 pqi_registers) != 0x4000);
5860
5861 BUILD_BUG_ON(offsetof(struct pqi_iu_header,
5862 iu_type) != 0x0);
5863 BUILD_BUG_ON(offsetof(struct pqi_iu_header,
5864 iu_length) != 0x2);
5865 BUILD_BUG_ON(offsetof(struct pqi_iu_header,
5866 response_queue_id) != 0x4);
5867 BUILD_BUG_ON(offsetof(struct pqi_iu_header,
5868 work_area) != 0x6);
5869 BUILD_BUG_ON(sizeof(struct pqi_iu_header) != 0x8);
5870
5871 BUILD_BUG_ON(offsetof(struct pqi_aio_error_info,
5872 status) != 0x0);
5873 BUILD_BUG_ON(offsetof(struct pqi_aio_error_info,
5874 service_response) != 0x1);
5875 BUILD_BUG_ON(offsetof(struct pqi_aio_error_info,
5876 data_present) != 0x2);
5877 BUILD_BUG_ON(offsetof(struct pqi_aio_error_info,
5878 reserved) != 0x3);
5879 BUILD_BUG_ON(offsetof(struct pqi_aio_error_info,
5880 residual_count) != 0x4);
5881 BUILD_BUG_ON(offsetof(struct pqi_aio_error_info,
5882 data_length) != 0x8);
5883 BUILD_BUG_ON(offsetof(struct pqi_aio_error_info,
5884 reserved1) != 0xa);
5885 BUILD_BUG_ON(offsetof(struct pqi_aio_error_info,
5886 data) != 0xc);
5887 BUILD_BUG_ON(sizeof(struct pqi_aio_error_info) != 0x10c);
5888
5889 BUILD_BUG_ON(offsetof(struct pqi_raid_error_info,
5890 data_in_result) != 0x0);
5891 BUILD_BUG_ON(offsetof(struct pqi_raid_error_info,
5892 data_out_result) != 0x1);
5893 BUILD_BUG_ON(offsetof(struct pqi_raid_error_info,
5894 reserved) != 0x2);
5895 BUILD_BUG_ON(offsetof(struct pqi_raid_error_info,
5896 status) != 0x5);
5897 BUILD_BUG_ON(offsetof(struct pqi_raid_error_info,
5898 status_qualifier) != 0x6);
5899 BUILD_BUG_ON(offsetof(struct pqi_raid_error_info,
5900 sense_data_length) != 0x8);
5901 BUILD_BUG_ON(offsetof(struct pqi_raid_error_info,
5902 response_data_length) != 0xa);
5903 BUILD_BUG_ON(offsetof(struct pqi_raid_error_info,
5904 data_in_transferred) != 0xc);
5905 BUILD_BUG_ON(offsetof(struct pqi_raid_error_info,
5906 data_out_transferred) != 0x10);
5907 BUILD_BUG_ON(offsetof(struct pqi_raid_error_info,
5908 data) != 0x14);
5909 BUILD_BUG_ON(sizeof(struct pqi_raid_error_info) != 0x114);
5910
5911 BUILD_BUG_ON(offsetof(struct pqi_device_registers,
5912 signature) != 0x0);
5913 BUILD_BUG_ON(offsetof(struct pqi_device_registers,
5914 function_and_status_code) != 0x8);
5915 BUILD_BUG_ON(offsetof(struct pqi_device_registers,
5916 max_admin_iq_elements) != 0x10);
5917 BUILD_BUG_ON(offsetof(struct pqi_device_registers,
5918 max_admin_oq_elements) != 0x11);
5919 BUILD_BUG_ON(offsetof(struct pqi_device_registers,
5920 admin_iq_element_length) != 0x12);
5921 BUILD_BUG_ON(offsetof(struct pqi_device_registers,
5922 admin_oq_element_length) != 0x13);
5923 BUILD_BUG_ON(offsetof(struct pqi_device_registers,
5924 max_reset_timeout) != 0x14);
5925 BUILD_BUG_ON(offsetof(struct pqi_device_registers,
5926 legacy_intx_status) != 0x18);
5927 BUILD_BUG_ON(offsetof(struct pqi_device_registers,
5928 legacy_intx_mask_set) != 0x1c);
5929 BUILD_BUG_ON(offsetof(struct pqi_device_registers,
5930 legacy_intx_mask_clear) != 0x20);
5931 BUILD_BUG_ON(offsetof(struct pqi_device_registers,
5932 device_status) != 0x40);
5933 BUILD_BUG_ON(offsetof(struct pqi_device_registers,
5934 admin_iq_pi_offset) != 0x48);
5935 BUILD_BUG_ON(offsetof(struct pqi_device_registers,
5936 admin_oq_ci_offset) != 0x50);
5937 BUILD_BUG_ON(offsetof(struct pqi_device_registers,
5938 admin_iq_element_array_addr) != 0x58);
5939 BUILD_BUG_ON(offsetof(struct pqi_device_registers,
5940 admin_oq_element_array_addr) != 0x60);
5941 BUILD_BUG_ON(offsetof(struct pqi_device_registers,
5942 admin_iq_ci_addr) != 0x68);
5943 BUILD_BUG_ON(offsetof(struct pqi_device_registers,
5944 admin_oq_pi_addr) != 0x70);
5945 BUILD_BUG_ON(offsetof(struct pqi_device_registers,
5946 admin_iq_num_elements) != 0x78);
5947 BUILD_BUG_ON(offsetof(struct pqi_device_registers,
5948 admin_oq_num_elements) != 0x79);
5949 BUILD_BUG_ON(offsetof(struct pqi_device_registers,
5950 admin_queue_int_msg_num) != 0x7a);
5951 BUILD_BUG_ON(offsetof(struct pqi_device_registers,
5952 device_error) != 0x80);
5953 BUILD_BUG_ON(offsetof(struct pqi_device_registers,
5954 error_details) != 0x88);
5955 BUILD_BUG_ON(offsetof(struct pqi_device_registers,
5956 device_reset) != 0x90);
5957 BUILD_BUG_ON(offsetof(struct pqi_device_registers,
5958 power_action) != 0x94);
5959 BUILD_BUG_ON(sizeof(struct pqi_device_registers) != 0x100);
5960
5961 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
5962 header.iu_type) != 0);
5963 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
5964 header.iu_length) != 2);
5965 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
5966 header.work_area) != 6);
5967 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
5968 request_id) != 8);
5969 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
5970 function_code) != 10);
5971 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
5972 data.report_device_capability.buffer_length) != 44);
5973 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
5974 data.report_device_capability.sg_descriptor) != 48);
5975 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
5976 data.create_operational_iq.queue_id) != 12);
5977 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
5978 data.create_operational_iq.element_array_addr) != 16);
5979 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
5980 data.create_operational_iq.ci_addr) != 24);
5981 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
5982 data.create_operational_iq.num_elements) != 32);
5983 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
5984 data.create_operational_iq.element_length) != 34);
5985 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
5986 data.create_operational_iq.queue_protocol) != 36);
5987 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
5988 data.create_operational_oq.queue_id) != 12);
5989 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
5990 data.create_operational_oq.element_array_addr) != 16);
5991 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
5992 data.create_operational_oq.pi_addr) != 24);
5993 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
5994 data.create_operational_oq.num_elements) != 32);
5995 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
5996 data.create_operational_oq.element_length) != 34);
5997 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
5998 data.create_operational_oq.queue_protocol) != 36);
5999 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
6000 data.create_operational_oq.int_msg_num) != 40);
6001 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
6002 data.create_operational_oq.coalescing_count) != 42);
6003 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
6004 data.create_operational_oq.min_coalescing_time) != 44);
6005 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
6006 data.create_operational_oq.max_coalescing_time) != 48);
6007 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
6008 data.delete_operational_queue.queue_id) != 12);
6009 BUILD_BUG_ON(sizeof(struct pqi_general_admin_request) != 64);
6010 BUILD_BUG_ON(FIELD_SIZEOF(struct pqi_general_admin_request,
6011 data.create_operational_iq) != 64 - 11);
6012 BUILD_BUG_ON(FIELD_SIZEOF(struct pqi_general_admin_request,
6013 data.create_operational_oq) != 64 - 11);
6014 BUILD_BUG_ON(FIELD_SIZEOF(struct pqi_general_admin_request,
6015 data.delete_operational_queue) != 64 - 11);
6016
6017 BUILD_BUG_ON(offsetof(struct pqi_general_admin_response,
6018 header.iu_type) != 0);
6019 BUILD_BUG_ON(offsetof(struct pqi_general_admin_response,
6020 header.iu_length) != 2);
6021 BUILD_BUG_ON(offsetof(struct pqi_general_admin_response,
6022 header.work_area) != 6);
6023 BUILD_BUG_ON(offsetof(struct pqi_general_admin_response,
6024 request_id) != 8);
6025 BUILD_BUG_ON(offsetof(struct pqi_general_admin_response,
6026 function_code) != 10);
6027 BUILD_BUG_ON(offsetof(struct pqi_general_admin_response,
6028 status) != 11);
6029 BUILD_BUG_ON(offsetof(struct pqi_general_admin_response,
6030 data.create_operational_iq.status_descriptor) != 12);
6031 BUILD_BUG_ON(offsetof(struct pqi_general_admin_response,
6032 data.create_operational_iq.iq_pi_offset) != 16);
6033 BUILD_BUG_ON(offsetof(struct pqi_general_admin_response,
6034 data.create_operational_oq.status_descriptor) != 12);
6035 BUILD_BUG_ON(offsetof(struct pqi_general_admin_response,
6036 data.create_operational_oq.oq_ci_offset) != 16);
6037 BUILD_BUG_ON(sizeof(struct pqi_general_admin_response) != 64);
6038
6039 BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
6040 header.iu_type) != 0);
6041 BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
6042 header.iu_length) != 2);
6043 BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
6044 header.response_queue_id) != 4);
6045 BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
6046 header.work_area) != 6);
6047 BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
6048 request_id) != 8);
6049 BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
6050 nexus_id) != 10);
6051 BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
6052 buffer_length) != 12);
6053 BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
6054 lun_number) != 16);
6055 BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
6056 protocol_specific) != 24);
6057 BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
6058 error_index) != 27);
6059 BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
6060 cdb) != 32);
6061 BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
6062 sg_descriptors) != 64);
6063 BUILD_BUG_ON(sizeof(struct pqi_raid_path_request) !=
6064 PQI_OPERATIONAL_IQ_ELEMENT_LENGTH);
6065
6066 BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
6067 header.iu_type) != 0);
6068 BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
6069 header.iu_length) != 2);
6070 BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
6071 header.response_queue_id) != 4);
6072 BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
6073 header.work_area) != 6);
6074 BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
6075 request_id) != 8);
6076 BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
6077 nexus_id) != 12);
6078 BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
6079 buffer_length) != 16);
6080 BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
6081 data_encryption_key_index) != 22);
6082 BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
6083 encrypt_tweak_lower) != 24);
6084 BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
6085 encrypt_tweak_upper) != 28);
6086 BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
6087 cdb) != 32);
6088 BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
6089 error_index) != 48);
6090 BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
6091 num_sg_descriptors) != 50);
6092 BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
6093 cdb_length) != 51);
6094 BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
6095 lun_number) != 52);
6096 BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
6097 sg_descriptors) != 64);
6098 BUILD_BUG_ON(sizeof(struct pqi_aio_path_request) !=
6099 PQI_OPERATIONAL_IQ_ELEMENT_LENGTH);
6100
6101 BUILD_BUG_ON(offsetof(struct pqi_io_response,
6102 header.iu_type) != 0);
6103 BUILD_BUG_ON(offsetof(struct pqi_io_response,
6104 header.iu_length) != 2);
6105 BUILD_BUG_ON(offsetof(struct pqi_io_response,
6106 request_id) != 8);
6107 BUILD_BUG_ON(offsetof(struct pqi_io_response,
6108 error_index) != 10);
6109
6110 BUILD_BUG_ON(offsetof(struct pqi_general_management_request,
6111 header.iu_type) != 0);
6112 BUILD_BUG_ON(offsetof(struct pqi_general_management_request,
6113 header.iu_length) != 2);
6114 BUILD_BUG_ON(offsetof(struct pqi_general_management_request,
6115 header.response_queue_id) != 4);
6116 BUILD_BUG_ON(offsetof(struct pqi_general_management_request,
6117 request_id) != 8);
6118 BUILD_BUG_ON(offsetof(struct pqi_general_management_request,
6119 data.report_event_configuration.buffer_length) != 12);
6120 BUILD_BUG_ON(offsetof(struct pqi_general_management_request,
6121 data.report_event_configuration.sg_descriptors) != 16);
6122 BUILD_BUG_ON(offsetof(struct pqi_general_management_request,
6123 data.set_event_configuration.global_event_oq_id) != 10);
6124 BUILD_BUG_ON(offsetof(struct pqi_general_management_request,
6125 data.set_event_configuration.buffer_length) != 12);
6126 BUILD_BUG_ON(offsetof(struct pqi_general_management_request,
6127 data.set_event_configuration.sg_descriptors) != 16);
6128
6129 BUILD_BUG_ON(offsetof(struct pqi_iu_layer_descriptor,
6130 max_inbound_iu_length) != 6);
6131 BUILD_BUG_ON(offsetof(struct pqi_iu_layer_descriptor,
6132 max_outbound_iu_length) != 14);
6133 BUILD_BUG_ON(sizeof(struct pqi_iu_layer_descriptor) != 16);
6134
6135 BUILD_BUG_ON(offsetof(struct pqi_device_capability,
6136 data_length) != 0);
6137 BUILD_BUG_ON(offsetof(struct pqi_device_capability,
6138 iq_arbitration_priority_support_bitmask) != 8);
6139 BUILD_BUG_ON(offsetof(struct pqi_device_capability,
6140 maximum_aw_a) != 9);
6141 BUILD_BUG_ON(offsetof(struct pqi_device_capability,
6142 maximum_aw_b) != 10);
6143 BUILD_BUG_ON(offsetof(struct pqi_device_capability,
6144 maximum_aw_c) != 11);
6145 BUILD_BUG_ON(offsetof(struct pqi_device_capability,
6146 max_inbound_queues) != 16);
6147 BUILD_BUG_ON(offsetof(struct pqi_device_capability,
6148 max_elements_per_iq) != 18);
6149 BUILD_BUG_ON(offsetof(struct pqi_device_capability,
6150 max_iq_element_length) != 24);
6151 BUILD_BUG_ON(offsetof(struct pqi_device_capability,
6152 min_iq_element_length) != 26);
6153 BUILD_BUG_ON(offsetof(struct pqi_device_capability,
6154 max_outbound_queues) != 30);
6155 BUILD_BUG_ON(offsetof(struct pqi_device_capability,
6156 max_elements_per_oq) != 32);
6157 BUILD_BUG_ON(offsetof(struct pqi_device_capability,
6158 intr_coalescing_time_granularity) != 34);
6159 BUILD_BUG_ON(offsetof(struct pqi_device_capability,
6160 max_oq_element_length) != 36);
6161 BUILD_BUG_ON(offsetof(struct pqi_device_capability,
6162 min_oq_element_length) != 38);
6163 BUILD_BUG_ON(offsetof(struct pqi_device_capability,
6164 iu_layer_descriptors) != 64);
6165 BUILD_BUG_ON(sizeof(struct pqi_device_capability) != 576);
6166
6167 BUILD_BUG_ON(offsetof(struct pqi_event_descriptor,
6168 event_type) != 0);
6169 BUILD_BUG_ON(offsetof(struct pqi_event_descriptor,
6170 oq_id) != 2);
6171 BUILD_BUG_ON(sizeof(struct pqi_event_descriptor) != 4);
6172
6173 BUILD_BUG_ON(offsetof(struct pqi_event_config,
6174 num_event_descriptors) != 2);
6175 BUILD_BUG_ON(offsetof(struct pqi_event_config,
6176 descriptors) != 4);
6177
6178 BUILD_BUG_ON(offsetof(struct pqi_event_response,
6179 header.iu_type) != 0);
6180 BUILD_BUG_ON(offsetof(struct pqi_event_response,
6181 header.iu_length) != 2);
6182 BUILD_BUG_ON(offsetof(struct pqi_event_response,
6183 event_type) != 8);
6184 BUILD_BUG_ON(offsetof(struct pqi_event_response,
6185 event_id) != 10);
6186 BUILD_BUG_ON(offsetof(struct pqi_event_response,
6187 additional_event_id) != 12);
6188 BUILD_BUG_ON(offsetof(struct pqi_event_response,
6189 data) != 16);
6190 BUILD_BUG_ON(sizeof(struct pqi_event_response) != 32);
6191
6192 BUILD_BUG_ON(offsetof(struct pqi_event_acknowledge_request,
6193 header.iu_type) != 0);
6194 BUILD_BUG_ON(offsetof(struct pqi_event_acknowledge_request,
6195 header.iu_length) != 2);
6196 BUILD_BUG_ON(offsetof(struct pqi_event_acknowledge_request,
6197 event_type) != 8);
6198 BUILD_BUG_ON(offsetof(struct pqi_event_acknowledge_request,
6199 event_id) != 10);
6200 BUILD_BUG_ON(offsetof(struct pqi_event_acknowledge_request,
6201 additional_event_id) != 12);
6202 BUILD_BUG_ON(sizeof(struct pqi_event_acknowledge_request) != 16);
6203
6204 BUILD_BUG_ON(offsetof(struct pqi_task_management_request,
6205 header.iu_type) != 0);
6206 BUILD_BUG_ON(offsetof(struct pqi_task_management_request,
6207 header.iu_length) != 2);
6208 BUILD_BUG_ON(offsetof(struct pqi_task_management_request,
6209 request_id) != 8);
6210 BUILD_BUG_ON(offsetof(struct pqi_task_management_request,
6211 nexus_id) != 10);
6212 BUILD_BUG_ON(offsetof(struct pqi_task_management_request,
6213 lun_number) != 16);
6214 BUILD_BUG_ON(offsetof(struct pqi_task_management_request,
6215 protocol_specific) != 24);
6216 BUILD_BUG_ON(offsetof(struct pqi_task_management_request,
6217 outbound_queue_id_to_manage) != 26);
6218 BUILD_BUG_ON(offsetof(struct pqi_task_management_request,
6219 request_id_to_manage) != 28);
6220 BUILD_BUG_ON(offsetof(struct pqi_task_management_request,
6221 task_management_function) != 30);
6222 BUILD_BUG_ON(sizeof(struct pqi_task_management_request) != 32);
6223
6224 BUILD_BUG_ON(offsetof(struct pqi_task_management_response,
6225 header.iu_type) != 0);
6226 BUILD_BUG_ON(offsetof(struct pqi_task_management_response,
6227 header.iu_length) != 2);
6228 BUILD_BUG_ON(offsetof(struct pqi_task_management_response,
6229 request_id) != 8);
6230 BUILD_BUG_ON(offsetof(struct pqi_task_management_response,
6231 nexus_id) != 10);
6232 BUILD_BUG_ON(offsetof(struct pqi_task_management_response,
6233 additional_response_info) != 12);
6234 BUILD_BUG_ON(offsetof(struct pqi_task_management_response,
6235 response_code) != 15);
6236 BUILD_BUG_ON(sizeof(struct pqi_task_management_response) != 16);
6237
6238 BUILD_BUG_ON(offsetof(struct bmic_identify_controller,
6239 configured_logical_drive_count) != 0);
6240 BUILD_BUG_ON(offsetof(struct bmic_identify_controller,
6241 configuration_signature) != 1);
6242 BUILD_BUG_ON(offsetof(struct bmic_identify_controller,
6243 firmware_version) != 5);
6244 BUILD_BUG_ON(offsetof(struct bmic_identify_controller,
6245 extended_logical_unit_count) != 154);
6246 BUILD_BUG_ON(offsetof(struct bmic_identify_controller,
6247 firmware_build_number) != 190);
6248 BUILD_BUG_ON(offsetof(struct bmic_identify_controller,
6249 controller_mode) != 292);
6250
6251 BUILD_BUG_ON(PQI_ADMIN_IQ_NUM_ELEMENTS > 255);
6252 BUILD_BUG_ON(PQI_ADMIN_OQ_NUM_ELEMENTS > 255);
6253 BUILD_BUG_ON(PQI_ADMIN_IQ_ELEMENT_LENGTH %
6254 PQI_QUEUE_ELEMENT_LENGTH_ALIGNMENT != 0);
6255 BUILD_BUG_ON(PQI_ADMIN_OQ_ELEMENT_LENGTH %
6256 PQI_QUEUE_ELEMENT_LENGTH_ALIGNMENT != 0);
6257 BUILD_BUG_ON(PQI_OPERATIONAL_IQ_ELEMENT_LENGTH > 1048560);
6258 BUILD_BUG_ON(PQI_OPERATIONAL_IQ_ELEMENT_LENGTH %
6259 PQI_QUEUE_ELEMENT_LENGTH_ALIGNMENT != 0);
6260 BUILD_BUG_ON(PQI_OPERATIONAL_OQ_ELEMENT_LENGTH > 1048560);
6261 BUILD_BUG_ON(PQI_OPERATIONAL_OQ_ELEMENT_LENGTH %
6262 PQI_QUEUE_ELEMENT_LENGTH_ALIGNMENT != 0);
6263
6264 BUILD_BUG_ON(PQI_RESERVED_IO_SLOTS >= PQI_MAX_OUTSTANDING_REQUESTS);
6265}