1/*
2 * driver for Microsemi PQI-based storage controllers
3 * Copyright (c) 2016-2017 Microsemi Corporation
4 * Copyright (c) 2016 PMC-Sierra, Inc.
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; version 2 of the License.
9 *
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
13 * NON INFRINGEMENT. See the GNU General Public License for more details.
14 *
15 * Questions/Comments/Bugfixes to esc.storagedev@microsemi.com
16 *
17 */
18
19#include <linux/module.h>
20#include <linux/kernel.h>
21#include <linux/pci.h>
22#include <linux/delay.h>
23#include <linux/interrupt.h>
24#include <linux/sched.h>
25#include <linux/rtc.h>
26#include <linux/bcd.h>
27#include <linux/reboot.h>
28#include <linux/cciss_ioctl.h>
29#include <linux/blk-mq-pci.h>
30#include <scsi/scsi_host.h>
31#include <scsi/scsi_cmnd.h>
32#include <scsi/scsi_device.h>
33#include <scsi/scsi_eh.h>
34#include <scsi/scsi_transport_sas.h>
35#include <asm/unaligned.h>
36#include "smartpqi.h"
37#include "smartpqi_sis.h"
38
39#if !defined(BUILD_TIMESTAMP)
40#define BUILD_TIMESTAMP
41#endif
42
43#define DRIVER_VERSION "0.9.13-370"
44#define DRIVER_MAJOR 0
45#define DRIVER_MINOR 9
46#define DRIVER_RELEASE 13
47#define DRIVER_REVISION 370
48
49#define DRIVER_NAME "Microsemi PQI Driver (v" DRIVER_VERSION ")"
50#define DRIVER_NAME_SHORT "smartpqi"
51
52#define PQI_EXTRA_SGL_MEMORY (12 * sizeof(struct pqi_sg_descriptor))
53
54MODULE_AUTHOR("Microsemi");
55MODULE_DESCRIPTION("Driver for Microsemi Smart Family Controller version "
56 DRIVER_VERSION);
57MODULE_SUPPORTED_DEVICE("Microsemi Smart Family Controllers");
58MODULE_VERSION(DRIVER_VERSION);
59MODULE_LICENSE("GPL");
60
61static char *hpe_branded_controller = "HPE Smart Array Controller";
62static char *microsemi_branded_controller = "Microsemi Smart Family Controller";
63
64static void pqi_perform_lockup_action(void);
65static void pqi_take_ctrl_offline(struct pqi_ctrl_info *ctrl_info);
66static void pqi_complete_all_queued_raid_bypass_retries(
67 struct pqi_ctrl_info *ctrl_info, int result);
68static void pqi_retry_raid_bypass_requests(struct pqi_ctrl_info *ctrl_info);
69static int pqi_scan_scsi_devices(struct pqi_ctrl_info *ctrl_info);
70static void pqi_scan_start(struct Scsi_Host *shost);
71static void pqi_start_io(struct pqi_ctrl_info *ctrl_info,
72 struct pqi_queue_group *queue_group, enum pqi_io_path path,
73 struct pqi_io_request *io_request);
74static int pqi_submit_raid_request_synchronous(struct pqi_ctrl_info *ctrl_info,
75 struct pqi_iu_header *request, unsigned int flags,
76 struct pqi_raid_error_info *error_info, unsigned long timeout_msecs);
77static int pqi_aio_submit_io(struct pqi_ctrl_info *ctrl_info,
78 struct scsi_cmnd *scmd, u32 aio_handle, u8 *cdb,
79 unsigned int cdb_length, struct pqi_queue_group *queue_group,
80 struct pqi_encryption_info *encryption_info, bool raid_bypass);
81
82/* for flags argument to pqi_submit_raid_request_synchronous() */
83#define PQI_SYNC_FLAGS_INTERRUPTABLE 0x1
84
85static struct scsi_transport_template *pqi_sas_transport_template;
86
87static atomic_t pqi_controller_count = ATOMIC_INIT(0);
88
89enum pqi_lockup_action {
90 NONE,
91 REBOOT,
92 PANIC
93};
94
95static enum pqi_lockup_action pqi_lockup_action = NONE;
96
97static struct {
98 enum pqi_lockup_action action;
99 char *name;
100} pqi_lockup_actions[] = {
101 {
102 .action = NONE,
103 .name = "none",
104 },
105 {
106 .action = REBOOT,
107 .name = "reboot",
108 },
109 {
110 .action = PANIC,
111 .name = "panic",
112 },
113};
114
115static unsigned int pqi_supported_event_types[] = {
116 PQI_EVENT_TYPE_HOTPLUG,
117 PQI_EVENT_TYPE_HARDWARE,
118 PQI_EVENT_TYPE_PHYSICAL_DEVICE,
119 PQI_EVENT_TYPE_LOGICAL_DEVICE,
120 PQI_EVENT_TYPE_AIO_STATE_CHANGE,
121 PQI_EVENT_TYPE_AIO_CONFIG_CHANGE,
122};
123
124static int pqi_disable_device_id_wildcards;
125module_param_named(disable_device_id_wildcards,
126 pqi_disable_device_id_wildcards, int, 0644);
127MODULE_PARM_DESC(disable_device_id_wildcards,
128 "Disable device ID wildcards.");
129
130static char *pqi_lockup_action_param;
131module_param_named(lockup_action,
132 pqi_lockup_action_param, charp, 0644);
133MODULE_PARM_DESC(lockup_action, "Action to take when controller locked up.\n"
134 "\t\tSupported: none, reboot, panic\n"
135 "\t\tDefault: none");
136
137static char *raid_levels[] = {
138 "RAID-0",
139 "RAID-4",
140 "RAID-1(1+0)",
141 "RAID-5",
142 "RAID-5+1",
143 "RAID-ADG",
144 "RAID-1(ADM)",
145};
146
147static char *pqi_raid_level_to_string(u8 raid_level)
148{
149 if (raid_level < ARRAY_SIZE(raid_levels))
150 return raid_levels[raid_level];
151
152 return "";
153}
154
155#define SA_RAID_0 0
156#define SA_RAID_4 1
157#define SA_RAID_1 2 /* also used for RAID 10 */
158#define SA_RAID_5 3 /* also used for RAID 50 */
159#define SA_RAID_51 4
160#define SA_RAID_6 5 /* also used for RAID 60 */
161#define SA_RAID_ADM 6 /* also used for RAID 1+0 ADM */
162#define SA_RAID_MAX SA_RAID_ADM
163#define SA_RAID_UNKNOWN 0xff
164
165static inline void pqi_scsi_done(struct scsi_cmnd *scmd)
166{
167 pqi_prep_for_scsi_done(scmd);
168 scmd->scsi_done(scmd);
169}
170
171static inline bool pqi_scsi3addr_equal(u8 *scsi3addr1, u8 *scsi3addr2)
172{
173 return memcmp(scsi3addr1, scsi3addr2, 8) == 0;
174}
175
176static inline struct pqi_ctrl_info *shost_to_hba(struct Scsi_Host *shost)
177{
178 void *hostdata = shost_priv(shost);
179
180 return *((struct pqi_ctrl_info **)hostdata);
181}
182
183static inline bool pqi_is_logical_device(struct pqi_scsi_dev *device)
184{
185 return !device->is_physical_device;
186}
187
188static inline bool pqi_is_external_raid_addr(u8 *scsi3addr)
189{
190 return scsi3addr[2] != 0;
191}
192
193static inline bool pqi_ctrl_offline(struct pqi_ctrl_info *ctrl_info)
194{
195 return !ctrl_info->controller_online;
196}
197
198static inline void pqi_check_ctrl_health(struct pqi_ctrl_info *ctrl_info)
199{
200 if (ctrl_info->controller_online)
201 if (!sis_is_firmware_running(ctrl_info))
202 pqi_take_ctrl_offline(ctrl_info);
203}
204
205static inline bool pqi_is_hba_lunid(u8 *scsi3addr)
206{
207 return pqi_scsi3addr_equal(scsi3addr, RAID_CTLR_LUNID);
208}
209
210static inline enum pqi_ctrl_mode pqi_get_ctrl_mode(
211 struct pqi_ctrl_info *ctrl_info)
212{
213 return sis_read_driver_scratch(ctrl_info);
214}
215
216static inline void pqi_save_ctrl_mode(struct pqi_ctrl_info *ctrl_info,
217 enum pqi_ctrl_mode mode)
218{
219 sis_write_driver_scratch(ctrl_info, mode);
220}
221
222#define PQI_RESCAN_WORK_INTERVAL (10 * HZ)
223static inline void pqi_ctrl_block_requests(struct pqi_ctrl_info *ctrl_info)
224{
225 ctrl_info->block_requests = true;
226 scsi_block_requests(ctrl_info->scsi_host);
227}
228
229static inline void pqi_ctrl_unblock_requests(struct pqi_ctrl_info *ctrl_info)
230{
231 ctrl_info->block_requests = false;
232 wake_up_all(&ctrl_info->block_requests_wait);
233 pqi_retry_raid_bypass_requests(ctrl_info);
234 scsi_unblock_requests(ctrl_info->scsi_host);
235}
236
237static inline bool pqi_ctrl_blocked(struct pqi_ctrl_info *ctrl_info)
238{
239 return ctrl_info->block_requests;
240}
241
242static unsigned long pqi_wait_if_ctrl_blocked(struct pqi_ctrl_info *ctrl_info,
243 unsigned long timeout_msecs)
244{
245 unsigned long remaining_msecs;
246
247 if (!pqi_ctrl_blocked(ctrl_info))
248 return timeout_msecs;
249
250 atomic_inc(&ctrl_info->num_blocked_threads);
251
252 if (timeout_msecs == NO_TIMEOUT) {
253 wait_event(ctrl_info->block_requests_wait,
254 !pqi_ctrl_blocked(ctrl_info));
255 remaining_msecs = timeout_msecs;
256 } else {
257 unsigned long remaining_jiffies;
258
259 remaining_jiffies =
260 wait_event_timeout(ctrl_info->block_requests_wait,
261 !pqi_ctrl_blocked(ctrl_info),
262 msecs_to_jiffies(timeout_msecs));
263 remaining_msecs = jiffies_to_msecs(remaining_jiffies);
264 }
265
266 atomic_dec(&ctrl_info->num_blocked_threads);
267
268 return remaining_msecs;
269}
270
271static inline void pqi_ctrl_busy(struct pqi_ctrl_info *ctrl_info)
272{
273 atomic_inc(&ctrl_info->num_busy_threads);
274}
275
276static inline void pqi_ctrl_unbusy(struct pqi_ctrl_info *ctrl_info)
277{
278 atomic_dec(&ctrl_info->num_busy_threads);
279}
280
281static inline void pqi_ctrl_wait_until_quiesced(struct pqi_ctrl_info *ctrl_info)
282{
283 while (atomic_read(&ctrl_info->num_busy_threads) >
284 atomic_read(&ctrl_info->num_blocked_threads))
285 usleep_range(1000, 2000);
286}
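/*
 * Note (annotation on the code above): quiescing is considered complete once
 * every thread that entered the request path (counted by num_busy_threads)
 * is parked in pqi_wait_if_ctrl_blocked() (counted by num_blocked_threads),
 * i.e. busy <= blocked means no request is still executing outside the wait.
 */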
287
288static inline bool pqi_device_offline(struct pqi_scsi_dev *device)
289{
290 return device->device_offline;
291}
292
293static inline void pqi_device_reset_start(struct pqi_scsi_dev *device)
294{
295 device->in_reset = true;
296}
297
298static inline void pqi_device_reset_done(struct pqi_scsi_dev *device)
299{
300 device->in_reset = false;
301}
302
303static inline bool pqi_device_in_reset(struct pqi_scsi_dev *device)
304{
305 return device->in_reset;
306}
307
308static inline void pqi_schedule_rescan_worker(struct pqi_ctrl_info *ctrl_info)
309{
310 schedule_delayed_work(&ctrl_info->rescan_work,
311 PQI_RESCAN_WORK_INTERVAL);
312}
313
314static inline void pqi_cancel_rescan_worker(struct pqi_ctrl_info *ctrl_info)
315{
316 cancel_delayed_work_sync(&ctrl_info->rescan_work);
317}
318
319static inline u32 pqi_read_heartbeat_counter(struct pqi_ctrl_info *ctrl_info)
320{
321 if (!ctrl_info->heartbeat_counter)
322 return 0;
323
324 return readl(ctrl_info->heartbeat_counter);
325}
326
327static int pqi_map_single(struct pci_dev *pci_dev,
328 struct pqi_sg_descriptor *sg_descriptor, void *buffer,
329 size_t buffer_length, int data_direction)
330{
331 dma_addr_t bus_address;
332
333 if (!buffer || buffer_length == 0 || data_direction == PCI_DMA_NONE)
334 return 0;
335
336 bus_address = pci_map_single(pci_dev, buffer, buffer_length,
337 data_direction);
338 if (pci_dma_mapping_error(pci_dev, bus_address))
339 return -ENOMEM;
340
341 put_unaligned_le64((u64)bus_address, &sg_descriptor->address);
342 put_unaligned_le32(buffer_length, &sg_descriptor->length);
343 put_unaligned_le32(CISS_SG_LAST, &sg_descriptor->flags);
344
345 return 0;
346}
347
348static void pqi_pci_unmap(struct pci_dev *pci_dev,
349 struct pqi_sg_descriptor *descriptors, int num_descriptors,
350 int data_direction)
351{
352 int i;
353
354 if (data_direction == PCI_DMA_NONE)
355 return;
356
357 for (i = 0; i < num_descriptors; i++)
358 pci_unmap_single(pci_dev,
359 (dma_addr_t)get_unaligned_le64(&descriptors[i].address),
360 get_unaligned_le32(&descriptors[i].length),
361 data_direction);
362}
363
364static int pqi_build_raid_path_request(struct pqi_ctrl_info *ctrl_info,
365 struct pqi_raid_path_request *request, u8 cmd,
366 u8 *scsi3addr, void *buffer, size_t buffer_length,
367 u16 vpd_page, int *pci_direction)
368{
369 u8 *cdb;
370 int pci_dir;
371
372 memset(request, 0, sizeof(*request));
373
374 request->header.iu_type = PQI_REQUEST_IU_RAID_PATH_IO;
375 put_unaligned_le16(offsetof(struct pqi_raid_path_request,
376 sg_descriptors[1]) - PQI_REQUEST_HEADER_LENGTH,
377 &request->header.iu_length);
378 put_unaligned_le32(buffer_length, &request->buffer_length);
379 memcpy(request->lun_number, scsi3addr, sizeof(request->lun_number));
380 request->task_attribute = SOP_TASK_ATTRIBUTE_SIMPLE;
381 request->additional_cdb_bytes_usage = SOP_ADDITIONAL_CDB_BYTES_0;
382
383 cdb = request->cdb;
384
385 switch (cmd) {
386 case INQUIRY:
387 request->data_direction = SOP_READ_FLAG;
388 cdb[0] = INQUIRY;
389 if (vpd_page & VPD_PAGE) {
390 cdb[1] = 0x1;
391 cdb[2] = (u8)vpd_page;
392 }
393 cdb[4] = (u8)buffer_length;
394 break;
395 case CISS_REPORT_LOG:
396 case CISS_REPORT_PHYS:
397 request->data_direction = SOP_READ_FLAG;
398 cdb[0] = cmd;
399 if (cmd == CISS_REPORT_PHYS)
400 cdb[1] = CISS_REPORT_PHYS_EXTENDED;
401 else
402 cdb[1] = CISS_REPORT_LOG_EXTENDED;
403 put_unaligned_be32(buffer_length, &cdb[6]);
404 break;
405 case CISS_GET_RAID_MAP:
406 request->data_direction = SOP_READ_FLAG;
407 cdb[0] = CISS_READ;
408 cdb[1] = CISS_GET_RAID_MAP;
409 put_unaligned_be32(buffer_length, &cdb[6]);
410 break;
411 case SA_CACHE_FLUSH:
412 request->data_direction = SOP_WRITE_FLAG;
413 cdb[0] = BMIC_WRITE;
414 cdb[6] = BMIC_CACHE_FLUSH;
415 put_unaligned_be16(buffer_length, &cdb[7]);
416 break;
417 case BMIC_IDENTIFY_CONTROLLER:
418 case BMIC_IDENTIFY_PHYSICAL_DEVICE:
419 request->data_direction = SOP_READ_FLAG;
420 cdb[0] = BMIC_READ;
421 cdb[6] = cmd;
422 put_unaligned_be16(buffer_length, &cdb[7]);
423 break;
424 case BMIC_WRITE_HOST_WELLNESS:
425 request->data_direction = SOP_WRITE_FLAG;
426 cdb[0] = BMIC_WRITE;
427 cdb[6] = cmd;
428 put_unaligned_be16(buffer_length, &cdb[7]);
429 break;
430 default:
431 dev_err(&ctrl_info->pci_dev->dev, "unknown command 0x%c\n",
432 cmd);
433 break;
434 }
435
436 switch (request->data_direction) {
437 case SOP_READ_FLAG:
438 pci_dir = PCI_DMA_FROMDEVICE;
439 break;
440 case SOP_WRITE_FLAG:
441 pci_dir = PCI_DMA_TODEVICE;
442 break;
443 case SOP_NO_DIRECTION_FLAG:
444 pci_dir = PCI_DMA_NONE;
445 break;
446 default:
447 pci_dir = PCI_DMA_BIDIRECTIONAL;
448 break;
449 }
450
451 *pci_direction = pci_dir;
452
453 return pqi_map_single(ctrl_info->pci_dev, &request->sg_descriptors[0],
454 buffer, buffer_length, pci_dir);
455}
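/*
 * Note (annotation): pqi_build_raid_path_request() fills in a RAID-path IU
 * for the handful of CISS/BMIC commands the driver issues internally and
 * DMA-maps the single data buffer via pqi_map_single().  Every caller in
 * this file follows the same pattern: submit the request synchronously,
 * then undo the mapping with pqi_pci_unmap() using *pci_direction.
 */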
456
457static inline void pqi_reinit_io_request(struct pqi_io_request *io_request)
458{
459 io_request->scmd = NULL;
460 io_request->status = 0;
461 io_request->error_info = NULL;
462 io_request->raid_bypass = false;
463}
464
465static struct pqi_io_request *pqi_alloc_io_request(
466 struct pqi_ctrl_info *ctrl_info)
467{
468 struct pqi_io_request *io_request;
469 u16 i = ctrl_info->next_io_request_slot; /* benignly racy */
470
471 while (1) {
472 io_request = &ctrl_info->io_request_pool[i];
473 if (atomic_inc_return(&io_request->refcount) == 1)
474 break;
475 atomic_dec(&io_request->refcount);
476 i = (i + 1) % ctrl_info->max_io_slots;
477 }
478
479 /* benignly racy */
480 ctrl_info->next_io_request_slot = (i + 1) % ctrl_info->max_io_slots;
481
482 pqi_reinit_io_request(io_request);
483
484 return io_request;
485}
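/*
 * Note (annotation): slot ownership in pqi_alloc_io_request() is decided
 * solely by the refcount transition 0 -> 1 (atomic_inc_return() == 1);
 * next_io_request_slot is only a starting hint for the search, which is why
 * the unlocked accesses above are merely "benignly racy" - two callers can
 * never be handed the same slot.
 */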
486
487static void pqi_free_io_request(struct pqi_io_request *io_request)
488{
489 atomic_dec(&io_request->refcount);
490}
491
492static int pqi_identify_controller(struct pqi_ctrl_info *ctrl_info,
493 struct bmic_identify_controller *buffer)
494{
495 int rc;
496 int pci_direction;
497 struct pqi_raid_path_request request;
498
499 rc = pqi_build_raid_path_request(ctrl_info, &request,
500 BMIC_IDENTIFY_CONTROLLER, RAID_CTLR_LUNID, buffer,
501 sizeof(*buffer), 0, &pci_direction);
502 if (rc)
503 return rc;
504
505 rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header, 0,
506 NULL, NO_TIMEOUT);
507
508 pqi_pci_unmap(ctrl_info->pci_dev, request.sg_descriptors, 1,
509 pci_direction);
510
511 return rc;
512}
513
514static int pqi_scsi_inquiry(struct pqi_ctrl_info *ctrl_info,
515 u8 *scsi3addr, u16 vpd_page, void *buffer, size_t buffer_length)
516{
517 int rc;
518 int pci_direction;
519 struct pqi_raid_path_request request;
520
521 rc = pqi_build_raid_path_request(ctrl_info, &request,
522 INQUIRY, scsi3addr, buffer, buffer_length, vpd_page,
523 &pci_direction);
524 if (rc)
525 return rc;
526
527 rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header, 0,
528 NULL, NO_TIMEOUT);
529
530 pqi_pci_unmap(ctrl_info->pci_dev, request.sg_descriptors, 1,
531 pci_direction);
532
533 return rc;
534}
535
536static int pqi_identify_physical_device(struct pqi_ctrl_info *ctrl_info,
537 struct pqi_scsi_dev *device,
538 struct bmic_identify_physical_device *buffer,
539 size_t buffer_length)
540{
541 int rc;
542 int pci_direction;
543 u16 bmic_device_index;
544 struct pqi_raid_path_request request;
545
546 rc = pqi_build_raid_path_request(ctrl_info, &request,
547 BMIC_IDENTIFY_PHYSICAL_DEVICE, RAID_CTLR_LUNID, buffer,
548 buffer_length, 0, &pci_direction);
549 if (rc)
550 return rc;
551
552 bmic_device_index = CISS_GET_DRIVE_NUMBER(device->scsi3addr);
553 request.cdb[2] = (u8)bmic_device_index;
554 request.cdb[9] = (u8)(bmic_device_index >> 8);
555
556 rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header,
557 0, NULL, NO_TIMEOUT);
558
559 pqi_pci_unmap(ctrl_info->pci_dev, request.sg_descriptors, 1,
560 pci_direction);
561
562 return rc;
563}
564
565#define SA_CACHE_FLUSH_BUFFER_LENGTH 4
566
567static int pqi_flush_cache(struct pqi_ctrl_info *ctrl_info)
568{
569 int rc;
570 struct pqi_raid_path_request request;
571 int pci_direction;
572 u8 *buffer;
573
574 /*
575 * Don't bother trying to flush the cache if the controller is
576 * locked up.
577 */
578 if (pqi_ctrl_offline(ctrl_info))
579 return -ENXIO;
580
581 buffer = kzalloc(SA_CACHE_FLUSH_BUFFER_LENGTH, GFP_KERNEL);
582 if (!buffer)
583 return -ENOMEM;
584
585 rc = pqi_build_raid_path_request(ctrl_info, &request,
586 SA_CACHE_FLUSH, RAID_CTLR_LUNID, buffer,
587 SA_CACHE_FLUSH_BUFFER_LENGTH, 0, &pci_direction);
588 if (rc)
589 goto out;
590
591 rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header,
592 0, NULL, NO_TIMEOUT);
593
594 pqi_pci_unmap(ctrl_info->pci_dev, request.sg_descriptors, 1,
595 pci_direction);
596
597out:
598 kfree(buffer);
599
600 return rc;
601}
602
603static int pqi_write_host_wellness(struct pqi_ctrl_info *ctrl_info,
604 void *buffer, size_t buffer_length)
605{
606 int rc;
607 struct pqi_raid_path_request request;
608 int pci_direction;
609
610 rc = pqi_build_raid_path_request(ctrl_info, &request,
611 BMIC_WRITE_HOST_WELLNESS, RAID_CTLR_LUNID, buffer,
612 buffer_length, 0, &pci_direction);
613 if (rc)
614 return rc;
615
616 rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header,
617 0, NULL, NO_TIMEOUT);
618
619 pqi_pci_unmap(ctrl_info->pci_dev, request.sg_descriptors, 1,
620 pci_direction);
621
622 return rc;
623}
624
625#pragma pack(1)
626
627struct bmic_host_wellness_driver_version {
628 u8 start_tag[4];
629 u8 driver_version_tag[2];
630 __le16 driver_version_length;
631 char driver_version[32];
632 u8 end_tag[2];
633};
634
635#pragma pack()
636
637static int pqi_write_driver_version_to_host_wellness(
638 struct pqi_ctrl_info *ctrl_info)
639{
640 int rc;
641 struct bmic_host_wellness_driver_version *buffer;
642 size_t buffer_length;
643
644 buffer_length = sizeof(*buffer);
645
646 buffer = kmalloc(buffer_length, GFP_KERNEL);
647 if (!buffer)
648 return -ENOMEM;
649
650 buffer->start_tag[0] = '<';
651 buffer->start_tag[1] = 'H';
652 buffer->start_tag[2] = 'W';
653 buffer->start_tag[3] = '>';
654 buffer->driver_version_tag[0] = 'D';
655 buffer->driver_version_tag[1] = 'V';
656 put_unaligned_le16(sizeof(buffer->driver_version),
657 &buffer->driver_version_length);
658 strncpy(buffer->driver_version, "Linux " DRIVER_VERSION,
659 sizeof(buffer->driver_version) - 1);
660 buffer->driver_version[sizeof(buffer->driver_version) - 1] = '\0';
661 buffer->end_tag[0] = 'Z';
662 buffer->end_tag[1] = 'Z';
663
664 rc = pqi_write_host_wellness(ctrl_info, buffer, buffer_length);
665
666 kfree(buffer);
667
668 return rc;
669}
670
671#pragma pack(1)
672
673struct bmic_host_wellness_time {
674 u8 start_tag[4];
675 u8 time_tag[2];
676 __le16 time_length;
677 u8 time[8];
678 u8 dont_write_tag[2];
679 u8 end_tag[2];
680};
681
682#pragma pack()
683
684static int pqi_write_current_time_to_host_wellness(
685 struct pqi_ctrl_info *ctrl_info)
686{
687 int rc;
688 struct bmic_host_wellness_time *buffer;
689 size_t buffer_length;
690 time64_t local_time;
691 unsigned int year;
692 struct tm tm;
693
694 buffer_length = sizeof(*buffer);
695
696 buffer = kmalloc(buffer_length, GFP_KERNEL);
697 if (!buffer)
698 return -ENOMEM;
699
700 buffer->start_tag[0] = '<';
701 buffer->start_tag[1] = 'H';
702 buffer->start_tag[2] = 'W';
703 buffer->start_tag[3] = '>';
704 buffer->time_tag[0] = 'T';
705 buffer->time_tag[1] = 'D';
706 put_unaligned_le16(sizeof(buffer->time),
707 &buffer->time_length);
708
709 local_time = ktime_get_real_seconds();
710 time64_to_tm(local_time, -sys_tz.tz_minuteswest * 60, &tm);
711 year = tm.tm_year + 1900;
712
713 buffer->time[0] = bin2bcd(tm.tm_hour);
714 buffer->time[1] = bin2bcd(tm.tm_min);
715 buffer->time[2] = bin2bcd(tm.tm_sec);
716 buffer->time[3] = 0;
717 buffer->time[4] = bin2bcd(tm.tm_mon + 1);
718 buffer->time[5] = bin2bcd(tm.tm_mday);
719 buffer->time[6] = bin2bcd(year / 100);
720 buffer->time[7] = bin2bcd(year % 100);
721
722 buffer->dont_write_tag[0] = 'D';
723 buffer->dont_write_tag[1] = 'W';
724 buffer->end_tag[0] = 'Z';
725 buffer->end_tag[1] = 'Z';
726
727 rc = pqi_write_host_wellness(ctrl_info, buffer, buffer_length);
728
729 kfree(buffer);
730
731 return rc;
732}
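/*
 * Note (annotation): the 8-byte time field above is BCD-encoded as
 * { hour, minute, second, 0, month, day, century, year-in-century }.
 * For example, 2017-03-09 14:05:30 would be sent as
 * 0x14 0x05 0x30 0x00 0x03 0x09 0x20 0x17.
 */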
733
734#define PQI_UPDATE_TIME_WORK_INTERVAL (24UL * 60 * 60 * HZ)
735
736static void pqi_update_time_worker(struct work_struct *work)
737{
738 int rc;
739 struct pqi_ctrl_info *ctrl_info;
740
741 ctrl_info = container_of(to_delayed_work(work), struct pqi_ctrl_info,
742 update_time_work);
743
744 rc = pqi_write_current_time_to_host_wellness(ctrl_info);
745 if (rc)
746 dev_warn(&ctrl_info->pci_dev->dev,
747 "error updating time on controller\n");
748
749 schedule_delayed_work(&ctrl_info->update_time_work,
750 PQI_UPDATE_TIME_WORK_INTERVAL);
751}
752
753static inline void pqi_schedule_update_time_worker(
754 struct pqi_ctrl_info *ctrl_info)
755{
756 if (ctrl_info->update_time_worker_scheduled)
757 return;
758
759 schedule_delayed_work(&ctrl_info->update_time_work, 0);
760 ctrl_info->update_time_worker_scheduled = true;
761}
762
763static inline void pqi_cancel_update_time_worker(
764 struct pqi_ctrl_info *ctrl_info)
765{
766 if (!ctrl_info->update_time_worker_scheduled)
767 return;
768
769 cancel_delayed_work_sync(&ctrl_info->update_time_work);
770 ctrl_info->update_time_worker_scheduled = false;
771}
772
773static int pqi_report_luns(struct pqi_ctrl_info *ctrl_info, u8 cmd,
774 void *buffer, size_t buffer_length)
775{
776 int rc;
777 int pci_direction;
778 struct pqi_raid_path_request request;
779
780 rc = pqi_build_raid_path_request(ctrl_info, &request,
781 cmd, RAID_CTLR_LUNID, buffer, buffer_length, 0, &pci_direction);
782 if (rc)
783 return rc;
784
785 rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header, 0,
786 NULL, NO_TIMEOUT);
787
788 pqi_pci_unmap(ctrl_info->pci_dev, request.sg_descriptors, 1,
789 pci_direction);
790
791 return rc;
792}
793
794static int pqi_report_phys_logical_luns(struct pqi_ctrl_info *ctrl_info, u8 cmd,
795 void **buffer)
796{
797 int rc;
798 size_t lun_list_length;
799 size_t lun_data_length;
800 size_t new_lun_list_length;
801 void *lun_data = NULL;
802 struct report_lun_header *report_lun_header;
803
804 report_lun_header = kmalloc(sizeof(*report_lun_header), GFP_KERNEL);
805 if (!report_lun_header) {
806 rc = -ENOMEM;
807 goto out;
808 }
809
810 rc = pqi_report_luns(ctrl_info, cmd, report_lun_header,
811 sizeof(*report_lun_header));
812 if (rc)
813 goto out;
814
815 lun_list_length = get_unaligned_be32(&report_lun_header->list_length);
816
817again:
818 lun_data_length = sizeof(struct report_lun_header) + lun_list_length;
819
820 lun_data = kmalloc(lun_data_length, GFP_KERNEL);
821 if (!lun_data) {
822 rc = -ENOMEM;
823 goto out;
824 }
825
826 if (lun_list_length == 0) {
827 memcpy(lun_data, report_lun_header, sizeof(*report_lun_header));
828 goto out;
829 }
830
831 rc = pqi_report_luns(ctrl_info, cmd, lun_data, lun_data_length);
832 if (rc)
833 goto out;
834
835 new_lun_list_length = get_unaligned_be32(
836 &((struct report_lun_header *)lun_data)->list_length);
837
838 if (new_lun_list_length > lun_list_length) {
839 lun_list_length = new_lun_list_length;
840 kfree(lun_data);
841 goto again;
842 }
843
844out:
845 kfree(report_lun_header);
846
847 if (rc) {
848 kfree(lun_data);
849 lun_data = NULL;
850 }
851
852 *buffer = lun_data;
853
854 return rc;
855}
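/*
 * Note (annotation): the LUN list is sized in two steps - a header-only
 * REPORT LUNS first returns list_length, then the full list is fetched into
 * a buffer of that size.  If the reported length grew between the two
 * commands (devices hot-added in the meantime), the buffer is freed and the
 * fetch retried from the "again" label with the larger length.
 */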
856
857static inline int pqi_report_phys_luns(struct pqi_ctrl_info *ctrl_info,
858 void **buffer)
859{
860 return pqi_report_phys_logical_luns(ctrl_info, CISS_REPORT_PHYS,
861 buffer);
862}
863
864static inline int pqi_report_logical_luns(struct pqi_ctrl_info *ctrl_info,
865 void **buffer)
866{
867 return pqi_report_phys_logical_luns(ctrl_info, CISS_REPORT_LOG, buffer);
868}
869
870static int pqi_get_device_lists(struct pqi_ctrl_info *ctrl_info,
871 struct report_phys_lun_extended **physdev_list,
872 struct report_log_lun_extended **logdev_list)
873{
874 int rc;
875 size_t logdev_list_length;
876 size_t logdev_data_length;
877 struct report_log_lun_extended *internal_logdev_list;
878 struct report_log_lun_extended *logdev_data;
879 struct report_lun_header report_lun_header;
880
881 rc = pqi_report_phys_luns(ctrl_info, (void **)physdev_list);
882 if (rc)
883 dev_err(&ctrl_info->pci_dev->dev,
884 "report physical LUNs failed\n");
885
886 rc = pqi_report_logical_luns(ctrl_info, (void **)logdev_list);
887 if (rc)
888 dev_err(&ctrl_info->pci_dev->dev,
889 "report logical LUNs failed\n");
890
891 /*
892 * Tack the controller itself onto the end of the logical device list.
893 */
894
895 logdev_data = *logdev_list;
896
897 if (logdev_data) {
898 logdev_list_length =
899 get_unaligned_be32(&logdev_data->header.list_length);
900 } else {
901 memset(&report_lun_header, 0, sizeof(report_lun_header));
902 logdev_data =
903 (struct report_log_lun_extended *)&report_lun_header;
904 logdev_list_length = 0;
905 }
906
907 logdev_data_length = sizeof(struct report_lun_header) +
908 logdev_list_length;
909
910 internal_logdev_list = kmalloc(logdev_data_length +
911 sizeof(struct report_log_lun_extended), GFP_KERNEL);
912 if (!internal_logdev_list) {
913 kfree(*logdev_list);
914 *logdev_list = NULL;
915 return -ENOMEM;
916 }
917
918 memcpy(internal_logdev_list, logdev_data, logdev_data_length);
919 memset((u8 *)internal_logdev_list + logdev_data_length, 0,
920 sizeof(struct report_log_lun_extended_entry));
921 put_unaligned_be32(logdev_list_length +
922 sizeof(struct report_log_lun_extended_entry),
923 &internal_logdev_list->header.list_length);
924
925 kfree(*logdev_list);
926 *logdev_list = internal_logdev_list;
927
928 return 0;
929}
930
931static inline void pqi_set_bus_target_lun(struct pqi_scsi_dev *device,
932 int bus, int target, int lun)
933{
934 device->bus = bus;
935 device->target = target;
936 device->lun = lun;
937}
938
939static void pqi_assign_bus_target_lun(struct pqi_scsi_dev *device)
940{
941 u8 *scsi3addr;
942 u32 lunid;
943 int bus;
944 int target;
945 int lun;
946
947 scsi3addr = device->scsi3addr;
948 lunid = get_unaligned_le32(scsi3addr);
949
950 if (pqi_is_hba_lunid(scsi3addr)) {
951 /* The specified device is the controller. */
952 pqi_set_bus_target_lun(device, PQI_HBA_BUS, 0, lunid & 0x3fff);
953 device->target_lun_valid = true;
954 return;
955 }
956
957 if (pqi_is_logical_device(device)) {
958 if (device->is_external_raid_device) {
959 bus = PQI_EXTERNAL_RAID_VOLUME_BUS;
960 target = (lunid >> 16) & 0x3fff;
961 lun = lunid & 0xff;
962 } else {
963 bus = PQI_RAID_VOLUME_BUS;
964 target = 0;
965 lun = lunid & 0x3fff;
966 }
967 pqi_set_bus_target_lun(device, bus, target, lun);
968 device->target_lun_valid = true;
969 return;
970 }
971
972 /*
973 * Defer target and LUN assignment for non-controller physical devices
974 * because the SAS transport layer will make these assignments later.
975 */
976 pqi_set_bus_target_lun(device, PQI_PHYSICAL_DEVICE_BUS, 0, 0);
977}
978
979static void pqi_get_raid_level(struct pqi_ctrl_info *ctrl_info,
980 struct pqi_scsi_dev *device)
981{
982 int rc;
983 u8 raid_level;
984 u8 *buffer;
985
986 raid_level = SA_RAID_UNKNOWN;
987
988 buffer = kmalloc(64, GFP_KERNEL);
989 if (buffer) {
990 rc = pqi_scsi_inquiry(ctrl_info, device->scsi3addr,
991 VPD_PAGE | CISS_VPD_LV_DEVICE_GEOMETRY, buffer, 64);
992 if (rc == 0) {
993 raid_level = buffer[8];
994 if (raid_level > SA_RAID_MAX)
995 raid_level = SA_RAID_UNKNOWN;
996 }
997 kfree(buffer);
998 }
999
1000 device->raid_level = raid_level;
1001}
1002
1003static int pqi_validate_raid_map(struct pqi_ctrl_info *ctrl_info,
1004 struct pqi_scsi_dev *device, struct raid_map *raid_map)
1005{
1006 char *err_msg;
1007 u32 raid_map_size;
1008 u32 r5or6_blocks_per_row;
1009 unsigned int num_phys_disks;
1010 unsigned int num_raid_map_entries;
1011
1012 raid_map_size = get_unaligned_le32(&raid_map->structure_size);
1013
1014 if (raid_map_size < offsetof(struct raid_map, disk_data)) {
1015 err_msg = "RAID map too small";
1016 goto bad_raid_map;
1017 }
1018
1019 if (raid_map_size > sizeof(*raid_map)) {
1020 err_msg = "RAID map too large";
1021 goto bad_raid_map;
1022 }
1023
1024 num_phys_disks = get_unaligned_le16(&raid_map->layout_map_count) *
1025 (get_unaligned_le16(&raid_map->data_disks_per_row) +
1026 get_unaligned_le16(&raid_map->metadata_disks_per_row));
1027 num_raid_map_entries = num_phys_disks *
1028 get_unaligned_le16(&raid_map->row_cnt);
1029
1030 if (num_raid_map_entries > RAID_MAP_MAX_ENTRIES) {
1031 err_msg = "invalid number of map entries in RAID map";
1032 goto bad_raid_map;
1033 }
1034
1035 if (device->raid_level == SA_RAID_1) {
1036 if (get_unaligned_le16(&raid_map->layout_map_count) != 2) {
1037 err_msg = "invalid RAID-1 map";
1038 goto bad_raid_map;
1039 }
1040 } else if (device->raid_level == SA_RAID_ADM) {
1041 if (get_unaligned_le16(&raid_map->layout_map_count) != 3) {
1042 err_msg = "invalid RAID-1(ADM) map";
1043 goto bad_raid_map;
1044 }
1045 } else if ((device->raid_level == SA_RAID_5 ||
1046 device->raid_level == SA_RAID_6) &&
1047 get_unaligned_le16(&raid_map->layout_map_count) > 1) {
1048 /* RAID 50/60 */
1049 r5or6_blocks_per_row =
1050 get_unaligned_le16(&raid_map->strip_size) *
1051 get_unaligned_le16(&raid_map->data_disks_per_row);
1052 if (r5or6_blocks_per_row == 0) {
1053 err_msg = "invalid RAID-5 or RAID-6 map";
1054 goto bad_raid_map;
1055 }
1056 }
1057
1058 return 0;
1059
1060bad_raid_map:
1061 dev_warn(&ctrl_info->pci_dev->dev,
1062 "scsi %d:%d:%d:%d %s\n",
1063 ctrl_info->scsi_host->host_no,
1064 device->bus, device->target, device->lun, err_msg);
1065
1066 return -EINVAL;
1067}
1068
1069static int pqi_get_raid_map(struct pqi_ctrl_info *ctrl_info,
1070 struct pqi_scsi_dev *device)
1071{
1072 int rc;
1073 int pci_direction;
1074 struct pqi_raid_path_request request;
1075 struct raid_map *raid_map;
1076
1077 raid_map = kmalloc(sizeof(*raid_map), GFP_KERNEL);
1078 if (!raid_map)
1079 return -ENOMEM;
1080
1081 rc = pqi_build_raid_path_request(ctrl_info, &request,
1082 CISS_GET_RAID_MAP, device->scsi3addr, raid_map,
1083 sizeof(*raid_map), 0, &pci_direction);
1084 if (rc)
1085 goto error;
1086
1087 rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header, 0,
1088 NULL, NO_TIMEOUT);
1089
1090 pqi_pci_unmap(ctrl_info->pci_dev, request.sg_descriptors, 1,
1091 pci_direction);
1092
1093 if (rc)
1094 goto error;
1095
1096 rc = pqi_validate_raid_map(ctrl_info, device, raid_map);
1097 if (rc)
1098 goto error;
1099
1100 device->raid_map = raid_map;
1101
1102 return 0;
1103
1104error:
1105 kfree(raid_map);
1106
1107 return rc;
1108}
1109
1110static void pqi_get_offload_status(struct pqi_ctrl_info *ctrl_info,
1111 struct pqi_scsi_dev *device)
1112{
1113 int rc;
1114 u8 *buffer;
1115 u8 offload_status;
1116
1117 buffer = kmalloc(64, GFP_KERNEL);
1118 if (!buffer)
1119 return;
1120
1121 rc = pqi_scsi_inquiry(ctrl_info, device->scsi3addr,
1122 VPD_PAGE | CISS_VPD_LV_OFFLOAD_STATUS, buffer, 64);
1123 if (rc)
1124 goto out;
1125
1126#define OFFLOAD_STATUS_BYTE 4
1127#define OFFLOAD_CONFIGURED_BIT 0x1
1128#define OFFLOAD_ENABLED_BIT 0x2
1129
1130 offload_status = buffer[OFFLOAD_STATUS_BYTE];
1131 device->offload_configured =
1132 !!(offload_status & OFFLOAD_CONFIGURED_BIT);
1133 if (device->offload_configured) {
1134 device->offload_enabled_pending =
1135 !!(offload_status & OFFLOAD_ENABLED_BIT);
1136 if (pqi_get_raid_map(ctrl_info, device))
1137 device->offload_enabled_pending = false;
1138 }
1139
1140out:
1141 kfree(buffer);
1142}
1143
1144/*
1145 * Use vendor-specific VPD to determine online/offline status of a volume.
1146 */
1147
1148static void pqi_get_volume_status(struct pqi_ctrl_info *ctrl_info,
1149 struct pqi_scsi_dev *device)
1150{
1151 int rc;
1152 size_t page_length;
1153 u8 volume_status = CISS_LV_STATUS_UNAVAILABLE;
1154 bool volume_offline = true;
1155 u32 volume_flags;
1156 struct ciss_vpd_logical_volume_status *vpd;
1157
1158 vpd = kmalloc(sizeof(*vpd), GFP_KERNEL);
1159 if (!vpd)
1160 goto no_buffer;
1161
1162 rc = pqi_scsi_inquiry(ctrl_info, device->scsi3addr,
1163 VPD_PAGE | CISS_VPD_LV_STATUS, vpd, sizeof(*vpd));
1164 if (rc)
1165 goto out;
1166
1167 page_length = offsetof(struct ciss_vpd_logical_volume_status,
1168 volume_status) + vpd->page_length;
1169 if (page_length < sizeof(*vpd))
1170 goto out;
1171
1172 volume_status = vpd->volume_status;
1173 volume_flags = get_unaligned_be32(&vpd->flags);
1174 volume_offline = (volume_flags & CISS_LV_FLAGS_NO_HOST_IO) != 0;
1175
1176out:
1177 kfree(vpd);
1178no_buffer:
1179 device->volume_status = volume_status;
1180 device->volume_offline = volume_offline;
1181}
1182
1183static int pqi_get_device_info(struct pqi_ctrl_info *ctrl_info,
1184 struct pqi_scsi_dev *device)
1185{
1186 int rc;
1187 u8 *buffer;
1188
1189 buffer = kmalloc(64, GFP_KERNEL);
1190 if (!buffer)
1191 return -ENOMEM;
1192
1193 /* Send an inquiry to the device to see what it is. */
1194 rc = pqi_scsi_inquiry(ctrl_info, device->scsi3addr, 0, buffer, 64);
1195 if (rc)
1196 goto out;
1197
1198 scsi_sanitize_inquiry_string(&buffer[8], 8);
1199 scsi_sanitize_inquiry_string(&buffer[16], 16);
1200
1201 device->devtype = buffer[0] & 0x1f;
1202 memcpy(device->vendor, &buffer[8], sizeof(device->vendor));
1203 memcpy(device->model, &buffer[16], sizeof(device->model));
1204
1205 if (pqi_is_logical_device(device) && device->devtype == TYPE_DISK) {
1206 if (device->is_external_raid_device) {
1207 device->raid_level = SA_RAID_UNKNOWN;
1208 device->volume_status = CISS_LV_OK;
1209 device->volume_offline = false;
1210 } else {
1211 pqi_get_raid_level(ctrl_info, device);
1212 pqi_get_offload_status(ctrl_info, device);
1213 pqi_get_volume_status(ctrl_info, device);
1214 }
1215 }
1216
1217out:
1218 kfree(buffer);
1219
1220 return rc;
1221}
1222
1223static void pqi_get_physical_disk_info(struct pqi_ctrl_info *ctrl_info,
1224 struct pqi_scsi_dev *device,
1225 struct bmic_identify_physical_device *id_phys)
1226{
1227 int rc;
1228
1229 memset(id_phys, 0, sizeof(*id_phys));
1230
1231 rc = pqi_identify_physical_device(ctrl_info, device,
1232 id_phys, sizeof(*id_phys));
1233 if (rc) {
1234 device->queue_depth = PQI_PHYSICAL_DISK_DEFAULT_MAX_QUEUE_DEPTH;
1235 return;
1236 }
1237
1238 device->queue_depth =
1239 get_unaligned_le16(&id_phys->current_queue_depth_limit);
1240 device->device_type = id_phys->device_type;
1241 device->active_path_index = id_phys->active_path_number;
1242 device->path_map = id_phys->redundant_path_present_map;
1243 memcpy(&device->box,
1244 &id_phys->alternate_paths_phys_box_on_port,
1245 sizeof(device->box));
1246 memcpy(&device->phys_connector,
1247 &id_phys->alternate_paths_phys_connector,
1248 sizeof(device->phys_connector));
1249 device->bay = id_phys->phys_bay_in_box;
1250}
1251
1252static void pqi_show_volume_status(struct pqi_ctrl_info *ctrl_info,
1253 struct pqi_scsi_dev *device)
1254{
1255 char *status;
1256 static const char unknown_state_str[] =
1257 "Volume is in an unknown state (%u)";
1258 char unknown_state_buffer[sizeof(unknown_state_str) + 10];
1259
1260 switch (device->volume_status) {
1261 case CISS_LV_OK:
1262 status = "Volume online";
1263 break;
1264 case CISS_LV_FAILED:
1265 status = "Volume failed";
1266 break;
1267 case CISS_LV_NOT_CONFIGURED:
1268 status = "Volume not configured";
1269 break;
1270 case CISS_LV_DEGRADED:
1271 status = "Volume degraded";
1272 break;
1273 case CISS_LV_READY_FOR_RECOVERY:
1274 status = "Volume ready for recovery operation";
1275 break;
1276 case CISS_LV_UNDERGOING_RECOVERY:
1277 status = "Volume undergoing recovery";
1278 break;
1279 case CISS_LV_WRONG_PHYSICAL_DRIVE_REPLACED:
1280 status = "Wrong physical drive was replaced";
1281 break;
1282 case CISS_LV_PHYSICAL_DRIVE_CONNECTION_PROBLEM:
1283 status = "A physical drive not properly connected";
1284 break;
1285 case CISS_LV_HARDWARE_OVERHEATING:
1286 status = "Hardware is overheating";
1287 break;
1288 case CISS_LV_HARDWARE_HAS_OVERHEATED:
1289 status = "Hardware has overheated";
1290 break;
1291 case CISS_LV_UNDERGOING_EXPANSION:
1292 status = "Volume undergoing expansion";
1293 break;
1294 case CISS_LV_NOT_AVAILABLE:
1295 status = "Volume waiting for transforming volume";
1296 break;
1297 case CISS_LV_QUEUED_FOR_EXPANSION:
1298 status = "Volume queued for expansion";
1299 break;
1300 case CISS_LV_DISABLED_SCSI_ID_CONFLICT:
1301 status = "Volume disabled due to SCSI ID conflict";
1302 break;
1303 case CISS_LV_EJECTED:
1304 status = "Volume has been ejected";
1305 break;
1306 case CISS_LV_UNDERGOING_ERASE:
1307 status = "Volume undergoing background erase";
1308 break;
1309 case CISS_LV_READY_FOR_PREDICTIVE_SPARE_REBUILD:
1310 status = "Volume ready for predictive spare rebuild";
1311 break;
1312 case CISS_LV_UNDERGOING_RPI:
1313 status = "Volume undergoing rapid parity initialization";
1314 break;
1315 case CISS_LV_PENDING_RPI:
1316 status = "Volume queued for rapid parity initialization";
1317 break;
1318 case CISS_LV_ENCRYPTED_NO_KEY:
1319 status = "Encrypted volume inaccessible - key not present";
1320 break;
1321 case CISS_LV_UNDERGOING_ENCRYPTION:
1322 status = "Volume undergoing encryption process";
1323 break;
1324 case CISS_LV_UNDERGOING_ENCRYPTION_REKEYING:
1325 status = "Volume undergoing encryption re-keying process";
1326 break;
1327 case CISS_LV_ENCRYPTED_IN_NON_ENCRYPTED_CONTROLLER:
1328 status = "Volume encrypted but encryption is disabled";
1329 break;
1330 case CISS_LV_PENDING_ENCRYPTION:
1331 status = "Volume pending migration to encrypted state";
1332 break;
1333 case CISS_LV_PENDING_ENCRYPTION_REKEYING:
1334 status = "Volume pending encryption rekeying";
1335 break;
1336 case CISS_LV_NOT_SUPPORTED:
1337 status = "Volume not supported on this controller";
1338 break;
1339 case CISS_LV_STATUS_UNAVAILABLE:
1340 status = "Volume status not available";
1341 break;
1342 default:
1343 snprintf(unknown_state_buffer, sizeof(unknown_state_buffer),
1344 unknown_state_str, device->volume_status);
1345 status = unknown_state_buffer;
1346 break;
1347 }
1348
1349 dev_info(&ctrl_info->pci_dev->dev,
1350 "scsi %d:%d:%d:%d %s\n",
1351 ctrl_info->scsi_host->host_no,
1352 device->bus, device->target, device->lun, status);
1353}
1354
1355static void pqi_rescan_worker(struct work_struct *work)
1356{
1357 struct pqi_ctrl_info *ctrl_info;
1358
1359 ctrl_info = container_of(to_delayed_work(work), struct pqi_ctrl_info,
1360 rescan_work);
1361
1362 pqi_scan_scsi_devices(ctrl_info);
1363}
1364
1365static int pqi_add_device(struct pqi_ctrl_info *ctrl_info,
1366 struct pqi_scsi_dev *device)
1367{
1368 int rc;
1369
1370 if (pqi_is_logical_device(device))
1371 rc = scsi_add_device(ctrl_info->scsi_host, device->bus,
1372 device->target, device->lun);
1373 else
1374 rc = pqi_add_sas_device(ctrl_info->sas_host, device);
1375
1376 return rc;
1377}
1378
1379static inline void pqi_remove_device(struct pqi_ctrl_info *ctrl_info,
1380 struct pqi_scsi_dev *device)
1381{
1382 if (pqi_is_logical_device(device))
1383 scsi_remove_device(device->sdev);
1384 else
1385 pqi_remove_sas_device(device);
1386}
1387
1388/* Assumes the SCSI device list lock is held. */
1389
1390static struct pqi_scsi_dev *pqi_find_scsi_dev(struct pqi_ctrl_info *ctrl_info,
1391 int bus, int target, int lun)
1392{
1393 struct pqi_scsi_dev *device;
1394
1395 list_for_each_entry(device, &ctrl_info->scsi_device_list,
1396 scsi_device_list_entry)
1397 if (device->bus == bus && device->target == target &&
1398 device->lun == lun)
1399 return device;
1400
1401 return NULL;
1402}
1403
1404static inline bool pqi_device_equal(struct pqi_scsi_dev *dev1,
1405 struct pqi_scsi_dev *dev2)
1406{
1407 if (dev1->is_physical_device != dev2->is_physical_device)
1408 return false;
1409
1410 if (dev1->is_physical_device)
1411 return dev1->wwid == dev2->wwid;
1412
1413 return memcmp(dev1->volume_id, dev2->volume_id,
1414 sizeof(dev1->volume_id)) == 0;
1415}
1416
1417enum pqi_find_result {
1418 DEVICE_NOT_FOUND,
1419 DEVICE_CHANGED,
1420 DEVICE_SAME,
1421};
1422
1423static enum pqi_find_result pqi_scsi_find_entry(struct pqi_ctrl_info *ctrl_info,
1424 struct pqi_scsi_dev *device_to_find,
1425 struct pqi_scsi_dev **matching_device)
1426{
1427 struct pqi_scsi_dev *device;
1428
1429 list_for_each_entry(device, &ctrl_info->scsi_device_list,
1430 scsi_device_list_entry) {
1431 if (pqi_scsi3addr_equal(device_to_find->scsi3addr,
1432 device->scsi3addr)) {
1433 *matching_device = device;
1434 if (pqi_device_equal(device_to_find, device)) {
1435 if (device_to_find->volume_offline)
1436 return DEVICE_CHANGED;
1437 return DEVICE_SAME;
1438 }
1439 return DEVICE_CHANGED;
1440 }
1441 }
1442
1443 return DEVICE_NOT_FOUND;
1444}
1445
1446static void pqi_dev_info(struct pqi_ctrl_info *ctrl_info,
1447 char *action, struct pqi_scsi_dev *device)
1448{
1449 dev_info(&ctrl_info->pci_dev->dev,
1450 "%s scsi %d:%d:%d:%d: %s %.8s %.16s %-12s SSDSmartPathCap%c En%c qd=%d\n",
1451 action,
1452 ctrl_info->scsi_host->host_no,
1453 device->bus,
1454 device->target,
1455 device->lun,
1456 scsi_device_type(device->devtype),
1457 device->vendor,
1458 device->model,
1459 pqi_is_logical_device(device) ?
1460 pqi_raid_level_to_string(device->raid_level) : "",
1461 device->offload_configured ? '+' : '-',
1462 device->offload_enabled_pending ? '+' : '-',
1463 device->queue_depth);
1464}
1465
1466/* Assumes the SCSI device list lock is held. */
1467
1468static void pqi_scsi_update_device(struct pqi_scsi_dev *existing_device,
1469 struct pqi_scsi_dev *new_device)
1470{
1471 existing_device->devtype = new_device->devtype;
1472 existing_device->device_type = new_device->device_type;
1473 existing_device->bus = new_device->bus;
1474 if (new_device->target_lun_valid) {
1475 existing_device->target = new_device->target;
1476 existing_device->lun = new_device->lun;
1477 existing_device->target_lun_valid = true;
1478 }
1479
1480 /* By definition, the scsi3addr and wwid fields are already the same. */
1481
1482 existing_device->is_physical_device = new_device->is_physical_device;
1483 existing_device->is_external_raid_device =
1484 new_device->is_external_raid_device;
1485 existing_device->aio_enabled = new_device->aio_enabled;
1486 memcpy(existing_device->vendor, new_device->vendor,
1487 sizeof(existing_device->vendor));
1488 memcpy(existing_device->model, new_device->model,
1489 sizeof(existing_device->model));
1490 existing_device->sas_address = new_device->sas_address;
1491 existing_device->raid_level = new_device->raid_level;
1492 existing_device->queue_depth = new_device->queue_depth;
1493 existing_device->aio_handle = new_device->aio_handle;
1494 existing_device->volume_status = new_device->volume_status;
1495 existing_device->active_path_index = new_device->active_path_index;
1496 existing_device->path_map = new_device->path_map;
1497 existing_device->bay = new_device->bay;
1498 memcpy(existing_device->box, new_device->box,
1499 sizeof(existing_device->box));
1500 memcpy(existing_device->phys_connector, new_device->phys_connector,
1501 sizeof(existing_device->phys_connector));
1502 existing_device->offload_configured = new_device->offload_configured;
1503 existing_device->offload_enabled = false;
1504 existing_device->offload_enabled_pending =
1505 new_device->offload_enabled_pending;
1506 existing_device->offload_to_mirror = 0;
1507 kfree(existing_device->raid_map);
1508 existing_device->raid_map = new_device->raid_map;
1509
1510 /* To prevent this from being freed later. */
1511 new_device->raid_map = NULL;
1512}
1513
1514static inline void pqi_free_device(struct pqi_scsi_dev *device)
1515{
1516 if (device) {
1517 kfree(device->raid_map);
1518 kfree(device);
1519 }
1520}
1521
1522/*
1523 * Called when exposing a new device to the OS fails in order to re-adjust
1524 * our internal SCSI device list to match the SCSI ML's view.
1525 */
1526
1527static inline void pqi_fixup_botched_add(struct pqi_ctrl_info *ctrl_info,
1528 struct pqi_scsi_dev *device)
1529{
1530 unsigned long flags;
1531
1532 spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);
1533 list_del(&device->scsi_device_list_entry);
1534 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
1535
1536 /* Allow the device structure to be freed later. */
1537 device->keep_device = false;
1538}
1539
1540static void pqi_update_device_list(struct pqi_ctrl_info *ctrl_info,
1541 struct pqi_scsi_dev *new_device_list[], unsigned int num_new_devices)
1542{
1543 int rc;
1544 unsigned int i;
1545 unsigned long flags;
1546 enum pqi_find_result find_result;
1547 struct pqi_scsi_dev *device;
1548 struct pqi_scsi_dev *next;
1549 struct pqi_scsi_dev *matching_device;
1550 struct list_head add_list;
1551 struct list_head delete_list;
1552
1553 INIT_LIST_HEAD(&add_list);
1554 INIT_LIST_HEAD(&delete_list);
1555
1556 /*
1557 * The idea here is to do as little work as possible while holding the
1558 * spinlock. That's why we go to great pains to defer anything other
1559 * than updating the internal device list until after we release the
1560 * spinlock.
1561 */
1562
1563 spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);
1564
1565 /* Assume that all devices in the existing list have gone away. */
1566 list_for_each_entry(device, &ctrl_info->scsi_device_list,
1567 scsi_device_list_entry)
1568 device->device_gone = true;
1569
1570 for (i = 0; i < num_new_devices; i++) {
1571 device = new_device_list[i];
1572
1573 find_result = pqi_scsi_find_entry(ctrl_info, device,
1574 &matching_device);
1575
1576 switch (find_result) {
1577 case DEVICE_SAME:
1578 /*
1579 * The newly found device is already in the existing
1580 * device list.
1581 */
1582 device->new_device = false;
1583 matching_device->device_gone = false;
1584 pqi_scsi_update_device(matching_device, device);
1585 break;
1586 case DEVICE_NOT_FOUND:
1587 /*
1588 * The newly found device is NOT in the existing device
1589 * list.
1590 */
1591 device->new_device = true;
1592 break;
1593 case DEVICE_CHANGED:
1594 /*
1595 * The original device has gone away and we need to add
1596 * the new device.
1597 */
1598 device->new_device = true;
1599 break;
1600 }
1601 }
1602
1603 /* Process all devices that have gone away. */
1604 list_for_each_entry_safe(device, next, &ctrl_info->scsi_device_list,
1605 scsi_device_list_entry) {
1606 if (device->device_gone) {
1607 list_del(&device->scsi_device_list_entry);
1608 list_add_tail(&device->delete_list_entry, &delete_list);
1609 }
1610 }
1611
1612 /* Process all new devices. */
1613 for (i = 0; i < num_new_devices; i++) {
1614 device = new_device_list[i];
1615 if (!device->new_device)
1616 continue;
1617 if (device->volume_offline)
1618 continue;
1619 list_add_tail(&device->scsi_device_list_entry,
1620 &ctrl_info->scsi_device_list);
1621 list_add_tail(&device->add_list_entry, &add_list);
1622 /* To prevent this device structure from being freed later. */
1623 device->keep_device = true;
1624 }
1625
1626 list_for_each_entry(device, &ctrl_info->scsi_device_list,
1627 scsi_device_list_entry)
1628 device->offload_enabled =
1629 device->offload_enabled_pending;
1630
1631 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
1632
1633 /* Remove all devices that have gone away. */
1634 list_for_each_entry_safe(device, next, &delete_list,
1635 delete_list_entry) {
1636 if (device->sdev)
1637 pqi_remove_device(ctrl_info, device);
1638 if (device->volume_offline) {
1639 pqi_dev_info(ctrl_info, "offline", device);
1640 pqi_show_volume_status(ctrl_info, device);
1641 } else {
1642 pqi_dev_info(ctrl_info, "removed", device);
1643 }
1644 list_del(&device->delete_list_entry);
1645 pqi_free_device(device);
1646 }
1647
1648 /*
1649 * Notify the SCSI ML if the queue depth of any existing device has
1650 * changed.
1651 */
1652 list_for_each_entry(device, &ctrl_info->scsi_device_list,
1653 scsi_device_list_entry) {
1654 if (device->sdev && device->queue_depth !=
1655 device->advertised_queue_depth) {
1656 device->advertised_queue_depth = device->queue_depth;
1657 scsi_change_queue_depth(device->sdev,
1658 device->advertised_queue_depth);
1659 }
1660 }
1661
1662 /* Expose any new devices. */
1663 list_for_each_entry_safe(device, next, &add_list, add_list_entry) {
1664 if (!device->sdev) {
1665 rc = pqi_add_device(ctrl_info, device);
1666 if (rc) {
1667 dev_warn(&ctrl_info->pci_dev->dev,
1668 "scsi %d:%d:%d:%d addition failed, device not added\n",
1669 ctrl_info->scsi_host->host_no,
1670 device->bus, device->target,
1671 device->lun);
1672 pqi_fixup_botched_add(ctrl_info, device);
1673 continue;
1674 }
1675 }
1676 pqi_dev_info(ctrl_info, "added", device);
1677 }
1678}
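/*
 * Note (annotation): pqi_update_device_list() is a mark-and-sweep diff.
 * Under the spinlock, every existing device is marked device_gone, each
 * newly reported device is classified DEVICE_SAME/CHANGED/NOT_FOUND, and
 * devices still marked gone are moved to delete_list.  Removal from the
 * SCSI midlayer, queue-depth updates and the scsi_add_device() /
 * pqi_add_sas_device() calls all happen afterwards, outside the lock.
 */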
1679
1680static bool pqi_is_supported_device(struct pqi_scsi_dev *device)
1681{
1682 bool is_supported = false;
1683
1684 switch (device->devtype) {
1685 case TYPE_DISK:
1686 case TYPE_ZBC:
1687 case TYPE_TAPE:
1688 case TYPE_MEDIUM_CHANGER:
1689 case TYPE_ENCLOSURE:
1690 is_supported = true;
1691 break;
1692 case TYPE_RAID:
1693 /*
1694 * Only support the HBA controller itself as a RAID
1695 * controller. If it's a RAID controller other than
1696 * the HBA itself (an external RAID controller, for
1697 * example), we don't support it.
1698 */
1699 if (pqi_is_hba_lunid(device->scsi3addr))
1700 is_supported = true;
1701 break;
1702 }
1703
1704 return is_supported;
1705}
1706
1707static inline bool pqi_skip_device(u8 *scsi3addr)
1708{
1709 /* Ignore all masked devices. */
1710 if (MASKED_DEVICE(scsi3addr))
1711 return true;
1712
1713 return false;
1714}
1715
1716static int pqi_update_scsi_devices(struct pqi_ctrl_info *ctrl_info)
1717{
1718 int i;
1719 int rc;
1720 struct list_head new_device_list_head;
1721 struct report_phys_lun_extended *physdev_list = NULL;
1722 struct report_log_lun_extended *logdev_list = NULL;
1723 struct report_phys_lun_extended_entry *phys_lun_ext_entry;
1724 struct report_log_lun_extended_entry *log_lun_ext_entry;
1725 struct bmic_identify_physical_device *id_phys = NULL;
1726 u32 num_physicals;
1727 u32 num_logicals;
1728 struct pqi_scsi_dev **new_device_list = NULL;
1729 struct pqi_scsi_dev *device;
1730 struct pqi_scsi_dev *next;
1731 unsigned int num_new_devices;
1732 unsigned int num_valid_devices;
1733 bool is_physical_device;
1734 u8 *scsi3addr;
1735 static char *out_of_memory_msg =
1736 "out of memory, device discovery stopped";
1737
1738 INIT_LIST_HEAD(&new_device_list_head);
1739
1740 rc = pqi_get_device_lists(ctrl_info, &physdev_list, &logdev_list);
1741 if (rc)
1742 goto out;
1743
1744 if (physdev_list)
1745 num_physicals =
1746 get_unaligned_be32(&physdev_list->header.list_length)
1747 / sizeof(physdev_list->lun_entries[0]);
1748 else
1749 num_physicals = 0;
1750
1751 if (logdev_list)
1752 num_logicals =
1753 get_unaligned_be32(&logdev_list->header.list_length)
1754 / sizeof(logdev_list->lun_entries[0]);
1755 else
1756 num_logicals = 0;
1757
1758 if (num_physicals) {
1759 /*
1760 * We need this buffer for calls to pqi_get_physical_disk_info()
1761 * below. We allocate it here instead of inside
1762 * pqi_get_physical_disk_info() because it's a fairly large
1763 * buffer.
1764 */
1765 id_phys = kmalloc(sizeof(*id_phys), GFP_KERNEL);
1766 if (!id_phys) {
1767 dev_warn(&ctrl_info->pci_dev->dev, "%s\n",
1768 out_of_memory_msg);
1769 rc = -ENOMEM;
1770 goto out;
1771 }
1772 }
1773
1774 num_new_devices = num_physicals + num_logicals;
1775
1776 new_device_list = kmalloc(sizeof(*new_device_list) *
1777 num_new_devices, GFP_KERNEL);
1778 if (!new_device_list) {
1779 dev_warn(&ctrl_info->pci_dev->dev, "%s\n", out_of_memory_msg);
1780 rc = -ENOMEM;
1781 goto out;
1782 }
1783
1784 for (i = 0; i < num_new_devices; i++) {
1785 device = kzalloc(sizeof(*device), GFP_KERNEL);
1786 if (!device) {
1787 dev_warn(&ctrl_info->pci_dev->dev, "%s\n",
1788 out_of_memory_msg);
1789 rc = -ENOMEM;
1790 goto out;
1791 }
1792 list_add_tail(&device->new_device_list_entry,
1793 &new_device_list_head);
1794 }
1795
1796 device = NULL;
1797 num_valid_devices = 0;
1798
1799 for (i = 0; i < num_new_devices; i++) {
1800
1801 if (i < num_physicals) {
1802 is_physical_device = true;
1803 phys_lun_ext_entry = &physdev_list->lun_entries[i];
1804 log_lun_ext_entry = NULL;
1805 scsi3addr = phys_lun_ext_entry->lunid;
1806 } else {
1807 is_physical_device = false;
1808 phys_lun_ext_entry = NULL;
1809 log_lun_ext_entry =
1810 &logdev_list->lun_entries[i - num_physicals];
1811 scsi3addr = log_lun_ext_entry->lunid;
1812 }
1813
1814 if (is_physical_device && pqi_skip_device(scsi3addr))
1815 continue;
1816
1817 if (device)
1818 device = list_next_entry(device, new_device_list_entry);
1819 else
1820 device = list_first_entry(&new_device_list_head,
1821 struct pqi_scsi_dev, new_device_list_entry);
1822
1823 memcpy(device->scsi3addr, scsi3addr, sizeof(device->scsi3addr));
1824 device->is_physical_device = is_physical_device;
1825 if (!is_physical_device)
1826 device->is_external_raid_device =
1827 pqi_is_external_raid_addr(scsi3addr);
1828
1829 /* Gather information about the device. */
1830 rc = pqi_get_device_info(ctrl_info, device);
1831 if (rc == -ENOMEM) {
1832 dev_warn(&ctrl_info->pci_dev->dev, "%s\n",
1833 out_of_memory_msg);
1834 goto out;
1835 }
1836 if (rc) {
1837 dev_warn(&ctrl_info->pci_dev->dev,
1838 "obtaining device info failed, skipping device %016llx\n",
1839 get_unaligned_be64(device->scsi3addr));
1840 rc = 0;
1841 continue;
1842 }
1843
1844 if (!pqi_is_supported_device(device))
1845 continue;
1846
1847 pqi_assign_bus_target_lun(device);
1848
1849 if (device->is_physical_device) {
1850 device->wwid = phys_lun_ext_entry->wwid;
1851 if ((phys_lun_ext_entry->device_flags &
1852 REPORT_PHYS_LUN_DEV_FLAG_AIO_ENABLED) &&
1853 phys_lun_ext_entry->aio_handle)
1854 device->aio_enabled = true;
1855 } else {
1856 memcpy(device->volume_id, log_lun_ext_entry->volume_id,
1857 sizeof(device->volume_id));
1858 }
1859
1860 switch (device->devtype) {
1861 case TYPE_DISK:
1862 case TYPE_ZBC:
1863 case TYPE_ENCLOSURE:
1864 if (device->is_physical_device) {
1865 device->sas_address =
1866 get_unaligned_be64(&device->wwid);
1867 if (device->devtype == TYPE_DISK ||
1868 device->devtype == TYPE_ZBC) {
1869 device->aio_handle =
1870 phys_lun_ext_entry->aio_handle;
1871 pqi_get_physical_disk_info(ctrl_info,
1872 device, id_phys);
1873 }
1874 }
1875 break;
1876 }
1877
1878 new_device_list[num_valid_devices++] = device;
1879 }
1880
1881 pqi_update_device_list(ctrl_info, new_device_list, num_valid_devices);
1882
1883out:
1884 list_for_each_entry_safe(device, next, &new_device_list_head,
1885 new_device_list_entry) {
1886 if (device->keep_device)
1887 continue;
1888 list_del(&device->new_device_list_entry);
1889 pqi_free_device(device);
1890 }
1891
1892 kfree(new_device_list);
1893 kfree(physdev_list);
1894 kfree(logdev_list);
1895 kfree(id_phys);
1896
1897 return rc;
1898}
1899
1900static void pqi_remove_all_scsi_devices(struct pqi_ctrl_info *ctrl_info)
1901{
1902 unsigned long flags;
1903 struct pqi_scsi_dev *device;
1904
1905 while (1) {
1906 spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);
1907
1908 device = list_first_entry_or_null(&ctrl_info->scsi_device_list,
1909 struct pqi_scsi_dev, scsi_device_list_entry);
1910 if (device)
1911 list_del(&device->scsi_device_list_entry);
1912
1913 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock,
1914 flags);
1915
1916 if (!device)
1917 break;
6c223761 1918
6c223761
KB
1919 if (device->sdev)
1920 pqi_remove_device(ctrl_info, device);
6c223761
KB
1921 pqi_free_device(device);
1922 }
6c223761
KB
1923}
1924
1925static int pqi_scan_scsi_devices(struct pqi_ctrl_info *ctrl_info)
1926{
1927 int rc;
1928
1929 if (pqi_ctrl_offline(ctrl_info))
1930 return -ENXIO;
1931
1932 mutex_lock(&ctrl_info->scan_mutex);
1933
1934 rc = pqi_update_scsi_devices(ctrl_info);
1935 if (rc)
1936 pqi_schedule_rescan_worker(ctrl_info);
1937
1938 mutex_unlock(&ctrl_info->scan_mutex);
1939
1940 return rc;
1941}
1942
1943static void pqi_scan_start(struct Scsi_Host *shost)
1944{
1945 pqi_scan_scsi_devices(shost_to_hba(shost));
1946}
1947
1948/* Returns TRUE if scan is finished. */
1949
1950static int pqi_scan_finished(struct Scsi_Host *shost,
1951 unsigned long elapsed_time)
1952{
1953 struct pqi_ctrl_info *ctrl_info;
1954
1955 ctrl_info = shost_priv(shost);
1956
1957 return !mutex_is_locked(&ctrl_info->scan_mutex);
1958}
1959
061ef06a
KB
1960static void pqi_wait_until_scan_finished(struct pqi_ctrl_info *ctrl_info)
1961{
1962 mutex_lock(&ctrl_info->scan_mutex);
1963 mutex_unlock(&ctrl_info->scan_mutex);
1964}
1965
1966static void pqi_wait_until_lun_reset_finished(struct pqi_ctrl_info *ctrl_info)
1967{
1968 mutex_lock(&ctrl_info->lun_reset_mutex);
1969 mutex_unlock(&ctrl_info->lun_reset_mutex);
1970}
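/*
 * Editor's note: the two helpers above use a lock/unlock pair purely as a
 * barrier - the caller blocks until any in-progress scan or LUN reset drops
 * the mutex, then releases it again immediately. A minimal userspace
 * analogue of the same idiom (illustrative only, hypothetical names,
 * pthreads instead of kernel mutexes):
 */
#include <pthread.h>

static pthread_mutex_t work_in_progress = PTHREAD_MUTEX_INITIALIZER;

static void wait_until_work_finished(void)
{
	pthread_mutex_lock(&work_in_progress);	/* blocks while a worker holds it */
	pthread_mutex_unlock(&work_in_progress);	/* nothing to do - we only waited */
}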
1971
6c223761
KB
1972static inline void pqi_set_encryption_info(
1973 struct pqi_encryption_info *encryption_info, struct raid_map *raid_map,
1974 u64 first_block)
1975{
1976 u32 volume_blk_size;
1977
1978 /*
1979 * Set the encryption tweak values based on logical block address.
1980 * If the block size is 512, the tweak value is equal to the LBA.
1981 * For other block sizes, the tweak value is (LBA * block size) / 512.
1982 */
1983 volume_blk_size = get_unaligned_le32(&raid_map->volume_blk_size);
1984 if (volume_blk_size != 512)
1985 first_block = (first_block * volume_blk_size) / 512;
1986
1987 encryption_info->data_encryption_key_index =
1988 get_unaligned_le16(&raid_map->data_encryption_key_index);
1989 encryption_info->encrypt_tweak_lower = lower_32_bits(first_block);
1990 encryption_info->encrypt_tweak_upper = upper_32_bits(first_block);
1991}
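/*
 * Editor's sketch of the tweak arithmetic above (illustrative only,
 * hypothetical function name). With a 4096-byte volume block size the tweak
 * advances by 4096 / 512 = 8 per logical block, so LBA 100 yields a tweak
 * of 800; with a 512-byte block size the tweak is the LBA itself.
 */
static u64 example_encryption_tweak(u64 first_block, u32 volume_blk_size)
{
	if (volume_blk_size != 512)
		first_block = (first_block * volume_blk_size) / 512;

	return first_block;	/* caller splits this into lower/upper 32 bits */
}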
1992
1993/*
1994 * Attempt to perform offload RAID mapping for a logical volume I/O.
1995 */
1996
1997#define PQI_RAID_BYPASS_INELIGIBLE 1
1998
1999static int pqi_raid_bypass_submit_scsi_cmd(struct pqi_ctrl_info *ctrl_info,
2000 struct pqi_scsi_dev *device, struct scsi_cmnd *scmd,
2001 struct pqi_queue_group *queue_group)
2002{
2003 struct raid_map *raid_map;
2004 bool is_write = false;
2005 u32 map_index;
2006 u64 first_block;
2007 u64 last_block;
2008 u32 block_cnt;
2009 u32 blocks_per_row;
2010 u64 first_row;
2011 u64 last_row;
2012 u32 first_row_offset;
2013 u32 last_row_offset;
2014 u32 first_column;
2015 u32 last_column;
2016 u64 r0_first_row;
2017 u64 r0_last_row;
2018 u32 r5or6_blocks_per_row;
2019 u64 r5or6_first_row;
2020 u64 r5or6_last_row;
2021 u32 r5or6_first_row_offset;
2022 u32 r5or6_last_row_offset;
2023 u32 r5or6_first_column;
2024 u32 r5or6_last_column;
2025 u16 data_disks_per_row;
2026 u32 total_disks_per_row;
2027 u16 layout_map_count;
2028 u32 stripesize;
2029 u16 strip_size;
2030 u32 first_group;
2031 u32 last_group;
2032 u32 current_group;
2033 u32 map_row;
2034 u32 aio_handle;
2035 u64 disk_block;
2036 u32 disk_block_cnt;
2037 u8 cdb[16];
2038 u8 cdb_length;
2039 int offload_to_mirror;
2040 struct pqi_encryption_info *encryption_info_ptr;
2041 struct pqi_encryption_info encryption_info;
2042#if BITS_PER_LONG == 32
2043 u64 tmpdiv;
2044#endif
2045
2046 /* Check for valid opcode, get LBA and block count. */
2047 switch (scmd->cmnd[0]) {
2048 case WRITE_6:
2049 is_write = true;
2050 /* fall through */
2051 case READ_6:
e018ef57
B
2052 first_block = (u64)(((scmd->cmnd[1] & 0x1f) << 16) |
2053 (scmd->cmnd[2] << 8) | scmd->cmnd[3]);
6c223761
KB
2054 block_cnt = (u32)scmd->cmnd[4];
2055 if (block_cnt == 0)
2056 block_cnt = 256;
2057 break;
2058 case WRITE_10:
2059 is_write = true;
2060 /* fall through */
2061 case READ_10:
2062 first_block = (u64)get_unaligned_be32(&scmd->cmnd[2]);
2063 block_cnt = (u32)get_unaligned_be16(&scmd->cmnd[7]);
2064 break;
2065 case WRITE_12:
2066 is_write = true;
2067 /* fall through */
2068 case READ_12:
2069 first_block = (u64)get_unaligned_be32(&scmd->cmnd[2]);
2070 block_cnt = get_unaligned_be32(&scmd->cmnd[6]);
2071 break;
2072 case WRITE_16:
2073 is_write = true;
2074 /* fall through */
2075 case READ_16:
2076 first_block = get_unaligned_be64(&scmd->cmnd[2]);
2077 block_cnt = get_unaligned_be32(&scmd->cmnd[10]);
2078 break;
2079 default:
2080 /* Process via normal I/O path. */
2081 return PQI_RAID_BYPASS_INELIGIBLE;
2082 }
2083
2084 /* Check for write to non-RAID-0. */
2085 if (is_write && device->raid_level != SA_RAID_0)
2086 return PQI_RAID_BYPASS_INELIGIBLE;
2087
2088 if (unlikely(block_cnt == 0))
2089 return PQI_RAID_BYPASS_INELIGIBLE;
2090
2091 last_block = first_block + block_cnt - 1;
2092 raid_map = device->raid_map;
2093
2094 /* Check for invalid block or wraparound. */
2095 if (last_block >= get_unaligned_le64(&raid_map->volume_blk_cnt) ||
2096 last_block < first_block)
2097 return PQI_RAID_BYPASS_INELIGIBLE;
2098
2099 data_disks_per_row = get_unaligned_le16(&raid_map->data_disks_per_row);
2100 strip_size = get_unaligned_le16(&raid_map->strip_size);
2101 layout_map_count = get_unaligned_le16(&raid_map->layout_map_count);
2102
2103 /* Calculate stripe information for the request. */
2104 blocks_per_row = data_disks_per_row * strip_size;
2105#if BITS_PER_LONG == 32
2106 tmpdiv = first_block;
2107 do_div(tmpdiv, blocks_per_row);
2108 first_row = tmpdiv;
2109 tmpdiv = last_block;
2110 do_div(tmpdiv, blocks_per_row);
2111 last_row = tmpdiv;
2112 first_row_offset = (u32)(first_block - (first_row * blocks_per_row));
2113 last_row_offset = (u32)(last_block - (last_row * blocks_per_row));
2114 tmpdiv = first_row_offset;
2115 do_div(tmpdiv, strip_size);
2116 first_column = tmpdiv;
2117 tmpdiv = last_row_offset;
2118 do_div(tmpdiv, strip_size);
2119 last_column = tmpdiv;
2120#else
2121 first_row = first_block / blocks_per_row;
2122 last_row = last_block / blocks_per_row;
2123 first_row_offset = (u32)(first_block - (first_row * blocks_per_row));
2124 last_row_offset = (u32)(last_block - (last_row * blocks_per_row));
2125 first_column = first_row_offset / strip_size;
2126 last_column = last_row_offset / strip_size;
2127#endif
2128
2129 /* If this isn't a single row/column then give to the controller. */
2130 if (first_row != last_row || first_column != last_column)
2131 return PQI_RAID_BYPASS_INELIGIBLE;
2132
2133 /* Proceeding with driver mapping. */
2134 total_disks_per_row = data_disks_per_row +
2135 get_unaligned_le16(&raid_map->metadata_disks_per_row);
2136 map_row = ((u32)(first_row >> raid_map->parity_rotation_shift)) %
2137 get_unaligned_le16(&raid_map->row_cnt);
2138 map_index = (map_row * total_disks_per_row) + first_column;
2139
2140 /* RAID 1 */
2141 if (device->raid_level == SA_RAID_1) {
2142 if (device->offload_to_mirror)
2143 map_index += data_disks_per_row;
2144 device->offload_to_mirror = !device->offload_to_mirror;
2145 } else if (device->raid_level == SA_RAID_ADM) {
2146 /* RAID ADM */
2147 /*
2148 * Handles N-way mirrors (R1-ADM) and R10 with a number of drives
2149 * divisible by 3.
2150 */
2151 offload_to_mirror = device->offload_to_mirror;
2152 if (offload_to_mirror == 0) {
2153 /* use physical disk in the first mirrored group. */
2154 map_index %= data_disks_per_row;
2155 } else {
2156 do {
2157 /*
2158 * Determine mirror group that map_index
2159 * indicates.
2160 */
2161 current_group = map_index / data_disks_per_row;
2162
2163 if (offload_to_mirror != current_group) {
2164 if (current_group <
2165 layout_map_count - 1) {
2166 /*
2167 * Select raid index from
2168 * next group.
2169 */
2170 map_index += data_disks_per_row;
2171 current_group++;
2172 } else {
2173 /*
2174 * Select raid index from first
2175 * group.
2176 */
2177 map_index %= data_disks_per_row;
2178 current_group = 0;
2179 }
2180 }
2181 } while (offload_to_mirror != current_group);
2182 }
2183
2184 /* Set mirror group to use next time. */
2185 offload_to_mirror =
2186 (offload_to_mirror >= layout_map_count - 1) ?
2187 0 : offload_to_mirror + 1;
2188 WARN_ON(offload_to_mirror >= layout_map_count);
2189 device->offload_to_mirror = offload_to_mirror;
2190 /*
2191 * Avoid direct use of device->offload_to_mirror within this
2192 * function since multiple threads might simultaneously
2193 * increment it beyond the range of device->layout_map_count - 1.
2194 */
2195 } else if ((device->raid_level == SA_RAID_5 ||
2196 device->raid_level == SA_RAID_6) && layout_map_count > 1) {
2197 /* RAID 50/60 */
2198 /* Verify first and last block are in same RAID group */
2199 r5or6_blocks_per_row = strip_size * data_disks_per_row;
2200 stripesize = r5or6_blocks_per_row * layout_map_count;
2201#if BITS_PER_LONG == 32
2202 tmpdiv = first_block;
2203 first_group = do_div(tmpdiv, stripesize);
2204 tmpdiv = first_group;
2205 do_div(tmpdiv, r5or6_blocks_per_row);
2206 first_group = tmpdiv;
2207 tmpdiv = last_block;
2208 last_group = do_div(tmpdiv, stripesize);
2209 tmpdiv = last_group;
2210 do_div(tmpdiv, r5or6_blocks_per_row);
2211 last_group = tmpdiv;
2212#else
2213 first_group = (first_block % stripesize) / r5or6_blocks_per_row;
2214 last_group = (last_block % stripesize) / r5or6_blocks_per_row;
2215#endif
2216 if (first_group != last_group)
2217 return PQI_RAID_BYPASS_INELIGIBLE;
2218
2219 /* Verify request is in a single row of RAID 5/6 */
2220#if BITS_PER_LONG == 32
2221 tmpdiv = first_block;
2222 do_div(tmpdiv, stripesize);
2223 first_row = r5or6_first_row = r0_first_row = tmpdiv;
2224 tmpdiv = last_block;
2225 do_div(tmpdiv, stripesize);
2226 r5or6_last_row = r0_last_row = tmpdiv;
2227#else
2228 first_row = r5or6_first_row = r0_first_row =
2229 first_block / stripesize;
2230 r5or6_last_row = r0_last_row = last_block / stripesize;
2231#endif
2232 if (r5or6_first_row != r5or6_last_row)
2233 return PQI_RAID_BYPASS_INELIGIBLE;
2234
2235 /* Verify request is in a single column */
2236#if BITS_PER_LONG == 32
2237 tmpdiv = first_block;
2238 first_row_offset = do_div(tmpdiv, stripesize);
2239 tmpdiv = first_row_offset;
2240 first_row_offset = (u32)do_div(tmpdiv, r5or6_blocks_per_row);
2241 r5or6_first_row_offset = first_row_offset;
2242 tmpdiv = last_block;
2243 r5or6_last_row_offset = do_div(tmpdiv, stripesize);
2244 tmpdiv = r5or6_last_row_offset;
2245 r5or6_last_row_offset = do_div(tmpdiv, r5or6_blocks_per_row);
2246 tmpdiv = r5or6_first_row_offset;
2247 do_div(tmpdiv, strip_size);
2248 first_column = r5or6_first_column = tmpdiv;
2249 tmpdiv = r5or6_last_row_offset;
2250 do_div(tmpdiv, strip_size);
2251 r5or6_last_column = tmpdiv;
2252#else
2253 first_row_offset = r5or6_first_row_offset =
2254 (u32)((first_block % stripesize) %
2255 r5or6_blocks_per_row);
2256
2257 r5or6_last_row_offset =
2258 (u32)((last_block % stripesize) %
2259 r5or6_blocks_per_row);
2260
2261 first_column = r5or6_first_row_offset / strip_size;
2262 r5or6_first_column = first_column;
2263 r5or6_last_column = r5or6_last_row_offset / strip_size;
2264#endif
2265 if (r5or6_first_column != r5or6_last_column)
2266 return PQI_RAID_BYPASS_INELIGIBLE;
2267
2268 /* Request is eligible */
2269 map_row =
2270 ((u32)(first_row >> raid_map->parity_rotation_shift)) %
2271 get_unaligned_le16(&raid_map->row_cnt);
2272
2273 map_index = (first_group *
2274 (get_unaligned_le16(&raid_map->row_cnt) *
2275 total_disks_per_row)) +
2276 (map_row * total_disks_per_row) + first_column;
2277 }
2278
2279 if (unlikely(map_index >= RAID_MAP_MAX_ENTRIES))
2280 return PQI_RAID_BYPASS_INELIGIBLE;
2281
2282 aio_handle = raid_map->disk_data[map_index].aio_handle;
2283 disk_block = get_unaligned_le64(&raid_map->disk_starting_blk) +
2284 first_row * strip_size +
2285 (first_row_offset - first_column * strip_size);
2286 disk_block_cnt = block_cnt;
2287
2288 /* Handle differing logical/physical block sizes. */
2289 if (raid_map->phys_blk_shift) {
2290 disk_block <<= raid_map->phys_blk_shift;
2291 disk_block_cnt <<= raid_map->phys_blk_shift;
2292 }
2293
2294 if (unlikely(disk_block_cnt > 0xffff))
2295 return PQI_RAID_BYPASS_INELIGIBLE;
2296
2297 /* Build the new CDB for the physical disk I/O. */
2298 if (disk_block > 0xffffffff) {
2299 cdb[0] = is_write ? WRITE_16 : READ_16;
2300 cdb[1] = 0;
2301 put_unaligned_be64(disk_block, &cdb[2]);
2302 put_unaligned_be32(disk_block_cnt, &cdb[10]);
2303 cdb[14] = 0;
2304 cdb[15] = 0;
2305 cdb_length = 16;
2306 } else {
2307 cdb[0] = is_write ? WRITE_10 : READ_10;
2308 cdb[1] = 0;
2309 put_unaligned_be32((u32)disk_block, &cdb[2]);
2310 cdb[6] = 0;
2311 put_unaligned_be16((u16)disk_block_cnt, &cdb[7]);
2312 cdb[9] = 0;
2313 cdb_length = 10;
2314 }
2315
2316 if (get_unaligned_le16(&raid_map->flags) &
2317 RAID_MAP_ENCRYPTION_ENABLED) {
2318 pqi_set_encryption_info(&encryption_info, raid_map,
2319 first_block);
2320 encryption_info_ptr = &encryption_info;
2321 } else {
2322 encryption_info_ptr = NULL;
2323 }
2324
2325 return pqi_aio_submit_io(ctrl_info, scmd, aio_handle,
376fb880 2326 cdb, cdb_length, queue_group, encryption_info_ptr, true);
6c223761
KB
2327}
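/*
 * Editor's sketch (illustrative only, hypothetical struct and function
 * names; do_div() handling for 32-bit builds omitted) of the RAID-0 portion
 * of the mapping above: translate a volume LBA into a raid_map member index
 * and a block offset on that member. Worked example: 4 data disks,
 * 128-block strips, no parity rotation, LBA 1000 -> row 1, offset 488,
 * column 3, so the request lands on member 3 at disk block
 * 1 * 128 + (488 - 3 * 128) = 232.
 */
struct example_raid0_map {
	u16 data_disks_per_row;
	u16 total_disks_per_row;	/* data + metadata members */
	u16 strip_size;			/* in logical blocks */
	u16 row_cnt;
	u8  parity_rotation_shift;
	u64 disk_starting_blk;
};

static void example_raid0_bypass_map(const struct example_raid0_map *m,
	u64 first_block, u32 *map_index, u64 *disk_block)
{
	u32 blocks_per_row = m->data_disks_per_row * m->strip_size;
	u64 first_row = first_block / blocks_per_row;
	u32 first_row_offset = (u32)(first_block - first_row * blocks_per_row);
	u32 first_column = first_row_offset / m->strip_size;
	u32 map_row = ((u32)(first_row >> m->parity_rotation_shift)) %
				m->row_cnt;

	*map_index = (map_row * m->total_disks_per_row) + first_column;
	*disk_block = m->disk_starting_blk + first_row * m->strip_size +
			(first_row_offset - first_column * m->strip_size);
}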
2328
2329#define PQI_STATUS_IDLE 0x0
2330
2331#define PQI_CREATE_ADMIN_QUEUE_PAIR 1
2332#define PQI_DELETE_ADMIN_QUEUE_PAIR 2
2333
2334#define PQI_DEVICE_STATE_POWER_ON_AND_RESET 0x0
2335#define PQI_DEVICE_STATE_STATUS_AVAILABLE 0x1
2336#define PQI_DEVICE_STATE_ALL_REGISTERS_READY 0x2
2337#define PQI_DEVICE_STATE_ADMIN_QUEUE_PAIR_READY 0x3
2338#define PQI_DEVICE_STATE_ERROR 0x4
2339
2340#define PQI_MODE_READY_TIMEOUT_SECS 30
2341#define PQI_MODE_READY_POLL_INTERVAL_MSECS 1
2342
2343static int pqi_wait_for_pqi_mode_ready(struct pqi_ctrl_info *ctrl_info)
2344{
2345 struct pqi_device_registers __iomem *pqi_registers;
2346 unsigned long timeout;
2347 u64 signature;
2348 u8 status;
2349
2350 pqi_registers = ctrl_info->pqi_registers;
2351 timeout = (PQI_MODE_READY_TIMEOUT_SECS * HZ) + jiffies;
2352
2353 while (1) {
2354 signature = readq(&pqi_registers->signature);
2355 if (memcmp(&signature, PQI_DEVICE_SIGNATURE,
2356 sizeof(signature)) == 0)
2357 break;
2358 if (time_after(jiffies, timeout)) {
2359 dev_err(&ctrl_info->pci_dev->dev,
2360 "timed out waiting for PQI signature\n");
2361 return -ETIMEDOUT;
2362 }
2363 msleep(PQI_MODE_READY_POLL_INTERVAL_MSECS);
2364 }
2365
2366 while (1) {
2367 status = readb(&pqi_registers->function_and_status_code);
2368 if (status == PQI_STATUS_IDLE)
2369 break;
2370 if (time_after(jiffies, timeout)) {
2371 dev_err(&ctrl_info->pci_dev->dev,
2372 "timed out waiting for PQI IDLE\n");
2373 return -ETIMEDOUT;
2374 }
2375 msleep(PQI_MODE_READY_POLL_INTERVAL_MSECS);
2376 }
2377
2378 while (1) {
2379 if (readl(&pqi_registers->device_status) ==
2380 PQI_DEVICE_STATE_ALL_REGISTERS_READY)
2381 break;
2382 if (time_after(jiffies, timeout)) {
2383 dev_err(&ctrl_info->pci_dev->dev,
2384 "timed out waiting for PQI all registers ready\n");
2385 return -ETIMEDOUT;
2386 }
2387 msleep(PQI_MODE_READY_POLL_INTERVAL_MSECS);
2388 }
2389
2390 return 0;
2391}
2392
2393static inline void pqi_aio_path_disabled(struct pqi_io_request *io_request)
2394{
2395 struct pqi_scsi_dev *device;
2396
2397 device = io_request->scmd->device->hostdata;
2398 device->offload_enabled = false;
376fb880 2399 device->aio_enabled = false;
6c223761
KB
2400}
2401
d87d5474 2402static inline void pqi_take_device_offline(struct scsi_device *sdev, char *path)
6c223761
KB
2403{
2404 struct pqi_ctrl_info *ctrl_info;
e58081a7 2405 struct pqi_scsi_dev *device;
6c223761 2406
03b288cf
KB
2407 device = sdev->hostdata;
2408 if (device->device_offline)
2409 return;
2410
2411 device->device_offline = true;
2412 scsi_device_set_state(sdev, SDEV_OFFLINE);
2413 ctrl_info = shost_to_hba(sdev->host);
2414 pqi_schedule_rescan_worker(ctrl_info);
2415 dev_err(&ctrl_info->pci_dev->dev, "offlined %s scsi %d:%d:%d:%d\n",
2416 path, ctrl_info->scsi_host->host_no, device->bus,
2417 device->target, device->lun);
6c223761
KB
2418}
2419
2420static void pqi_process_raid_io_error(struct pqi_io_request *io_request)
2421{
2422 u8 scsi_status;
2423 u8 host_byte;
2424 struct scsi_cmnd *scmd;
2425 struct pqi_raid_error_info *error_info;
2426 size_t sense_data_length;
2427 int residual_count;
2428 int xfer_count;
2429 struct scsi_sense_hdr sshdr;
2430
2431 scmd = io_request->scmd;
2432 if (!scmd)
2433 return;
2434
2435 error_info = io_request->error_info;
2436 scsi_status = error_info->status;
2437 host_byte = DID_OK;
2438
2439 if (error_info->data_out_result == PQI_DATA_IN_OUT_UNDERFLOW) {
2440 xfer_count =
2441 get_unaligned_le32(&error_info->data_out_transferred);
2442 residual_count = scsi_bufflen(scmd) - xfer_count;
2443 scsi_set_resid(scmd, residual_count);
2444 if (xfer_count < scmd->underflow)
2445 host_byte = DID_SOFT_ERROR;
2446 }
2447
2448 sense_data_length = get_unaligned_le16(&error_info->sense_data_length);
2449 if (sense_data_length == 0)
2450 sense_data_length =
2451 get_unaligned_le16(&error_info->response_data_length);
2452 if (sense_data_length) {
2453 if (sense_data_length > sizeof(error_info->data))
2454 sense_data_length = sizeof(error_info->data);
2455
2456 if (scsi_status == SAM_STAT_CHECK_CONDITION &&
2457 scsi_normalize_sense(error_info->data,
2458 sense_data_length, &sshdr) &&
2459 sshdr.sense_key == HARDWARE_ERROR &&
2460 sshdr.asc == 0x3e &&
2461 sshdr.ascq == 0x1) {
d87d5474 2462 pqi_take_device_offline(scmd->device, "RAID");
6c223761
KB
2463 host_byte = DID_NO_CONNECT;
2464 }
2465
2466 if (sense_data_length > SCSI_SENSE_BUFFERSIZE)
2467 sense_data_length = SCSI_SENSE_BUFFERSIZE;
2468 memcpy(scmd->sense_buffer, error_info->data,
2469 sense_data_length);
2470 }
2471
2472 scmd->result = scsi_status;
2473 set_host_byte(scmd, host_byte);
2474}
2475
2476static void pqi_process_aio_io_error(struct pqi_io_request *io_request)
2477{
2478 u8 scsi_status;
2479 u8 host_byte;
2480 struct scsi_cmnd *scmd;
2481 struct pqi_aio_error_info *error_info;
2482 size_t sense_data_length;
2483 int residual_count;
2484 int xfer_count;
2485 bool device_offline;
2486
2487 scmd = io_request->scmd;
2488 error_info = io_request->error_info;
2489 host_byte = DID_OK;
2490 sense_data_length = 0;
2491 device_offline = false;
2492
2493 switch (error_info->service_response) {
2494 case PQI_AIO_SERV_RESPONSE_COMPLETE:
2495 scsi_status = error_info->status;
2496 break;
2497 case PQI_AIO_SERV_RESPONSE_FAILURE:
2498 switch (error_info->status) {
2499 case PQI_AIO_STATUS_IO_ABORTED:
2500 scsi_status = SAM_STAT_TASK_ABORTED;
2501 break;
2502 case PQI_AIO_STATUS_UNDERRUN:
2503 scsi_status = SAM_STAT_GOOD;
2504 residual_count = get_unaligned_le32(
2505 &error_info->residual_count);
2506 scsi_set_resid(scmd, residual_count);
2507 xfer_count = scsi_bufflen(scmd) - residual_count;
2508 if (xfer_count < scmd->underflow)
2509 host_byte = DID_SOFT_ERROR;
2510 break;
2511 case PQI_AIO_STATUS_OVERRUN:
2512 scsi_status = SAM_STAT_GOOD;
2513 break;
2514 case PQI_AIO_STATUS_AIO_PATH_DISABLED:
2515 pqi_aio_path_disabled(io_request);
2516 scsi_status = SAM_STAT_GOOD;
2517 io_request->status = -EAGAIN;
2518 break;
2519 case PQI_AIO_STATUS_NO_PATH_TO_DEVICE:
2520 case PQI_AIO_STATUS_INVALID_DEVICE:
376fb880
KB
2521 if (!io_request->raid_bypass) {
2522 device_offline = true;
2523 pqi_take_device_offline(scmd->device, "AIO");
2524 host_byte = DID_NO_CONNECT;
2525 }
6c223761
KB
2526 scsi_status = SAM_STAT_CHECK_CONDITION;
2527 break;
2528 case PQI_AIO_STATUS_IO_ERROR:
2529 default:
2530 scsi_status = SAM_STAT_CHECK_CONDITION;
2531 break;
2532 }
2533 break;
2534 case PQI_AIO_SERV_RESPONSE_TMF_COMPLETE:
2535 case PQI_AIO_SERV_RESPONSE_TMF_SUCCEEDED:
2536 scsi_status = SAM_STAT_GOOD;
2537 break;
2538 case PQI_AIO_SERV_RESPONSE_TMF_REJECTED:
2539 case PQI_AIO_SERV_RESPONSE_TMF_INCORRECT_LUN:
2540 default:
2541 scsi_status = SAM_STAT_CHECK_CONDITION;
2542 break;
2543 }
2544
2545 if (error_info->data_present) {
2546 sense_data_length =
2547 get_unaligned_le16(&error_info->data_length);
2548 if (sense_data_length) {
2549 if (sense_data_length > sizeof(error_info->data))
2550 sense_data_length = sizeof(error_info->data);
2551 if (sense_data_length > SCSI_SENSE_BUFFERSIZE)
2552 sense_data_length = SCSI_SENSE_BUFFERSIZE;
2553 memcpy(scmd->sense_buffer, error_info->data,
2554 sense_data_length);
2555 }
2556 }
2557
2558 if (device_offline && sense_data_length == 0)
2559 scsi_build_sense_buffer(0, scmd->sense_buffer, HARDWARE_ERROR,
2560 0x3e, 0x1);
2561
2562 scmd->result = scsi_status;
2563 set_host_byte(scmd, host_byte);
2564}
2565
2566static void pqi_process_io_error(unsigned int iu_type,
2567 struct pqi_io_request *io_request)
2568{
2569 switch (iu_type) {
2570 case PQI_RESPONSE_IU_RAID_PATH_IO_ERROR:
2571 pqi_process_raid_io_error(io_request);
2572 break;
2573 case PQI_RESPONSE_IU_AIO_PATH_IO_ERROR:
2574 pqi_process_aio_io_error(io_request);
2575 break;
2576 }
2577}
2578
2579static int pqi_interpret_task_management_response(
2580 struct pqi_task_management_response *response)
2581{
2582 int rc;
2583
2584 switch (response->response_code) {
b17f0486
KB
2585 case SOP_TMF_COMPLETE:
2586 case SOP_TMF_FUNCTION_SUCCEEDED:
6c223761
KB
2587 rc = 0;
2588 break;
2589 default:
2590 rc = -EIO;
2591 break;
2592 }
2593
2594 return rc;
2595}
2596
2597static unsigned int pqi_process_io_intr(struct pqi_ctrl_info *ctrl_info,
2598 struct pqi_queue_group *queue_group)
2599{
2600 unsigned int num_responses;
2601 pqi_index_t oq_pi;
2602 pqi_index_t oq_ci;
2603 struct pqi_io_request *io_request;
2604 struct pqi_io_response *response;
2605 u16 request_id;
2606
2607 num_responses = 0;
2608 oq_ci = queue_group->oq_ci_copy;
2609
2610 while (1) {
2611 oq_pi = *queue_group->oq_pi;
2612 if (oq_pi == oq_ci)
2613 break;
2614
2615 num_responses++;
2616 response = queue_group->oq_element_array +
2617 (oq_ci * PQI_OPERATIONAL_OQ_ELEMENT_LENGTH);
2618
2619 request_id = get_unaligned_le16(&response->request_id);
2620 WARN_ON(request_id >= ctrl_info->max_io_slots);
2621
2622 io_request = &ctrl_info->io_request_pool[request_id];
2623 WARN_ON(atomic_read(&io_request->refcount) == 0);
2624
2625 switch (response->header.iu_type) {
2626 case PQI_RESPONSE_IU_RAID_PATH_IO_SUCCESS:
2627 case PQI_RESPONSE_IU_AIO_PATH_IO_SUCCESS:
2628 case PQI_RESPONSE_IU_GENERAL_MANAGEMENT:
2629 break;
2630 case PQI_RESPONSE_IU_TASK_MANAGEMENT:
2631 io_request->status =
2632 pqi_interpret_task_management_response(
2633 (void *)response);
2634 break;
2635 case PQI_RESPONSE_IU_AIO_PATH_DISABLED:
2636 pqi_aio_path_disabled(io_request);
2637 io_request->status = -EAGAIN;
2638 break;
2639 case PQI_RESPONSE_IU_RAID_PATH_IO_ERROR:
2640 case PQI_RESPONSE_IU_AIO_PATH_IO_ERROR:
2641 io_request->error_info = ctrl_info->error_buffer +
2642 (get_unaligned_le16(&response->error_index) *
2643 PQI_ERROR_BUFFER_ELEMENT_LENGTH);
2644 pqi_process_io_error(response->header.iu_type,
2645 io_request);
2646 break;
2647 default:
2648 dev_err(&ctrl_info->pci_dev->dev,
2649 "unexpected IU type: 0x%x\n",
2650 response->header.iu_type);
6c223761
KB
2651 break;
2652 }
2653
2654 io_request->io_complete_callback(io_request,
2655 io_request->context);
2656
2657 /*
2658 * Note that the I/O request structure CANNOT BE TOUCHED after
2659 * returning from the I/O completion callback!
2660 */
2661
2662 oq_ci = (oq_ci + 1) % ctrl_info->num_elements_per_oq;
2663 }
2664
2665 if (num_responses) {
2666 queue_group->oq_ci_copy = oq_ci;
2667 writel(oq_ci, queue_group->oq_ci);
2668 }
2669
2670 return num_responses;
2671}
2672
2673static inline unsigned int pqi_num_elements_free(unsigned int pi,
df7a1fcf 2674 unsigned int ci, unsigned int elements_in_queue)
6c223761
KB
2675{
2676 unsigned int num_elements_used;
2677
2678 if (pi >= ci)
2679 num_elements_used = pi - ci;
2680 else
2681 num_elements_used = elements_in_queue - ci + pi;
2682
2683 return elements_in_queue - num_elements_used - 1;
2684}
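/*
 * Editor's note: the helper above is the standard "one slot always left
 * empty" ring-buffer accounting, so a producer index equal to the consumer
 * index unambiguously means empty and a full ring never reports all
 * elements free. A minimal self-check (illustrative only, hypothetical
 * function name):
 */
static void example_ring_accounting_check(void)
{
	/* empty ring: pi == ci leaves elements_in_queue - 1 usable slots */
	BUG_ON(pqi_num_elements_free(0, 0, 8) != 7);
	/* full ring: one slot is always sacrificed, so 0 free, never 8 */
	BUG_ON(pqi_num_elements_free(7, 0, 8) != 0);
	/* wrapped producer: used = 8 - 5 + 2 = 5, so 2 slots remain free */
	BUG_ON(pqi_num_elements_free(2, 5, 8) != 2);
}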
2685
98f87667 2686static void pqi_send_event_ack(struct pqi_ctrl_info *ctrl_info,
6c223761
KB
2687 struct pqi_event_acknowledge_request *iu, size_t iu_length)
2688{
2689 pqi_index_t iq_pi;
2690 pqi_index_t iq_ci;
2691 unsigned long flags;
2692 void *next_element;
6c223761
KB
2693 struct pqi_queue_group *queue_group;
2694
2695 queue_group = &ctrl_info->queue_groups[PQI_DEFAULT_QUEUE_GROUP];
2696 put_unaligned_le16(queue_group->oq_id, &iu->header.response_queue_id);
2697
6c223761
KB
2698 while (1) {
2699 spin_lock_irqsave(&queue_group->submit_lock[RAID_PATH], flags);
2700
2701 iq_pi = queue_group->iq_pi_copy[RAID_PATH];
2702 iq_ci = *queue_group->iq_ci[RAID_PATH];
2703
2704 if (pqi_num_elements_free(iq_pi, iq_ci,
2705 ctrl_info->num_elements_per_iq))
2706 break;
2707
2708 spin_unlock_irqrestore(
2709 &queue_group->submit_lock[RAID_PATH], flags);
2710
98f87667 2711 if (pqi_ctrl_offline(ctrl_info))
6c223761 2712 return;
6c223761
KB
2713 }
2714
2715 next_element = queue_group->iq_element_array[RAID_PATH] +
2716 (iq_pi * PQI_OPERATIONAL_IQ_ELEMENT_LENGTH);
2717
2718 memcpy(next_element, iu, iu_length);
2719
2720 iq_pi = (iq_pi + 1) % ctrl_info->num_elements_per_iq;
6c223761
KB
2721 queue_group->iq_pi_copy[RAID_PATH] = iq_pi;
2722
2723 /*
2724 * This write notifies the controller that an IU is available to be
2725 * processed.
2726 */
2727 writel(iq_pi, queue_group->iq_pi[RAID_PATH]);
2728
2729 spin_unlock_irqrestore(&queue_group->submit_lock[RAID_PATH], flags);
6c223761
KB
2730}
2731
2732static void pqi_acknowledge_event(struct pqi_ctrl_info *ctrl_info,
2733 struct pqi_event *event)
2734{
2735 struct pqi_event_acknowledge_request request;
2736
2737 memset(&request, 0, sizeof(request));
2738
2739 request.header.iu_type = PQI_REQUEST_IU_ACKNOWLEDGE_VENDOR_EVENT;
2740 put_unaligned_le16(sizeof(request) - PQI_REQUEST_HEADER_LENGTH,
2741 &request.header.iu_length);
2742 request.event_type = event->event_type;
2743 request.event_id = event->event_id;
2744 request.additional_event_id = event->additional_event_id;
2745
98f87667 2746 pqi_send_event_ack(ctrl_info, &request, sizeof(request));
6c223761
KB
2747}
2748
2749static void pqi_event_worker(struct work_struct *work)
2750{
2751 unsigned int i;
2752 struct pqi_ctrl_info *ctrl_info;
6a50d6ad 2753 struct pqi_event *event;
6c223761
KB
2754
2755 ctrl_info = container_of(work, struct pqi_ctrl_info, event_work);
2756
7561a7e4
KB
2757 pqi_ctrl_busy(ctrl_info);
2758 pqi_wait_if_ctrl_blocked(ctrl_info, NO_TIMEOUT);
2759
6a50d6ad 2760 event = ctrl_info->events;
6c223761 2761 for (i = 0; i < PQI_NUM_SUPPORTED_EVENTS; i++) {
6a50d6ad
KB
2762 if (event->pending) {
2763 event->pending = false;
2764 pqi_acknowledge_event(ctrl_info, event);
6c223761 2765 }
6a50d6ad 2766 event++;
6c223761
KB
2767 }
2768
7561a7e4
KB
2769 pqi_ctrl_unbusy(ctrl_info);
2770
2771 pqi_schedule_rescan_worker(ctrl_info);
6c223761
KB
2772}
2773
98f87667 2774#define PQI_HEARTBEAT_TIMER_INTERVAL (10 * HZ)
6c223761
KB
2775
2776static void pqi_heartbeat_timer_handler(unsigned long data)
2777{
2778 int num_interrupts;
98f87667 2779 u32 heartbeat_count;
6c223761
KB
2780 struct pqi_ctrl_info *ctrl_info = (struct pqi_ctrl_info *)data;
2781
98f87667
KB
2782 pqi_check_ctrl_health(ctrl_info);
2783 if (pqi_ctrl_offline(ctrl_info))
061ef06a
KB
2784 return;
2785
6c223761 2786 num_interrupts = atomic_read(&ctrl_info->num_interrupts);
98f87667 2787 heartbeat_count = pqi_read_heartbeat_counter(ctrl_info);
6c223761
KB
2788
2789 if (num_interrupts == ctrl_info->previous_num_interrupts) {
98f87667
KB
2790 if (heartbeat_count == ctrl_info->previous_heartbeat_count) {
2791 dev_err(&ctrl_info->pci_dev->dev,
2792 "no heartbeat detected - last heartbeat count: %u\n",
2793 heartbeat_count);
6c223761
KB
2794 pqi_take_ctrl_offline(ctrl_info);
2795 return;
2796 }
6c223761 2797 } else {
98f87667 2798 ctrl_info->previous_num_interrupts = num_interrupts;
6c223761
KB
2799 }
2800
98f87667 2801 ctrl_info->previous_heartbeat_count = heartbeat_count;
6c223761
KB
2802 mod_timer(&ctrl_info->heartbeat_timer,
2803 jiffies + PQI_HEARTBEAT_TIMER_INTERVAL);
2804}
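/*
 * Editor's sketch of the liveness test above (illustrative only,
 * hypothetical function name): the controller is only taken offline when
 * both the driver's interrupt count and the firmware heartbeat counter fail
 * to advance across one PQI_HEARTBEAT_TIMER_INTERVAL (10 seconds); movement
 * in either counter is treated as proof of life.
 */
static bool example_ctrl_appears_dead(int num_interrupts,
	int previous_num_interrupts, u32 heartbeat_count,
	u32 previous_heartbeat_count)
{
	return num_interrupts == previous_num_interrupts &&
		heartbeat_count == previous_heartbeat_count;
}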
2805
2806static void pqi_start_heartbeat_timer(struct pqi_ctrl_info *ctrl_info)
2807{
98f87667
KB
2808 if (!ctrl_info->heartbeat_counter)
2809 return;
2810
6c223761
KB
2811 ctrl_info->previous_num_interrupts =
2812 atomic_read(&ctrl_info->num_interrupts);
98f87667
KB
2813 ctrl_info->previous_heartbeat_count =
2814 pqi_read_heartbeat_counter(ctrl_info);
6c223761 2815
6c223761
KB
2816 ctrl_info->heartbeat_timer.expires =
2817 jiffies + PQI_HEARTBEAT_TIMER_INTERVAL;
2818 ctrl_info->heartbeat_timer.data = (unsigned long)ctrl_info;
2819 ctrl_info->heartbeat_timer.function = pqi_heartbeat_timer_handler;
061ef06a 2820 add_timer(&ctrl_info->heartbeat_timer);
6c223761
KB
2821}
2822
2823static inline void pqi_stop_heartbeat_timer(struct pqi_ctrl_info *ctrl_info)
2824{
98f87667 2825 del_timer_sync(&ctrl_info->heartbeat_timer);
6c223761
KB
2826}
2827
6a50d6ad 2828static inline int pqi_event_type_to_event_index(unsigned int event_type)
6c223761
KB
2829{
2830 int index;
2831
6a50d6ad
KB
2832 for (index = 0; index < ARRAY_SIZE(pqi_supported_event_types); index++)
2833 if (event_type == pqi_supported_event_types[index])
2834 return index;
6c223761 2835
6a50d6ad
KB
2836 return -1;
2837}
2838
2839static inline bool pqi_is_supported_event(unsigned int event_type)
2840{
2841 return pqi_event_type_to_event_index(event_type) != -1;
6c223761
KB
2842}
2843
2844static unsigned int pqi_process_event_intr(struct pqi_ctrl_info *ctrl_info)
2845{
2846 unsigned int num_events;
2847 pqi_index_t oq_pi;
2848 pqi_index_t oq_ci;
2849 struct pqi_event_queue *event_queue;
2850 struct pqi_event_response *response;
6a50d6ad 2851 struct pqi_event *event;
6c223761
KB
2852 int event_index;
2853
2854 event_queue = &ctrl_info->event_queue;
2855 num_events = 0;
6c223761
KB
2856 oq_ci = event_queue->oq_ci_copy;
2857
2858 while (1) {
2859 oq_pi = *event_queue->oq_pi;
2860 if (oq_pi == oq_ci)
2861 break;
2862
2863 num_events++;
2864 response = event_queue->oq_element_array +
2865 (oq_ci * PQI_EVENT_OQ_ELEMENT_LENGTH);
2866
2867 event_index =
2868 pqi_event_type_to_event_index(response->event_type);
2869
2870 if (event_index >= 0) {
2871 if (response->request_acknowlege) {
6a50d6ad
KB
2872 event = &ctrl_info->events[event_index];
2873 event->pending = true;
2874 event->event_type = response->event_type;
2875 event->event_id = response->event_id;
2876 event->additional_event_id =
6c223761 2877 response->additional_event_id;
6c223761
KB
2878 }
2879 }
2880
2881 oq_ci = (oq_ci + 1) % PQI_NUM_EVENT_QUEUE_ELEMENTS;
2882 }
2883
2884 if (num_events) {
2885 event_queue->oq_ci_copy = oq_ci;
2886 writel(oq_ci, event_queue->oq_ci);
98f87667 2887 schedule_work(&ctrl_info->event_work);
6c223761
KB
2888 }
2889
2890 return num_events;
2891}
2892
061ef06a
KB
2893#define PQI_LEGACY_INTX_MASK 0x1
2894
2895static inline void pqi_configure_legacy_intx(struct pqi_ctrl_info *ctrl_info,
2896 bool enable_intx)
2897{
2898 u32 intx_mask;
2899 struct pqi_device_registers __iomem *pqi_registers;
2900 volatile void __iomem *register_addr;
2901
2902 pqi_registers = ctrl_info->pqi_registers;
2903
2904 if (enable_intx)
2905 register_addr = &pqi_registers->legacy_intx_mask_clear;
2906 else
2907 register_addr = &pqi_registers->legacy_intx_mask_set;
2908
2909 intx_mask = readl(register_addr);
2910 intx_mask |= PQI_LEGACY_INTX_MASK;
2911 writel(intx_mask, register_addr);
2912}
2913
2914static void pqi_change_irq_mode(struct pqi_ctrl_info *ctrl_info,
2915 enum pqi_irq_mode new_mode)
2916{
2917 switch (ctrl_info->irq_mode) {
2918 case IRQ_MODE_MSIX:
2919 switch (new_mode) {
2920 case IRQ_MODE_MSIX:
2921 break;
2922 case IRQ_MODE_INTX:
2923 pqi_configure_legacy_intx(ctrl_info, true);
2924 sis_disable_msix(ctrl_info);
2925 sis_enable_intx(ctrl_info);
2926 break;
2927 case IRQ_MODE_NONE:
2928 sis_disable_msix(ctrl_info);
2929 break;
2930 }
2931 break;
2932 case IRQ_MODE_INTX:
2933 switch (new_mode) {
2934 case IRQ_MODE_MSIX:
2935 pqi_configure_legacy_intx(ctrl_info, false);
2936 sis_disable_intx(ctrl_info);
2937 sis_enable_msix(ctrl_info);
2938 break;
2939 case IRQ_MODE_INTX:
2940 break;
2941 case IRQ_MODE_NONE:
2942 pqi_configure_legacy_intx(ctrl_info, false);
2943 sis_disable_intx(ctrl_info);
2944 break;
2945 }
2946 break;
2947 case IRQ_MODE_NONE:
2948 switch (new_mode) {
2949 case IRQ_MODE_MSIX:
2950 sis_enable_msix(ctrl_info);
2951 break;
2952 case IRQ_MODE_INTX:
2953 pqi_configure_legacy_intx(ctrl_info, true);
2954 sis_enable_intx(ctrl_info);
2955 break;
2956 case IRQ_MODE_NONE:
2957 break;
2958 }
2959 break;
2960 }
2961
2962 ctrl_info->irq_mode = new_mode;
2963}
2964
2965#define PQI_LEGACY_INTX_PENDING 0x1
2966
2967static inline bool pqi_is_valid_irq(struct pqi_ctrl_info *ctrl_info)
2968{
2969 bool valid_irq;
2970 u32 intx_status;
2971
2972 switch (ctrl_info->irq_mode) {
2973 case IRQ_MODE_MSIX:
2974 valid_irq = true;
2975 break;
2976 case IRQ_MODE_INTX:
2977 intx_status =
2978 readl(&ctrl_info->pqi_registers->legacy_intx_status);
2979 if (intx_status & PQI_LEGACY_INTX_PENDING)
2980 valid_irq = true;
2981 else
2982 valid_irq = false;
2983 break;
2984 case IRQ_MODE_NONE:
2985 default:
2986 valid_irq = false;
2987 break;
2988 }
2989
2990 return valid_irq;
2991}
2992
6c223761
KB
2993static irqreturn_t pqi_irq_handler(int irq, void *data)
2994{
2995 struct pqi_ctrl_info *ctrl_info;
2996 struct pqi_queue_group *queue_group;
2997 unsigned int num_responses_handled;
2998
2999 queue_group = data;
3000 ctrl_info = queue_group->ctrl_info;
3001
061ef06a 3002 if (!pqi_is_valid_irq(ctrl_info))
6c223761
KB
3003 return IRQ_NONE;
3004
3005 num_responses_handled = pqi_process_io_intr(ctrl_info, queue_group);
3006
3007 if (irq == ctrl_info->event_irq)
3008 num_responses_handled += pqi_process_event_intr(ctrl_info);
3009
3010 if (num_responses_handled)
3011 atomic_inc(&ctrl_info->num_interrupts);
3012
3013 pqi_start_io(ctrl_info, queue_group, RAID_PATH, NULL);
3014 pqi_start_io(ctrl_info, queue_group, AIO_PATH, NULL);
3015
3016 return IRQ_HANDLED;
3017}
3018
3019static int pqi_request_irqs(struct pqi_ctrl_info *ctrl_info)
3020{
d91d7820 3021 struct pci_dev *pci_dev = ctrl_info->pci_dev;
6c223761
KB
3022 int i;
3023 int rc;
3024
d91d7820 3025 ctrl_info->event_irq = pci_irq_vector(pci_dev, 0);
6c223761
KB
3026
3027 for (i = 0; i < ctrl_info->num_msix_vectors_enabled; i++) {
d91d7820 3028 rc = request_irq(pci_irq_vector(pci_dev, i), pqi_irq_handler, 0,
52198226 3029 DRIVER_NAME_SHORT, &ctrl_info->queue_groups[i]);
6c223761 3030 if (rc) {
d91d7820 3031 dev_err(&pci_dev->dev,
6c223761 3032 "irq %u init failed with error %d\n",
d91d7820 3033 pci_irq_vector(pci_dev, i), rc);
6c223761
KB
3034 return rc;
3035 }
3036 ctrl_info->num_msix_vectors_initialized++;
3037 }
3038
3039 return 0;
3040}
3041
98bf061b
KB
3042static void pqi_free_irqs(struct pqi_ctrl_info *ctrl_info)
3043{
3044 int i;
3045
3046 for (i = 0; i < ctrl_info->num_msix_vectors_initialized; i++)
3047 free_irq(pci_irq_vector(ctrl_info->pci_dev, i),
3048 &ctrl_info->queue_groups[i]);
3049
3050 ctrl_info->num_msix_vectors_initialized = 0;
3051}
3052
6c223761
KB
3053static int pqi_enable_msix_interrupts(struct pqi_ctrl_info *ctrl_info)
3054{
98bf061b 3055 int num_vectors_enabled;
6c223761 3056
98bf061b 3057 num_vectors_enabled = pci_alloc_irq_vectors(ctrl_info->pci_dev,
52198226
CH
3058 PQI_MIN_MSIX_VECTORS, ctrl_info->num_queue_groups,
3059 PCI_IRQ_MSIX | PCI_IRQ_AFFINITY);
98bf061b 3060 if (num_vectors_enabled < 0) {
6c223761 3061 dev_err(&ctrl_info->pci_dev->dev,
98bf061b
KB
3062 "MSI-X init failed with error %d\n",
3063 num_vectors_enabled);
3064 return num_vectors_enabled;
6c223761
KB
3065 }
3066
98bf061b 3067 ctrl_info->num_msix_vectors_enabled = num_vectors_enabled;
061ef06a 3068 ctrl_info->irq_mode = IRQ_MODE_MSIX;
6c223761
KB
3069 return 0;
3070}
3071
98bf061b
KB
3072static void pqi_disable_msix_interrupts(struct pqi_ctrl_info *ctrl_info)
3073{
3074 if (ctrl_info->num_msix_vectors_enabled) {
3075 pci_free_irq_vectors(ctrl_info->pci_dev);
3076 ctrl_info->num_msix_vectors_enabled = 0;
3077 }
3078}
3079
6c223761
KB
3080static int pqi_alloc_operational_queues(struct pqi_ctrl_info *ctrl_info)
3081{
3082 unsigned int i;
3083 size_t alloc_length;
3084 size_t element_array_length_per_iq;
3085 size_t element_array_length_per_oq;
3086 void *element_array;
3087 void *next_queue_index;
3088 void *aligned_pointer;
3089 unsigned int num_inbound_queues;
3090 unsigned int num_outbound_queues;
3091 unsigned int num_queue_indexes;
3092 struct pqi_queue_group *queue_group;
3093
3094 element_array_length_per_iq =
3095 PQI_OPERATIONAL_IQ_ELEMENT_LENGTH *
3096 ctrl_info->num_elements_per_iq;
3097 element_array_length_per_oq =
3098 PQI_OPERATIONAL_OQ_ELEMENT_LENGTH *
3099 ctrl_info->num_elements_per_oq;
3100 num_inbound_queues = ctrl_info->num_queue_groups * 2;
3101 num_outbound_queues = ctrl_info->num_queue_groups;
3102 num_queue_indexes = (ctrl_info->num_queue_groups * 3) + 1;
3103
3104 aligned_pointer = NULL;
3105
3106 for (i = 0; i < num_inbound_queues; i++) {
3107 aligned_pointer = PTR_ALIGN(aligned_pointer,
3108 PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT);
3109 aligned_pointer += element_array_length_per_iq;
3110 }
3111
3112 for (i = 0; i < num_outbound_queues; i++) {
3113 aligned_pointer = PTR_ALIGN(aligned_pointer,
3114 PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT);
3115 aligned_pointer += element_array_length_per_oq;
3116 }
3117
3118 aligned_pointer = PTR_ALIGN(aligned_pointer,
3119 PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT);
3120 aligned_pointer += PQI_NUM_EVENT_QUEUE_ELEMENTS *
3121 PQI_EVENT_OQ_ELEMENT_LENGTH;
3122
3123 for (i = 0; i < num_queue_indexes; i++) {
3124 aligned_pointer = PTR_ALIGN(aligned_pointer,
3125 PQI_OPERATIONAL_INDEX_ALIGNMENT);
3126 aligned_pointer += sizeof(pqi_index_t);
3127 }
3128
3129 alloc_length = (size_t)aligned_pointer +
3130 PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT;
3131
e1d213bd
KB
3132 alloc_length += PQI_EXTRA_SGL_MEMORY;
3133
6c223761
KB
3134 ctrl_info->queue_memory_base =
3135 dma_zalloc_coherent(&ctrl_info->pci_dev->dev,
3136 alloc_length,
3137 &ctrl_info->queue_memory_base_dma_handle, GFP_KERNEL);
3138
d87d5474 3139 if (!ctrl_info->queue_memory_base)
6c223761 3140 return -ENOMEM;
6c223761
KB
3141
3142 ctrl_info->queue_memory_length = alloc_length;
3143
3144 element_array = PTR_ALIGN(ctrl_info->queue_memory_base,
3145 PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT);
3146
3147 for (i = 0; i < ctrl_info->num_queue_groups; i++) {
3148 queue_group = &ctrl_info->queue_groups[i];
3149 queue_group->iq_element_array[RAID_PATH] = element_array;
3150 queue_group->iq_element_array_bus_addr[RAID_PATH] =
3151 ctrl_info->queue_memory_base_dma_handle +
3152 (element_array - ctrl_info->queue_memory_base);
3153 element_array += element_array_length_per_iq;
3154 element_array = PTR_ALIGN(element_array,
3155 PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT);
3156 queue_group->iq_element_array[AIO_PATH] = element_array;
3157 queue_group->iq_element_array_bus_addr[AIO_PATH] =
3158 ctrl_info->queue_memory_base_dma_handle +
3159 (element_array - ctrl_info->queue_memory_base);
3160 element_array += element_array_length_per_iq;
3161 element_array = PTR_ALIGN(element_array,
3162 PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT);
3163 }
3164
3165 for (i = 0; i < ctrl_info->num_queue_groups; i++) {
3166 queue_group = &ctrl_info->queue_groups[i];
3167 queue_group->oq_element_array = element_array;
3168 queue_group->oq_element_array_bus_addr =
3169 ctrl_info->queue_memory_base_dma_handle +
3170 (element_array - ctrl_info->queue_memory_base);
3171 element_array += element_array_length_per_oq;
3172 element_array = PTR_ALIGN(element_array,
3173 PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT);
3174 }
3175
3176 ctrl_info->event_queue.oq_element_array = element_array;
3177 ctrl_info->event_queue.oq_element_array_bus_addr =
3178 ctrl_info->queue_memory_base_dma_handle +
3179 (element_array - ctrl_info->queue_memory_base);
3180 element_array += PQI_NUM_EVENT_QUEUE_ELEMENTS *
3181 PQI_EVENT_OQ_ELEMENT_LENGTH;
3182
3183 next_queue_index = PTR_ALIGN(element_array,
3184 PQI_OPERATIONAL_INDEX_ALIGNMENT);
3185
3186 for (i = 0; i < ctrl_info->num_queue_groups; i++) {
3187 queue_group = &ctrl_info->queue_groups[i];
3188 queue_group->iq_ci[RAID_PATH] = next_queue_index;
3189 queue_group->iq_ci_bus_addr[RAID_PATH] =
3190 ctrl_info->queue_memory_base_dma_handle +
3191 (next_queue_index - ctrl_info->queue_memory_base);
3192 next_queue_index += sizeof(pqi_index_t);
3193 next_queue_index = PTR_ALIGN(next_queue_index,
3194 PQI_OPERATIONAL_INDEX_ALIGNMENT);
3195 queue_group->iq_ci[AIO_PATH] = next_queue_index;
3196 queue_group->iq_ci_bus_addr[AIO_PATH] =
3197 ctrl_info->queue_memory_base_dma_handle +
3198 (next_queue_index - ctrl_info->queue_memory_base);
3199 next_queue_index += sizeof(pqi_index_t);
3200 next_queue_index = PTR_ALIGN(next_queue_index,
3201 PQI_OPERATIONAL_INDEX_ALIGNMENT);
3202 queue_group->oq_pi = next_queue_index;
3203 queue_group->oq_pi_bus_addr =
3204 ctrl_info->queue_memory_base_dma_handle +
3205 (next_queue_index - ctrl_info->queue_memory_base);
3206 next_queue_index += sizeof(pqi_index_t);
3207 next_queue_index = PTR_ALIGN(next_queue_index,
3208 PQI_OPERATIONAL_INDEX_ALIGNMENT);
3209 }
3210
3211 ctrl_info->event_queue.oq_pi = next_queue_index;
3212 ctrl_info->event_queue.oq_pi_bus_addr =
3213 ctrl_info->queue_memory_base_dma_handle +
3214 (next_queue_index - ctrl_info->queue_memory_base);
3215
3216 return 0;
3217}
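/*
 * Editor's sketch of the two-pass sizing pattern used above (illustrative
 * only, hypothetical names): pass 1 walks a NULL-based cursor through the
 * same sequence of PTR_ALIGN() steps as the real layout, so the final
 * cursor value is the worst-case length to allocate; pass 2 then repeats
 * the identical walk over the DMA buffer to hand out aligned sub-regions.
 */
static size_t example_size_by_dry_run(size_t chunk_len, unsigned int chunks,
	unsigned int alignment)
{
	void *cursor = NULL;
	unsigned int i;

	for (i = 0; i < chunks; i++) {
		cursor = PTR_ALIGN(cursor, alignment);
		cursor += chunk_len;
	}

	/* Leave room so the start of the real buffer can also be aligned. */
	return (size_t)cursor + alignment;
}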
3218
3219static void pqi_init_operational_queues(struct pqi_ctrl_info *ctrl_info)
3220{
3221 unsigned int i;
3222 u16 next_iq_id = PQI_MIN_OPERATIONAL_QUEUE_ID;
3223 u16 next_oq_id = PQI_MIN_OPERATIONAL_QUEUE_ID;
3224
3225 /*
3226 * Initialize the backpointers to the controller structure in
3227 * each operational queue group structure.
3228 */
3229 for (i = 0; i < ctrl_info->num_queue_groups; i++)
3230 ctrl_info->queue_groups[i].ctrl_info = ctrl_info;
3231
3232 /*
3233 * Assign IDs to all operational queues. Note that the IDs
3234 * assigned to operational IQs are independent of the IDs
3235 * assigned to operational OQs.
3236 */
3237 ctrl_info->event_queue.oq_id = next_oq_id++;
3238 for (i = 0; i < ctrl_info->num_queue_groups; i++) {
3239 ctrl_info->queue_groups[i].iq_id[RAID_PATH] = next_iq_id++;
3240 ctrl_info->queue_groups[i].iq_id[AIO_PATH] = next_iq_id++;
3241 ctrl_info->queue_groups[i].oq_id = next_oq_id++;
3242 }
3243
3244 /*
3245 * Assign MSI-X table entry indexes to all queues. Note that the
3246 * interrupt for the event queue is shared with the first queue group.
3247 */
3248 ctrl_info->event_queue.int_msg_num = 0;
3249 for (i = 0; i < ctrl_info->num_queue_groups; i++)
3250 ctrl_info->queue_groups[i].int_msg_num = i;
3251
3252 for (i = 0; i < ctrl_info->num_queue_groups; i++) {
3253 spin_lock_init(&ctrl_info->queue_groups[i].submit_lock[0]);
3254 spin_lock_init(&ctrl_info->queue_groups[i].submit_lock[1]);
3255 INIT_LIST_HEAD(&ctrl_info->queue_groups[i].request_list[0]);
3256 INIT_LIST_HEAD(&ctrl_info->queue_groups[i].request_list[1]);
3257 }
3258}
3259
3260static int pqi_alloc_admin_queues(struct pqi_ctrl_info *ctrl_info)
3261{
3262 size_t alloc_length;
3263 struct pqi_admin_queues_aligned *admin_queues_aligned;
3264 struct pqi_admin_queues *admin_queues;
3265
3266 alloc_length = sizeof(struct pqi_admin_queues_aligned) +
3267 PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT;
3268
3269 ctrl_info->admin_queue_memory_base =
3270 dma_zalloc_coherent(&ctrl_info->pci_dev->dev,
3271 alloc_length,
3272 &ctrl_info->admin_queue_memory_base_dma_handle,
3273 GFP_KERNEL);
3274
3275 if (!ctrl_info->admin_queue_memory_base)
3276 return -ENOMEM;
3277
3278 ctrl_info->admin_queue_memory_length = alloc_length;
3279
3280 admin_queues = &ctrl_info->admin_queues;
3281 admin_queues_aligned = PTR_ALIGN(ctrl_info->admin_queue_memory_base,
3282 PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT);
3283 admin_queues->iq_element_array =
3284 &admin_queues_aligned->iq_element_array;
3285 admin_queues->oq_element_array =
3286 &admin_queues_aligned->oq_element_array;
3287 admin_queues->iq_ci = &admin_queues_aligned->iq_ci;
3288 admin_queues->oq_pi = &admin_queues_aligned->oq_pi;
3289
3290 admin_queues->iq_element_array_bus_addr =
3291 ctrl_info->admin_queue_memory_base_dma_handle +
3292 (admin_queues->iq_element_array -
3293 ctrl_info->admin_queue_memory_base);
3294 admin_queues->oq_element_array_bus_addr =
3295 ctrl_info->admin_queue_memory_base_dma_handle +
3296 (admin_queues->oq_element_array -
3297 ctrl_info->admin_queue_memory_base);
3298 admin_queues->iq_ci_bus_addr =
3299 ctrl_info->admin_queue_memory_base_dma_handle +
3300 ((void *)admin_queues->iq_ci -
3301 ctrl_info->admin_queue_memory_base);
3302 admin_queues->oq_pi_bus_addr =
3303 ctrl_info->admin_queue_memory_base_dma_handle +
3304 ((void *)admin_queues->oq_pi -
3305 ctrl_info->admin_queue_memory_base);
3306
3307 return 0;
3308}
3309
3310#define PQI_ADMIN_QUEUE_CREATE_TIMEOUT_JIFFIES HZ
3311#define PQI_ADMIN_QUEUE_CREATE_POLL_INTERVAL_MSECS 1
3312
3313static int pqi_create_admin_queues(struct pqi_ctrl_info *ctrl_info)
3314{
3315 struct pqi_device_registers __iomem *pqi_registers;
3316 struct pqi_admin_queues *admin_queues;
3317 unsigned long timeout;
3318 u8 status;
3319 u32 reg;
3320
3321 pqi_registers = ctrl_info->pqi_registers;
3322 admin_queues = &ctrl_info->admin_queues;
3323
3324 writeq((u64)admin_queues->iq_element_array_bus_addr,
3325 &pqi_registers->admin_iq_element_array_addr);
3326 writeq((u64)admin_queues->oq_element_array_bus_addr,
3327 &pqi_registers->admin_oq_element_array_addr);
3328 writeq((u64)admin_queues->iq_ci_bus_addr,
3329 &pqi_registers->admin_iq_ci_addr);
3330 writeq((u64)admin_queues->oq_pi_bus_addr,
3331 &pqi_registers->admin_oq_pi_addr);
3332
3333 reg = PQI_ADMIN_IQ_NUM_ELEMENTS |
3334 (PQI_ADMIN_OQ_NUM_ELEMENTS) << 8 |
3335 (admin_queues->int_msg_num << 16);
3336 writel(reg, &pqi_registers->admin_iq_num_elements);
3337 writel(PQI_CREATE_ADMIN_QUEUE_PAIR,
3338 &pqi_registers->function_and_status_code);
3339
3340 timeout = PQI_ADMIN_QUEUE_CREATE_TIMEOUT_JIFFIES + jiffies;
3341 while (1) {
3342 status = readb(&pqi_registers->function_and_status_code);
3343 if (status == PQI_STATUS_IDLE)
3344 break;
3345 if (time_after(jiffies, timeout))
3346 return -ETIMEDOUT;
3347 msleep(PQI_ADMIN_QUEUE_CREATE_POLL_INTERVAL_MSECS);
3348 }
3349
3350 /*
3351 * The offset registers are not initialized to the correct
3352 * offsets until *after* the create admin queue pair command
3353 * completes successfully.
3354 */
3355 admin_queues->iq_pi = ctrl_info->iomem_base +
3356 PQI_DEVICE_REGISTERS_OFFSET +
3357 readq(&pqi_registers->admin_iq_pi_offset);
3358 admin_queues->oq_ci = ctrl_info->iomem_base +
3359 PQI_DEVICE_REGISTERS_OFFSET +
3360 readq(&pqi_registers->admin_oq_ci_offset);
3361
3362 return 0;
3363}
3364
3365static void pqi_submit_admin_request(struct pqi_ctrl_info *ctrl_info,
3366 struct pqi_general_admin_request *request)
3367{
3368 struct pqi_admin_queues *admin_queues;
3369 void *next_element;
3370 pqi_index_t iq_pi;
3371
3372 admin_queues = &ctrl_info->admin_queues;
3373 iq_pi = admin_queues->iq_pi_copy;
3374
3375 next_element = admin_queues->iq_element_array +
3376 (iq_pi * PQI_ADMIN_IQ_ELEMENT_LENGTH);
3377
3378 memcpy(next_element, request, sizeof(*request));
3379
3380 iq_pi = (iq_pi + 1) % PQI_ADMIN_IQ_NUM_ELEMENTS;
3381 admin_queues->iq_pi_copy = iq_pi;
3382
3383 /*
3384 * This write notifies the controller that an IU is available to be
3385 * processed.
3386 */
3387 writel(iq_pi, admin_queues->iq_pi);
3388}
3389
3390static int pqi_poll_for_admin_response(struct pqi_ctrl_info *ctrl_info,
3391 struct pqi_general_admin_response *response)
3392{
3393 struct pqi_admin_queues *admin_queues;
3394 pqi_index_t oq_pi;
3395 pqi_index_t oq_ci;
3396 unsigned long timeout;
3397
3398 admin_queues = &ctrl_info->admin_queues;
3399 oq_ci = admin_queues->oq_ci_copy;
3400
3401 timeout = (3 * HZ) + jiffies;
3402
3403 while (1) {
3404 oq_pi = *admin_queues->oq_pi;
3405 if (oq_pi != oq_ci)
3406 break;
3407 if (time_after(jiffies, timeout)) {
3408 dev_err(&ctrl_info->pci_dev->dev,
3409 "timed out waiting for admin response\n");
3410 return -ETIMEDOUT;
3411 }
3412 usleep_range(1000, 2000);
3413 }
3414
3415 memcpy(response, admin_queues->oq_element_array +
3416 (oq_ci * PQI_ADMIN_OQ_ELEMENT_LENGTH), sizeof(*response));
3417
3418 oq_ci = (oq_ci + 1) % PQI_ADMIN_OQ_NUM_ELEMENTS;
3419 admin_queues->oq_ci_copy = oq_ci;
3420 writel(oq_ci, admin_queues->oq_ci);
3421
3422 return 0;
3423}
3424
3425static void pqi_start_io(struct pqi_ctrl_info *ctrl_info,
3426 struct pqi_queue_group *queue_group, enum pqi_io_path path,
3427 struct pqi_io_request *io_request)
3428{
3429 struct pqi_io_request *next;
3430 void *next_element;
3431 pqi_index_t iq_pi;
3432 pqi_index_t iq_ci;
3433 size_t iu_length;
3434 unsigned long flags;
3435 unsigned int num_elements_needed;
3436 unsigned int num_elements_to_end_of_queue;
3437 size_t copy_count;
3438 struct pqi_iu_header *request;
3439
3440 spin_lock_irqsave(&queue_group->submit_lock[path], flags);
3441
376fb880
KB
3442 if (io_request) {
3443 io_request->queue_group = queue_group;
6c223761
KB
3444 list_add_tail(&io_request->request_list_entry,
3445 &queue_group->request_list[path]);
376fb880 3446 }
6c223761
KB
3447
3448 iq_pi = queue_group->iq_pi_copy[path];
3449
3450 list_for_each_entry_safe(io_request, next,
3451 &queue_group->request_list[path], request_list_entry) {
3452
3453 request = io_request->iu;
3454
3455 iu_length = get_unaligned_le16(&request->iu_length) +
3456 PQI_REQUEST_HEADER_LENGTH;
3457 num_elements_needed =
3458 DIV_ROUND_UP(iu_length,
3459 PQI_OPERATIONAL_IQ_ELEMENT_LENGTH);
3460
3461 iq_ci = *queue_group->iq_ci[path];
3462
3463 if (num_elements_needed > pqi_num_elements_free(iq_pi, iq_ci,
3464 ctrl_info->num_elements_per_iq))
3465 break;
3466
3467 put_unaligned_le16(queue_group->oq_id,
3468 &request->response_queue_id);
3469
3470 next_element = queue_group->iq_element_array[path] +
3471 (iq_pi * PQI_OPERATIONAL_IQ_ELEMENT_LENGTH);
3472
3473 num_elements_to_end_of_queue =
3474 ctrl_info->num_elements_per_iq - iq_pi;
3475
3476 if (num_elements_needed <= num_elements_to_end_of_queue) {
3477 memcpy(next_element, request, iu_length);
3478 } else {
3479 copy_count = num_elements_to_end_of_queue *
3480 PQI_OPERATIONAL_IQ_ELEMENT_LENGTH;
3481 memcpy(next_element, request, copy_count);
3482 memcpy(queue_group->iq_element_array[path],
3483 (u8 *)request + copy_count,
3484 iu_length - copy_count);
3485 }
3486
3487 iq_pi = (iq_pi + num_elements_needed) %
3488 ctrl_info->num_elements_per_iq;
3489
3490 list_del(&io_request->request_list_entry);
3491 }
3492
3493 if (iq_pi != queue_group->iq_pi_copy[path]) {
3494 queue_group->iq_pi_copy[path] = iq_pi;
3495 /*
3496 * This write notifies the controller that one or more IUs are
3497 * available to be processed.
3498 */
3499 writel(iq_pi, queue_group->iq_pi[path]);
3500 }
3501
3502 spin_unlock_irqrestore(&queue_group->submit_lock[path], flags);
3503}
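/*
 * Editor's sketch of the wrap-around copy above (illustrative only,
 * hypothetical function name): an IU that spans the end of the circular
 * element array is written in two pieces - the bytes that fit before the
 * end of the array first, then the remainder starting at element 0.
 */
static void example_copy_spanning_iu(void *element_array, size_t element_len,
	unsigned int num_elements, unsigned int pi, const void *iu,
	size_t iu_length)
{
	size_t bytes_to_end = (num_elements - pi) * element_len;

	if (iu_length <= bytes_to_end) {
		memcpy(element_array + (pi * element_len), iu, iu_length);
	} else {
		memcpy(element_array + (pi * element_len), iu, bytes_to_end);
		memcpy(element_array, (const u8 *)iu + bytes_to_end,
			iu_length - bytes_to_end);
	}
}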
3504
1f37e992
KB
3505#define PQI_WAIT_FOR_COMPLETION_IO_TIMEOUT_SECS 10
3506
3507static int pqi_wait_for_completion_io(struct pqi_ctrl_info *ctrl_info,
3508 struct completion *wait)
3509{
3510 int rc;
1f37e992
KB
3511
3512 while (1) {
3513 if (wait_for_completion_io_timeout(wait,
3514 PQI_WAIT_FOR_COMPLETION_IO_TIMEOUT_SECS * HZ)) {
3515 rc = 0;
3516 break;
3517 }
3518
3519 pqi_check_ctrl_health(ctrl_info);
3520 if (pqi_ctrl_offline(ctrl_info)) {
3521 rc = -ENXIO;
3522 break;
3523 }
1f37e992
KB
3524 }
3525
3526 return rc;
3527}
3528
6c223761
KB
3529static void pqi_raid_synchronous_complete(struct pqi_io_request *io_request,
3530 void *context)
3531{
3532 struct completion *waiting = context;
3533
3534 complete(waiting);
3535}
3536
3537static int pqi_submit_raid_request_synchronous_with_io_request(
3538 struct pqi_ctrl_info *ctrl_info, struct pqi_io_request *io_request,
3539 unsigned long timeout_msecs)
3540{
3541 int rc = 0;
3542 DECLARE_COMPLETION_ONSTACK(wait);
3543
3544 io_request->io_complete_callback = pqi_raid_synchronous_complete;
3545 io_request->context = &wait;
3546
3547 pqi_start_io(ctrl_info,
3548 &ctrl_info->queue_groups[PQI_DEFAULT_QUEUE_GROUP], RAID_PATH,
3549 io_request);
3550
3551 if (timeout_msecs == NO_TIMEOUT) {
1f37e992 3552 pqi_wait_for_completion_io(ctrl_info, &wait);
6c223761
KB
3553 } else {
3554 if (!wait_for_completion_io_timeout(&wait,
3555 msecs_to_jiffies(timeout_msecs))) {
3556 dev_warn(&ctrl_info->pci_dev->dev,
3557 "command timed out\n");
3558 rc = -ETIMEDOUT;
3559 }
3560 }
3561
3562 return rc;
3563}
3564
3565static int pqi_submit_raid_request_synchronous(struct pqi_ctrl_info *ctrl_info,
3566 struct pqi_iu_header *request, unsigned int flags,
3567 struct pqi_raid_error_info *error_info, unsigned long timeout_msecs)
3568{
3569 int rc;
3570 struct pqi_io_request *io_request;
3571 unsigned long start_jiffies;
3572 unsigned long msecs_blocked;
3573 size_t iu_length;
3574
3575 /*
3576 * Note that PQI_SYNC_FLAGS_INTERRUPTABLE and a timeout value are
3577 * mutually exclusive.
3578 */
3579
3580 if (flags & PQI_SYNC_FLAGS_INTERRUPTABLE) {
3581 if (down_interruptible(&ctrl_info->sync_request_sem))
3582 return -ERESTARTSYS;
3583 } else {
3584 if (timeout_msecs == NO_TIMEOUT) {
3585 down(&ctrl_info->sync_request_sem);
3586 } else {
3587 start_jiffies = jiffies;
3588 if (down_timeout(&ctrl_info->sync_request_sem,
3589 msecs_to_jiffies(timeout_msecs)))
3590 return -ETIMEDOUT;
3591 msecs_blocked =
3592 jiffies_to_msecs(jiffies - start_jiffies);
3593 if (msecs_blocked >= timeout_msecs)
3594 return -ETIMEDOUT;
3595 timeout_msecs -= msecs_blocked;
3596 }
3597 }
3598
7561a7e4
KB
3599 pqi_ctrl_busy(ctrl_info);
3600 timeout_msecs = pqi_wait_if_ctrl_blocked(ctrl_info, timeout_msecs);
3601 if (timeout_msecs == 0) {
3602 rc = -ETIMEDOUT;
3603 goto out;
3604 }
3605
376fb880
KB
3606 if (pqi_ctrl_offline(ctrl_info)) {
3607 rc = -ENXIO;
3608 goto out;
3609 }
3610
6c223761
KB
3611 io_request = pqi_alloc_io_request(ctrl_info);
3612
3613 put_unaligned_le16(io_request->index,
3614 &(((struct pqi_raid_path_request *)request)->request_id));
3615
3616 if (request->iu_type == PQI_REQUEST_IU_RAID_PATH_IO)
3617 ((struct pqi_raid_path_request *)request)->error_index =
3618 ((struct pqi_raid_path_request *)request)->request_id;
3619
3620 iu_length = get_unaligned_le16(&request->iu_length) +
3621 PQI_REQUEST_HEADER_LENGTH;
3622 memcpy(io_request->iu, request, iu_length);
3623
3624 rc = pqi_submit_raid_request_synchronous_with_io_request(ctrl_info,
3625 io_request, timeout_msecs);
3626
3627 if (error_info) {
3628 if (io_request->error_info)
3629 memcpy(error_info, io_request->error_info,
3630 sizeof(*error_info));
3631 else
3632 memset(error_info, 0, sizeof(*error_info));
3633 } else if (rc == 0 && io_request->error_info) {
3634 u8 scsi_status;
3635 struct pqi_raid_error_info *raid_error_info;
3636
3637 raid_error_info = io_request->error_info;
3638 scsi_status = raid_error_info->status;
3639
3640 if (scsi_status == SAM_STAT_CHECK_CONDITION &&
3641 raid_error_info->data_out_result ==
3642 PQI_DATA_IN_OUT_UNDERFLOW)
3643 scsi_status = SAM_STAT_GOOD;
3644
3645 if (scsi_status != SAM_STAT_GOOD)
3646 rc = -EIO;
3647 }
3648
3649 pqi_free_io_request(io_request);
3650
7561a7e4
KB
3651out:
3652 pqi_ctrl_unbusy(ctrl_info);
6c223761
KB
3653 up(&ctrl_info->sync_request_sem);
3654
3655 return rc;
3656}
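/*
 * Editor's sketch of the timeout budgeting above (illustrative only,
 * hypothetical function name): whatever portion of the caller's timeout is
 * consumed waiting for the sync-request semaphore is deducted before the
 * request is actually issued, so the caller's overall deadline is honored
 * end to end.
 */
static int example_charge_wait_to_timeout(unsigned long start_jiffies,
	unsigned long *timeout_msecs)
{
	unsigned long msecs_blocked = jiffies_to_msecs(jiffies - start_jiffies);

	if (msecs_blocked >= *timeout_msecs)
		return -ETIMEDOUT;

	*timeout_msecs -= msecs_blocked;

	return 0;
}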
3657
3658static int pqi_validate_admin_response(
3659 struct pqi_general_admin_response *response, u8 expected_function_code)
3660{
3661 if (response->header.iu_type != PQI_RESPONSE_IU_GENERAL_ADMIN)
3662 return -EINVAL;
3663
3664 if (get_unaligned_le16(&response->header.iu_length) !=
3665 PQI_GENERAL_ADMIN_IU_LENGTH)
3666 return -EINVAL;
3667
3668 if (response->function_code != expected_function_code)
3669 return -EINVAL;
3670
3671 if (response->status != PQI_GENERAL_ADMIN_STATUS_SUCCESS)
3672 return -EINVAL;
3673
3674 return 0;
3675}
3676
3677static int pqi_submit_admin_request_synchronous(
3678 struct pqi_ctrl_info *ctrl_info,
3679 struct pqi_general_admin_request *request,
3680 struct pqi_general_admin_response *response)
3681{
3682 int rc;
3683
3684 pqi_submit_admin_request(ctrl_info, request);
3685
3686 rc = pqi_poll_for_admin_response(ctrl_info, response);
3687
3688 if (rc == 0)
3689 rc = pqi_validate_admin_response(response,
3690 request->function_code);
3691
3692 return rc;
3693}
3694
3695static int pqi_report_device_capability(struct pqi_ctrl_info *ctrl_info)
3696{
3697 int rc;
3698 struct pqi_general_admin_request request;
3699 struct pqi_general_admin_response response;
3700 struct pqi_device_capability *capability;
3701 struct pqi_iu_layer_descriptor *sop_iu_layer_descriptor;
3702
3703 capability = kmalloc(sizeof(*capability), GFP_KERNEL);
3704 if (!capability)
3705 return -ENOMEM;
3706
3707 memset(&request, 0, sizeof(request));
3708
3709 request.header.iu_type = PQI_REQUEST_IU_GENERAL_ADMIN;
3710 put_unaligned_le16(PQI_GENERAL_ADMIN_IU_LENGTH,
3711 &request.header.iu_length);
3712 request.function_code =
3713 PQI_GENERAL_ADMIN_FUNCTION_REPORT_DEVICE_CAPABILITY;
3714 put_unaligned_le32(sizeof(*capability),
3715 &request.data.report_device_capability.buffer_length);
3716
3717 rc = pqi_map_single(ctrl_info->pci_dev,
3718 &request.data.report_device_capability.sg_descriptor,
3719 capability, sizeof(*capability),
3720 PCI_DMA_FROMDEVICE);
3721 if (rc)
3722 goto out;
3723
3724 rc = pqi_submit_admin_request_synchronous(ctrl_info, &request,
3725 &response);
3726
3727 pqi_pci_unmap(ctrl_info->pci_dev,
3728 &request.data.report_device_capability.sg_descriptor, 1,
3729 PCI_DMA_FROMDEVICE);
3730
3731 if (rc)
3732 goto out;
3733
3734 if (response.status != PQI_GENERAL_ADMIN_STATUS_SUCCESS) {
3735 rc = -EIO;
3736 goto out;
3737 }
3738
3739 ctrl_info->max_inbound_queues =
3740 get_unaligned_le16(&capability->max_inbound_queues);
3741 ctrl_info->max_elements_per_iq =
3742 get_unaligned_le16(&capability->max_elements_per_iq);
3743 ctrl_info->max_iq_element_length =
3744 get_unaligned_le16(&capability->max_iq_element_length)
3745 * 16;
3746 ctrl_info->max_outbound_queues =
3747 get_unaligned_le16(&capability->max_outbound_queues);
3748 ctrl_info->max_elements_per_oq =
3749 get_unaligned_le16(&capability->max_elements_per_oq);
3750 ctrl_info->max_oq_element_length =
3751 get_unaligned_le16(&capability->max_oq_element_length)
3752 * 16;
3753
3754 sop_iu_layer_descriptor =
3755 &capability->iu_layer_descriptors[PQI_PROTOCOL_SOP];
3756
3757 ctrl_info->max_inbound_iu_length_per_firmware =
3758 get_unaligned_le16(
3759 &sop_iu_layer_descriptor->max_inbound_iu_length);
3760 ctrl_info->inbound_spanning_supported =
3761 sop_iu_layer_descriptor->inbound_spanning_supported;
3762 ctrl_info->outbound_spanning_supported =
3763 sop_iu_layer_descriptor->outbound_spanning_supported;
3764
3765out:
3766 kfree(capability);
3767
3768 return rc;
3769}
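/*
 * The capability decoding above multiplies the reported element lengths by
 * 16, which suggests the firmware expresses max_iq_element_length and
 * max_oq_element_length in 16-byte units; the resulting byte values are
 * what pqi_validate_device_capability() compares against the driver's
 * minimum operational element lengths below.
 */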
3770
3771static int pqi_validate_device_capability(struct pqi_ctrl_info *ctrl_info)
3772{
3773 if (ctrl_info->max_iq_element_length <
3774 PQI_OPERATIONAL_IQ_ELEMENT_LENGTH) {
3775 dev_err(&ctrl_info->pci_dev->dev,
3776 "max. inbound queue element length of %d is less than the required length of %d\n",
3777 ctrl_info->max_iq_element_length,
3778 PQI_OPERATIONAL_IQ_ELEMENT_LENGTH);
3779 return -EINVAL;
3780 }
3781
3782 if (ctrl_info->max_oq_element_length <
3783 PQI_OPERATIONAL_OQ_ELEMENT_LENGTH) {
3784 dev_err(&ctrl_info->pci_dev->dev,
3785 "max. outbound queue element length of %d is less than the required length of %d\n",
3786 ctrl_info->max_oq_element_length,
3787 PQI_OPERATIONAL_OQ_ELEMENT_LENGTH);
3788 return -EINVAL;
3789 }
3790
3791 if (ctrl_info->max_inbound_iu_length_per_firmware <
3792 PQI_OPERATIONAL_IQ_ELEMENT_LENGTH) {
3793 dev_err(&ctrl_info->pci_dev->dev,
3794 "max. inbound IU length of %u is less than the min. required length of %d\n",
3795 ctrl_info->max_inbound_iu_length_per_firmware,
3796 PQI_OPERATIONAL_IQ_ELEMENT_LENGTH);
3797 return -EINVAL;
3798 }
3799
77668f41
KB
3800 if (!ctrl_info->inbound_spanning_supported) {
3801 dev_err(&ctrl_info->pci_dev->dev,
3802 "the controller does not support inbound spanning\n");
3803 return -EINVAL;
3804 }
3805
3806 if (ctrl_info->outbound_spanning_supported) {
3807 dev_err(&ctrl_info->pci_dev->dev,
3808 "the controller supports outbound spanning but this driver does not\n");
3809 return -EINVAL;
3810 }
3811
6c223761
KB
3812 return 0;
3813}
3814
3815static int pqi_delete_operational_queue(struct pqi_ctrl_info *ctrl_info,
3816 bool inbound_queue, u16 queue_id)
3817{
3818 struct pqi_general_admin_request request;
3819 struct pqi_general_admin_response response;
3820
3821 memset(&request, 0, sizeof(request));
3822 request.header.iu_type = PQI_REQUEST_IU_GENERAL_ADMIN;
3823 put_unaligned_le16(PQI_GENERAL_ADMIN_IU_LENGTH,
3824 &request.header.iu_length);
3825 if (inbound_queue)
3826 request.function_code =
3827 PQI_GENERAL_ADMIN_FUNCTION_DELETE_IQ;
3828 else
3829 request.function_code =
3830 PQI_GENERAL_ADMIN_FUNCTION_DELETE_OQ;
3831 put_unaligned_le16(queue_id,
3832 &request.data.delete_operational_queue.queue_id);
3833
3834 return pqi_submit_admin_request_synchronous(ctrl_info, &request,
3835 &response);
3836}
3837
3838static int pqi_create_event_queue(struct pqi_ctrl_info *ctrl_info)
3839{
3840 int rc;
3841 struct pqi_event_queue *event_queue;
3842 struct pqi_general_admin_request request;
3843 struct pqi_general_admin_response response;
3844
3845 event_queue = &ctrl_info->event_queue;
3846
3847 /*
3848 * Create OQ (Outbound Queue - device to host queue) dedicated
3849 * to events.
3850 */
3851 memset(&request, 0, sizeof(request));
3852 request.header.iu_type = PQI_REQUEST_IU_GENERAL_ADMIN;
3853 put_unaligned_le16(PQI_GENERAL_ADMIN_IU_LENGTH,
3854 &request.header.iu_length);
3855 request.function_code = PQI_GENERAL_ADMIN_FUNCTION_CREATE_OQ;
3856 put_unaligned_le16(event_queue->oq_id,
3857 &request.data.create_operational_oq.queue_id);
3858 put_unaligned_le64((u64)event_queue->oq_element_array_bus_addr,
3859 &request.data.create_operational_oq.element_array_addr);
3860 put_unaligned_le64((u64)event_queue->oq_pi_bus_addr,
3861 &request.data.create_operational_oq.pi_addr);
3862 put_unaligned_le16(PQI_NUM_EVENT_QUEUE_ELEMENTS,
3863 &request.data.create_operational_oq.num_elements);
3864 put_unaligned_le16(PQI_EVENT_OQ_ELEMENT_LENGTH / 16,
3865 &request.data.create_operational_oq.element_length);
3866 request.data.create_operational_oq.queue_protocol = PQI_PROTOCOL_SOP;
3867 put_unaligned_le16(event_queue->int_msg_num,
3868 &request.data.create_operational_oq.int_msg_num);
3869
3870 rc = pqi_submit_admin_request_synchronous(ctrl_info, &request,
3871 &response);
3872 if (rc)
3873 return rc;
3874
3875 event_queue->oq_ci = ctrl_info->iomem_base +
3876 PQI_DEVICE_REGISTERS_OFFSET +
3877 get_unaligned_le64(
3878 &response.data.create_operational_oq.oq_ci_offset);
3879
3880 return 0;
3881}
3882
061ef06a
KB
3883static int pqi_create_queue_group(struct pqi_ctrl_info *ctrl_info,
3884 unsigned int group_number)
6c223761 3885{
6c223761
KB
3886 int rc;
3887 struct pqi_queue_group *queue_group;
3888 struct pqi_general_admin_request request;
3889 struct pqi_general_admin_response response;
3890
061ef06a 3891 queue_group = &ctrl_info->queue_groups[group_number];
6c223761
KB
3892
3893 /*
3894 * Create IQ (Inbound Queue - host to device queue) for
3895 * RAID path.
3896 */
3897 memset(&request, 0, sizeof(request));
3898 request.header.iu_type = PQI_REQUEST_IU_GENERAL_ADMIN;
3899 put_unaligned_le16(PQI_GENERAL_ADMIN_IU_LENGTH,
3900 &request.header.iu_length);
3901 request.function_code = PQI_GENERAL_ADMIN_FUNCTION_CREATE_IQ;
3902 put_unaligned_le16(queue_group->iq_id[RAID_PATH],
3903 &request.data.create_operational_iq.queue_id);
3904 put_unaligned_le64(
3905 (u64)queue_group->iq_element_array_bus_addr[RAID_PATH],
3906 &request.data.create_operational_iq.element_array_addr);
3907 put_unaligned_le64((u64)queue_group->iq_ci_bus_addr[RAID_PATH],
3908 &request.data.create_operational_iq.ci_addr);
3909 put_unaligned_le16(ctrl_info->num_elements_per_iq,
3910 &request.data.create_operational_iq.num_elements);
3911 put_unaligned_le16(PQI_OPERATIONAL_IQ_ELEMENT_LENGTH / 16,
3912 &request.data.create_operational_iq.element_length);
3913 request.data.create_operational_iq.queue_protocol = PQI_PROTOCOL_SOP;
3914
3915 rc = pqi_submit_admin_request_synchronous(ctrl_info, &request,
3916 &response);
3917 if (rc) {
3918 dev_err(&ctrl_info->pci_dev->dev,
3919 "error creating inbound RAID queue\n");
3920 return rc;
3921 }
3922
3923 queue_group->iq_pi[RAID_PATH] = ctrl_info->iomem_base +
3924 PQI_DEVICE_REGISTERS_OFFSET +
3925 get_unaligned_le64(
3926 &response.data.create_operational_iq.iq_pi_offset);
3927
3928 /*
3929 * Create IQ (Inbound Queue - host to device queue) for
3930 * Advanced I/O (AIO) path.
3931 */
3932 memset(&request, 0, sizeof(request));
3933 request.header.iu_type = PQI_REQUEST_IU_GENERAL_ADMIN;
3934 put_unaligned_le16(PQI_GENERAL_ADMIN_IU_LENGTH,
3935 &request.header.iu_length);
3936 request.function_code = PQI_GENERAL_ADMIN_FUNCTION_CREATE_IQ;
3937 put_unaligned_le16(queue_group->iq_id[AIO_PATH],
3938 &request.data.create_operational_iq.queue_id);
3939 put_unaligned_le64((u64)queue_group->
3940 iq_element_array_bus_addr[AIO_PATH],
3941 &request.data.create_operational_iq.element_array_addr);
3942 put_unaligned_le64((u64)queue_group->iq_ci_bus_addr[AIO_PATH],
3943 &request.data.create_operational_iq.ci_addr);
3944 put_unaligned_le16(ctrl_info->num_elements_per_iq,
3945 &request.data.create_operational_iq.num_elements);
3946 put_unaligned_le16(PQI_OPERATIONAL_IQ_ELEMENT_LENGTH / 16,
3947 &request.data.create_operational_iq.element_length);
3948 request.data.create_operational_iq.queue_protocol = PQI_PROTOCOL_SOP;
3949
3950 rc = pqi_submit_admin_request_synchronous(ctrl_info, &request,
3951 &response);
3952 if (rc) {
3953 dev_err(&ctrl_info->pci_dev->dev,
3954 "error creating inbound AIO queue\n");
3955 goto delete_inbound_queue_raid;
3956 }
3957
3958 queue_group->iq_pi[AIO_PATH] = ctrl_info->iomem_base +
3959 PQI_DEVICE_REGISTERS_OFFSET +
3960 get_unaligned_le64(
3961 &response.data.create_operational_iq.iq_pi_offset);
3962
3963 /*
3964 * Designate the 2nd IQ as the AIO path. By default, all IQs are
3965 * assumed to be for RAID path I/O unless we change the queue's
3966 * property.
3967 */
3968 memset(&request, 0, sizeof(request));
3969 request.header.iu_type = PQI_REQUEST_IU_GENERAL_ADMIN;
3970 put_unaligned_le16(PQI_GENERAL_ADMIN_IU_LENGTH,
3971 &request.header.iu_length);
3972 request.function_code = PQI_GENERAL_ADMIN_FUNCTION_CHANGE_IQ_PROPERTY;
3973 put_unaligned_le16(queue_group->iq_id[AIO_PATH],
3974 &request.data.change_operational_iq_properties.queue_id);
3975 put_unaligned_le32(PQI_IQ_PROPERTY_IS_AIO_QUEUE,
3976 &request.data.change_operational_iq_properties.vendor_specific);
3977
3978 rc = pqi_submit_admin_request_synchronous(ctrl_info, &request,
3979 &response);
3980 if (rc) {
3981 dev_err(&ctrl_info->pci_dev->dev,
3982 "error changing queue property\n");
3983 goto delete_inbound_queue_aio;
3984 }
3985
3986 /*
3987 * Create OQ (Outbound Queue - device to host queue).
3988 */
3989 memset(&request, 0, sizeof(request));
3990 request.header.iu_type = PQI_REQUEST_IU_GENERAL_ADMIN;
3991 put_unaligned_le16(PQI_GENERAL_ADMIN_IU_LENGTH,
3992 &request.header.iu_length);
3993 request.function_code = PQI_GENERAL_ADMIN_FUNCTION_CREATE_OQ;
3994 put_unaligned_le16(queue_group->oq_id,
3995 &request.data.create_operational_oq.queue_id);
3996 put_unaligned_le64((u64)queue_group->oq_element_array_bus_addr,
3997 &request.data.create_operational_oq.element_array_addr);
3998 put_unaligned_le64((u64)queue_group->oq_pi_bus_addr,
3999 &request.data.create_operational_oq.pi_addr);
4000 put_unaligned_le16(ctrl_info->num_elements_per_oq,
4001 &request.data.create_operational_oq.num_elements);
4002 put_unaligned_le16(PQI_OPERATIONAL_OQ_ELEMENT_LENGTH / 16,
4003 &request.data.create_operational_oq.element_length);
4004 request.data.create_operational_oq.queue_protocol = PQI_PROTOCOL_SOP;
4005 put_unaligned_le16(queue_group->int_msg_num,
4006 &request.data.create_operational_oq.int_msg_num);
4007
4008 rc = pqi_submit_admin_request_synchronous(ctrl_info, &request,
4009 &response);
4010 if (rc) {
4011 dev_err(&ctrl_info->pci_dev->dev,
4012 "error creating outbound queue\n");
4013 goto delete_inbound_queue_aio;
4014 }
4015
4016 queue_group->oq_ci = ctrl_info->iomem_base +
4017 PQI_DEVICE_REGISTERS_OFFSET +
4018 get_unaligned_le64(
4019 &response.data.create_operational_oq.oq_ci_offset);
4020
6c223761
KB
4021 return 0;
4022
4023delete_inbound_queue_aio:
4024 pqi_delete_operational_queue(ctrl_info, true,
4025 queue_group->iq_id[AIO_PATH]);
4026
4027delete_inbound_queue_raid:
4028 pqi_delete_operational_queue(ctrl_info, true,
4029 queue_group->iq_id[RAID_PATH]);
4030
4031 return rc;
4032}
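/*
 * Queue-group bring-up summary: each group takes four admin commands -
 * create the RAID-path IQ, create the AIO-path IQ, change the second IQ's
 * vendor-specific property to PQI_IQ_PROPERTY_IS_AIO_QUEUE, and create the
 * one OQ shared by both paths. A failure part-way through unwinds by
 * deleting whichever inbound queues were already created, and the offsets
 * returned by the firmware become the iq_pi/oq_ci index registers mapped
 * relative to iomem_base + PQI_DEVICE_REGISTERS_OFFSET.
 */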
4033
4034static int pqi_create_queues(struct pqi_ctrl_info *ctrl_info)
4035{
4036 int rc;
4037 unsigned int i;
4038
4039 rc = pqi_create_event_queue(ctrl_info);
4040 if (rc) {
4041 dev_err(&ctrl_info->pci_dev->dev,
4042 "error creating event queue\n");
4043 return rc;
4044 }
4045
4046 for (i = 0; i < ctrl_info->num_queue_groups; i++) {
061ef06a 4047 rc = pqi_create_queue_group(ctrl_info, i);
6c223761
KB
4048 if (rc) {
4049 dev_err(&ctrl_info->pci_dev->dev,
4050 "error creating queue group number %u/%u\n",
4051 i, ctrl_info->num_queue_groups);
4052 return rc;
4053 }
4054 }
4055
4056 return 0;
4057}
4058
4059#define PQI_REPORT_EVENT_CONFIG_BUFFER_LENGTH \
4060 (offsetof(struct pqi_event_config, descriptors) + \
4061 (PQI_MAX_EVENT_DESCRIPTORS * sizeof(struct pqi_event_descriptor)))
4062
6a50d6ad
KB
4063static int pqi_configure_events(struct pqi_ctrl_info *ctrl_info,
4064 bool enable_events)
6c223761
KB
4065{
4066 int rc;
4067 unsigned int i;
4068 struct pqi_event_config *event_config;
6a50d6ad 4069 struct pqi_event_descriptor *event_descriptor;
6c223761
KB
4070 struct pqi_general_management_request request;
4071
4072 event_config = kmalloc(PQI_REPORT_EVENT_CONFIG_BUFFER_LENGTH,
4073 GFP_KERNEL);
4074 if (!event_config)
4075 return -ENOMEM;
4076
4077 memset(&request, 0, sizeof(request));
4078
4079 request.header.iu_type = PQI_REQUEST_IU_REPORT_VENDOR_EVENT_CONFIG;
4080 put_unaligned_le16(offsetof(struct pqi_general_management_request,
4081 data.report_event_configuration.sg_descriptors[1]) -
4082 PQI_REQUEST_HEADER_LENGTH, &request.header.iu_length);
4083 put_unaligned_le32(PQI_REPORT_EVENT_CONFIG_BUFFER_LENGTH,
4084 &request.data.report_event_configuration.buffer_length);
4085
4086 rc = pqi_map_single(ctrl_info->pci_dev,
4087 request.data.report_event_configuration.sg_descriptors,
4088 event_config, PQI_REPORT_EVENT_CONFIG_BUFFER_LENGTH,
4089 PCI_DMA_FROMDEVICE);
4090 if (rc)
4091 goto out;
4092
4093 rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header,
4094 0, NULL, NO_TIMEOUT);
4095
4096 pqi_pci_unmap(ctrl_info->pci_dev,
4097 request.data.report_event_configuration.sg_descriptors, 1,
4098 PCI_DMA_FROMDEVICE);
4099
4100 if (rc)
4101 goto out;
4102
6a50d6ad
KB
4103 for (i = 0; i < event_config->num_event_descriptors; i++) {
4104 event_descriptor = &event_config->descriptors[i];
4105 if (enable_events &&
4106 pqi_is_supported_event(event_descriptor->event_type))
4107 put_unaligned_le16(ctrl_info->event_queue.oq_id,
4108 &event_descriptor->oq_id);
4109 else
4110 put_unaligned_le16(0, &event_descriptor->oq_id);
4111 }
6c223761
KB
4112
4113 memset(&request, 0, sizeof(request));
4114
4115 request.header.iu_type = PQI_REQUEST_IU_SET_VENDOR_EVENT_CONFIG;
4116 put_unaligned_le16(offsetof(struct pqi_general_management_request,
4117 data.report_event_configuration.sg_descriptors[1]) -
4118 PQI_REQUEST_HEADER_LENGTH, &request.header.iu_length);
4119 put_unaligned_le32(PQI_REPORT_EVENT_CONFIG_BUFFER_LENGTH,
4120 &request.data.report_event_configuration.buffer_length);
4121
4122 rc = pqi_map_single(ctrl_info->pci_dev,
4123 request.data.report_event_configuration.sg_descriptors,
4124 event_config, PQI_REPORT_EVENT_CONFIG_BUFFER_LENGTH,
4125 PCI_DMA_TODEVICE);
4126 if (rc)
4127 goto out;
4128
4129 rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header, 0,
4130 NULL, NO_TIMEOUT);
4131
4132 pqi_pci_unmap(ctrl_info->pci_dev,
4133 request.data.report_event_configuration.sg_descriptors, 1,
4134 PCI_DMA_TODEVICE);
4135
4136out:
4137 kfree(event_config);
4138
4139 return rc;
4140}
4141
6a50d6ad
KB
4142static inline int pqi_enable_events(struct pqi_ctrl_info *ctrl_info)
4143{
4144 return pqi_configure_events(ctrl_info, true);
4145}
4146
4147static inline int pqi_disable_events(struct pqi_ctrl_info *ctrl_info)
4148{
4149 return pqi_configure_events(ctrl_info, false);
4150}
4151
6c223761
KB
4152static void pqi_free_all_io_requests(struct pqi_ctrl_info *ctrl_info)
4153{
4154 unsigned int i;
4155 struct device *dev;
4156 size_t sg_chain_buffer_length;
4157 struct pqi_io_request *io_request;
4158
4159 if (!ctrl_info->io_request_pool)
4160 return;
4161
4162 dev = &ctrl_info->pci_dev->dev;
4163 sg_chain_buffer_length = ctrl_info->sg_chain_buffer_length;
4164 io_request = ctrl_info->io_request_pool;
4165
4166 for (i = 0; i < ctrl_info->max_io_slots; i++) {
4167 kfree(io_request->iu);
4168 if (!io_request->sg_chain_buffer)
4169 break;
4170 dma_free_coherent(dev, sg_chain_buffer_length,
4171 io_request->sg_chain_buffer,
4172 io_request->sg_chain_buffer_dma_handle);
4173 io_request++;
4174 }
4175
4176 kfree(ctrl_info->io_request_pool);
4177 ctrl_info->io_request_pool = NULL;
4178}
4179
4180static inline int pqi_alloc_error_buffer(struct pqi_ctrl_info *ctrl_info)
4181{
4182 ctrl_info->error_buffer = dma_zalloc_coherent(&ctrl_info->pci_dev->dev,
4183 ctrl_info->error_buffer_length,
4184 &ctrl_info->error_buffer_dma_handle, GFP_KERNEL);
4185
4186 if (!ctrl_info->error_buffer)
4187 return -ENOMEM;
4188
4189 return 0;
4190}
4191
4192static int pqi_alloc_io_resources(struct pqi_ctrl_info *ctrl_info)
4193{
4194 unsigned int i;
4195 void *sg_chain_buffer;
4196 size_t sg_chain_buffer_length;
4197 dma_addr_t sg_chain_buffer_dma_handle;
4198 struct device *dev;
4199 struct pqi_io_request *io_request;
4200
4201 ctrl_info->io_request_pool = kzalloc(ctrl_info->max_io_slots *
4202 sizeof(ctrl_info->io_request_pool[0]), GFP_KERNEL);
4203
4204 if (!ctrl_info->io_request_pool) {
4205 dev_err(&ctrl_info->pci_dev->dev,
4206 "failed to allocate I/O request pool\n");
4207 goto error;
4208 }
4209
4210 dev = &ctrl_info->pci_dev->dev;
4211 sg_chain_buffer_length = ctrl_info->sg_chain_buffer_length;
4212 io_request = ctrl_info->io_request_pool;
4213
4214 for (i = 0; i < ctrl_info->max_io_slots; i++) {
4215 io_request->iu =
4216 kmalloc(ctrl_info->max_inbound_iu_length, GFP_KERNEL);
4217
4218 if (!io_request->iu) {
4219 dev_err(&ctrl_info->pci_dev->dev,
4220 "failed to allocate IU buffers\n");
4221 goto error;
4222 }
4223
4224 sg_chain_buffer = dma_alloc_coherent(dev,
4225 sg_chain_buffer_length, &sg_chain_buffer_dma_handle,
4226 GFP_KERNEL);
4227
4228 if (!sg_chain_buffer) {
4229 dev_err(&ctrl_info->pci_dev->dev,
4230 "failed to allocate PQI scatter-gather chain buffers\n");
4231 goto error;
4232 }
4233
4234 io_request->index = i;
4235 io_request->sg_chain_buffer = sg_chain_buffer;
4236 io_request->sg_chain_buffer_dma_handle =
4237 sg_chain_buffer_dma_handle;
4238 io_request++;
4239 }
4240
4241 return 0;
4242
4243error:
4244 pqi_free_all_io_requests(ctrl_info);
4245
4246 return -ENOMEM;
4247}
4248
4249/*
4250 * Calculate required resources that are sized based on max. outstanding
4251 * requests and max. transfer size.
4252 */
4253
4254static void pqi_calculate_io_resources(struct pqi_ctrl_info *ctrl_info)
4255{
4256 u32 max_transfer_size;
4257 u32 max_sg_entries;
4258
4259 ctrl_info->scsi_ml_can_queue =
4260 ctrl_info->max_outstanding_requests - PQI_RESERVED_IO_SLOTS;
4261 ctrl_info->max_io_slots = ctrl_info->max_outstanding_requests;
4262
4263 ctrl_info->error_buffer_length =
4264 ctrl_info->max_io_slots * PQI_ERROR_BUFFER_ELEMENT_LENGTH;
4265
d727a776
KB
4266 if (reset_devices)
4267 max_transfer_size = min(ctrl_info->max_transfer_size,
4268 PQI_MAX_TRANSFER_SIZE_KDUMP);
4269 else
4270 max_transfer_size = min(ctrl_info->max_transfer_size,
4271 PQI_MAX_TRANSFER_SIZE);
6c223761
KB
4272
4273 max_sg_entries = max_transfer_size / PAGE_SIZE;
4274
4275 /* +1 to cover when the buffer is not page-aligned. */
4276 max_sg_entries++;
4277
4278 max_sg_entries = min(ctrl_info->max_sg_entries, max_sg_entries);
4279
4280 max_transfer_size = (max_sg_entries - 1) * PAGE_SIZE;
4281
4282 ctrl_info->sg_chain_buffer_length =
e1d213bd
KB
4283 (max_sg_entries * sizeof(struct pqi_sg_descriptor)) +
4284 PQI_EXTRA_SGL_MEMORY;
6c223761
KB
4285 ctrl_info->sg_tablesize = max_sg_entries;
4286 ctrl_info->max_sectors = max_transfer_size / 512;
4287}
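/*
 * Worked example for the sizing above, with illustrative numbers only
 * (assume 4 KiB pages, an effective max_transfer_size of 1 MiB and a
 * 16-byte struct pqi_sg_descriptor):
 *
 *	max_sg_entries         = 1 MiB / 4 KiB + 1 = 257 (the +1 covers a
 *	                         buffer that is not page-aligned), then
 *	                         capped by the controller's max_sg_entries
 *	max_transfer_size      = (257 - 1) * 4 KiB = 1 MiB
 *	sg_chain_buffer_length = 257 * 16 + PQI_EXTRA_SGL_MEMORY bytes
 *	max_sectors            = 1 MiB / 512 = 2048
 */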
4288
4289static void pqi_calculate_queue_resources(struct pqi_ctrl_info *ctrl_info)
4290{
6c223761
KB
4291 int num_queue_groups;
4292 u16 num_elements_per_iq;
4293 u16 num_elements_per_oq;
4294
d727a776
KB
4295 if (reset_devices) {
4296 num_queue_groups = 1;
4297 } else {
4298 int num_cpus;
4299 int max_queue_groups;
4300
4301 max_queue_groups = min(ctrl_info->max_inbound_queues / 2,
4302 ctrl_info->max_outbound_queues - 1);
4303 max_queue_groups = min(max_queue_groups, PQI_MAX_QUEUE_GROUPS);
6c223761 4304
d727a776
KB
4305 num_cpus = num_online_cpus();
4306 num_queue_groups = min(num_cpus, ctrl_info->max_msix_vectors);
4307 num_queue_groups = min(num_queue_groups, max_queue_groups);
4308 }
6c223761
KB
4309
4310 ctrl_info->num_queue_groups = num_queue_groups;
061ef06a 4311 ctrl_info->max_hw_queue_index = num_queue_groups - 1;
6c223761 4312
77668f41
KB
4313 /*
4314 * Make sure that the max. inbound IU length is an even multiple
4315 * of our inbound element length.
4316 */
4317 ctrl_info->max_inbound_iu_length =
4318 (ctrl_info->max_inbound_iu_length_per_firmware /
4319 PQI_OPERATIONAL_IQ_ELEMENT_LENGTH) *
4320 PQI_OPERATIONAL_IQ_ELEMENT_LENGTH;
6c223761
KB
4321
4322 num_elements_per_iq =
4323 (ctrl_info->max_inbound_iu_length /
4324 PQI_OPERATIONAL_IQ_ELEMENT_LENGTH);
4325
4326 /* Add one because one element in each queue is unusable. */
4327 num_elements_per_iq++;
4328
4329 num_elements_per_iq = min(num_elements_per_iq,
4330 ctrl_info->max_elements_per_iq);
4331
4332 num_elements_per_oq = ((num_elements_per_iq - 1) * 2) + 1;
4333 num_elements_per_oq = min(num_elements_per_oq,
4334 ctrl_info->max_elements_per_oq);
4335
4336 ctrl_info->num_elements_per_iq = num_elements_per_iq;
4337 ctrl_info->num_elements_per_oq = num_elements_per_oq;
4338
4339 ctrl_info->max_sg_per_iu =
4340 ((ctrl_info->max_inbound_iu_length -
4341 PQI_OPERATIONAL_IQ_ELEMENT_LENGTH) /
4342 sizeof(struct pqi_sg_descriptor)) +
4343 PQI_MAX_EMBEDDED_SG_DESCRIPTORS;
4344}
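/*
 * Rationale for the element math above (an inference from the code, not a
 * spec quote): one slot per queue is unusable, presumably the usual ring
 * rule that a completely full queue must leave one element free so that
 * producer == consumer still means "empty" - hence the +1 on the IQ count.
 * Because the single OQ in a queue group serves both the RAID and AIO IQs,
 * it is sized as ((num_elements_per_iq - 1) * 2) + 1, i.e. two queues'
 * worth of usable elements plus its own unusable slot, before being capped
 * by max_elements_per_oq.
 */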
4345
4346static inline void pqi_set_sg_descriptor(
4347 struct pqi_sg_descriptor *sg_descriptor, struct scatterlist *sg)
4348{
4349 u64 address = (u64)sg_dma_address(sg);
4350 unsigned int length = sg_dma_len(sg);
4351
4352 put_unaligned_le64(address, &sg_descriptor->address);
4353 put_unaligned_le32(length, &sg_descriptor->length);
4354 put_unaligned_le32(0, &sg_descriptor->flags);
4355}
4356
4357static int pqi_build_raid_sg_list(struct pqi_ctrl_info *ctrl_info,
4358 struct pqi_raid_path_request *request, struct scsi_cmnd *scmd,
4359 struct pqi_io_request *io_request)
4360{
4361 int i;
4362 u16 iu_length;
4363 int sg_count;
4364 bool chained;
4365 unsigned int num_sg_in_iu;
4366 unsigned int max_sg_per_iu;
4367 struct scatterlist *sg;
4368 struct pqi_sg_descriptor *sg_descriptor;
4369
4370 sg_count = scsi_dma_map(scmd);
4371 if (sg_count < 0)
4372 return sg_count;
4373
4374 iu_length = offsetof(struct pqi_raid_path_request, sg_descriptors) -
4375 PQI_REQUEST_HEADER_LENGTH;
4376
4377 if (sg_count == 0)
4378 goto out;
4379
4380 sg = scsi_sglist(scmd);
4381 sg_descriptor = request->sg_descriptors;
4382 max_sg_per_iu = ctrl_info->max_sg_per_iu - 1;
4383 chained = false;
4384 num_sg_in_iu = 0;
4385 i = 0;
4386
4387 while (1) {
4388 pqi_set_sg_descriptor(sg_descriptor, sg);
4389 if (!chained)
4390 num_sg_in_iu++;
4391 i++;
4392 if (i == sg_count)
4393 break;
4394 sg_descriptor++;
4395 if (i == max_sg_per_iu) {
4396 put_unaligned_le64(
4397 (u64)io_request->sg_chain_buffer_dma_handle,
4398 &sg_descriptor->address);
4399 put_unaligned_le32((sg_count - num_sg_in_iu)
4400 * sizeof(*sg_descriptor),
4401 &sg_descriptor->length);
4402 put_unaligned_le32(CISS_SG_CHAIN,
4403 &sg_descriptor->flags);
4404 chained = true;
4405 num_sg_in_iu++;
4406 sg_descriptor = io_request->sg_chain_buffer;
4407 }
4408 sg = sg_next(sg);
4409 }
4410
4411 put_unaligned_le32(CISS_SG_LAST, &sg_descriptor->flags);
4412 request->partial = chained;
4413 iu_length += num_sg_in_iu * sizeof(*sg_descriptor);
4414
4415out:
4416 put_unaligned_le16(iu_length, &request->header.iu_length);
4417
4418 return 0;
4419}
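/*
 * SG chaining in the RAID list builder above: only max_sg_per_iu - 1
 * descriptors are embedded in the IU itself. When the mapped list is
 * longer, the reserved final embedded slot is filled with a CISS_SG_CHAIN
 * descriptor that points at the request's DMA-able sg_chain_buffer and the
 * remaining entries continue there, so num_sg_in_iu (and therefore
 * iu_length) counts only the embedded descriptors, chain entry included.
 * The AIO list builder below follows the same pattern.
 */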
4420
4421static int pqi_build_aio_sg_list(struct pqi_ctrl_info *ctrl_info,
4422 struct pqi_aio_path_request *request, struct scsi_cmnd *scmd,
4423 struct pqi_io_request *io_request)
4424{
4425 int i;
4426 u16 iu_length;
4427 int sg_count;
a60eec02
KB
4428 bool chained;
4429 unsigned int num_sg_in_iu;
4430 unsigned int max_sg_per_iu;
6c223761
KB
4431 struct scatterlist *sg;
4432 struct pqi_sg_descriptor *sg_descriptor;
4433
4434 sg_count = scsi_dma_map(scmd);
4435 if (sg_count < 0)
4436 return sg_count;
a60eec02
KB
4437
4438 iu_length = offsetof(struct pqi_aio_path_request, sg_descriptors) -
4439 PQI_REQUEST_HEADER_LENGTH;
4440 num_sg_in_iu = 0;
4441
6c223761
KB
4442 if (sg_count == 0)
4443 goto out;
4444
a60eec02
KB
4445 sg = scsi_sglist(scmd);
4446 sg_descriptor = request->sg_descriptors;
4447 max_sg_per_iu = ctrl_info->max_sg_per_iu - 1;
4448 chained = false;
4449 i = 0;
4450
4451 while (1) {
4452 pqi_set_sg_descriptor(sg_descriptor, sg);
4453 if (!chained)
4454 num_sg_in_iu++;
4455 i++;
4456 if (i == sg_count)
4457 break;
4458 sg_descriptor++;
4459 if (i == max_sg_per_iu) {
4460 put_unaligned_le64(
4461 (u64)io_request->sg_chain_buffer_dma_handle,
4462 &sg_descriptor->address);
4463 put_unaligned_le32((sg_count - num_sg_in_iu)
4464 * sizeof(*sg_descriptor),
4465 &sg_descriptor->length);
4466 put_unaligned_le32(CISS_SG_CHAIN,
4467 &sg_descriptor->flags);
4468 chained = true;
4469 num_sg_in_iu++;
4470 sg_descriptor = io_request->sg_chain_buffer;
6c223761 4471 }
a60eec02 4472 sg = sg_next(sg);
6c223761
KB
4473 }
4474
a60eec02
KB
4475 put_unaligned_le32(CISS_SG_LAST, &sg_descriptor->flags);
4476 request->partial = chained;
6c223761 4477 iu_length += num_sg_in_iu * sizeof(*sg_descriptor);
a60eec02
KB
4478
4479out:
6c223761
KB
4480 put_unaligned_le16(iu_length, &request->header.iu_length);
4481 request->num_sg_descriptors = num_sg_in_iu;
4482
4483 return 0;
4484}
4485
4486static void pqi_raid_io_complete(struct pqi_io_request *io_request,
4487 void *context)
4488{
4489 struct scsi_cmnd *scmd;
4490
4491 scmd = io_request->scmd;
4492 pqi_free_io_request(io_request);
4493 scsi_dma_unmap(scmd);
4494 pqi_scsi_done(scmd);
4495}
4496
376fb880
KB
4497static int pqi_raid_submit_scsi_cmd_with_io_request(
4498 struct pqi_ctrl_info *ctrl_info, struct pqi_io_request *io_request,
6c223761
KB
4499 struct pqi_scsi_dev *device, struct scsi_cmnd *scmd,
4500 struct pqi_queue_group *queue_group)
4501{
4502 int rc;
4503 size_t cdb_length;
6c223761
KB
4504 struct pqi_raid_path_request *request;
4505
6c223761
KB
4506 io_request->io_complete_callback = pqi_raid_io_complete;
4507 io_request->scmd = scmd;
4508
6c223761
KB
4509 request = io_request->iu;
4510 memset(request, 0,
4511 offsetof(struct pqi_raid_path_request, sg_descriptors));
4512
4513 request->header.iu_type = PQI_REQUEST_IU_RAID_PATH_IO;
4514 put_unaligned_le32(scsi_bufflen(scmd), &request->buffer_length);
4515 request->task_attribute = SOP_TASK_ATTRIBUTE_SIMPLE;
4516 put_unaligned_le16(io_request->index, &request->request_id);
4517 request->error_index = request->request_id;
4518 memcpy(request->lun_number, device->scsi3addr,
4519 sizeof(request->lun_number));
4520
4521 cdb_length = min_t(size_t, scmd->cmd_len, sizeof(request->cdb));
4522 memcpy(request->cdb, scmd->cmnd, cdb_length);
4523
4524 switch (cdb_length) {
4525 case 6:
4526 case 10:
4527 case 12:
4528 case 16:
4529 /* No bytes in the Additional CDB bytes field */
4530 request->additional_cdb_bytes_usage =
4531 SOP_ADDITIONAL_CDB_BYTES_0;
4532 break;
4533 case 20:
4534 /* 4 bytes in the Additional CDB bytes field */
4535 request->additional_cdb_bytes_usage =
4536 SOP_ADDITIONAL_CDB_BYTES_4;
4537 break;
4538 case 24:
4539 /* 8 bytes in the Additional CDB bytes field */
4540 request->additional_cdb_bytes_usage =
4541 SOP_ADDITIONAL_CDB_BYTES_8;
4542 break;
4543 case 28:
4544 /* 12 bytes in the Additional CDB bytes field */
4545 request->additional_cdb_bytes_usage =
4546 SOP_ADDITIONAL_CDB_BYTES_12;
4547 break;
4548 case 32:
4549 default:
4550 /* 16 bytes in the Additional CDB bytes field */
4551 request->additional_cdb_bytes_usage =
4552 SOP_ADDITIONAL_CDB_BYTES_16;
4553 break;
4554 }
4555
4556 switch (scmd->sc_data_direction) {
4557 case DMA_TO_DEVICE:
4558 request->data_direction = SOP_READ_FLAG;
4559 break;
4560 case DMA_FROM_DEVICE:
4561 request->data_direction = SOP_WRITE_FLAG;
4562 break;
4563 case DMA_NONE:
4564 request->data_direction = SOP_NO_DIRECTION_FLAG;
4565 break;
4566 case DMA_BIDIRECTIONAL:
4567 request->data_direction = SOP_BIDIRECTIONAL;
4568 break;
4569 default:
4570 dev_err(&ctrl_info->pci_dev->dev,
4571 "unknown data direction: %d\n",
4572 scmd->sc_data_direction);
6c223761
KB
4573 break;
4574 }
4575
4576 rc = pqi_build_raid_sg_list(ctrl_info, request, scmd, io_request);
4577 if (rc) {
4578 pqi_free_io_request(io_request);
4579 return SCSI_MLQUEUE_HOST_BUSY;
4580 }
4581
4582 pqi_start_io(ctrl_info, queue_group, RAID_PATH, io_request);
4583
4584 return 0;
4585}
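/*
 * The data-direction mapping above (repeated in the AIO submission path
 * below) looks inverted from the host's point of view: DMA_TO_DEVICE maps
 * to SOP_READ_FLAG and DMA_FROM_DEVICE to SOP_WRITE_FLAG. Presumably the
 * SOP flags describe the transfer from the controller's perspective, where
 * a host write is the controller reading from host memory.
 */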
4586
376fb880
KB
4587static inline int pqi_raid_submit_scsi_cmd(struct pqi_ctrl_info *ctrl_info,
4588 struct pqi_scsi_dev *device, struct scsi_cmnd *scmd,
4589 struct pqi_queue_group *queue_group)
4590{
4591 struct pqi_io_request *io_request;
4592
4593 io_request = pqi_alloc_io_request(ctrl_info);
4594
4595 return pqi_raid_submit_scsi_cmd_with_io_request(ctrl_info, io_request,
4596 device, scmd, queue_group);
4597}
4598
4599static inline void pqi_schedule_bypass_retry(struct pqi_ctrl_info *ctrl_info)
4600{
4601 if (!pqi_ctrl_blocked(ctrl_info))
4602 schedule_work(&ctrl_info->raid_bypass_retry_work);
4603}
4604
4605static bool pqi_raid_bypass_retry_needed(struct pqi_io_request *io_request)
4606{
4607 struct scsi_cmnd *scmd;
03b288cf 4608 struct pqi_scsi_dev *device;
376fb880
KB
4609 struct pqi_ctrl_info *ctrl_info;
4610
4611 if (!io_request->raid_bypass)
4612 return false;
4613
4614 scmd = io_request->scmd;
4615 if ((scmd->result & 0xff) == SAM_STAT_GOOD)
4616 return false;
4617 if (host_byte(scmd->result) == DID_NO_CONNECT)
4618 return false;
4619
03b288cf
KB
4620 device = scmd->device->hostdata;
4621 if (pqi_device_offline(device))
4622 return false;
4623
376fb880
KB
4624 ctrl_info = shost_to_hba(scmd->device->host);
4625 if (pqi_ctrl_offline(ctrl_info))
4626 return false;
4627
4628 return true;
4629}
4630
4631static inline void pqi_add_to_raid_bypass_retry_list(
4632 struct pqi_ctrl_info *ctrl_info,
4633 struct pqi_io_request *io_request, bool at_head)
4634{
4635 unsigned long flags;
4636
4637 spin_lock_irqsave(&ctrl_info->raid_bypass_retry_list_lock, flags);
4638 if (at_head)
4639 list_add(&io_request->request_list_entry,
4640 &ctrl_info->raid_bypass_retry_list);
4641 else
4642 list_add_tail(&io_request->request_list_entry,
4643 &ctrl_info->raid_bypass_retry_list);
4644 spin_unlock_irqrestore(&ctrl_info->raid_bypass_retry_list_lock, flags);
4645}
4646
4647static void pqi_queued_raid_bypass_complete(struct pqi_io_request *io_request,
4648 void *context)
4649{
4650 struct scsi_cmnd *scmd;
4651
4652 scmd = io_request->scmd;
4653 pqi_free_io_request(io_request);
4654 pqi_scsi_done(scmd);
4655}
4656
4657static void pqi_queue_raid_bypass_retry(struct pqi_io_request *io_request)
4658{
4659 struct scsi_cmnd *scmd;
4660 struct pqi_ctrl_info *ctrl_info;
4661
4662 io_request->io_complete_callback = pqi_queued_raid_bypass_complete;
4663 scmd = io_request->scmd;
4664 scmd->result = 0;
4665 ctrl_info = shost_to_hba(scmd->device->host);
4666
4667 pqi_add_to_raid_bypass_retry_list(ctrl_info, io_request, false);
4668 pqi_schedule_bypass_retry(ctrl_info);
4669}
4670
4671static int pqi_retry_raid_bypass(struct pqi_io_request *io_request)
4672{
4673 struct scsi_cmnd *scmd;
4674 struct pqi_scsi_dev *device;
4675 struct pqi_ctrl_info *ctrl_info;
4676 struct pqi_queue_group *queue_group;
4677
4678 scmd = io_request->scmd;
4679 device = scmd->device->hostdata;
4680 if (pqi_device_in_reset(device)) {
4681 pqi_free_io_request(io_request);
4682 set_host_byte(scmd, DID_RESET);
4683 pqi_scsi_done(scmd);
4684 return 0;
4685 }
4686
4687 ctrl_info = shost_to_hba(scmd->device->host);
4688 queue_group = io_request->queue_group;
4689
4690 pqi_reinit_io_request(io_request);
4691
4692 return pqi_raid_submit_scsi_cmd_with_io_request(ctrl_info, io_request,
4693 device, scmd, queue_group);
4694}
4695
4696static inline struct pqi_io_request *pqi_next_queued_raid_bypass_request(
4697 struct pqi_ctrl_info *ctrl_info)
4698{
4699 unsigned long flags;
4700 struct pqi_io_request *io_request;
4701
4702 spin_lock_irqsave(&ctrl_info->raid_bypass_retry_list_lock, flags);
4703 io_request = list_first_entry_or_null(
4704 &ctrl_info->raid_bypass_retry_list,
4705 struct pqi_io_request, request_list_entry);
4706 if (io_request)
4707 list_del(&io_request->request_list_entry);
4708 spin_unlock_irqrestore(&ctrl_info->raid_bypass_retry_list_lock, flags);
4709
4710 return io_request;
4711}
4712
4713static void pqi_retry_raid_bypass_requests(struct pqi_ctrl_info *ctrl_info)
4714{
4715 int rc;
4716 struct pqi_io_request *io_request;
4717
4718 pqi_ctrl_busy(ctrl_info);
4719
4720 while (1) {
4721 if (pqi_ctrl_blocked(ctrl_info))
4722 break;
4723 io_request = pqi_next_queued_raid_bypass_request(ctrl_info);
4724 if (!io_request)
4725 break;
4726 rc = pqi_retry_raid_bypass(io_request);
4727 if (rc) {
4728 pqi_add_to_raid_bypass_retry_list(ctrl_info, io_request,
4729 true);
4730 pqi_schedule_bypass_retry(ctrl_info);
4731 break;
4732 }
4733 }
4734
4735 pqi_ctrl_unbusy(ctrl_info);
4736}
4737
4738static void pqi_raid_bypass_retry_worker(struct work_struct *work)
4739{
4740 struct pqi_ctrl_info *ctrl_info;
4741
4742 ctrl_info = container_of(work, struct pqi_ctrl_info,
4743 raid_bypass_retry_work);
4744 pqi_retry_raid_bypass_requests(ctrl_info);
4745}
4746
4747static void pqi_complete_all_queued_raid_bypass_retries(
4748 struct pqi_ctrl_info *ctrl_info, int result)
4749{
4750 unsigned long flags;
4751 struct pqi_io_request *io_request;
4752 struct pqi_io_request *next;
4753 struct scsi_cmnd *scmd;
4754
4755 spin_lock_irqsave(&ctrl_info->raid_bypass_retry_list_lock, flags);
4756
4757 list_for_each_entry_safe(io_request, next,
4758 &ctrl_info->raid_bypass_retry_list, request_list_entry) {
4759 list_del(&io_request->request_list_entry);
4760 scmd = io_request->scmd;
4761 pqi_free_io_request(io_request);
4762 scmd->result = result;
4763 pqi_scsi_done(scmd);
4764 }
4765
4766 spin_unlock_irqrestore(&ctrl_info->raid_bypass_retry_list_lock, flags);
4767}
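/*
 * RAID bypass retry machinery, in short: a command that was issued as a
 * RAID bypass and completes with something other than SAM_STAT_GOOD (and
 * not DID_NO_CONNECT), while both the device and the controller are still
 * online, is parked on raid_bypass_retry_list. The retry worker re-issues
 * it down the normal RAID path via
 * pqi_raid_submit_scsi_cmd_with_io_request(), fails it with DID_RESET if
 * the device is meanwhile in reset, and
 * pqi_complete_all_queued_raid_bypass_retries() can flush the whole list
 * with a single result value.
 */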
4768
6c223761
KB
4769static void pqi_aio_io_complete(struct pqi_io_request *io_request,
4770 void *context)
4771{
4772 struct scsi_cmnd *scmd;
4773
4774 scmd = io_request->scmd;
4775 scsi_dma_unmap(scmd);
4776 if (io_request->status == -EAGAIN)
4777 set_host_byte(scmd, DID_IMM_RETRY);
376fb880
KB
4778 else if (pqi_raid_bypass_retry_needed(io_request)) {
4779 pqi_queue_raid_bypass_retry(io_request);
4780 return;
4781 }
6c223761
KB
4782 pqi_free_io_request(io_request);
4783 pqi_scsi_done(scmd);
4784}
4785
4786static inline int pqi_aio_submit_scsi_cmd(struct pqi_ctrl_info *ctrl_info,
4787 struct pqi_scsi_dev *device, struct scsi_cmnd *scmd,
4788 struct pqi_queue_group *queue_group)
4789{
4790 return pqi_aio_submit_io(ctrl_info, scmd, device->aio_handle,
376fb880 4791 scmd->cmnd, scmd->cmd_len, queue_group, NULL, false);
6c223761
KB
4792}
4793
4794static int pqi_aio_submit_io(struct pqi_ctrl_info *ctrl_info,
4795 struct scsi_cmnd *scmd, u32 aio_handle, u8 *cdb,
4796 unsigned int cdb_length, struct pqi_queue_group *queue_group,
376fb880 4797 struct pqi_encryption_info *encryption_info, bool raid_bypass)
6c223761
KB
4798{
4799 int rc;
4800 struct pqi_io_request *io_request;
4801 struct pqi_aio_path_request *request;
4802
4803 io_request = pqi_alloc_io_request(ctrl_info);
4804 io_request->io_complete_callback = pqi_aio_io_complete;
4805 io_request->scmd = scmd;
376fb880 4806 io_request->raid_bypass = raid_bypass;
6c223761
KB
4807
4808 request = io_request->iu;
4809 memset(request, 0,
4810 offsetof(struct pqi_raid_path_request, sg_descriptors));
4811
4812 request->header.iu_type = PQI_REQUEST_IU_AIO_PATH_IO;
4813 put_unaligned_le32(aio_handle, &request->nexus_id);
4814 put_unaligned_le32(scsi_bufflen(scmd), &request->buffer_length);
4815 request->task_attribute = SOP_TASK_ATTRIBUTE_SIMPLE;
4816 put_unaligned_le16(io_request->index, &request->request_id);
4817 request->error_index = request->request_id;
4818 if (cdb_length > sizeof(request->cdb))
4819 cdb_length = sizeof(request->cdb);
4820 request->cdb_length = cdb_length;
4821 memcpy(request->cdb, cdb, cdb_length);
4822
4823 switch (scmd->sc_data_direction) {
4824 case DMA_TO_DEVICE:
4825 request->data_direction = SOP_READ_FLAG;
4826 break;
4827 case DMA_FROM_DEVICE:
4828 request->data_direction = SOP_WRITE_FLAG;
4829 break;
4830 case DMA_NONE:
4831 request->data_direction = SOP_NO_DIRECTION_FLAG;
4832 break;
4833 case DMA_BIDIRECTIONAL:
4834 request->data_direction = SOP_BIDIRECTIONAL;
4835 break;
4836 default:
4837 dev_err(&ctrl_info->pci_dev->dev,
4838 "unknown data direction: %d\n",
4839 scmd->sc_data_direction);
6c223761
KB
4840 break;
4841 }
4842
4843 if (encryption_info) {
4844 request->encryption_enable = true;
4845 put_unaligned_le16(encryption_info->data_encryption_key_index,
4846 &request->data_encryption_key_index);
4847 put_unaligned_le32(encryption_info->encrypt_tweak_lower,
4848 &request->encrypt_tweak_lower);
4849 put_unaligned_le32(encryption_info->encrypt_tweak_upper,
4850 &request->encrypt_tweak_upper);
4851 }
4852
4853 rc = pqi_build_aio_sg_list(ctrl_info, request, scmd, io_request);
4854 if (rc) {
4855 pqi_free_io_request(io_request);
4856 return SCSI_MLQUEUE_HOST_BUSY;
4857 }
4858
4859 pqi_start_io(ctrl_info, queue_group, AIO_PATH, io_request);
4860
4861 return 0;
4862}
4863
061ef06a
KB
4864static inline u16 pqi_get_hw_queue(struct pqi_ctrl_info *ctrl_info,
4865 struct scsi_cmnd *scmd)
4866{
4867 u16 hw_queue;
4868
4869 hw_queue = blk_mq_unique_tag_to_hwq(blk_mq_unique_tag(scmd->request));
4870 if (hw_queue > ctrl_info->max_hw_queue_index)
4871 hw_queue = 0;
4872
4873 return hw_queue;
4874}
4875
7561a7e4
KB
4876/*
4877 * This function gets called just before we hand the completed SCSI request
4878 * back to the SML (SCSI midlayer).
4879 */
4880
4881void pqi_prep_for_scsi_done(struct scsi_cmnd *scmd)
4882{
4883 struct pqi_scsi_dev *device;
4884
4885 device = scmd->device->hostdata;
4886 atomic_dec(&device->scsi_cmds_outstanding);
4887}
4888
6c223761 4889static int pqi_scsi_queue_command(struct Scsi_Host *shost,
7d81d2b8 4890 struct scsi_cmnd *scmd)
6c223761
KB
4891{
4892 int rc;
4893 struct pqi_ctrl_info *ctrl_info;
4894 struct pqi_scsi_dev *device;
061ef06a 4895 u16 hw_queue;
6c223761
KB
4896 struct pqi_queue_group *queue_group;
4897 bool raid_bypassed;
4898
4899 device = scmd->device->hostdata;
6c223761
KB
4900 ctrl_info = shost_to_hba(shost);
4901
7561a7e4
KB
4902 atomic_inc(&device->scsi_cmds_outstanding);
4903
6c223761
KB
4904 if (pqi_ctrl_offline(ctrl_info)) {
4905 set_host_byte(scmd, DID_NO_CONNECT);
4906 pqi_scsi_done(scmd);
4907 return 0;
4908 }
4909
7561a7e4
KB
4910 pqi_ctrl_busy(ctrl_info);
4911 if (pqi_ctrl_blocked(ctrl_info) || pqi_device_in_reset(device)) {
4912 rc = SCSI_MLQUEUE_HOST_BUSY;
4913 goto out;
4914 }
4915
7d81d2b8
KB
4916 /*
4917 * This is necessary because the SML doesn't zero out this field during
4918 * error recovery.
4919 */
4920 scmd->result = 0;
4921
061ef06a
KB
4922 hw_queue = pqi_get_hw_queue(ctrl_info, scmd);
4923 queue_group = &ctrl_info->queue_groups[hw_queue];
6c223761
KB
4924
4925 if (pqi_is_logical_device(device)) {
4926 raid_bypassed = false;
4927 if (device->offload_enabled &&
57292b58 4928 !blk_rq_is_passthrough(scmd->request)) {
6c223761
KB
4929 rc = pqi_raid_bypass_submit_scsi_cmd(ctrl_info, device,
4930 scmd, queue_group);
376fb880
KB
4931 if (rc == 0 || rc == SCSI_MLQUEUE_HOST_BUSY)
4932 raid_bypassed = true;
6c223761
KB
4933 }
4934 if (!raid_bypassed)
4935 rc = pqi_raid_submit_scsi_cmd(ctrl_info, device, scmd,
4936 queue_group);
4937 } else {
4938 if (device->aio_enabled)
4939 rc = pqi_aio_submit_scsi_cmd(ctrl_info, device, scmd,
4940 queue_group);
4941 else
4942 rc = pqi_raid_submit_scsi_cmd(ctrl_info, device, scmd,
4943 queue_group);
4944 }
4945
7561a7e4
KB
4946out:
4947 pqi_ctrl_unbusy(ctrl_info);
4948 if (rc)
4949 atomic_dec(&device->scsi_cmds_outstanding);
4950
6c223761
KB
4951 return rc;
4952}
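/*
 * queuecommand dispatch summary: an offline controller fails the command
 * immediately with DID_NO_CONNECT; a blocked controller or a device in
 * reset returns SCSI_MLQUEUE_HOST_BUSY. Otherwise the blk-mq hardware
 * queue selects the queue group, logical devices with offload enabled try
 * the RAID bypass path first for non-passthrough requests (falling back to
 * the RAID path only if the bypass attempt is rejected outright), and
 * physical devices use the AIO path when aio_enabled is set. The
 * per-device outstanding-command count taken here is dropped again either
 * on a submission error or in pqi_prep_for_scsi_done() at completion time.
 */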
4953
7561a7e4
KB
4954static int pqi_wait_until_queued_io_drained(struct pqi_ctrl_info *ctrl_info,
4955 struct pqi_queue_group *queue_group)
4956{
4957 unsigned int path;
4958 unsigned long flags;
4959 bool list_is_empty;
4960
4961 for (path = 0; path < 2; path++) {
4962 while (1) {
4963 spin_lock_irqsave(
4964 &queue_group->submit_lock[path], flags);
4965 list_is_empty =
4966 list_empty(&queue_group->request_list[path]);
4967 spin_unlock_irqrestore(
4968 &queue_group->submit_lock[path], flags);
4969 if (list_is_empty)
4970 break;
4971 pqi_check_ctrl_health(ctrl_info);
4972 if (pqi_ctrl_offline(ctrl_info))
4973 return -ENXIO;
4974 usleep_range(1000, 2000);
4975 }
4976 }
4977
4978 return 0;
4979}
4980
4981static int pqi_wait_until_inbound_queues_empty(struct pqi_ctrl_info *ctrl_info)
4982{
4983 int rc;
4984 unsigned int i;
4985 unsigned int path;
4986 struct pqi_queue_group *queue_group;
4987 pqi_index_t iq_pi;
4988 pqi_index_t iq_ci;
4989
4990 for (i = 0; i < ctrl_info->num_queue_groups; i++) {
4991 queue_group = &ctrl_info->queue_groups[i];
4992
4993 rc = pqi_wait_until_queued_io_drained(ctrl_info, queue_group);
4994 if (rc)
4995 return rc;
4996
4997 for (path = 0; path < 2; path++) {
4998 iq_pi = queue_group->iq_pi_copy[path];
4999
5000 while (1) {
5001 iq_ci = *queue_group->iq_ci[path];
5002 if (iq_ci == iq_pi)
5003 break;
5004 pqi_check_ctrl_health(ctrl_info);
5005 if (pqi_ctrl_offline(ctrl_info))
5006 return -ENXIO;
5007 usleep_range(1000, 2000);
5008 }
5009 }
5010 }
5011
5012 return 0;
5013}
5014
5015static void pqi_fail_io_queued_for_device(struct pqi_ctrl_info *ctrl_info,
5016 struct pqi_scsi_dev *device)
5017{
5018 unsigned int i;
5019 unsigned int path;
5020 struct pqi_queue_group *queue_group;
5021 unsigned long flags;
5022 struct pqi_io_request *io_request;
5023 struct pqi_io_request *next;
5024 struct scsi_cmnd *scmd;
5025 struct pqi_scsi_dev *scsi_device;
5026
5027 for (i = 0; i < ctrl_info->num_queue_groups; i++) {
5028 queue_group = &ctrl_info->queue_groups[i];
5029
5030 for (path = 0; path < 2; path++) {
5031 spin_lock_irqsave(
5032 &queue_group->submit_lock[path], flags);
5033
5034 list_for_each_entry_safe(io_request, next,
5035 &queue_group->request_list[path],
5036 request_list_entry) {
5037 scmd = io_request->scmd;
5038 if (!scmd)
5039 continue;
5040
5041 scsi_device = scmd->device->hostdata;
5042 if (scsi_device != device)
5043 continue;
5044
5045 list_del(&io_request->request_list_entry);
5046 set_host_byte(scmd, DID_RESET);
5047 pqi_scsi_done(scmd);
5048 }
5049
5050 spin_unlock_irqrestore(
5051 &queue_group->submit_lock[path], flags);
5052 }
5053 }
5054}
5055
061ef06a
KB
5056static int pqi_device_wait_for_pending_io(struct pqi_ctrl_info *ctrl_info,
5057 struct pqi_scsi_dev *device)
5058{
5059 while (atomic_read(&device->scsi_cmds_outstanding)) {
5060 pqi_check_ctrl_health(ctrl_info);
5061 if (pqi_ctrl_offline(ctrl_info))
5062 return -ENXIO;
5063 usleep_range(1000, 2000);
5064 }
5065
5066 return 0;
5067}
5068
5069static int pqi_ctrl_wait_for_pending_io(struct pqi_ctrl_info *ctrl_info)
5070{
5071 bool io_pending;
5072 unsigned long flags;
5073 struct pqi_scsi_dev *device;
5074
5075 while (1) {
5076 io_pending = false;
5077
5078 spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);
5079 list_for_each_entry(device, &ctrl_info->scsi_device_list,
5080 scsi_device_list_entry) {
5081 if (atomic_read(&device->scsi_cmds_outstanding)) {
5082 io_pending = true;
5083 break;
5084 }
5085 }
5086 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock,
5087 flags);
5088
5089 if (!io_pending)
5090 break;
5091
5092 pqi_check_ctrl_health(ctrl_info);
5093 if (pqi_ctrl_offline(ctrl_info))
5094 return -ENXIO;
5095
5096 usleep_range(1000, 2000);
5097 }
5098
5099 return 0;
5100}
5101
14bb215d
KB
5102static void pqi_lun_reset_complete(struct pqi_io_request *io_request,
5103 void *context)
6c223761 5104{
14bb215d 5105 struct completion *waiting = context;
6c223761 5106
14bb215d
KB
5107 complete(waiting);
5108}
6c223761 5109
14bb215d
KB
5110#define PQI_LUN_RESET_TIMEOUT_SECS 10
5111
5112static int pqi_wait_for_lun_reset_completion(struct pqi_ctrl_info *ctrl_info,
5113 struct pqi_scsi_dev *device, struct completion *wait)
5114{
5115 int rc;
14bb215d
KB
5116
5117 while (1) {
5118 if (wait_for_completion_io_timeout(wait,
5119 PQI_LUN_RESET_TIMEOUT_SECS * HZ)) {
5120 rc = 0;
5121 break;
6c223761
KB
5122 }
5123
14bb215d
KB
5124 pqi_check_ctrl_health(ctrl_info);
5125 if (pqi_ctrl_offline(ctrl_info)) {
4e8415e3 5126 rc = -ENXIO;
14bb215d
KB
5127 break;
5128 }
6c223761 5129 }
6c223761 5130
14bb215d 5131 return rc;
6c223761
KB
5132}
5133
14bb215d 5134static int pqi_lun_reset(struct pqi_ctrl_info *ctrl_info,
6c223761
KB
5135 struct pqi_scsi_dev *device)
5136{
5137 int rc;
5138 struct pqi_io_request *io_request;
5139 DECLARE_COMPLETION_ONSTACK(wait);
5140 struct pqi_task_management_request *request;
5141
6c223761 5142 io_request = pqi_alloc_io_request(ctrl_info);
14bb215d 5143 io_request->io_complete_callback = pqi_lun_reset_complete;
6c223761
KB
5144 io_request->context = &wait;
5145
5146 request = io_request->iu;
5147 memset(request, 0, sizeof(*request));
5148
5149 request->header.iu_type = PQI_REQUEST_IU_TASK_MANAGEMENT;
5150 put_unaligned_le16(sizeof(*request) - PQI_REQUEST_HEADER_LENGTH,
5151 &request->header.iu_length);
5152 put_unaligned_le16(io_request->index, &request->request_id);
5153 memcpy(request->lun_number, device->scsi3addr,
5154 sizeof(request->lun_number));
5155 request->task_management_function = SOP_TASK_MANAGEMENT_LUN_RESET;
5156
5157 pqi_start_io(ctrl_info,
5158 &ctrl_info->queue_groups[PQI_DEFAULT_QUEUE_GROUP], RAID_PATH,
5159 io_request);
5160
14bb215d
KB
5161 rc = pqi_wait_for_lun_reset_completion(ctrl_info, device, &wait);
5162 if (rc == 0)
6c223761 5163 rc = io_request->status;
6c223761
KB
5164
5165 pqi_free_io_request(io_request);
6c223761
KB
5166
5167 return rc;
5168}
5169
5170/* Performs a reset at the LUN level. */
5171
5172static int pqi_device_reset(struct pqi_ctrl_info *ctrl_info,
5173 struct pqi_scsi_dev *device)
5174{
5175 int rc;
5176
14bb215d 5177 rc = pqi_lun_reset(ctrl_info, device);
061ef06a
KB
5178 if (rc == 0)
5179 rc = pqi_device_wait_for_pending_io(ctrl_info, device);
6c223761 5180
14bb215d 5181 return rc == 0 ? SUCCESS : FAILED;
6c223761
KB
5182}
5183
5184static int pqi_eh_device_reset_handler(struct scsi_cmnd *scmd)
5185{
5186 int rc;
7561a7e4 5187 struct Scsi_Host *shost;
6c223761
KB
5188 struct pqi_ctrl_info *ctrl_info;
5189 struct pqi_scsi_dev *device;
5190
7561a7e4
KB
5191 shost = scmd->device->host;
5192 ctrl_info = shost_to_hba(shost);
6c223761
KB
5193 device = scmd->device->hostdata;
5194
5195 dev_err(&ctrl_info->pci_dev->dev,
5196 "resetting scsi %d:%d:%d:%d\n",
7561a7e4 5197 shost->host_no, device->bus, device->target, device->lun);
6c223761 5198
7561a7e4
KB
5199 pqi_check_ctrl_health(ctrl_info);
5200 if (pqi_ctrl_offline(ctrl_info)) {
5201 rc = FAILED;
5202 goto out;
5203 }
6c223761 5204
7561a7e4
KB
5205 mutex_lock(&ctrl_info->lun_reset_mutex);
5206
5207 pqi_ctrl_block_requests(ctrl_info);
5208 pqi_ctrl_wait_until_quiesced(ctrl_info);
5209 pqi_fail_io_queued_for_device(ctrl_info, device);
5210 rc = pqi_wait_until_inbound_queues_empty(ctrl_info);
5211 pqi_device_reset_start(device);
5212 pqi_ctrl_unblock_requests(ctrl_info);
5213
5214 if (rc)
5215 rc = FAILED;
5216 else
5217 rc = pqi_device_reset(ctrl_info, device);
5218
5219 pqi_device_reset_done(device);
5220
5221 mutex_unlock(&ctrl_info->lun_reset_mutex);
5222
5223out:
6c223761
KB
5224 dev_err(&ctrl_info->pci_dev->dev,
5225 "reset of scsi %d:%d:%d:%d: %s\n",
7561a7e4 5226 shost->host_no, device->bus, device->target, device->lun,
6c223761
KB
5227 rc == SUCCESS ? "SUCCESS" : "FAILED");
5228
5229 return rc;
5230}
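/*
 * Device reset sequence, roughly: block new requests and wait for the
 * controller to quiesce, fail any still-queued commands for the device
 * with DID_RESET, drain the inbound queues, then issue a LUN RESET task
 * management request that is waited on in PQI_LUN_RESET_TIMEOUT_SECS
 * chunks (checking controller health between waits), and finally wait for
 * the device's outstanding command count to reach zero before reporting
 * SUCCESS or FAILED to the midlayer.
 */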
5231
5232static int pqi_slave_alloc(struct scsi_device *sdev)
5233{
5234 struct pqi_scsi_dev *device;
5235 unsigned long flags;
5236 struct pqi_ctrl_info *ctrl_info;
5237 struct scsi_target *starget;
5238 struct sas_rphy *rphy;
5239
5240 ctrl_info = shost_to_hba(sdev->host);
5241
5242 spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);
5243
5244 if (sdev_channel(sdev) == PQI_PHYSICAL_DEVICE_BUS) {
5245 starget = scsi_target(sdev);
5246 rphy = target_to_rphy(starget);
5247 device = pqi_find_device_by_sas_rphy(ctrl_info, rphy);
5248 if (device) {
5249 device->target = sdev_id(sdev);
5250 device->lun = sdev->lun;
5251 device->target_lun_valid = true;
5252 }
5253 } else {
5254 device = pqi_find_scsi_dev(ctrl_info, sdev_channel(sdev),
5255 sdev_id(sdev), sdev->lun);
5256 }
5257
94086f5b 5258 if (device) {
6c223761
KB
5259 sdev->hostdata = device;
5260 device->sdev = sdev;
5261 if (device->queue_depth) {
5262 device->advertised_queue_depth = device->queue_depth;
5263 scsi_change_queue_depth(sdev,
5264 device->advertised_queue_depth);
5265 }
5266 }
5267
5268 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
5269
5270 return 0;
5271}
5272
52198226
CH
5273static int pqi_map_queues(struct Scsi_Host *shost)
5274{
5275 struct pqi_ctrl_info *ctrl_info = shost_to_hba(shost);
5276
5277 return blk_mq_pci_map_queues(&shost->tag_set, ctrl_info->pci_dev);
5278}
5279
6c223761
KB
5280static int pqi_getpciinfo_ioctl(struct pqi_ctrl_info *ctrl_info,
5281 void __user *arg)
5282{
5283 struct pci_dev *pci_dev;
5284 u32 subsystem_vendor;
5285 u32 subsystem_device;
5286 cciss_pci_info_struct pciinfo;
5287
5288 if (!arg)
5289 return -EINVAL;
5290
5291 pci_dev = ctrl_info->pci_dev;
5292
5293 pciinfo.domain = pci_domain_nr(pci_dev->bus);
5294 pciinfo.bus = pci_dev->bus->number;
5295 pciinfo.dev_fn = pci_dev->devfn;
5296 subsystem_vendor = pci_dev->subsystem_vendor;
5297 subsystem_device = pci_dev->subsystem_device;
5298 pciinfo.board_id = ((subsystem_device << 16) & 0xffff0000) |
5299 subsystem_vendor;
5300
5301 if (copy_to_user(arg, &pciinfo, sizeof(pciinfo)))
5302 return -EFAULT;
5303
5304 return 0;
5305}
5306
5307static int pqi_getdrivver_ioctl(void __user *arg)
5308{
5309 u32 version;
5310
5311 if (!arg)
5312 return -EINVAL;
5313
5314 version = (DRIVER_MAJOR << 28) | (DRIVER_MINOR << 24) |
5315 (DRIVER_RELEASE << 16) | DRIVER_REVISION;
5316
5317 if (copy_to_user(arg, &version, sizeof(version)))
5318 return -EFAULT;
5319
5320 return 0;
5321}
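/*
 * The version word above is packed as major << 28 | minor << 24 |
 * release << 16 | revision. For illustration, a hypothetical version
 * 1.2.3-456 would be reported as (1 << 28) | (2 << 24) | (3 << 16) | 456
 * = 0x120301c8.
 */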
5322
5323struct ciss_error_info {
5324 u8 scsi_status;
5325 int command_status;
5326 size_t sense_data_length;
5327};
5328
5329static void pqi_error_info_to_ciss(struct pqi_raid_error_info *pqi_error_info,
5330 struct ciss_error_info *ciss_error_info)
5331{
5332 int ciss_cmd_status;
5333 size_t sense_data_length;
5334
5335 switch (pqi_error_info->data_out_result) {
5336 case PQI_DATA_IN_OUT_GOOD:
5337 ciss_cmd_status = CISS_CMD_STATUS_SUCCESS;
5338 break;
5339 case PQI_DATA_IN_OUT_UNDERFLOW:
5340 ciss_cmd_status = CISS_CMD_STATUS_DATA_UNDERRUN;
5341 break;
5342 case PQI_DATA_IN_OUT_BUFFER_OVERFLOW:
5343 ciss_cmd_status = CISS_CMD_STATUS_DATA_OVERRUN;
5344 break;
5345 case PQI_DATA_IN_OUT_PROTOCOL_ERROR:
5346 case PQI_DATA_IN_OUT_BUFFER_ERROR:
5347 case PQI_DATA_IN_OUT_BUFFER_OVERFLOW_DESCRIPTOR_AREA:
5348 case PQI_DATA_IN_OUT_BUFFER_OVERFLOW_BRIDGE:
5349 case PQI_DATA_IN_OUT_ERROR:
5350 ciss_cmd_status = CISS_CMD_STATUS_PROTOCOL_ERROR;
5351 break;
5352 case PQI_DATA_IN_OUT_HARDWARE_ERROR:
5353 case PQI_DATA_IN_OUT_PCIE_FABRIC_ERROR:
5354 case PQI_DATA_IN_OUT_PCIE_COMPLETION_TIMEOUT:
5355 case PQI_DATA_IN_OUT_PCIE_COMPLETER_ABORT_RECEIVED:
5356 case PQI_DATA_IN_OUT_PCIE_UNSUPPORTED_REQUEST_RECEIVED:
5357 case PQI_DATA_IN_OUT_PCIE_ECRC_CHECK_FAILED:
5358 case PQI_DATA_IN_OUT_PCIE_UNSUPPORTED_REQUEST:
5359 case PQI_DATA_IN_OUT_PCIE_ACS_VIOLATION:
5360 case PQI_DATA_IN_OUT_PCIE_TLP_PREFIX_BLOCKED:
5361 case PQI_DATA_IN_OUT_PCIE_POISONED_MEMORY_READ:
5362 ciss_cmd_status = CISS_CMD_STATUS_HARDWARE_ERROR;
5363 break;
5364 case PQI_DATA_IN_OUT_UNSOLICITED_ABORT:
5365 ciss_cmd_status = CISS_CMD_STATUS_UNSOLICITED_ABORT;
5366 break;
5367 case PQI_DATA_IN_OUT_ABORTED:
5368 ciss_cmd_status = CISS_CMD_STATUS_ABORTED;
5369 break;
5370 case PQI_DATA_IN_OUT_TIMEOUT:
5371 ciss_cmd_status = CISS_CMD_STATUS_TIMEOUT;
5372 break;
5373 default:
5374 ciss_cmd_status = CISS_CMD_STATUS_TARGET_STATUS;
5375 break;
5376 }
5377
5378 sense_data_length =
5379 get_unaligned_le16(&pqi_error_info->sense_data_length);
5380 if (sense_data_length == 0)
5381 sense_data_length =
5382 get_unaligned_le16(&pqi_error_info->response_data_length);
5383 if (sense_data_length)
5384 if (sense_data_length > sizeof(pqi_error_info->data))
5385 sense_data_length = sizeof(pqi_error_info->data);
5386
5387 ciss_error_info->scsi_status = pqi_error_info->status;
5388 ciss_error_info->command_status = ciss_cmd_status;
5389 ciss_error_info->sense_data_length = sense_data_length;
5390}
5391
5392static int pqi_passthru_ioctl(struct pqi_ctrl_info *ctrl_info, void __user *arg)
5393{
5394 int rc;
5395 char *kernel_buffer = NULL;
5396 u16 iu_length;
5397 size_t sense_data_length;
5398 IOCTL_Command_struct iocommand;
5399 struct pqi_raid_path_request request;
5400 struct pqi_raid_error_info pqi_error_info;
5401 struct ciss_error_info ciss_error_info;
5402
5403 if (pqi_ctrl_offline(ctrl_info))
5404 return -ENXIO;
5405 if (!arg)
5406 return -EINVAL;
5407 if (!capable(CAP_SYS_RAWIO))
5408 return -EPERM;
5409 if (copy_from_user(&iocommand, arg, sizeof(iocommand)))
5410 return -EFAULT;
5411 if (iocommand.buf_size < 1 &&
5412 iocommand.Request.Type.Direction != XFER_NONE)
5413 return -EINVAL;
5414 if (iocommand.Request.CDBLen > sizeof(request.cdb))
5415 return -EINVAL;
5416 if (iocommand.Request.Type.Type != TYPE_CMD)
5417 return -EINVAL;
5418
5419 switch (iocommand.Request.Type.Direction) {
5420 case XFER_NONE:
5421 case XFER_WRITE:
5422 case XFER_READ:
5423 break;
5424 default:
5425 return -EINVAL;
5426 }
5427
5428 if (iocommand.buf_size > 0) {
5429 kernel_buffer = kmalloc(iocommand.buf_size, GFP_KERNEL);
5430 if (!kernel_buffer)
5431 return -ENOMEM;
5432 if (iocommand.Request.Type.Direction & XFER_WRITE) {
5433 if (copy_from_user(kernel_buffer, iocommand.buf,
5434 iocommand.buf_size)) {
5435 rc = -EFAULT;
5436 goto out;
5437 }
5438 } else {
5439 memset(kernel_buffer, 0, iocommand.buf_size);
5440 }
5441 }
5442
5443 memset(&request, 0, sizeof(request));
5444
5445 request.header.iu_type = PQI_REQUEST_IU_RAID_PATH_IO;
5446 iu_length = offsetof(struct pqi_raid_path_request, sg_descriptors) -
5447 PQI_REQUEST_HEADER_LENGTH;
5448 memcpy(request.lun_number, iocommand.LUN_info.LunAddrBytes,
5449 sizeof(request.lun_number));
5450 memcpy(request.cdb, iocommand.Request.CDB, iocommand.Request.CDBLen);
5451 request.additional_cdb_bytes_usage = SOP_ADDITIONAL_CDB_BYTES_0;
5452
5453 switch (iocommand.Request.Type.Direction) {
5454 case XFER_NONE:
5455 request.data_direction = SOP_NO_DIRECTION_FLAG;
5456 break;
5457 case XFER_WRITE:
5458 request.data_direction = SOP_WRITE_FLAG;
5459 break;
5460 case XFER_READ:
5461 request.data_direction = SOP_READ_FLAG;
5462 break;
5463 }
5464
5465 request.task_attribute = SOP_TASK_ATTRIBUTE_SIMPLE;
5466
5467 if (iocommand.buf_size > 0) {
5468 put_unaligned_le32(iocommand.buf_size, &request.buffer_length);
5469
5470 rc = pqi_map_single(ctrl_info->pci_dev,
5471 &request.sg_descriptors[0], kernel_buffer,
5472 iocommand.buf_size, PCI_DMA_BIDIRECTIONAL);
5473 if (rc)
5474 goto out;
5475
5476 iu_length += sizeof(request.sg_descriptors[0]);
5477 }
5478
5479 put_unaligned_le16(iu_length, &request.header.iu_length);
5480
5481 rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header,
5482 PQI_SYNC_FLAGS_INTERRUPTABLE, &pqi_error_info, NO_TIMEOUT);
5483
5484 if (iocommand.buf_size > 0)
5485 pqi_pci_unmap(ctrl_info->pci_dev, request.sg_descriptors, 1,
5486 PCI_DMA_BIDIRECTIONAL);
5487
5488 memset(&iocommand.error_info, 0, sizeof(iocommand.error_info));
5489
5490 if (rc == 0) {
5491 pqi_error_info_to_ciss(&pqi_error_info, &ciss_error_info);
5492 iocommand.error_info.ScsiStatus = ciss_error_info.scsi_status;
5493 iocommand.error_info.CommandStatus =
5494 ciss_error_info.command_status;
5495 sense_data_length = ciss_error_info.sense_data_length;
5496 if (sense_data_length) {
5497 if (sense_data_length >
5498 sizeof(iocommand.error_info.SenseInfo))
5499 sense_data_length =
5500 sizeof(iocommand.error_info.SenseInfo);
5501 memcpy(iocommand.error_info.SenseInfo,
5502 pqi_error_info.data, sense_data_length);
5503 iocommand.error_info.SenseLen = sense_data_length;
5504 }
5505 }
5506
5507 if (copy_to_user(arg, &iocommand, sizeof(iocommand))) {
5508 rc = -EFAULT;
5509 goto out;
5510 }
5511
5512 if (rc == 0 && iocommand.buf_size > 0 &&
5513 (iocommand.Request.Type.Direction & XFER_READ)) {
5514 if (copy_to_user(iocommand.buf, kernel_buffer,
5515 iocommand.buf_size)) {
5516 rc = -EFAULT;
5517 }
5518 }
5519
5520out:
5521 kfree(kernel_buffer);
5522
5523 return rc;
5524}
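/*
 * Note on the CCISS_PASSTHRU path above: the user's request is
 * validated, any data buffer is copied in (for writes) or zeroed (for
 * reads), the caller's CDB is wrapped in a RAID-path IU and issued
 * synchronously, and the resulting PQI error information is translated
 * back into CISS form before the structure (and any read data) is
 * copied back to user space.
 */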
5525
5526static int pqi_ioctl(struct scsi_device *sdev, int cmd, void __user *arg)
5527{
5528 int rc;
5529 struct pqi_ctrl_info *ctrl_info;
5530
5531 ctrl_info = shost_to_hba(sdev->host);
5532
5533 switch (cmd) {
5534 case CCISS_DEREGDISK:
5535 case CCISS_REGNEWDISK:
5536 case CCISS_REGNEWD:
5537 rc = pqi_scan_scsi_devices(ctrl_info);
5538 break;
5539 case CCISS_GETPCIINFO:
5540 rc = pqi_getpciinfo_ioctl(ctrl_info, arg);
5541 break;
5542 case CCISS_GETDRIVVER:
5543 rc = pqi_getdrivver_ioctl(arg);
5544 break;
5545 case CCISS_PASSTHRU:
5546 rc = pqi_passthru_ioctl(ctrl_info, arg);
5547 break;
5548 default:
5549 rc = -EINVAL;
5550 break;
5551 }
5552
5553 return rc;
5554}
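/*
 * A minimal user-space sketch (not part of the driver) of how the
 * CCISS_PASSTHRU ioctl handled above might be exercised.  It assumes
 * the IOCTL_Command_struct layout from <linux/cciss_ioctl.h> and a
 * file descriptor "fd" already opened on a block or sg node backed by
 * a smartpqi controller; a 6-byte INQUIRY CDB is used purely as an
 * example:
 *
 *	#include <string.h>
 *	#include <sys/ioctl.h>
 *	#include <linux/cciss_ioctl.h>
 *
 *	static int send_inquiry(int fd, void *buf, unsigned char len)
 *	{
 *		IOCTL_Command_struct ioc;
 *
 *		memset(&ioc, 0, sizeof(ioc));
 *		ioc.Request.CDBLen = 6;
 *		ioc.Request.Type.Type = TYPE_CMD;
 *		ioc.Request.Type.Direction = XFER_READ;
 *		ioc.Request.CDB[0] = 0x12;
 *		ioc.Request.CDB[4] = len;
 *		ioc.buf_size = len;
 *		ioc.buf = buf;
 *
 *		return ioctl(fd, CCISS_PASSTHRU, &ioc);
 *	}
 */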
5555
5556static ssize_t pqi_version_show(struct device *dev,
5557 struct device_attribute *attr, char *buffer)
5558{
5559 ssize_t count = 0;
5560 struct Scsi_Host *shost;
5561 struct pqi_ctrl_info *ctrl_info;
5562
5563 shost = class_to_shost(dev);
5564 ctrl_info = shost_to_hba(shost);
5565
5566 count += snprintf(buffer + count, PAGE_SIZE - count,
5567 " driver: %s\n", DRIVER_VERSION BUILD_TIMESTAMP);
5568
5569 count += snprintf(buffer + count, PAGE_SIZE - count,
5570 "firmware: %s\n", ctrl_info->firmware_version);
5571
5572 return count;
5573}
5574
5575static ssize_t pqi_host_rescan_store(struct device *dev,
5576 struct device_attribute *attr, const char *buffer, size_t count)
5577{
5578 struct Scsi_Host *shost = class_to_shost(dev);
5579
5580 pqi_scan_start(shost);
5581
5582 return count;
5583}
5584
3c50976f
KB
5585static ssize_t pqi_lockup_action_show(struct device *dev,
5586 struct device_attribute *attr, char *buffer)
5587{
5588 int count = 0;
5589 unsigned int i;
5590
5591 for (i = 0; i < ARRAY_SIZE(pqi_lockup_actions); i++) {
5592 if (pqi_lockup_actions[i].action == pqi_lockup_action)
5593 count += snprintf(buffer + count, PAGE_SIZE - count,
5594 "[%s] ", pqi_lockup_actions[i].name);
5595 else
5596 count += snprintf(buffer + count, PAGE_SIZE - count,
5597 "%s ", pqi_lockup_actions[i].name);
5598 }
5599
5600 count += snprintf(buffer + count, PAGE_SIZE - count, "\n");
5601
5602 return count;
5603}
5604
5605static ssize_t pqi_lockup_action_store(struct device *dev,
5606 struct device_attribute *attr, const char *buffer, size_t count)
5607{
5608 unsigned int i;
5609 char *action_name;
5610 char action_name_buffer[32];
5611
5612 strlcpy(action_name_buffer, buffer, sizeof(action_name_buffer));
5613 action_name = strstrip(action_name_buffer);
5614
5615 for (i = 0; i < ARRAY_SIZE(pqi_lockup_actions); i++) {
5616 if (strcmp(action_name, pqi_lockup_actions[i].name) == 0) {
5617 pqi_lockup_action = pqi_lockup_actions[i].action;
5618 return count;
5619 }
5620 }
5621
5622 return -EINVAL;
5623}
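/*
 * The attribute pair above exposes the controller-lockup policy through
 * sysfs: reading lockup_action lists the supported actions with the
 * current one in brackets, and writing one of the action names from
 * pqi_lockup_actions (none, reboot or panic) selects it.
 */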
5624
cbe0c7b1
KB
5625static DEVICE_ATTR(version, 0444, pqi_version_show, NULL);
5626static DEVICE_ATTR(rescan, 0200, NULL, pqi_host_rescan_store);
3c50976f
KB
5627static DEVICE_ATTR(lockup_action, 0644,
5628 pqi_lockup_action_show, pqi_lockup_action_store);
6c223761
KB
5629
5630static struct device_attribute *pqi_shost_attrs[] = {
5631 &dev_attr_version,
5632 &dev_attr_rescan,
3c50976f 5633 &dev_attr_lockup_action,
6c223761
KB
5634 NULL
5635};
5636
5637static ssize_t pqi_sas_address_show(struct device *dev,
5638 struct device_attribute *attr, char *buffer)
5639{
5640 struct pqi_ctrl_info *ctrl_info;
5641 struct scsi_device *sdev;
5642 struct pqi_scsi_dev *device;
5643 unsigned long flags;
5644 u64 sas_address;
5645
5646 sdev = to_scsi_device(dev);
5647 ctrl_info = shost_to_hba(sdev->host);
5648
5649 spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);
5650
5651 device = sdev->hostdata;
5652 if (pqi_is_logical_device(device)) {
5653 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock,
5654 flags);
5655 return -ENODEV;
5656 }
5657 sas_address = device->sas_address;
5658
5659 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
5660
5661 return snprintf(buffer, PAGE_SIZE, "0x%016llx\n", sas_address);
5662}
5663
5664static ssize_t pqi_ssd_smart_path_enabled_show(struct device *dev,
5665 struct device_attribute *attr, char *buffer)
5666{
5667 struct pqi_ctrl_info *ctrl_info;
5668 struct scsi_device *sdev;
5669 struct pqi_scsi_dev *device;
5670 unsigned long flags;
5671
5672 sdev = to_scsi_device(dev);
5673 ctrl_info = shost_to_hba(sdev->host);
5674
5675 spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);
5676
5677 device = sdev->hostdata;
5678 buffer[0] = device->offload_enabled ? '1' : '0';
5679 buffer[1] = '\n';
5680 buffer[2] = '\0';
5681
5682 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
5683
5684 return 2;
5685}
5686
cbe0c7b1
KB
5687static DEVICE_ATTR(sas_address, 0444, pqi_sas_address_show, NULL);
5688static DEVICE_ATTR(ssd_smart_path_enabled, 0444,
6c223761
KB
5689 pqi_ssd_smart_path_enabled_show, NULL);
5690
5691static struct device_attribute *pqi_sdev_attrs[] = {
5692 &dev_attr_sas_address,
5693 &dev_attr_ssd_smart_path_enabled,
5694 NULL
5695};
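/*
 * The two per-device attributes above are created for every SCSI device
 * the driver exposes: sas_address reports the SAS address of a physical
 * device (logical volumes return -ENODEV), and ssd_smart_path_enabled
 * reports whether accelerated RAID-bypass I/O (offload_enabled) is
 * currently active for the device.
 */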
5696
5697static struct scsi_host_template pqi_driver_template = {
5698 .module = THIS_MODULE,
5699 .name = DRIVER_NAME_SHORT,
5700 .proc_name = DRIVER_NAME_SHORT,
5701 .queuecommand = pqi_scsi_queue_command,
5702 .scan_start = pqi_scan_start,
5703 .scan_finished = pqi_scan_finished,
5704 .this_id = -1,
5705 .use_clustering = ENABLE_CLUSTERING,
5706 .eh_device_reset_handler = pqi_eh_device_reset_handler,
5707 .ioctl = pqi_ioctl,
5708 .slave_alloc = pqi_slave_alloc,
52198226 5709 .map_queues = pqi_map_queues,
6c223761
KB
5710 .sdev_attrs = pqi_sdev_attrs,
5711 .shost_attrs = pqi_shost_attrs,
5712};
5713
5714static int pqi_register_scsi(struct pqi_ctrl_info *ctrl_info)
5715{
5716 int rc;
5717 struct Scsi_Host *shost;
5718
5719 shost = scsi_host_alloc(&pqi_driver_template, sizeof(ctrl_info));
5720 if (!shost) {
5721 dev_err(&ctrl_info->pci_dev->dev,
5722 "scsi_host_alloc failed for controller %u\n",
5723 ctrl_info->ctrl_id);
5724 return -ENOMEM;
5725 }
5726
5727 shost->io_port = 0;
5728 shost->n_io_port = 0;
5729 shost->this_id = -1;
5730 shost->max_channel = PQI_MAX_BUS;
5731 shost->max_cmd_len = MAX_COMMAND_SIZE;
5732 shost->max_lun = ~0;
5733 shost->max_id = ~0;
5734 shost->max_sectors = ctrl_info->max_sectors;
5735 shost->can_queue = ctrl_info->scsi_ml_can_queue;
5736 shost->cmd_per_lun = shost->can_queue;
5737 shost->sg_tablesize = ctrl_info->sg_tablesize;
5738 shost->transportt = pqi_sas_transport_template;
52198226 5739 shost->irq = pci_irq_vector(ctrl_info->pci_dev, 0);
6c223761
KB
5740 shost->unique_id = shost->irq;
5741 shost->nr_hw_queues = ctrl_info->num_queue_groups;
5742 shost->hostdata[0] = (unsigned long)ctrl_info;
5743
5744 rc = scsi_add_host(shost, &ctrl_info->pci_dev->dev);
5745 if (rc) {
5746 dev_err(&ctrl_info->pci_dev->dev,
5747 "scsi_add_host failed for controller %u\n",
5748 ctrl_info->ctrl_id);
5749 goto free_host;
5750 }
5751
5752 rc = pqi_add_sas_host(shost, ctrl_info);
5753 if (rc) {
5754 dev_err(&ctrl_info->pci_dev->dev,
5755 "add SAS host failed for controller %u\n",
5756 ctrl_info->ctrl_id);
5757 goto remove_host;
5758 }
5759
5760 ctrl_info->scsi_host = shost;
5761
5762 return 0;
5763
5764remove_host:
5765 scsi_remove_host(shost);
5766free_host:
5767 scsi_host_put(shost);
5768
5769 return rc;
5770}
5771
5772static void pqi_unregister_scsi(struct pqi_ctrl_info *ctrl_info)
5773{
5774 struct Scsi_Host *shost;
5775
5776 pqi_delete_sas_host(ctrl_info);
5777
5778 shost = ctrl_info->scsi_host;
5779 if (!shost)
5780 return;
5781
5782 scsi_remove_host(shost);
5783 scsi_host_put(shost);
5784}
5785
5786#define PQI_RESET_ACTION_RESET 0x1
5787
5788#define PQI_RESET_TYPE_NO_RESET 0x0
5789#define PQI_RESET_TYPE_SOFT_RESET 0x1
5790#define PQI_RESET_TYPE_FIRM_RESET 0x2
5791#define PQI_RESET_TYPE_HARD_RESET 0x3
5792
5793static int pqi_reset(struct pqi_ctrl_info *ctrl_info)
5794{
5795 int rc;
5796 u32 reset_params;
5797
5798 reset_params = (PQI_RESET_ACTION_RESET << 5) |
5799 PQI_RESET_TYPE_HARD_RESET;
5800
5801 writel(reset_params,
5802 &ctrl_info->pqi_registers->device_reset);
5803
5804 rc = pqi_wait_for_pqi_mode_ready(ctrl_info);
5805 if (rc)
5806 dev_err(&ctrl_info->pci_dev->dev,
5807 "PQI reset failed\n");
5808
5809 return rc;
5810}
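/*
 * The reset request written above packs the reset action into the upper
 * bits (shifted left by 5) and the reset type into the low-order bits
 * of the PQI device-reset register; a hard reset is always requested,
 * after which the driver polls until the device reports PQI mode ready.
 */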
5811
5812static int pqi_get_ctrl_firmware_version(struct pqi_ctrl_info *ctrl_info)
5813{
5814 int rc;
5815 struct bmic_identify_controller *identify;
5816
5817 identify = kmalloc(sizeof(*identify), GFP_KERNEL);
5818 if (!identify)
5819 return -ENOMEM;
5820
5821 rc = pqi_identify_controller(ctrl_info, identify);
5822 if (rc)
5823 goto out;
5824
5825 memcpy(ctrl_info->firmware_version, identify->firmware_version,
5826 sizeof(identify->firmware_version));
5827 ctrl_info->firmware_version[sizeof(identify->firmware_version)] = '\0';
5828 snprintf(ctrl_info->firmware_version +
5829 strlen(ctrl_info->firmware_version),
5830 sizeof(ctrl_info->firmware_version),
5831 "-%u", get_unaligned_le16(&identify->firmware_build_number));
5832
5833out:
5834 kfree(identify);
5835
5836 return rc;
5837}
5838
98f87667
KB
5839static int pqi_process_config_table(struct pqi_ctrl_info *ctrl_info)
5840{
5841 u32 table_length;
5842 u32 section_offset;
5843 void __iomem *table_iomem_addr;
5844 struct pqi_config_table *config_table;
5845 struct pqi_config_table_section_header *section;
5846
5847 table_length = ctrl_info->config_table_length;
5848
5849 config_table = kmalloc(table_length, GFP_KERNEL);
5850 if (!config_table) {
5851 dev_err(&ctrl_info->pci_dev->dev,
d87d5474 5852 "failed to allocate memory for PQI configuration table\n");
98f87667
KB
5853 return -ENOMEM;
5854 }
5855
5856 /*
5857 * Copy the config table contents from I/O memory space into the
5858 * temporary buffer.
5859 */
5860 table_iomem_addr = ctrl_info->iomem_base +
5861 ctrl_info->config_table_offset;
5862 memcpy_fromio(config_table, table_iomem_addr, table_length);
5863
5864 section_offset =
5865 get_unaligned_le32(&config_table->first_section_offset);
5866
5867 while (section_offset) {
5868 section = (void *)config_table + section_offset;
5869
5870 switch (get_unaligned_le16(&section->section_id)) {
5871 case PQI_CONFIG_TABLE_SECTION_HEARTBEAT:
5872 ctrl_info->heartbeat_counter = table_iomem_addr +
5873 section_offset +
5874 offsetof(struct pqi_config_table_heartbeat,
5875 heartbeat_counter);
5876 break;
5877 }
5878
5879 section_offset =
5880 get_unaligned_le16(&section->next_section_offset);
5881 }
5882
5883 kfree(config_table);
5884
5885 return 0;
5886}
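/*
 * pqi_process_config_table() snapshots the controller's configuration
 * table out of BAR space and walks its sections by next_section_offset
 * (an offset of 0 terminates the list).  Only the heartbeat section is
 * consumed here, and heartbeat_counter is deliberately left pointing at
 * the live register in I/O memory rather than at the temporary copy,
 * which is freed before returning.
 */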
5887
162d7753
KB
5888/* Switches the controller from PQI mode back into SIS mode. */
5889
5890static int pqi_revert_to_sis_mode(struct pqi_ctrl_info *ctrl_info)
5891{
5892 int rc;
5893
061ef06a 5894 pqi_change_irq_mode(ctrl_info, IRQ_MODE_NONE);
162d7753
KB
5895 rc = pqi_reset(ctrl_info);
5896 if (rc)
5897 return rc;
5898 sis_reenable_sis_mode(ctrl_info);
5899 pqi_save_ctrl_mode(ctrl_info, SIS_MODE);
5900
5901 return 0;
5902}
5903
5904/*
5905 * If the controller isn't already in SIS mode, this function forces it into
5906 * SIS mode.
5907 */
5908
5909static int pqi_force_sis_mode(struct pqi_ctrl_info *ctrl_info)
ff6abb73
KB
5910{
5911 if (!sis_is_firmware_running(ctrl_info))
5912 return -ENXIO;
5913
162d7753
KB
5914 if (pqi_get_ctrl_mode(ctrl_info) == SIS_MODE)
5915 return 0;
5916
5917 if (sis_is_kernel_up(ctrl_info)) {
5918 pqi_save_ctrl_mode(ctrl_info, SIS_MODE);
5919 return 0;
ff6abb73
KB
5920 }
5921
162d7753 5922 return pqi_revert_to_sis_mode(ctrl_info);
ff6abb73
KB
5923}
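/*
 * Taken together, the two helpers above cover the possible controller
 * states at (re)initialization: if SIS firmware isn't running the
 * controller is unusable (-ENXIO); if it already reports SIS mode
 * nothing needs to be done; if the firmware kernel is up only the saved
 * mode is corrected; otherwise the controller is reverted from PQI to
 * SIS mode via a full PQI reset.
 */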
5924
6c223761
KB
5925static int pqi_ctrl_init(struct pqi_ctrl_info *ctrl_info)
5926{
5927 int rc;
5928
162d7753
KB
5929 rc = pqi_force_sis_mode(ctrl_info);
5930 if (rc)
5931 return rc;
6c223761
KB
5932
5933 /*
5934 * Wait until the controller is ready to start accepting SIS
5935 * commands.
5936 */
5937 rc = sis_wait_for_ctrl_ready(ctrl_info);
8845fdfa 5938 if (rc)
6c223761 5939 return rc;
6c223761
KB
5940
5941 /*
5942 * Get the controller properties. This allows us to determine
5943 * whether it supports PQI mode.
5944 */
5945 rc = sis_get_ctrl_properties(ctrl_info);
5946 if (rc) {
5947 dev_err(&ctrl_info->pci_dev->dev,
5948 "error obtaining controller properties\n");
5949 return rc;
5950 }
5951
5952 rc = sis_get_pqi_capabilities(ctrl_info);
5953 if (rc) {
5954 dev_err(&ctrl_info->pci_dev->dev,
5955 "error obtaining controller capabilities\n");
5956 return rc;
5957 }
5958
d727a776
KB
5959 if (reset_devices) {
5960 if (ctrl_info->max_outstanding_requests >
5961 PQI_MAX_OUTSTANDING_REQUESTS_KDUMP)
5962 ctrl_info->max_outstanding_requests =
5963 PQI_MAX_OUTSTANDING_REQUESTS_KDUMP;
5964 } else {
5965 if (ctrl_info->max_outstanding_requests >
5966 PQI_MAX_OUTSTANDING_REQUESTS)
5967 ctrl_info->max_outstanding_requests =
5968 PQI_MAX_OUTSTANDING_REQUESTS;
5969 }
6c223761
KB
5970
5971 pqi_calculate_io_resources(ctrl_info);
5972
5973 rc = pqi_alloc_error_buffer(ctrl_info);
5974 if (rc) {
5975 dev_err(&ctrl_info->pci_dev->dev,
5976 "failed to allocate PQI error buffer\n");
5977 return rc;
5978 }
5979
5980 /*
5981 * If the function we are about to call succeeds, the
5982 * controller will transition from legacy SIS mode
5983 * into PQI mode.
5984 */
5985 rc = sis_init_base_struct_addr(ctrl_info);
5986 if (rc) {
5987 dev_err(&ctrl_info->pci_dev->dev,
5988 "error initializing PQI mode\n");
5989 return rc;
5990 }
5991
5992 /* Wait for the controller to complete the SIS -> PQI transition. */
5993 rc = pqi_wait_for_pqi_mode_ready(ctrl_info);
5994 if (rc) {
5995 dev_err(&ctrl_info->pci_dev->dev,
5996 "transition to PQI mode failed\n");
5997 return rc;
5998 }
5999
6000 /* From here on, we are running in PQI mode. */
6001 ctrl_info->pqi_mode_enabled = true;
ff6abb73 6002 pqi_save_ctrl_mode(ctrl_info, PQI_MODE);
6c223761 6003
98f87667
KB
6004 rc = pqi_process_config_table(ctrl_info);
6005 if (rc)
6006 return rc;
6007
6c223761
KB
6008 rc = pqi_alloc_admin_queues(ctrl_info);
6009 if (rc) {
6010 dev_err(&ctrl_info->pci_dev->dev,
d87d5474 6011 "failed to allocate admin queues\n");
6c223761
KB
6012 return rc;
6013 }
6014
6015 rc = pqi_create_admin_queues(ctrl_info);
6016 if (rc) {
6017 dev_err(&ctrl_info->pci_dev->dev,
6018 "error creating admin queues\n");
6019 return rc;
6020 }
6021
6022 rc = pqi_report_device_capability(ctrl_info);
6023 if (rc) {
6024 dev_err(&ctrl_info->pci_dev->dev,
6025 "obtaining device capability failed\n");
6026 return rc;
6027 }
6028
6029 rc = pqi_validate_device_capability(ctrl_info);
6030 if (rc)
6031 return rc;
6032
6033 pqi_calculate_queue_resources(ctrl_info);
6034
6035 rc = pqi_enable_msix_interrupts(ctrl_info);
6036 if (rc)
6037 return rc;
6038
6039 if (ctrl_info->num_msix_vectors_enabled < ctrl_info->num_queue_groups) {
6040 ctrl_info->max_msix_vectors =
6041 ctrl_info->num_msix_vectors_enabled;
6042 pqi_calculate_queue_resources(ctrl_info);
6043 }
6044
6045 rc = pqi_alloc_io_resources(ctrl_info);
6046 if (rc)
6047 return rc;
6048
6049 rc = pqi_alloc_operational_queues(ctrl_info);
d87d5474
KB
6050 if (rc) {
6051 dev_err(&ctrl_info->pci_dev->dev,
6052 "failed to allocate operational queues\n");
6c223761 6053 return rc;
d87d5474 6054 }
6c223761
KB
6055
6056 pqi_init_operational_queues(ctrl_info);
6057
6058 rc = pqi_request_irqs(ctrl_info);
6059 if (rc)
6060 return rc;
6061
6c223761
KB
6062 rc = pqi_create_queues(ctrl_info);
6063 if (rc)
6064 return rc;
6065
061ef06a
KB
6066 pqi_change_irq_mode(ctrl_info, IRQ_MODE_MSIX);
6067
6068 ctrl_info->controller_online = true;
6069 pqi_start_heartbeat_timer(ctrl_info);
6c223761 6070
6a50d6ad 6071 rc = pqi_enable_events(ctrl_info);
6c223761
KB
6072 if (rc) {
6073 dev_err(&ctrl_info->pci_dev->dev,
6a50d6ad 6074 "error enabling events\n");
6c223761
KB
6075 return rc;
6076 }
6077
6c223761
KB
6078 /* Register with the SCSI subsystem. */
6079 rc = pqi_register_scsi(ctrl_info);
6080 if (rc)
6081 return rc;
6082
6083 rc = pqi_get_ctrl_firmware_version(ctrl_info);
6084 if (rc) {
6085 dev_err(&ctrl_info->pci_dev->dev,
6086 "error obtaining firmware version\n");
6087 return rc;
6088 }
6089
6090 rc = pqi_write_driver_version_to_host_wellness(ctrl_info);
6091 if (rc) {
6092 dev_err(&ctrl_info->pci_dev->dev,
6093 "error updating host wellness\n");
6094 return rc;
6095 }
6096
6097 pqi_schedule_update_time_worker(ctrl_info);
6098
6099 pqi_scan_scsi_devices(ctrl_info);
6100
6101 return 0;
6102}
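/*
 * pqi_ctrl_init() is the one-time bring-up sequence: force SIS mode,
 * size the I/O and queue resources from the reported capabilities,
 * switch the controller into PQI mode, create the admin and operational
 * queues, enable MSI-X interrupts and events, register with the SCSI
 * midlayer, and finally kick off the first device scan.
 */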
6103
061ef06a
KB
6104#if defined(CONFIG_PM)
6105
6106static void pqi_reinit_queues(struct pqi_ctrl_info *ctrl_info)
6107{
6108 unsigned int i;
6109 struct pqi_admin_queues *admin_queues;
6110 struct pqi_event_queue *event_queue;
6111
6112 admin_queues = &ctrl_info->admin_queues;
6113 admin_queues->iq_pi_copy = 0;
6114 admin_queues->oq_ci_copy = 0;
6115 *admin_queues->oq_pi = 0;
6116
6117 for (i = 0; i < ctrl_info->num_queue_groups; i++) {
6118 ctrl_info->queue_groups[i].iq_pi_copy[RAID_PATH] = 0;
6119 ctrl_info->queue_groups[i].iq_pi_copy[AIO_PATH] = 0;
6120 ctrl_info->queue_groups[i].oq_ci_copy = 0;
6121
6122 *ctrl_info->queue_groups[i].iq_ci[RAID_PATH] = 0;
6123 *ctrl_info->queue_groups[i].iq_ci[AIO_PATH] = 0;
6124 *ctrl_info->queue_groups[i].oq_pi = 0;
6125 }
6126
6127 event_queue = &ctrl_info->event_queue;
6128 *event_queue->oq_pi = 0;
6129 event_queue->oq_ci_copy = 0;
6130}
6131
6132static int pqi_ctrl_init_resume(struct pqi_ctrl_info *ctrl_info)
6133{
6134 int rc;
6135
6136 rc = pqi_force_sis_mode(ctrl_info);
6137 if (rc)
6138 return rc;
6139
6140 /*
6141 * Wait until the controller is ready to start accepting SIS
6142 * commands.
6143 */
6144 rc = sis_wait_for_ctrl_ready_resume(ctrl_info);
6145 if (rc)
6146 return rc;
6147
6148 /*
6149 * If the function we are about to call succeeds, the
6150 * controller will transition from legacy SIS mode
6151 * into PQI mode.
6152 */
6153 rc = sis_init_base_struct_addr(ctrl_info);
6154 if (rc) {
6155 dev_err(&ctrl_info->pci_dev->dev,
6156 "error initializing PQI mode\n");
6157 return rc;
6158 }
6159
6160 /* Wait for the controller to complete the SIS -> PQI transition. */
6161 rc = pqi_wait_for_pqi_mode_ready(ctrl_info);
6162 if (rc) {
6163 dev_err(&ctrl_info->pci_dev->dev,
6164 "transition to PQI mode failed\n");
6165 return rc;
6166 }
6167
6168 /* From here on, we are running in PQI mode. */
6169 ctrl_info->pqi_mode_enabled = true;
6170 pqi_save_ctrl_mode(ctrl_info, PQI_MODE);
6171
6172 pqi_reinit_queues(ctrl_info);
6173
6174 rc = pqi_create_admin_queues(ctrl_info);
6175 if (rc) {
6176 dev_err(&ctrl_info->pci_dev->dev,
6177 "error creating admin queues\n");
6178 return rc;
6179 }
6180
6181 rc = pqi_create_queues(ctrl_info);
6182 if (rc)
6183 return rc;
6184
6185 pqi_change_irq_mode(ctrl_info, IRQ_MODE_MSIX);
6186
6187 ctrl_info->controller_online = true;
6188 pqi_start_heartbeat_timer(ctrl_info);
6189 pqi_ctrl_unblock_requests(ctrl_info);
6190
6191 rc = pqi_enable_events(ctrl_info);
6192 if (rc) {
6193 dev_err(&ctrl_info->pci_dev->dev,
d87d5474 6194 "error enabling events\n");
061ef06a
KB
6195 return rc;
6196 }
6197
6198 rc = pqi_write_driver_version_to_host_wellness(ctrl_info);
6199 if (rc) {
6200 dev_err(&ctrl_info->pci_dev->dev,
6201 "error updating host wellness\n");
6202 return rc;
6203 }
6204
6205 pqi_schedule_update_time_worker(ctrl_info);
6206
6207 pqi_scan_scsi_devices(ctrl_info);
6208
6209 return 0;
6210}
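/*
 * The resume path mirrors pqi_ctrl_init() but reuses the memory that
 * was allocated at probe time: queue indices are reset by
 * pqi_reinit_queues() and the admin and operational queues are simply
 * re-created, with no new allocations, before requests are unblocked
 * and device scanning resumes.
 */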
6211
6212#endif /* CONFIG_PM */
6213
a81ed5f3
KB
6214static inline int pqi_set_pcie_completion_timeout(struct pci_dev *pci_dev,
6215 u16 timeout)
6216{
6217 return pcie_capability_clear_and_set_word(pci_dev, PCI_EXP_DEVCTL2,
6218 PCI_EXP_DEVCTL2_COMP_TIMEOUT, timeout);
6219}
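/*
 * The helper above rewrites only the Completion Timeout Value field of
 * the PCIe Device Control 2 register (PCI_EXP_DEVCTL2_COMP_TIMEOUT is
 * the mask of that field), leaving the other DEVCTL2 bits intact; the
 * caller below uses it to select the 65 ms - 210 ms timeout range.
 */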
6220
6c223761
KB
6221static int pqi_pci_init(struct pqi_ctrl_info *ctrl_info)
6222{
6223 int rc;
6224 u64 mask;
6225
6226 rc = pci_enable_device(ctrl_info->pci_dev);
6227 if (rc) {
6228 dev_err(&ctrl_info->pci_dev->dev,
6229 "failed to enable PCI device\n");
6230 return rc;
6231 }
6232
6233 if (sizeof(dma_addr_t) > 4)
6234 mask = DMA_BIT_MASK(64);
6235 else
6236 mask = DMA_BIT_MASK(32);
6237
6238 rc = dma_set_mask(&ctrl_info->pci_dev->dev, mask);
6239 if (rc) {
6240 dev_err(&ctrl_info->pci_dev->dev, "failed to set DMA mask\n");
6241 goto disable_device;
6242 }
6243
6244 rc = pci_request_regions(ctrl_info->pci_dev, DRIVER_NAME_SHORT);
6245 if (rc) {
6246 dev_err(&ctrl_info->pci_dev->dev,
6247 "failed to obtain PCI resources\n");
6248 goto disable_device;
6249 }
6250
6251 ctrl_info->iomem_base = ioremap_nocache(pci_resource_start(
6252 ctrl_info->pci_dev, 0),
6253 sizeof(struct pqi_ctrl_registers));
6254 if (!ctrl_info->iomem_base) {
6255 dev_err(&ctrl_info->pci_dev->dev,
6256 "failed to map memory for controller registers\n");
6257 rc = -ENOMEM;
6258 goto release_regions;
6259 }
6260
a81ed5f3
KB
6261#define PCI_EXP_COMP_TIMEOUT_65_TO_210_MS 0x6
6262
6263 /* Increase the PCIe completion timeout. */
6264 rc = pqi_set_pcie_completion_timeout(ctrl_info->pci_dev,
6265 PCI_EXP_COMP_TIMEOUT_65_TO_210_MS);
6266 if (rc) {
6267 dev_err(&ctrl_info->pci_dev->dev,
6268 "failed to set PCIe completion timeout\n");
6269 goto release_regions;
6270 }
6271
6c223761
KB
6272 /* Enable bus mastering. */
6273 pci_set_master(ctrl_info->pci_dev);
6274
cbe0c7b1
KB
6275 ctrl_info->registers = ctrl_info->iomem_base;
6276 ctrl_info->pqi_registers = &ctrl_info->registers->pqi_registers;
6277
6c223761
KB
6278 pci_set_drvdata(ctrl_info->pci_dev, ctrl_info);
6279
6280 return 0;
6281
6282release_regions:
6283 pci_release_regions(ctrl_info->pci_dev);
6284disable_device:
6285 pci_disable_device(ctrl_info->pci_dev);
6286
6287 return rc;
6288}
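/*
 * Note that pqi_pci_init() maps only sizeof(struct pqi_ctrl_registers)
 * bytes of BAR 0 rather than the whole region, chooses a 64-bit DMA
 * mask when dma_addr_t is wide enough (falling back to 32-bit
 * otherwise), and raises the PCIe completion timeout before enabling
 * bus mastering.
 */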
6289
6290static void pqi_cleanup_pci_init(struct pqi_ctrl_info *ctrl_info)
6291{
6292 iounmap(ctrl_info->iomem_base);
6293 pci_release_regions(ctrl_info->pci_dev);
cbe0c7b1
KB
6294 if (pci_is_enabled(ctrl_info->pci_dev))
6295 pci_disable_device(ctrl_info->pci_dev);
6c223761
KB
6296 pci_set_drvdata(ctrl_info->pci_dev, NULL);
6297}
6298
6299static struct pqi_ctrl_info *pqi_alloc_ctrl_info(int numa_node)
6300{
6301 struct pqi_ctrl_info *ctrl_info;
6302
6303 ctrl_info = kzalloc_node(sizeof(struct pqi_ctrl_info),
6304 GFP_KERNEL, numa_node);
6305 if (!ctrl_info)
6306 return NULL;
6307
6308 mutex_init(&ctrl_info->scan_mutex);
7561a7e4 6309 mutex_init(&ctrl_info->lun_reset_mutex);
6c223761
KB
6310
6311 INIT_LIST_HEAD(&ctrl_info->scsi_device_list);
6312 spin_lock_init(&ctrl_info->scsi_device_list_lock);
6313
6314 INIT_WORK(&ctrl_info->event_work, pqi_event_worker);
6315 atomic_set(&ctrl_info->num_interrupts, 0);
6316
6317 INIT_DELAYED_WORK(&ctrl_info->rescan_work, pqi_rescan_worker);
6318 INIT_DELAYED_WORK(&ctrl_info->update_time_work, pqi_update_time_worker);
6319
98f87667
KB
6320 init_timer(&ctrl_info->heartbeat_timer);
6321
6c223761
KB
6322 sema_init(&ctrl_info->sync_request_sem,
6323 PQI_RESERVED_IO_SLOTS_SYNCHRONOUS_REQUESTS);
7561a7e4 6324 init_waitqueue_head(&ctrl_info->block_requests_wait);
6c223761 6325
376fb880
KB
6326 INIT_LIST_HEAD(&ctrl_info->raid_bypass_retry_list);
6327 spin_lock_init(&ctrl_info->raid_bypass_retry_list_lock);
6328 INIT_WORK(&ctrl_info->raid_bypass_retry_work,
6329 pqi_raid_bypass_retry_worker);
6330
6c223761 6331 ctrl_info->ctrl_id = atomic_inc_return(&pqi_controller_count) - 1;
061ef06a 6332 ctrl_info->irq_mode = IRQ_MODE_NONE;
6c223761
KB
6333 ctrl_info->max_msix_vectors = PQI_MAX_MSIX_VECTORS;
6334
6335 return ctrl_info;
6336}
6337
6338static inline void pqi_free_ctrl_info(struct pqi_ctrl_info *ctrl_info)
6339{
6340 kfree(ctrl_info);
6341}
6342
6343static void pqi_free_interrupts(struct pqi_ctrl_info *ctrl_info)
6344{
98bf061b
KB
6345 pqi_free_irqs(ctrl_info);
6346 pqi_disable_msix_interrupts(ctrl_info);
6c223761
KB
6347}
6348
6349static void pqi_free_ctrl_resources(struct pqi_ctrl_info *ctrl_info)
6350{
6351 pqi_stop_heartbeat_timer(ctrl_info);
6352 pqi_free_interrupts(ctrl_info);
6353 if (ctrl_info->queue_memory_base)
6354 dma_free_coherent(&ctrl_info->pci_dev->dev,
6355 ctrl_info->queue_memory_length,
6356 ctrl_info->queue_memory_base,
6357 ctrl_info->queue_memory_base_dma_handle);
6358 if (ctrl_info->admin_queue_memory_base)
6359 dma_free_coherent(&ctrl_info->pci_dev->dev,
6360 ctrl_info->admin_queue_memory_length,
6361 ctrl_info->admin_queue_memory_base,
6362 ctrl_info->admin_queue_memory_base_dma_handle);
6363 pqi_free_all_io_requests(ctrl_info);
6364 if (ctrl_info->error_buffer)
6365 dma_free_coherent(&ctrl_info->pci_dev->dev,
6366 ctrl_info->error_buffer_length,
6367 ctrl_info->error_buffer,
6368 ctrl_info->error_buffer_dma_handle);
6369 if (ctrl_info->iomem_base)
6370 pqi_cleanup_pci_init(ctrl_info);
6371 pqi_free_ctrl_info(ctrl_info);
6372}
6373
6374static void pqi_remove_ctrl(struct pqi_ctrl_info *ctrl_info)
6375{
061ef06a
KB
6376 pqi_cancel_rescan_worker(ctrl_info);
6377 pqi_cancel_update_time_worker(ctrl_info);
e57a1f9b
KB
6378 pqi_remove_all_scsi_devices(ctrl_info);
6379 pqi_unregister_scsi(ctrl_info);
162d7753
KB
6380 if (ctrl_info->pqi_mode_enabled)
6381 pqi_revert_to_sis_mode(ctrl_info);
6c223761
KB
6382 pqi_free_ctrl_resources(ctrl_info);
6383}
6384
3c50976f
KB
6385static void pqi_perform_lockup_action(void)
6386{
6387 switch (pqi_lockup_action) {
6388 case PANIC:
6389 panic("FATAL: Smart Family Controller lockup detected");
6390 break;
6391 case REBOOT:
6392 emergency_restart();
6393 break;
6394 case NONE:
6395 default:
6396 break;
6397 }
6398}
6399
376fb880
KB
6400static void pqi_complete_all_queued_requests(struct pqi_ctrl_info *ctrl_info,
6401 int result)
6402{
6403 unsigned int i;
6404 unsigned int path;
6405 struct pqi_queue_group *queue_group;
6406 unsigned long flags;
6407 struct pqi_io_request *io_request;
6408 struct pqi_io_request *next;
6409 struct scsi_cmnd *scmd;
6410
6411 for (i = 0; i < ctrl_info->num_queue_groups; i++) {
6412 queue_group = &ctrl_info->queue_groups[i];
6413
6414 for (path = 0; path < 2; path++) {
6415 spin_lock_irqsave(
6416 &queue_group->submit_lock[path], flags);
6417
6418 list_for_each_entry_safe(io_request, next,
6419 &queue_group->request_list[path],
6420 request_list_entry) {
6421
6422 scmd = io_request->scmd;
6423 if (scmd) {
6424 scmd->result = result;
6425 pqi_scsi_done(scmd);
6426 }
6427
6428 list_del(&io_request->request_list_entry);
6429 }
6430
6431 spin_unlock_irqrestore(
6432 &queue_group->submit_lock[path], flags);
6433 }
6434 }
6435}
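/*
 * pqi_complete_all_queued_requests() walks both the RAID and AIO submit
 * lists of every queue group under the corresponding submit_lock,
 * completing any queued SCSI command with the supplied result and
 * unlinking the request; the offline path below uses it to fail
 * everything that never reached the hardware with DID_NO_CONNECT.
 */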
6436
6437static void pqi_fail_all_queued_requests(struct pqi_ctrl_info *ctrl_info)
6438{
6439 pqi_complete_all_queued_requests(ctrl_info, DID_NO_CONNECT << 16);
6440 pqi_complete_all_queued_raid_bypass_retries(ctrl_info,
6441 DID_NO_CONNECT << 16);
6442}
6443
6444static void pqi_take_ctrl_offline(struct pqi_ctrl_info *ctrl_info)
6445{
6446 ctrl_info->controller_online = false;
6447 sis_shutdown_ctrl(ctrl_info);
6448 pci_disable_device(ctrl_info->pci_dev);
6449 dev_err(&ctrl_info->pci_dev->dev, "controller offline\n");
6450 pqi_perform_lockup_action();
6451 pqi_fail_all_queued_requests(ctrl_info);
6452}
6453
d91d7820 6454static void pqi_print_ctrl_info(struct pci_dev *pci_dev,
6c223761
KB
6455 const struct pci_device_id *id)
6456{
6457 char *ctrl_description;
6458
6459 if (id->driver_data) {
6460 ctrl_description = (char *)id->driver_data;
6461 } else {
6462 switch (id->subvendor) {
6463 case PCI_VENDOR_ID_HP:
6464 ctrl_description = hpe_branded_controller;
6465 break;
6466 case PCI_VENDOR_ID_ADAPTEC2:
6467 default:
6468 ctrl_description = microsemi_branded_controller;
6469 break;
6470 }
6471 }
6472
d91d7820 6473 dev_info(&pci_dev->dev, "%s found\n", ctrl_description);
6c223761
KB
6474}
6475
d91d7820
KB
6476static int pqi_pci_probe(struct pci_dev *pci_dev,
6477 const struct pci_device_id *id)
6c223761
KB
6478{
6479 int rc;
6480 int node;
6481 struct pqi_ctrl_info *ctrl_info;
6482
d91d7820 6483 pqi_print_ctrl_info(pci_dev, id);
6c223761
KB
6484
6485 if (pqi_disable_device_id_wildcards &&
6486 id->subvendor == PCI_ANY_ID &&
6487 id->subdevice == PCI_ANY_ID) {
d91d7820 6488 dev_warn(&pci_dev->dev,
6c223761
KB
6489 "controller not probed because device ID wildcards are disabled\n");
6490 return -ENODEV;
6491 }
6492
6493 if (id->subvendor == PCI_ANY_ID || id->subdevice == PCI_ANY_ID)
d91d7820 6494 dev_warn(&pci_dev->dev,
6c223761
KB
6495 "controller device ID matched using wildcards\n");
6496
d91d7820 6497 node = dev_to_node(&pci_dev->dev);
6c223761 6498 if (node == NUMA_NO_NODE)
d91d7820 6499 set_dev_node(&pci_dev->dev, 0);
6c223761
KB
6500
6501 ctrl_info = pqi_alloc_ctrl_info(node);
6502 if (!ctrl_info) {
d91d7820 6503 dev_err(&pci_dev->dev,
6c223761
KB
6504 "failed to allocate controller info block\n");
6505 return -ENOMEM;
6506 }
6507
d91d7820 6508 ctrl_info->pci_dev = pci_dev;
6c223761
KB
6509
6510 rc = pqi_pci_init(ctrl_info);
6511 if (rc)
6512 goto error;
6513
6514 rc = pqi_ctrl_init(ctrl_info);
6515 if (rc)
6516 goto error;
6517
6518 return 0;
6519
6520error:
6521 pqi_remove_ctrl(ctrl_info);
6522
6523 return rc;
6524}
6525
d91d7820 6526static void pqi_pci_remove(struct pci_dev *pci_dev)
6c223761
KB
6527{
6528 struct pqi_ctrl_info *ctrl_info;
6529
d91d7820 6530 ctrl_info = pci_get_drvdata(pci_dev);
6c223761
KB
6531 if (!ctrl_info)
6532 return;
6533
6534 pqi_remove_ctrl(ctrl_info);
6535}
6536
d91d7820 6537static void pqi_shutdown(struct pci_dev *pci_dev)
6c223761
KB
6538{
6539 int rc;
6540 struct pqi_ctrl_info *ctrl_info;
6541
d91d7820 6542 ctrl_info = pci_get_drvdata(pci_dev);
6c223761
KB
6543 if (!ctrl_info)
6544 goto error;
6545
6546 /*
6547 * Write all data in the controller's battery-backed cache to
6548 * storage.
6549 */
6550 rc = pqi_flush_cache(ctrl_info);
6551 if (rc == 0)
6552 return;
6553
6554error:
d91d7820 6555 dev_warn(&pci_dev->dev,
6c223761
KB
6556 "unable to flush controller cache\n");
6557}
6558
3c50976f
KB
6559static void pqi_process_lockup_action_param(void)
6560{
6561 unsigned int i;
6562
6563 if (!pqi_lockup_action_param)
6564 return;
6565
6566 for (i = 0; i < ARRAY_SIZE(pqi_lockup_actions); i++) {
6567 if (strcmp(pqi_lockup_action_param,
6568 pqi_lockup_actions[i].name) == 0) {
6569 pqi_lockup_action = pqi_lockup_actions[i].action;
6570 return;
6571 }
6572 }
6573
6574 pr_warn("%s: invalid lockup action setting \"%s\" - supported settings: none, reboot, panic\n",
6575 DRIVER_NAME_SHORT, pqi_lockup_action_param);
6576}
6577
6578static void pqi_process_module_params(void)
6579{
6580 pqi_process_lockup_action_param();
6581}
6582
061ef06a
KB
6583#if defined(CONFIG_PM)
6584
6585static int pqi_suspend(struct pci_dev *pci_dev, pm_message_t state)
6586{
6587 struct pqi_ctrl_info *ctrl_info;
6588
6589 ctrl_info = pci_get_drvdata(pci_dev);
6590
6591 pqi_disable_events(ctrl_info);
6592 pqi_cancel_update_time_worker(ctrl_info);
6593 pqi_cancel_rescan_worker(ctrl_info);
6594 pqi_wait_until_scan_finished(ctrl_info);
6595 pqi_wait_until_lun_reset_finished(ctrl_info);
6596 pqi_flush_cache(ctrl_info);
6597 pqi_ctrl_block_requests(ctrl_info);
6598 pqi_ctrl_wait_until_quiesced(ctrl_info);
6599 pqi_wait_until_inbound_queues_empty(ctrl_info);
6600 pqi_ctrl_wait_for_pending_io(ctrl_info);
6601 pqi_stop_heartbeat_timer(ctrl_info);
6602
6603 if (state.event == PM_EVENT_FREEZE)
6604 return 0;
6605
6606 pci_save_state(pci_dev);
6607 pci_set_power_state(pci_dev, pci_choose_state(pci_dev, state));
6608
6609 ctrl_info->controller_online = false;
6610 ctrl_info->pqi_mode_enabled = false;
6611
6612 return 0;
6613}
6614
6615static int pqi_resume(struct pci_dev *pci_dev)
6616{
6617 int rc;
6618 struct pqi_ctrl_info *ctrl_info;
6619
6620 ctrl_info = pci_get_drvdata(pci_dev);
6621
6622 if (pci_dev->current_state != PCI_D0) {
6623 ctrl_info->max_hw_queue_index = 0;
6624 pqi_free_interrupts(ctrl_info);
6625 pqi_change_irq_mode(ctrl_info, IRQ_MODE_INTX);
6626 rc = request_irq(pci_irq_vector(pci_dev, 0), pqi_irq_handler,
6627 IRQF_SHARED, DRIVER_NAME_SHORT,
6628 &ctrl_info->queue_groups[0]);
6629 if (rc) {
6630 dev_err(&ctrl_info->pci_dev->dev,
6631 "irq %u init failed with error %d\n",
6632 pci_dev->irq, rc);
6633 return rc;
6634 }
6635 pqi_start_heartbeat_timer(ctrl_info);
6636 pqi_ctrl_unblock_requests(ctrl_info);
6637 return 0;
6638 }
6639
6640 pci_set_power_state(pci_dev, PCI_D0);
6641 pci_restore_state(pci_dev);
6642
6643 return pqi_ctrl_init_resume(ctrl_info);
6644}
6645
6646#endif /* CONFIG_PM */
6647
6c223761
KB
6648/* Define the PCI IDs for the controllers that we support. */
6649static const struct pci_device_id pqi_pci_id_table[] = {
7eddabff
KB
6650 {
6651 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
6652 0x152d, 0x8a22)
6653 },
6654 {
6655 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
6656 0x152d, 0x8a23)
6657 },
6658 {
6659 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
6660 0x152d, 0x8a24)
6661 },
6662 {
6663 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
6664 0x152d, 0x8a36)
6665 },
6666 {
6667 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
6668 0x152d, 0x8a37)
6669 },
6c223761
KB
6670 {
6671 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
6672 PCI_VENDOR_ID_ADAPTEC2, 0x0110)
6673 },
6674 {
6675 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
7eddabff 6676 PCI_VENDOR_ID_ADAPTEC2, 0x0605)
6c223761
KB
6677 },
6678 {
6679 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
7eddabff 6680 PCI_VENDOR_ID_ADAPTEC2, 0x0800)
6c223761
KB
6681 },
6682 {
6683 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
7eddabff 6684 PCI_VENDOR_ID_ADAPTEC2, 0x0801)
6c223761
KB
6685 },
6686 {
6687 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
7eddabff 6688 PCI_VENDOR_ID_ADAPTEC2, 0x0802)
6c223761
KB
6689 },
6690 {
6691 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
7eddabff 6692 PCI_VENDOR_ID_ADAPTEC2, 0x0803)
6c223761
KB
6693 },
6694 {
6695 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
7eddabff 6696 PCI_VENDOR_ID_ADAPTEC2, 0x0804)
6c223761
KB
6697 },
6698 {
6699 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
7eddabff 6700 PCI_VENDOR_ID_ADAPTEC2, 0x0805)
6c223761
KB
6701 },
6702 {
6703 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
7eddabff 6704 PCI_VENDOR_ID_ADAPTEC2, 0x0806)
6c223761
KB
6705 },
6706 {
6707 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
7eddabff 6708 PCI_VENDOR_ID_ADAPTEC2, 0x0900)
6c223761
KB
6709 },
6710 {
6711 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
7eddabff 6712 PCI_VENDOR_ID_ADAPTEC2, 0x0901)
6c223761
KB
6713 },
6714 {
6715 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
7eddabff 6716 PCI_VENDOR_ID_ADAPTEC2, 0x0902)
6c223761
KB
6717 },
6718 {
6719 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
7eddabff 6720 PCI_VENDOR_ID_ADAPTEC2, 0x0903)
6c223761
KB
6721 },
6722 {
6723 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
7eddabff 6724 PCI_VENDOR_ID_ADAPTEC2, 0x0904)
6c223761
KB
6725 },
6726 {
6727 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
7eddabff 6728 PCI_VENDOR_ID_ADAPTEC2, 0x0905)
6c223761
KB
6729 },
6730 {
6731 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
7eddabff 6732 PCI_VENDOR_ID_ADAPTEC2, 0x0906)
6c223761
KB
6733 },
6734 {
6735 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
7eddabff 6736 PCI_VENDOR_ID_ADAPTEC2, 0x0907)
6c223761
KB
6737 },
6738 {
6739 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
7eddabff 6740 PCI_VENDOR_ID_ADAPTEC2, 0x0908)
6c223761
KB
6741 },
6742 {
6743 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
7eddabff 6744 PCI_VENDOR_ID_ADAPTEC2, 0x1200)
6c223761
KB
6745 },
6746 {
6747 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
7eddabff 6748 PCI_VENDOR_ID_ADAPTEC2, 0x1201)
6c223761
KB
6749 },
6750 {
6751 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
7eddabff 6752 PCI_VENDOR_ID_ADAPTEC2, 0x1202)
6c223761
KB
6753 },
6754 {
6755 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
7eddabff 6756 PCI_VENDOR_ID_ADAPTEC2, 0x1280)
6c223761
KB
6757 },
6758 {
6759 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
7eddabff 6760 PCI_VENDOR_ID_ADAPTEC2, 0x1281)
6c223761
KB
6761 },
6762 {
6763 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
7eddabff 6764 PCI_VENDOR_ID_ADAPTEC2, 0x1300)
6c223761
KB
6765 },
6766 {
6767 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
7eddabff 6768 PCI_VENDOR_ID_ADAPTEC2, 0x1301)
6c223761
KB
6769 },
6770 {
6771 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
7eddabff
KB
6772 PCI_VENDOR_ID_ADAPTEC2, 0x1380)
6773 },
6774 {
6775 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
6776 PCI_VENDOR_ID_HP, 0x0600)
6777 },
6778 {
6779 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
6780 PCI_VENDOR_ID_HP, 0x0601)
6781 },
6782 {
6783 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
6784 PCI_VENDOR_ID_HP, 0x0602)
6785 },
6786 {
6787 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
6788 PCI_VENDOR_ID_HP, 0x0603)
6789 },
6790 {
6791 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
6792 PCI_VENDOR_ID_HP, 0x0604)
6793 },
6794 {
6795 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
6796 PCI_VENDOR_ID_HP, 0x0606)
6797 },
6798 {
6799 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
6800 PCI_VENDOR_ID_HP, 0x0650)
6801 },
6802 {
6803 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
6804 PCI_VENDOR_ID_HP, 0x0651)
6805 },
6806 {
6807 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
6808 PCI_VENDOR_ID_HP, 0x0652)
6809 },
6810 {
6811 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
6812 PCI_VENDOR_ID_HP, 0x0653)
6813 },
6814 {
6815 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
6816 PCI_VENDOR_ID_HP, 0x0654)
6817 },
6818 {
6819 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
6820 PCI_VENDOR_ID_HP, 0x0655)
6821 },
6822 {
6823 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
6824 PCI_VENDOR_ID_HP, 0x0656)
6825 },
6826 {
6827 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
6828 PCI_VENDOR_ID_HP, 0x0657)
6829 },
6830 {
6831 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
6832 PCI_VENDOR_ID_HP, 0x0700)
6833 },
6834 {
6835 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
6836 PCI_VENDOR_ID_HP, 0x0701)
6c223761
KB
6837 },
6838 {
6839 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
6840 PCI_VENDOR_ID_HP, 0x1001)
6841 },
6842 {
6843 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
6844 PCI_VENDOR_ID_HP, 0x1100)
6845 },
6846 {
6847 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
6848 PCI_VENDOR_ID_HP, 0x1101)
6849 },
6850 {
6851 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
6852 PCI_VENDOR_ID_HP, 0x1102)
6853 },
6854 {
6855 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
6856 PCI_VENDOR_ID_HP, 0x1150)
6857 },
6858 {
6859 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
6860 PCI_ANY_ID, PCI_ANY_ID)
6861 },
6862 { 0 }
6863};
6864
6865MODULE_DEVICE_TABLE(pci, pqi_pci_id_table);
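/*
 * The table above ends with a PCI_ANY_ID/PCI_ANY_ID wildcard so that
 * unlisted subsystem IDs on the 0x028f device are still claimed;
 * pqi_pci_probe() warns when a controller matches only through the
 * wildcard and refuses such matches entirely when the
 * pqi_disable_device_id_wildcards flag is set.
 */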
6866
6867static struct pci_driver pqi_pci_driver = {
6868 .name = DRIVER_NAME_SHORT,
6869 .id_table = pqi_pci_id_table,
6870 .probe = pqi_pci_probe,
6871 .remove = pqi_pci_remove,
6872 .shutdown = pqi_shutdown,
061ef06a
KB
6873#if defined(CONFIG_PM)
6874 .suspend = pqi_suspend,
6875 .resume = pqi_resume,
6876#endif
6c223761
KB
6877};
6878
6879static int __init pqi_init(void)
6880{
6881 int rc;
6882
6883 pr_info(DRIVER_NAME "\n");
6884
6885 pqi_sas_transport_template =
6886 sas_attach_transport(&pqi_sas_transport_functions);
6887 if (!pqi_sas_transport_template)
6888 return -ENODEV;
6889
3c50976f
KB
6890 pqi_process_module_params();
6891
6c223761
KB
6892 rc = pci_register_driver(&pqi_pci_driver);
6893 if (rc)
6894 sas_release_transport(pqi_sas_transport_template);
6895
6896 return rc;
6897}
6898
6899static void __exit pqi_cleanup(void)
6900{
6901 pci_unregister_driver(&pqi_pci_driver);
6902 sas_release_transport(pqi_sas_transport_template);
6903}
6904
6905module_init(pqi_init);
6906module_exit(pqi_cleanup);
6907
6908static void __attribute__((unused)) verify_structures(void)
6909{
6910 BUILD_BUG_ON(offsetof(struct pqi_ctrl_registers,
6911 sis_host_to_ctrl_doorbell) != 0x20);
6912 BUILD_BUG_ON(offsetof(struct pqi_ctrl_registers,
6913 sis_interrupt_mask) != 0x34);
6914 BUILD_BUG_ON(offsetof(struct pqi_ctrl_registers,
6915 sis_ctrl_to_host_doorbell) != 0x9c);
6916 BUILD_BUG_ON(offsetof(struct pqi_ctrl_registers,
6917 sis_ctrl_to_host_doorbell_clear) != 0xa0);
ff6abb73
KB
6918 BUILD_BUG_ON(offsetof(struct pqi_ctrl_registers,
6919 sis_driver_scratch) != 0xb0);
6c223761
KB
6920 BUILD_BUG_ON(offsetof(struct pqi_ctrl_registers,
6921 sis_firmware_status) != 0xbc);
6922 BUILD_BUG_ON(offsetof(struct pqi_ctrl_registers,
6923 sis_mailbox) != 0x1000);
6924 BUILD_BUG_ON(offsetof(struct pqi_ctrl_registers,
6925 pqi_registers) != 0x4000);
6926
6927 BUILD_BUG_ON(offsetof(struct pqi_iu_header,
6928 iu_type) != 0x0);
6929 BUILD_BUG_ON(offsetof(struct pqi_iu_header,
6930 iu_length) != 0x2);
6931 BUILD_BUG_ON(offsetof(struct pqi_iu_header,
6932 response_queue_id) != 0x4);
6933 BUILD_BUG_ON(offsetof(struct pqi_iu_header,
6934 work_area) != 0x6);
6935 BUILD_BUG_ON(sizeof(struct pqi_iu_header) != 0x8);
6936
6937 BUILD_BUG_ON(offsetof(struct pqi_aio_error_info,
6938 status) != 0x0);
6939 BUILD_BUG_ON(offsetof(struct pqi_aio_error_info,
6940 service_response) != 0x1);
6941 BUILD_BUG_ON(offsetof(struct pqi_aio_error_info,
6942 data_present) != 0x2);
6943 BUILD_BUG_ON(offsetof(struct pqi_aio_error_info,
6944 reserved) != 0x3);
6945 BUILD_BUG_ON(offsetof(struct pqi_aio_error_info,
6946 residual_count) != 0x4);
6947 BUILD_BUG_ON(offsetof(struct pqi_aio_error_info,
6948 data_length) != 0x8);
6949 BUILD_BUG_ON(offsetof(struct pqi_aio_error_info,
6950 reserved1) != 0xa);
6951 BUILD_BUG_ON(offsetof(struct pqi_aio_error_info,
6952 data) != 0xc);
6953 BUILD_BUG_ON(sizeof(struct pqi_aio_error_info) != 0x10c);
6954
6955 BUILD_BUG_ON(offsetof(struct pqi_raid_error_info,
6956 data_in_result) != 0x0);
6957 BUILD_BUG_ON(offsetof(struct pqi_raid_error_info,
6958 data_out_result) != 0x1);
6959 BUILD_BUG_ON(offsetof(struct pqi_raid_error_info,
6960 reserved) != 0x2);
6961 BUILD_BUG_ON(offsetof(struct pqi_raid_error_info,
6962 status) != 0x5);
6963 BUILD_BUG_ON(offsetof(struct pqi_raid_error_info,
6964 status_qualifier) != 0x6);
6965 BUILD_BUG_ON(offsetof(struct pqi_raid_error_info,
6966 sense_data_length) != 0x8);
6967 BUILD_BUG_ON(offsetof(struct pqi_raid_error_info,
6968 response_data_length) != 0xa);
6969 BUILD_BUG_ON(offsetof(struct pqi_raid_error_info,
6970 data_in_transferred) != 0xc);
6971 BUILD_BUG_ON(offsetof(struct pqi_raid_error_info,
6972 data_out_transferred) != 0x10);
6973 BUILD_BUG_ON(offsetof(struct pqi_raid_error_info,
6974 data) != 0x14);
6975 BUILD_BUG_ON(sizeof(struct pqi_raid_error_info) != 0x114);
6976
6977 BUILD_BUG_ON(offsetof(struct pqi_device_registers,
6978 signature) != 0x0);
6979 BUILD_BUG_ON(offsetof(struct pqi_device_registers,
6980 function_and_status_code) != 0x8);
6981 BUILD_BUG_ON(offsetof(struct pqi_device_registers,
6982 max_admin_iq_elements) != 0x10);
6983 BUILD_BUG_ON(offsetof(struct pqi_device_registers,
6984 max_admin_oq_elements) != 0x11);
6985 BUILD_BUG_ON(offsetof(struct pqi_device_registers,
6986 admin_iq_element_length) != 0x12);
6987 BUILD_BUG_ON(offsetof(struct pqi_device_registers,
6988 admin_oq_element_length) != 0x13);
6989 BUILD_BUG_ON(offsetof(struct pqi_device_registers,
6990 max_reset_timeout) != 0x14);
6991 BUILD_BUG_ON(offsetof(struct pqi_device_registers,
6992 legacy_intx_status) != 0x18);
6993 BUILD_BUG_ON(offsetof(struct pqi_device_registers,
6994 legacy_intx_mask_set) != 0x1c);
6995 BUILD_BUG_ON(offsetof(struct pqi_device_registers,
6996 legacy_intx_mask_clear) != 0x20);
6997 BUILD_BUG_ON(offsetof(struct pqi_device_registers,
6998 device_status) != 0x40);
6999 BUILD_BUG_ON(offsetof(struct pqi_device_registers,
7000 admin_iq_pi_offset) != 0x48);
7001 BUILD_BUG_ON(offsetof(struct pqi_device_registers,
7002 admin_oq_ci_offset) != 0x50);
7003 BUILD_BUG_ON(offsetof(struct pqi_device_registers,
7004 admin_iq_element_array_addr) != 0x58);
7005 BUILD_BUG_ON(offsetof(struct pqi_device_registers,
7006 admin_oq_element_array_addr) != 0x60);
7007 BUILD_BUG_ON(offsetof(struct pqi_device_registers,
7008 admin_iq_ci_addr) != 0x68);
7009 BUILD_BUG_ON(offsetof(struct pqi_device_registers,
7010 admin_oq_pi_addr) != 0x70);
7011 BUILD_BUG_ON(offsetof(struct pqi_device_registers,
7012 admin_iq_num_elements) != 0x78);
7013 BUILD_BUG_ON(offsetof(struct pqi_device_registers,
7014 admin_oq_num_elements) != 0x79);
7015 BUILD_BUG_ON(offsetof(struct pqi_device_registers,
7016 admin_queue_int_msg_num) != 0x7a);
7017 BUILD_BUG_ON(offsetof(struct pqi_device_registers,
7018 device_error) != 0x80);
7019 BUILD_BUG_ON(offsetof(struct pqi_device_registers,
7020 error_details) != 0x88);
7021 BUILD_BUG_ON(offsetof(struct pqi_device_registers,
7022 device_reset) != 0x90);
7023 BUILD_BUG_ON(offsetof(struct pqi_device_registers,
7024 power_action) != 0x94);
7025 BUILD_BUG_ON(sizeof(struct pqi_device_registers) != 0x100);
7026
7027 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
7028 header.iu_type) != 0);
7029 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
7030 header.iu_length) != 2);
7031 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
7032 header.work_area) != 6);
7033 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
7034 request_id) != 8);
7035 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
7036 function_code) != 10);
7037 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
7038 data.report_device_capability.buffer_length) != 44);
7039 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
7040 data.report_device_capability.sg_descriptor) != 48);
7041 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
7042 data.create_operational_iq.queue_id) != 12);
7043 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
7044 data.create_operational_iq.element_array_addr) != 16);
7045 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
7046 data.create_operational_iq.ci_addr) != 24);
7047 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
7048 data.create_operational_iq.num_elements) != 32);
7049 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
7050 data.create_operational_iq.element_length) != 34);
7051 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
7052 data.create_operational_iq.queue_protocol) != 36);
7053 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
7054 data.create_operational_oq.queue_id) != 12);
7055 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
7056 data.create_operational_oq.element_array_addr) != 16);
7057 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
7058 data.create_operational_oq.pi_addr) != 24);
7059 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
7060 data.create_operational_oq.num_elements) != 32);
7061 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
7062 data.create_operational_oq.element_length) != 34);
7063 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
7064 data.create_operational_oq.queue_protocol) != 36);
7065 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
7066 data.create_operational_oq.int_msg_num) != 40);
7067 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
7068 data.create_operational_oq.coalescing_count) != 42);
7069 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
7070 data.create_operational_oq.min_coalescing_time) != 44);
7071 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
7072 data.create_operational_oq.max_coalescing_time) != 48);
7073 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
7074 data.delete_operational_queue.queue_id) != 12);
7075 BUILD_BUG_ON(sizeof(struct pqi_general_admin_request) != 64);
7076 BUILD_BUG_ON(FIELD_SIZEOF(struct pqi_general_admin_request,
7077 data.create_operational_iq) != 64 - 11);
7078 BUILD_BUG_ON(FIELD_SIZEOF(struct pqi_general_admin_request,
7079 data.create_operational_oq) != 64 - 11);
7080 BUILD_BUG_ON(FIELD_SIZEOF(struct pqi_general_admin_request,
7081 data.delete_operational_queue) != 64 - 11);
7082
7083 BUILD_BUG_ON(offsetof(struct pqi_general_admin_response,
7084 header.iu_type) != 0);
7085 BUILD_BUG_ON(offsetof(struct pqi_general_admin_response,
7086 header.iu_length) != 2);
7087 BUILD_BUG_ON(offsetof(struct pqi_general_admin_response,
7088 header.work_area) != 6);
7089 BUILD_BUG_ON(offsetof(struct pqi_general_admin_response,
7090 request_id) != 8);
7091 BUILD_BUG_ON(offsetof(struct pqi_general_admin_response,
7092 function_code) != 10);
7093 BUILD_BUG_ON(offsetof(struct pqi_general_admin_response,
7094 status) != 11);
7095 BUILD_BUG_ON(offsetof(struct pqi_general_admin_response,
7096 data.create_operational_iq.status_descriptor) != 12);
7097 BUILD_BUG_ON(offsetof(struct pqi_general_admin_response,
7098 data.create_operational_iq.iq_pi_offset) != 16);
7099 BUILD_BUG_ON(offsetof(struct pqi_general_admin_response,
7100 data.create_operational_oq.status_descriptor) != 12);
7101 BUILD_BUG_ON(offsetof(struct pqi_general_admin_response,
7102 data.create_operational_oq.oq_ci_offset) != 16);
7103 BUILD_BUG_ON(sizeof(struct pqi_general_admin_response) != 64);
7104
7105 BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
7106 header.iu_type) != 0);
7107 BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
7108 header.iu_length) != 2);
7109 BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
7110 header.response_queue_id) != 4);
7111 BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
7112 header.work_area) != 6);
7113 BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
7114 request_id) != 8);
7115 BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
7116 nexus_id) != 10);
7117 BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
7118 buffer_length) != 12);
7119 BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
7120 lun_number) != 16);
7121 BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
7122 protocol_specific) != 24);
7123 BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
7124 error_index) != 27);
7125 BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
7126 cdb) != 32);
7127 BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
7128 sg_descriptors) != 64);
7129 BUILD_BUG_ON(sizeof(struct pqi_raid_path_request) !=
7130 PQI_OPERATIONAL_IQ_ELEMENT_LENGTH);
7131
7132 BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
7133 header.iu_type) != 0);
7134 BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
7135 header.iu_length) != 2);
7136 BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
7137 header.response_queue_id) != 4);
7138 BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
7139 header.work_area) != 6);
7140 BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
7141 request_id) != 8);
7142 BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
7143 nexus_id) != 12);
7144 BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
7145 buffer_length) != 16);
7146 BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
7147 data_encryption_key_index) != 22);
7148 BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
7149 encrypt_tweak_lower) != 24);
7150 BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
7151 encrypt_tweak_upper) != 28);
7152 BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
7153 cdb) != 32);
7154 BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
7155 error_index) != 48);
7156 BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
7157 num_sg_descriptors) != 50);
7158 BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
7159 cdb_length) != 51);
7160 BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
7161 lun_number) != 52);
7162 BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
7163 sg_descriptors) != 64);
7164 BUILD_BUG_ON(sizeof(struct pqi_aio_path_request) !=
7165 PQI_OPERATIONAL_IQ_ELEMENT_LENGTH);
7166
7167 BUILD_BUG_ON(offsetof(struct pqi_io_response,
7168 header.iu_type) != 0);
7169 BUILD_BUG_ON(offsetof(struct pqi_io_response,
7170 header.iu_length) != 2);
7171 BUILD_BUG_ON(offsetof(struct pqi_io_response,
7172 request_id) != 8);
7173 BUILD_BUG_ON(offsetof(struct pqi_io_response,
7174 error_index) != 10);
7175
7176 BUILD_BUG_ON(offsetof(struct pqi_general_management_request,
7177 header.iu_type) != 0);
7178 BUILD_BUG_ON(offsetof(struct pqi_general_management_request,
7179 header.iu_length) != 2);
7180 BUILD_BUG_ON(offsetof(struct pqi_general_management_request,
7181 header.response_queue_id) != 4);
7182 BUILD_BUG_ON(offsetof(struct pqi_general_management_request,
7183 request_id) != 8);
7184 BUILD_BUG_ON(offsetof(struct pqi_general_management_request,
7185 data.report_event_configuration.buffer_length) != 12);
7186 BUILD_BUG_ON(offsetof(struct pqi_general_management_request,
7187 data.report_event_configuration.sg_descriptors) != 16);
7188 BUILD_BUG_ON(offsetof(struct pqi_general_management_request,
7189 data.set_event_configuration.global_event_oq_id) != 10);
7190 BUILD_BUG_ON(offsetof(struct pqi_general_management_request,
7191 data.set_event_configuration.buffer_length) != 12);
7192 BUILD_BUG_ON(offsetof(struct pqi_general_management_request,
7193 data.set_event_configuration.sg_descriptors) != 16);
7194
7195 BUILD_BUG_ON(offsetof(struct pqi_iu_layer_descriptor,
7196 max_inbound_iu_length) != 6);
7197 BUILD_BUG_ON(offsetof(struct pqi_iu_layer_descriptor,
7198 max_outbound_iu_length) != 14);
7199 BUILD_BUG_ON(sizeof(struct pqi_iu_layer_descriptor) != 16);
7200
7201 BUILD_BUG_ON(offsetof(struct pqi_device_capability,
7202 data_length) != 0);
7203 BUILD_BUG_ON(offsetof(struct pqi_device_capability,
7204 iq_arbitration_priority_support_bitmask) != 8);
7205 BUILD_BUG_ON(offsetof(struct pqi_device_capability,
7206 maximum_aw_a) != 9);
7207 BUILD_BUG_ON(offsetof(struct pqi_device_capability,
7208 maximum_aw_b) != 10);
7209 BUILD_BUG_ON(offsetof(struct pqi_device_capability,
7210 maximum_aw_c) != 11);
7211 BUILD_BUG_ON(offsetof(struct pqi_device_capability,
7212 max_inbound_queues) != 16);
7213 BUILD_BUG_ON(offsetof(struct pqi_device_capability,
7214 max_elements_per_iq) != 18);
7215 BUILD_BUG_ON(offsetof(struct pqi_device_capability,
7216 max_iq_element_length) != 24);
7217 BUILD_BUG_ON(offsetof(struct pqi_device_capability,
7218 min_iq_element_length) != 26);
7219 BUILD_BUG_ON(offsetof(struct pqi_device_capability,
7220 max_outbound_queues) != 30);
7221 BUILD_BUG_ON(offsetof(struct pqi_device_capability,
7222 max_elements_per_oq) != 32);
7223 BUILD_BUG_ON(offsetof(struct pqi_device_capability,
7224 intr_coalescing_time_granularity) != 34);
7225 BUILD_BUG_ON(offsetof(struct pqi_device_capability,
7226 max_oq_element_length) != 36);
7227 BUILD_BUG_ON(offsetof(struct pqi_device_capability,
7228 min_oq_element_length) != 38);
7229 BUILD_BUG_ON(offsetof(struct pqi_device_capability,
7230 iu_layer_descriptors) != 64);
7231 BUILD_BUG_ON(sizeof(struct pqi_device_capability) != 576);
7232
7233 BUILD_BUG_ON(offsetof(struct pqi_event_descriptor,
7234 event_type) != 0);
7235 BUILD_BUG_ON(offsetof(struct pqi_event_descriptor,
7236 oq_id) != 2);
7237 BUILD_BUG_ON(sizeof(struct pqi_event_descriptor) != 4);
7238
7239 BUILD_BUG_ON(offsetof(struct pqi_event_config,
7240 num_event_descriptors) != 2);
7241 BUILD_BUG_ON(offsetof(struct pqi_event_config,
7242 descriptors) != 4);
7243
061ef06a
KB
7244 BUILD_BUG_ON(PQI_NUM_SUPPORTED_EVENTS !=
7245 ARRAY_SIZE(pqi_supported_event_types));
7246
6c223761
KB
7247 BUILD_BUG_ON(offsetof(struct pqi_event_response,
7248 header.iu_type) != 0);
7249 BUILD_BUG_ON(offsetof(struct pqi_event_response,
7250 header.iu_length) != 2);
7251 BUILD_BUG_ON(offsetof(struct pqi_event_response,
7252 event_type) != 8);
7253 BUILD_BUG_ON(offsetof(struct pqi_event_response,
7254 event_id) != 10);
7255 BUILD_BUG_ON(offsetof(struct pqi_event_response,
7256 additional_event_id) != 12);
7257 BUILD_BUG_ON(offsetof(struct pqi_event_response,
7258 data) != 16);
7259 BUILD_BUG_ON(sizeof(struct pqi_event_response) != 32);
7260
7261 BUILD_BUG_ON(offsetof(struct pqi_event_acknowledge_request,
7262 header.iu_type) != 0);
7263 BUILD_BUG_ON(offsetof(struct pqi_event_acknowledge_request,
7264 header.iu_length) != 2);
7265 BUILD_BUG_ON(offsetof(struct pqi_event_acknowledge_request,
7266 event_type) != 8);
7267 BUILD_BUG_ON(offsetof(struct pqi_event_acknowledge_request,
7268 event_id) != 10);
7269 BUILD_BUG_ON(offsetof(struct pqi_event_acknowledge_request,
7270 additional_event_id) != 12);
7271 BUILD_BUG_ON(sizeof(struct pqi_event_acknowledge_request) != 16);
7272
7273 BUILD_BUG_ON(offsetof(struct pqi_task_management_request,
7274 header.iu_type) != 0);
7275 BUILD_BUG_ON(offsetof(struct pqi_task_management_request,
7276 header.iu_length) != 2);
7277 BUILD_BUG_ON(offsetof(struct pqi_task_management_request,
7278 request_id) != 8);
7279 BUILD_BUG_ON(offsetof(struct pqi_task_management_request,
7280 nexus_id) != 10);
7281 BUILD_BUG_ON(offsetof(struct pqi_task_management_request,
7282 lun_number) != 16);
7283 BUILD_BUG_ON(offsetof(struct pqi_task_management_request,
7284 protocol_specific) != 24);
7285 BUILD_BUG_ON(offsetof(struct pqi_task_management_request,
7286 outbound_queue_id_to_manage) != 26);
7287 BUILD_BUG_ON(offsetof(struct pqi_task_management_request,
7288 request_id_to_manage) != 28);
7289 BUILD_BUG_ON(offsetof(struct pqi_task_management_request,
7290 task_management_function) != 30);
7291 BUILD_BUG_ON(sizeof(struct pqi_task_management_request) != 32);
7292
7293 BUILD_BUG_ON(offsetof(struct pqi_task_management_response,
7294 header.iu_type) != 0);
7295 BUILD_BUG_ON(offsetof(struct pqi_task_management_response,
7296 header.iu_length) != 2);
7297 BUILD_BUG_ON(offsetof(struct pqi_task_management_response,
7298 request_id) != 8);
7299 BUILD_BUG_ON(offsetof(struct pqi_task_management_response,
7300 nexus_id) != 10);
7301 BUILD_BUG_ON(offsetof(struct pqi_task_management_response,
7302 additional_response_info) != 12);
7303 BUILD_BUG_ON(offsetof(struct pqi_task_management_response,
7304 response_code) != 15);
7305 BUILD_BUG_ON(sizeof(struct pqi_task_management_response) != 16);
7306
7307 BUILD_BUG_ON(offsetof(struct bmic_identify_controller,
7308 configured_logical_drive_count) != 0);
7309 BUILD_BUG_ON(offsetof(struct bmic_identify_controller,
7310 configuration_signature) != 1);
7311 BUILD_BUG_ON(offsetof(struct bmic_identify_controller,
7312 firmware_version) != 5);
7313 BUILD_BUG_ON(offsetof(struct bmic_identify_controller,
7314 extended_logical_unit_count) != 154);
7315 BUILD_BUG_ON(offsetof(struct bmic_identify_controller,
7316 firmware_build_number) != 190);
7317 BUILD_BUG_ON(offsetof(struct bmic_identify_controller,
7318 controller_mode) != 292);
7319
1be42f46
KB
7320 BUILD_BUG_ON(offsetof(struct bmic_identify_physical_device,
7321 phys_bay_in_box) != 115);
7322 BUILD_BUG_ON(offsetof(struct bmic_identify_physical_device,
7323 device_type) != 120);
7324 BUILD_BUG_ON(offsetof(struct bmic_identify_physical_device,
7325 redundant_path_present_map) != 1736);
7326 BUILD_BUG_ON(offsetof(struct bmic_identify_physical_device,
7327 active_path_number) != 1738);
7328 BUILD_BUG_ON(offsetof(struct bmic_identify_physical_device,
7329 alternate_paths_phys_connector) != 1739);
7330 BUILD_BUG_ON(offsetof(struct bmic_identify_physical_device,
7331 alternate_paths_phys_box_on_port) != 1755);
7332 BUILD_BUG_ON(offsetof(struct bmic_identify_physical_device,
7333 current_queue_depth_limit) != 1796);
7334 BUILD_BUG_ON(sizeof(struct bmic_identify_physical_device) != 2560);
7335
6c223761
KB
7336 BUILD_BUG_ON(PQI_ADMIN_IQ_NUM_ELEMENTS > 255);
7337 BUILD_BUG_ON(PQI_ADMIN_OQ_NUM_ELEMENTS > 255);
7338 BUILD_BUG_ON(PQI_ADMIN_IQ_ELEMENT_LENGTH %
7339 PQI_QUEUE_ELEMENT_LENGTH_ALIGNMENT != 0);
7340 BUILD_BUG_ON(PQI_ADMIN_OQ_ELEMENT_LENGTH %
7341 PQI_QUEUE_ELEMENT_LENGTH_ALIGNMENT != 0);
7342 BUILD_BUG_ON(PQI_OPERATIONAL_IQ_ELEMENT_LENGTH > 1048560);
7343 BUILD_BUG_ON(PQI_OPERATIONAL_IQ_ELEMENT_LENGTH %
7344 PQI_QUEUE_ELEMENT_LENGTH_ALIGNMENT != 0);
7345 BUILD_BUG_ON(PQI_OPERATIONAL_OQ_ELEMENT_LENGTH > 1048560);
7346 BUILD_BUG_ON(PQI_OPERATIONAL_OQ_ELEMENT_LENGTH %
7347 PQI_QUEUE_ELEMENT_LENGTH_ALIGNMENT != 0);
7348
7349 BUILD_BUG_ON(PQI_RESERVED_IO_SLOTS >= PQI_MAX_OUTSTANDING_REQUESTS);
d727a776
KB
7350 BUILD_BUG_ON(PQI_RESERVED_IO_SLOTS >=
7351 PQI_MAX_OUTSTANDING_REQUESTS_KDUMP);
6c223761 7352}