1/*
2 * driver for Microsemi PQI-based storage controllers
3 * Copyright (c) 2016 Microsemi Corporation
4 * Copyright (c) 2016 PMC-Sierra, Inc.
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; version 2 of the License.
9 *
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
13 * NON INFRINGEMENT. See the GNU General Public License for more details.
14 *
15 * Questions/Comments/Bugfixes to esc.storagedev@microsemi.com
16 *
17 */
18
19#include <linux/module.h>
20#include <linux/kernel.h>
21#include <linux/pci.h>
22#include <linux/delay.h>
23#include <linux/interrupt.h>
24#include <linux/sched.h>
25#include <linux/rtc.h>
26#include <linux/bcd.h>
27#include <linux/cciss_ioctl.h>
28#include <linux/blk-mq-pci.h>
29#include <scsi/scsi_host.h>
30#include <scsi/scsi_cmnd.h>
31#include <scsi/scsi_device.h>
32#include <scsi/scsi_eh.h>
33#include <scsi/scsi_transport_sas.h>
34#include <asm/unaligned.h>
35#include "smartpqi.h"
36#include "smartpqi_sis.h"
37
38#if !defined(BUILD_TIMESTAMP)
39#define BUILD_TIMESTAMP
40#endif
41
42#define DRIVER_VERSION "0.9.13-370"
43#define DRIVER_MAJOR 0
44#define DRIVER_MINOR 9
45#define DRIVER_RELEASE 13
46#define DRIVER_REVISION 370
47
48#define DRIVER_NAME "Microsemi PQI Driver (v" DRIVER_VERSION ")"
49#define DRIVER_NAME_SHORT "smartpqi"
50
51#define PQI_EXTRA_SGL_MEMORY (12 * sizeof(struct pqi_sg_descriptor))
52
53MODULE_AUTHOR("Microsemi");
54MODULE_DESCRIPTION("Driver for Microsemi Smart Family Controller version "
55 DRIVER_VERSION);
56MODULE_SUPPORTED_DEVICE("Microsemi Smart Family Controllers");
57MODULE_VERSION(DRIVER_VERSION);
58MODULE_LICENSE("GPL");
59
60#define PQI_ENABLE_MULTI_QUEUE_SUPPORT 0
61
62static char *hpe_branded_controller = "HPE Smart Array Controller";
63static char *microsemi_branded_controller = "Microsemi Smart Family Controller";
64
65static void pqi_take_ctrl_offline(struct pqi_ctrl_info *ctrl_info);
66static int pqi_scan_scsi_devices(struct pqi_ctrl_info *ctrl_info);
67static void pqi_scan_start(struct Scsi_Host *shost);
68static void pqi_start_io(struct pqi_ctrl_info *ctrl_info,
69 struct pqi_queue_group *queue_group, enum pqi_io_path path,
70 struct pqi_io_request *io_request);
71static int pqi_submit_raid_request_synchronous(struct pqi_ctrl_info *ctrl_info,
72 struct pqi_iu_header *request, unsigned int flags,
73 struct pqi_raid_error_info *error_info, unsigned long timeout_msecs);
74static int pqi_aio_submit_io(struct pqi_ctrl_info *ctrl_info,
75 struct scsi_cmnd *scmd, u32 aio_handle, u8 *cdb,
76 unsigned int cdb_length, struct pqi_queue_group *queue_group,
77 struct pqi_encryption_info *encryption_info);
78
79/* for flags argument to pqi_submit_raid_request_synchronous() */
80#define PQI_SYNC_FLAGS_INTERRUPTABLE 0x1
81
82static struct scsi_transport_template *pqi_sas_transport_template;
83
84static atomic_t pqi_controller_count = ATOMIC_INIT(0);
85
86static unsigned int pqi_supported_event_types[] = {
87 PQI_EVENT_TYPE_HOTPLUG,
88 PQI_EVENT_TYPE_HARDWARE,
89 PQI_EVENT_TYPE_PHYSICAL_DEVICE,
90 PQI_EVENT_TYPE_LOGICAL_DEVICE,
91 PQI_EVENT_TYPE_AIO_STATE_CHANGE,
92 PQI_EVENT_TYPE_AIO_CONFIG_CHANGE,
93};
94
95static int pqi_disable_device_id_wildcards;
96module_param_named(disable_device_id_wildcards,
97 pqi_disable_device_id_wildcards, int, S_IRUGO | S_IWUSR);
98MODULE_PARM_DESC(disable_device_id_wildcards,
99 "Disable device ID wildcards.");
100
101static char *raid_levels[] = {
102 "RAID-0",
103 "RAID-4",
104 "RAID-1(1+0)",
105 "RAID-5",
106 "RAID-5+1",
107 "RAID-ADG",
108 "RAID-1(ADM)",
109};
110
111static char *pqi_raid_level_to_string(u8 raid_level)
112{
113 if (raid_level < ARRAY_SIZE(raid_levels))
114 return raid_levels[raid_level];
115
116 return "";
117}
118
119#define SA_RAID_0 0
120#define SA_RAID_4 1
121#define SA_RAID_1 2 /* also used for RAID 10 */
122#define SA_RAID_5 3 /* also used for RAID 50 */
123#define SA_RAID_51 4
124#define SA_RAID_6 5 /* also used for RAID 60 */
125#define SA_RAID_ADM 6 /* also used for RAID 1+0 ADM */
126#define SA_RAID_MAX SA_RAID_ADM
127#define SA_RAID_UNKNOWN 0xff
128
129static inline void pqi_scsi_done(struct scsi_cmnd *scmd)
130{
131 pqi_prep_for_scsi_done(scmd);
132 scmd->scsi_done(scmd);
133}
134
135static inline bool pqi_scsi3addr_equal(u8 *scsi3addr1, u8 *scsi3addr2)
136{
137 return memcmp(scsi3addr1, scsi3addr2, 8) == 0;
138}
139
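/*
 * The Scsi_Host private data holds a pointer to our pqi_ctrl_info rather
 * than the structure itself, hence the double dereference below.
 */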
140static inline struct pqi_ctrl_info *shost_to_hba(struct Scsi_Host *shost)
141{
142 void *hostdata = shost_priv(shost);
143
144 return *((struct pqi_ctrl_info **)hostdata);
145}
146
147static inline bool pqi_is_logical_device(struct pqi_scsi_dev *device)
148{
149 return !device->is_physical_device;
150}
151
152static inline bool pqi_ctrl_offline(struct pqi_ctrl_info *ctrl_info)
153{
154 return !ctrl_info->controller_online;
155}
156
157static inline void pqi_check_ctrl_health(struct pqi_ctrl_info *ctrl_info)
158{
159 if (ctrl_info->controller_online)
160 if (!sis_is_firmware_running(ctrl_info))
161 pqi_take_ctrl_offline(ctrl_info);
162}
163
164static inline bool pqi_is_hba_lunid(u8 *scsi3addr)
165{
166 return pqi_scsi3addr_equal(scsi3addr, RAID_CTLR_LUNID);
167}
168
169static inline enum pqi_ctrl_mode pqi_get_ctrl_mode(
170 struct pqi_ctrl_info *ctrl_info)
171{
172 return sis_read_driver_scratch(ctrl_info);
173}
174
175static inline void pqi_save_ctrl_mode(struct pqi_ctrl_info *ctrl_info,
176 enum pqi_ctrl_mode mode)
177{
178 sis_write_driver_scratch(ctrl_info, mode);
179}
180
181#define PQI_RESCAN_WORK_INTERVAL (10 * HZ)
182static inline void pqi_ctrl_block_requests(struct pqi_ctrl_info *ctrl_info)
183{
184 ctrl_info->block_requests = true;
185 scsi_block_requests(ctrl_info->scsi_host);
186}
187
188static inline void pqi_ctrl_unblock_requests(struct pqi_ctrl_info *ctrl_info)
189{
190 ctrl_info->block_requests = false;
191 wake_up_all(&ctrl_info->block_requests_wait);
192 scsi_unblock_requests(ctrl_info->scsi_host);
193}
194
195static inline bool pqi_ctrl_blocked(struct pqi_ctrl_info *ctrl_info)
196{
197 return ctrl_info->block_requests;
198}
199
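/*
 * Wait until the controller is no longer blocking requests (or until the
 * caller's timeout expires).  Returns the number of milliseconds left in
 * the timeout; if requests were never blocked, the full timeout is
 * returned untouched.
 */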
200static unsigned long pqi_wait_if_ctrl_blocked(struct pqi_ctrl_info *ctrl_info,
201 unsigned long timeout_msecs)
202{
203 unsigned long remaining_msecs;
204
205 if (!pqi_ctrl_blocked(ctrl_info))
206 return timeout_msecs;
207
208 atomic_inc(&ctrl_info->num_blocked_threads);
209
210 if (timeout_msecs == NO_TIMEOUT) {
211 wait_event(ctrl_info->block_requests_wait,
212 !pqi_ctrl_blocked(ctrl_info));
213 remaining_msecs = timeout_msecs;
214 } else {
215 unsigned long remaining_jiffies;
216
217 remaining_jiffies =
218 wait_event_timeout(ctrl_info->block_requests_wait,
219 !pqi_ctrl_blocked(ctrl_info),
220 msecs_to_jiffies(timeout_msecs));
221 remaining_msecs = jiffies_to_msecs(remaining_jiffies);
222 }
223
224 atomic_dec(&ctrl_info->num_blocked_threads);
225
226 return remaining_msecs;
227}
228
229static inline void pqi_ctrl_busy(struct pqi_ctrl_info *ctrl_info)
230{
231 atomic_inc(&ctrl_info->num_busy_threads);
232}
233
234static inline void pqi_ctrl_unbusy(struct pqi_ctrl_info *ctrl_info)
235{
236 atomic_dec(&ctrl_info->num_busy_threads);
237}
238
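/*
 * Spin until every thread that entered the driver is either finished or
 * parked in pqi_wait_if_ctrl_blocked(), i.e. num_busy_threads has dropped
 * to num_blocked_threads and no requests are being actively processed.
 */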
239static inline void pqi_ctrl_wait_until_quiesced(struct pqi_ctrl_info *ctrl_info)
240{
241 while (atomic_read(&ctrl_info->num_busy_threads) >
242 atomic_read(&ctrl_info->num_blocked_threads))
243 usleep_range(1000, 2000);
244}
245
246static inline void pqi_device_reset_start(struct pqi_scsi_dev *device)
247{
248 device->in_reset = true;
249}
250
251static inline void pqi_device_reset_done(struct pqi_scsi_dev *device)
252{
253 device->in_reset = false;
254}
255
256static inline bool pqi_device_in_reset(struct pqi_scsi_dev *device)
257{
258 return device->in_reset;
259}
260
261static inline void pqi_schedule_rescan_worker(struct pqi_ctrl_info *ctrl_info)
262{
263 schedule_delayed_work(&ctrl_info->rescan_work,
264 PQI_RESCAN_WORK_INTERVAL);
265}
266
267static inline void pqi_cancel_rescan_worker(struct pqi_ctrl_info *ctrl_info)
268{
269 cancel_delayed_work_sync(&ctrl_info->rescan_work);
270}
271
272static inline u32 pqi_read_heartbeat_counter(struct pqi_ctrl_info *ctrl_info)
273{
274 if (!ctrl_info->heartbeat_counter)
275 return 0;
276
277 return readl(ctrl_info->heartbeat_counter);
278}
279
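/*
 * DMA-map a single buffer and describe it with one scatter-gather
 * descriptor flagged CISS_SG_LAST.  Returns 0 on success, -ENOMEM if the
 * mapping fails, and 0 without mapping anything for empty or
 * PCI_DMA_NONE transfers.
 */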
280static int pqi_map_single(struct pci_dev *pci_dev,
281 struct pqi_sg_descriptor *sg_descriptor, void *buffer,
282 size_t buffer_length, int data_direction)
283{
284 dma_addr_t bus_address;
285
286 if (!buffer || buffer_length == 0 || data_direction == PCI_DMA_NONE)
287 return 0;
288
289 bus_address = pci_map_single(pci_dev, buffer, buffer_length,
290 data_direction);
291 if (pci_dma_mapping_error(pci_dev, bus_address))
292 return -ENOMEM;
293
294 put_unaligned_le64((u64)bus_address, &sg_descriptor->address);
295 put_unaligned_le32(buffer_length, &sg_descriptor->length);
296 put_unaligned_le32(CISS_SG_LAST, &sg_descriptor->flags);
297
298 return 0;
299}
300
301static void pqi_pci_unmap(struct pci_dev *pci_dev,
302 struct pqi_sg_descriptor *descriptors, int num_descriptors,
303 int data_direction)
304{
305 int i;
306
307 if (data_direction == PCI_DMA_NONE)
308 return;
309
310 for (i = 0; i < num_descriptors; i++)
311 pci_unmap_single(pci_dev,
312 (dma_addr_t)get_unaligned_le64(&descriptors[i].address),
313 get_unaligned_le32(&descriptors[i].length),
314 data_direction);
315}
316
317static int pqi_build_raid_path_request(struct pqi_ctrl_info *ctrl_info,
318 struct pqi_raid_path_request *request, u8 cmd,
319 u8 *scsi3addr, void *buffer, size_t buffer_length,
320 u16 vpd_page, int *pci_direction)
321{
322 u8 *cdb;
323 int pci_dir;
324
325 memset(request, 0, sizeof(*request));
326
327 request->header.iu_type = PQI_REQUEST_IU_RAID_PATH_IO;
328 put_unaligned_le16(offsetof(struct pqi_raid_path_request,
329 sg_descriptors[1]) - PQI_REQUEST_HEADER_LENGTH,
330 &request->header.iu_length);
331 put_unaligned_le32(buffer_length, &request->buffer_length);
332 memcpy(request->lun_number, scsi3addr, sizeof(request->lun_number));
333 request->task_attribute = SOP_TASK_ATTRIBUTE_SIMPLE;
334 request->additional_cdb_bytes_usage = SOP_ADDITIONAL_CDB_BYTES_0;
335
336 cdb = request->cdb;
337
338 switch (cmd) {
339 case INQUIRY:
340 request->data_direction = SOP_READ_FLAG;
341 cdb[0] = INQUIRY;
342 if (vpd_page & VPD_PAGE) {
343 cdb[1] = 0x1;
344 cdb[2] = (u8)vpd_page;
345 }
346 cdb[4] = (u8)buffer_length;
347 break;
348 case CISS_REPORT_LOG:
349 case CISS_REPORT_PHYS:
350 request->data_direction = SOP_READ_FLAG;
351 cdb[0] = cmd;
352 if (cmd == CISS_REPORT_PHYS)
353 cdb[1] = CISS_REPORT_PHYS_EXTENDED;
354 else
355 cdb[1] = CISS_REPORT_LOG_EXTENDED;
356 put_unaligned_be32(buffer_length, &cdb[6]);
357 break;
358 case CISS_GET_RAID_MAP:
359 request->data_direction = SOP_READ_FLAG;
360 cdb[0] = CISS_READ;
361 cdb[1] = CISS_GET_RAID_MAP;
362 put_unaligned_be32(buffer_length, &cdb[6]);
363 break;
364 case SA_CACHE_FLUSH:
365 request->data_direction = SOP_WRITE_FLAG;
366 cdb[0] = BMIC_WRITE;
367 cdb[6] = BMIC_CACHE_FLUSH;
368 put_unaligned_be16(buffer_length, &cdb[7]);
369 break;
370 case BMIC_IDENTIFY_CONTROLLER:
371 case BMIC_IDENTIFY_PHYSICAL_DEVICE:
372 request->data_direction = SOP_READ_FLAG;
373 cdb[0] = BMIC_READ;
374 cdb[6] = cmd;
375 put_unaligned_be16(buffer_length, &cdb[7]);
376 break;
377 case BMIC_WRITE_HOST_WELLNESS:
378 request->data_direction = SOP_WRITE_FLAG;
379 cdb[0] = BMIC_WRITE;
380 cdb[6] = cmd;
381 put_unaligned_be16(buffer_length, &cdb[7]);
382 break;
383 default:
384 dev_err(&ctrl_info->pci_dev->dev, "unknown command 0x%x\n",
385 cmd);
386 WARN_ON(cmd);
387 break;
388 }
389
390 switch (request->data_direction) {
391 case SOP_READ_FLAG:
392 pci_dir = PCI_DMA_FROMDEVICE;
393 break;
394 case SOP_WRITE_FLAG:
395 pci_dir = PCI_DMA_TODEVICE;
396 break;
397 case SOP_NO_DIRECTION_FLAG:
398 pci_dir = PCI_DMA_NONE;
399 break;
400 default:
401 pci_dir = PCI_DMA_BIDIRECTIONAL;
402 break;
403 }
404
405 *pci_direction = pci_dir;
406
407 return pqi_map_single(ctrl_info->pci_dev, &request->sg_descriptors[0],
408 buffer, buffer_length, pci_dir);
409}
410
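/*
 * Lock-free I/O request allocation: starting from the racily-read
 * next_io_request_slot hint, claim the first pool entry whose refcount
 * transitions from 0 to 1.  Losing a race is harmless - the loop simply
 * advances to the next slot.
 */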
411static struct pqi_io_request *pqi_alloc_io_request(
412 struct pqi_ctrl_info *ctrl_info)
413{
414 struct pqi_io_request *io_request;
415 u16 i = ctrl_info->next_io_request_slot; /* benignly racy */
416
417 while (1) {
418 io_request = &ctrl_info->io_request_pool[i];
419 if (atomic_inc_return(&io_request->refcount) == 1)
420 break;
421 atomic_dec(&io_request->refcount);
422 i = (i + 1) % ctrl_info->max_io_slots;
423 }
424
425 /* benignly racy */
426 ctrl_info->next_io_request_slot = (i + 1) % ctrl_info->max_io_slots;
427
428 io_request->scmd = NULL;
429 io_request->status = 0;
430 io_request->error_info = NULL;
431
432 return io_request;
433}
434
435static void pqi_free_io_request(struct pqi_io_request *io_request)
436{
437 atomic_dec(&io_request->refcount);
438}
439
440static int pqi_identify_controller(struct pqi_ctrl_info *ctrl_info,
441 struct bmic_identify_controller *buffer)
442{
443 int rc;
444 int pci_direction;
445 struct pqi_raid_path_request request;
446
447 rc = pqi_build_raid_path_request(ctrl_info, &request,
448 BMIC_IDENTIFY_CONTROLLER, RAID_CTLR_LUNID, buffer,
449 sizeof(*buffer), 0, &pci_direction);
450 if (rc)
451 return rc;
452
453 rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header, 0,
454 NULL, NO_TIMEOUT);
455
456 pqi_pci_unmap(ctrl_info->pci_dev, request.sg_descriptors, 1,
457 pci_direction);
458
459 return rc;
460}
461
462static int pqi_scsi_inquiry(struct pqi_ctrl_info *ctrl_info,
463 u8 *scsi3addr, u16 vpd_page, void *buffer, size_t buffer_length)
464{
465 int rc;
466 int pci_direction;
467 struct pqi_raid_path_request request;
468
469 rc = pqi_build_raid_path_request(ctrl_info, &request,
470 INQUIRY, scsi3addr, buffer, buffer_length, vpd_page,
471 &pci_direction);
472 if (rc)
473 return rc;
474
475 rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header, 0,
476 NULL, NO_TIMEOUT);
477
478 pqi_pci_unmap(ctrl_info->pci_dev, request.sg_descriptors, 1,
479 pci_direction);
480
481 return rc;
482}
483
484static int pqi_identify_physical_device(struct pqi_ctrl_info *ctrl_info,
485 struct pqi_scsi_dev *device,
486 struct bmic_identify_physical_device *buffer,
487 size_t buffer_length)
488{
489 int rc;
490 int pci_direction;
491 u16 bmic_device_index;
492 struct pqi_raid_path_request request;
493
494 rc = pqi_build_raid_path_request(ctrl_info, &request,
495 BMIC_IDENTIFY_PHYSICAL_DEVICE, RAID_CTLR_LUNID, buffer,
496 buffer_length, 0, &pci_direction);
497 if (rc)
498 return rc;
499
500 bmic_device_index = CISS_GET_DRIVE_NUMBER(device->scsi3addr);
501 request.cdb[2] = (u8)bmic_device_index;
502 request.cdb[9] = (u8)(bmic_device_index >> 8);
503
504 rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header,
505 0, NULL, NO_TIMEOUT);
506
507 pqi_pci_unmap(ctrl_info->pci_dev, request.sg_descriptors, 1,
508 pci_direction);
509
510 return rc;
511}
512
513#define SA_CACHE_FLUSH_BUFFER_LENGTH 4
514
515static int pqi_flush_cache(struct pqi_ctrl_info *ctrl_info)
516{
517 int rc;
518 struct pqi_raid_path_request request;
519 int pci_direction;
520 u8 *buffer;
521
522 /*
523 * Don't bother trying to flush the cache if the controller is
524 * locked up.
525 */
526 if (pqi_ctrl_offline(ctrl_info))
527 return -ENXIO;
528
529 buffer = kzalloc(SA_CACHE_FLUSH_BUFFER_LENGTH, GFP_KERNEL);
530 if (!buffer)
531 return -ENOMEM;
532
533 rc = pqi_build_raid_path_request(ctrl_info, &request,
534 SA_CACHE_FLUSH, RAID_CTLR_LUNID, buffer,
535 SA_CACHE_FLUSH_BUFFER_LENGTH, 0, &pci_direction);
536 if (rc)
537 goto out;
538
539 rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header,
540 0, NULL, NO_TIMEOUT);
541
542 pqi_pci_unmap(ctrl_info->pci_dev, request.sg_descriptors, 1,
543 pci_direction);
544
545out:
546 kfree(buffer);
547
548 return rc;
549}
550
551static int pqi_write_host_wellness(struct pqi_ctrl_info *ctrl_info,
552 void *buffer, size_t buffer_length)
553{
554 int rc;
555 struct pqi_raid_path_request request;
556 int pci_direction;
557
558 rc = pqi_build_raid_path_request(ctrl_info, &request,
559 BMIC_WRITE_HOST_WELLNESS, RAID_CTLR_LUNID, buffer,
560 buffer_length, 0, &pci_direction);
561 if (rc)
562 return rc;
563
564 rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header,
565 0, NULL, NO_TIMEOUT);
566
567 pqi_pci_unmap(ctrl_info->pci_dev, request.sg_descriptors, 1,
568 pci_direction);
569
570 return rc;
571}
572
573#pragma pack(1)
574
575struct bmic_host_wellness_driver_version {
576 u8 start_tag[4];
577 u8 driver_version_tag[2];
578 __le16 driver_version_length;
579 char driver_version[32];
580 u8 end_tag[2];
581};
582
583#pragma pack()
584
585static int pqi_write_driver_version_to_host_wellness(
586 struct pqi_ctrl_info *ctrl_info)
587{
588 int rc;
589 struct bmic_host_wellness_driver_version *buffer;
590 size_t buffer_length;
591
592 buffer_length = sizeof(*buffer);
593
594 buffer = kmalloc(buffer_length, GFP_KERNEL);
595 if (!buffer)
596 return -ENOMEM;
597
598 buffer->start_tag[0] = '<';
599 buffer->start_tag[1] = 'H';
600 buffer->start_tag[2] = 'W';
601 buffer->start_tag[3] = '>';
602 buffer->driver_version_tag[0] = 'D';
603 buffer->driver_version_tag[1] = 'V';
604 put_unaligned_le16(sizeof(buffer->driver_version),
605 &buffer->driver_version_length);
606 strncpy(buffer->driver_version, "Linux " DRIVER_VERSION,
607 sizeof(buffer->driver_version) - 1);
608 buffer->driver_version[sizeof(buffer->driver_version) - 1] = '\0';
609 buffer->end_tag[0] = 'Z';
610 buffer->end_tag[1] = 'Z';
611
612 rc = pqi_write_host_wellness(ctrl_info, buffer, buffer_length);
613
614 kfree(buffer);
615
616 return rc;
617}
618
619#pragma pack(1)
620
621struct bmic_host_wellness_time {
622 u8 start_tag[4];
623 u8 time_tag[2];
624 __le16 time_length;
625 u8 time[8];
626 u8 dont_write_tag[2];
627 u8 end_tag[2];
628};
629
630#pragma pack()
631
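/*
 * Report the current host time to the controller via a BMIC host wellness
 * write.  The eight time bytes are BCD-encoded: hour, minute, second, a
 * reserved zero byte, month, day, century and two-digit year.
 */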
632static int pqi_write_current_time_to_host_wellness(
633 struct pqi_ctrl_info *ctrl_info)
634{
635 int rc;
636 struct bmic_host_wellness_time *buffer;
637 size_t buffer_length;
638 time64_t local_time;
639 unsigned int year;
640 struct tm tm;
641
642 buffer_length = sizeof(*buffer);
643
644 buffer = kmalloc(buffer_length, GFP_KERNEL);
645 if (!buffer)
646 return -ENOMEM;
647
648 buffer->start_tag[0] = '<';
649 buffer->start_tag[1] = 'H';
650 buffer->start_tag[2] = 'W';
651 buffer->start_tag[3] = '>';
652 buffer->time_tag[0] = 'T';
653 buffer->time_tag[1] = 'D';
654 put_unaligned_le16(sizeof(buffer->time),
655 &buffer->time_length);
656
657 local_time = ktime_get_real_seconds();
658 time64_to_tm(local_time, -sys_tz.tz_minuteswest * 60, &tm);
659 year = tm.tm_year + 1900;
660
661 buffer->time[0] = bin2bcd(tm.tm_hour);
662 buffer->time[1] = bin2bcd(tm.tm_min);
663 buffer->time[2] = bin2bcd(tm.tm_sec);
664 buffer->time[3] = 0;
665 buffer->time[4] = bin2bcd(tm.tm_mon + 1);
666 buffer->time[5] = bin2bcd(tm.tm_mday);
667 buffer->time[6] = bin2bcd(year / 100);
668 buffer->time[7] = bin2bcd(year % 100);
669
670 buffer->dont_write_tag[0] = 'D';
671 buffer->dont_write_tag[1] = 'W';
672 buffer->end_tag[0] = 'Z';
673 buffer->end_tag[1] = 'Z';
674
675 rc = pqi_write_host_wellness(ctrl_info, buffer, buffer_length);
676
677 kfree(buffer);
678
679 return rc;
680}
681
682#define PQI_UPDATE_TIME_WORK_INTERVAL (24UL * 60 * 60 * HZ)
683
684static void pqi_update_time_worker(struct work_struct *work)
685{
686 int rc;
687 struct pqi_ctrl_info *ctrl_info;
688
689 ctrl_info = container_of(to_delayed_work(work), struct pqi_ctrl_info,
690 update_time_work);
691
692 rc = pqi_write_current_time_to_host_wellness(ctrl_info);
693 if (rc)
694 dev_warn(&ctrl_info->pci_dev->dev,
695 "error updating time on controller\n");
696
697 schedule_delayed_work(&ctrl_info->update_time_work,
698 PQI_UPDATE_TIME_WORK_INTERVAL);
699}
700
701static inline void pqi_schedule_update_time_worker(
702 struct pqi_ctrl_info *ctrl_info)
703{
704 if (ctrl_info->update_time_worker_scheduled)
705 return;
706
707 schedule_delayed_work(&ctrl_info->update_time_work, 0);
708 ctrl_info->update_time_worker_scheduled = true;
709}
710
711static inline void pqi_cancel_update_time_worker(
712 struct pqi_ctrl_info *ctrl_info)
713{
714 if (!ctrl_info->update_time_worker_scheduled)
715 return;
716
717 cancel_delayed_work_sync(&ctrl_info->update_time_work);
718 ctrl_info->update_time_worker_scheduled = false;
719}
720
721static int pqi_report_luns(struct pqi_ctrl_info *ctrl_info, u8 cmd,
722 void *buffer, size_t buffer_length)
723{
724 int rc;
725 int pci_direction;
726 struct pqi_raid_path_request request;
727
728 rc = pqi_build_raid_path_request(ctrl_info, &request,
729 cmd, RAID_CTLR_LUNID, buffer, buffer_length, 0, &pci_direction);
730 if (rc)
731 return rc;
732
733 rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header, 0,
734 NULL, NO_TIMEOUT);
735
736 pqi_pci_unmap(ctrl_info->pci_dev, request.sg_descriptors, 1,
737 pci_direction);
738
739 return rc;
740}
741
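/*
 * Two-pass CISS REPORT LUNS: first fetch just the header to learn the list
 * length, then allocate a full-sized buffer and repeat the command.  If the
 * list grew between the two calls, retry with the larger size.
 */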
742static int pqi_report_phys_logical_luns(struct pqi_ctrl_info *ctrl_info, u8 cmd,
743 void **buffer)
744{
745 int rc;
746 size_t lun_list_length;
747 size_t lun_data_length;
748 size_t new_lun_list_length;
749 void *lun_data = NULL;
750 struct report_lun_header *report_lun_header;
751
752 report_lun_header = kmalloc(sizeof(*report_lun_header), GFP_KERNEL);
753 if (!report_lun_header) {
754 rc = -ENOMEM;
755 goto out;
756 }
757
758 rc = pqi_report_luns(ctrl_info, cmd, report_lun_header,
759 sizeof(*report_lun_header));
760 if (rc)
761 goto out;
762
763 lun_list_length = get_unaligned_be32(&report_lun_header->list_length);
764
765again:
766 lun_data_length = sizeof(struct report_lun_header) + lun_list_length;
767
768 lun_data = kmalloc(lun_data_length, GFP_KERNEL);
769 if (!lun_data) {
770 rc = -ENOMEM;
771 goto out;
772 }
773
774 if (lun_list_length == 0) {
775 memcpy(lun_data, report_lun_header, sizeof(*report_lun_header));
776 goto out;
777 }
778
779 rc = pqi_report_luns(ctrl_info, cmd, lun_data, lun_data_length);
780 if (rc)
781 goto out;
782
783 new_lun_list_length = get_unaligned_be32(
784 &((struct report_lun_header *)lun_data)->list_length);
785
786 if (new_lun_list_length > lun_list_length) {
787 lun_list_length = new_lun_list_length;
788 kfree(lun_data);
789 goto again;
790 }
791
792out:
793 kfree(report_lun_header);
794
795 if (rc) {
796 kfree(lun_data);
797 lun_data = NULL;
798 }
799
800 *buffer = lun_data;
801
802 return rc;
803}
804
805static inline int pqi_report_phys_luns(struct pqi_ctrl_info *ctrl_info,
806 void **buffer)
807{
808 return pqi_report_phys_logical_luns(ctrl_info, CISS_REPORT_PHYS,
809 buffer);
810}
811
812static inline int pqi_report_logical_luns(struct pqi_ctrl_info *ctrl_info,
813 void **buffer)
814{
815 return pqi_report_phys_logical_luns(ctrl_info, CISS_REPORT_LOG, buffer);
816}
817
818static int pqi_get_device_lists(struct pqi_ctrl_info *ctrl_info,
819 struct report_phys_lun_extended **physdev_list,
820 struct report_log_lun_extended **logdev_list)
821{
822 int rc;
823 size_t logdev_list_length;
824 size_t logdev_data_length;
825 struct report_log_lun_extended *internal_logdev_list;
826 struct report_log_lun_extended *logdev_data;
827 struct report_lun_header report_lun_header;
828
829 rc = pqi_report_phys_luns(ctrl_info, (void **)physdev_list);
830 if (rc)
831 dev_err(&ctrl_info->pci_dev->dev,
832 "report physical LUNs failed\n");
833
834 rc = pqi_report_logical_luns(ctrl_info, (void **)logdev_list);
835 if (rc)
836 dev_err(&ctrl_info->pci_dev->dev,
837 "report logical LUNs failed\n");
838
839 /*
840 * Tack the controller itself onto the end of the logical device list.
841 */
842
843 logdev_data = *logdev_list;
844
845 if (logdev_data) {
846 logdev_list_length =
847 get_unaligned_be32(&logdev_data->header.list_length);
848 } else {
849 memset(&report_lun_header, 0, sizeof(report_lun_header));
850 logdev_data =
851 (struct report_log_lun_extended *)&report_lun_header;
852 logdev_list_length = 0;
853 }
854
855 logdev_data_length = sizeof(struct report_lun_header) +
856 logdev_list_length;
857
858 internal_logdev_list = kmalloc(logdev_data_length +
859 sizeof(struct report_log_lun_extended), GFP_KERNEL);
860 if (!internal_logdev_list) {
861 kfree(*logdev_list);
862 *logdev_list = NULL;
863 return -ENOMEM;
864 }
865
866 memcpy(internal_logdev_list, logdev_data, logdev_data_length);
867 memset((u8 *)internal_logdev_list + logdev_data_length, 0,
868 sizeof(struct report_log_lun_extended_entry));
869 put_unaligned_be32(logdev_list_length +
870 sizeof(struct report_log_lun_extended_entry),
871 &internal_logdev_list->header.list_length);
872
873 kfree(*logdev_list);
874 *logdev_list = internal_logdev_list;
875
876 return 0;
877}
878
879static inline void pqi_set_bus_target_lun(struct pqi_scsi_dev *device,
880 int bus, int target, int lun)
881{
882 device->bus = bus;
883 device->target = target;
884 device->lun = lun;
885}
886
887static void pqi_assign_bus_target_lun(struct pqi_scsi_dev *device)
888{
889 u8 *scsi3addr;
890 u32 lunid;
891
892 scsi3addr = device->scsi3addr;
893 lunid = get_unaligned_le32(scsi3addr);
894
895 if (pqi_is_hba_lunid(scsi3addr)) {
896 /* The specified device is the controller. */
897 pqi_set_bus_target_lun(device, PQI_HBA_BUS, 0, lunid & 0x3fff);
898 device->target_lun_valid = true;
899 return;
900 }
901
902 if (pqi_is_logical_device(device)) {
903 pqi_set_bus_target_lun(device, PQI_RAID_VOLUME_BUS, 0,
904 lunid & 0x3fff);
905 device->target_lun_valid = true;
906 return;
907 }
908
909 /*
910 * Defer target and LUN assignment for non-controller physical devices
911 * because the SAS transport layer will make these assignments later.
912 */
913 pqi_set_bus_target_lun(device, PQI_PHYSICAL_DEVICE_BUS, 0, 0);
914}
915
916static void pqi_get_raid_level(struct pqi_ctrl_info *ctrl_info,
917 struct pqi_scsi_dev *device)
918{
919 int rc;
920 u8 raid_level;
921 u8 *buffer;
922
923 raid_level = SA_RAID_UNKNOWN;
924
925 buffer = kmalloc(64, GFP_KERNEL);
926 if (buffer) {
927 rc = pqi_scsi_inquiry(ctrl_info, device->scsi3addr,
928 VPD_PAGE | CISS_VPD_LV_DEVICE_GEOMETRY, buffer, 64);
929 if (rc == 0) {
930 raid_level = buffer[8];
931 if (raid_level > SA_RAID_MAX)
932 raid_level = SA_RAID_UNKNOWN;
933 }
934 kfree(buffer);
935 }
936
937 device->raid_level = raid_level;
938}
939
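/*
 * Sanity-check a RAID map returned by the controller: structure size
 * bounds, total number of map entries, and the layout_map_count expected
 * for RAID-1, RAID-1(ADM) and RAID-50/60 volumes.
 */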
940static int pqi_validate_raid_map(struct pqi_ctrl_info *ctrl_info,
941 struct pqi_scsi_dev *device, struct raid_map *raid_map)
942{
943 char *err_msg;
944 u32 raid_map_size;
945 u32 r5or6_blocks_per_row;
946 unsigned int num_phys_disks;
947 unsigned int num_raid_map_entries;
948
949 raid_map_size = get_unaligned_le32(&raid_map->structure_size);
950
951 if (raid_map_size < offsetof(struct raid_map, disk_data)) {
952 err_msg = "RAID map too small";
953 goto bad_raid_map;
954 }
955
956 if (raid_map_size > sizeof(*raid_map)) {
957 err_msg = "RAID map too large";
958 goto bad_raid_map;
959 }
960
961 num_phys_disks = get_unaligned_le16(&raid_map->layout_map_count) *
962 (get_unaligned_le16(&raid_map->data_disks_per_row) +
963 get_unaligned_le16(&raid_map->metadata_disks_per_row));
964 num_raid_map_entries = num_phys_disks *
965 get_unaligned_le16(&raid_map->row_cnt);
966
967 if (num_raid_map_entries > RAID_MAP_MAX_ENTRIES) {
968 err_msg = "invalid number of map entries in RAID map";
969 goto bad_raid_map;
970 }
971
972 if (device->raid_level == SA_RAID_1) {
973 if (get_unaligned_le16(&raid_map->layout_map_count) != 2) {
974 err_msg = "invalid RAID-1 map";
975 goto bad_raid_map;
976 }
977 } else if (device->raid_level == SA_RAID_ADM) {
978 if (get_unaligned_le16(&raid_map->layout_map_count) != 3) {
979 err_msg = "invalid RAID-1(ADM) map";
980 goto bad_raid_map;
981 }
982 } else if ((device->raid_level == SA_RAID_5 ||
983 device->raid_level == SA_RAID_6) &&
984 get_unaligned_le16(&raid_map->layout_map_count) > 1) {
985 /* RAID 50/60 */
986 r5or6_blocks_per_row =
987 get_unaligned_le16(&raid_map->strip_size) *
988 get_unaligned_le16(&raid_map->data_disks_per_row);
989 if (r5or6_blocks_per_row == 0) {
990 err_msg = "invalid RAID-5 or RAID-6 map";
991 goto bad_raid_map;
992 }
993 }
994
995 return 0;
996
997bad_raid_map:
998 dev_warn(&ctrl_info->pci_dev->dev, "%s\n", err_msg);
999
1000 return -EINVAL;
1001}
1002
1003static int pqi_get_raid_map(struct pqi_ctrl_info *ctrl_info,
1004 struct pqi_scsi_dev *device)
1005{
1006 int rc;
1007 int pci_direction;
1008 struct pqi_raid_path_request request;
1009 struct raid_map *raid_map;
1010
1011 raid_map = kmalloc(sizeof(*raid_map), GFP_KERNEL);
1012 if (!raid_map)
1013 return -ENOMEM;
1014
1015 rc = pqi_build_raid_path_request(ctrl_info, &request,
1016 CISS_GET_RAID_MAP, device->scsi3addr, raid_map,
1017 sizeof(*raid_map), 0, &pci_direction);
1018 if (rc)
1019 goto error;
1020
1021 rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header, 0,
1022 NULL, NO_TIMEOUT);
1023
1024 pqi_pci_unmap(ctrl_info->pci_dev, request.sg_descriptors, 1,
1025 pci_direction);
1026
1027 if (rc)
1028 goto error;
1029
1030 rc = pqi_validate_raid_map(ctrl_info, device, raid_map);
1031 if (rc)
1032 goto error;
1033
1034 device->raid_map = raid_map;
1035
1036 return 0;
1037
1038error:
1039 kfree(raid_map);
1040
1041 return rc;
1042}
1043
1044static void pqi_get_offload_status(struct pqi_ctrl_info *ctrl_info,
1045 struct pqi_scsi_dev *device)
1046{
1047 int rc;
1048 u8 *buffer;
1049 u8 offload_status;
1050
1051 buffer = kmalloc(64, GFP_KERNEL);
1052 if (!buffer)
1053 return;
1054
1055 rc = pqi_scsi_inquiry(ctrl_info, device->scsi3addr,
1056 VPD_PAGE | CISS_VPD_LV_OFFLOAD_STATUS, buffer, 64);
1057 if (rc)
1058 goto out;
1059
1060#define OFFLOAD_STATUS_BYTE 4
1061#define OFFLOAD_CONFIGURED_BIT 0x1
1062#define OFFLOAD_ENABLED_BIT 0x2
1063
1064 offload_status = buffer[OFFLOAD_STATUS_BYTE];
1065 device->offload_configured =
1066 !!(offload_status & OFFLOAD_CONFIGURED_BIT);
1067 if (device->offload_configured) {
1068 device->offload_enabled_pending =
1069 !!(offload_status & OFFLOAD_ENABLED_BIT);
1070 if (pqi_get_raid_map(ctrl_info, device))
1071 device->offload_enabled_pending = false;
1072 }
1073
1074out:
1075 kfree(buffer);
1076}
1077
1078/*
1079 * Use vendor-specific VPD to determine online/offline status of a volume.
1080 */
1081
1082static void pqi_get_volume_status(struct pqi_ctrl_info *ctrl_info,
1083 struct pqi_scsi_dev *device)
1084{
1085 int rc;
1086 size_t page_length;
1087 u8 volume_status = CISS_LV_STATUS_UNAVAILABLE;
1088 bool volume_offline = true;
1089 u32 volume_flags;
1090 struct ciss_vpd_logical_volume_status *vpd;
1091
1092 vpd = kmalloc(sizeof(*vpd), GFP_KERNEL);
1093 if (!vpd)
1094 goto no_buffer;
1095
1096 rc = pqi_scsi_inquiry(ctrl_info, device->scsi3addr,
1097 VPD_PAGE | CISS_VPD_LV_STATUS, vpd, sizeof(*vpd));
1098 if (rc)
1099 goto out;
1100
1101 page_length = offsetof(struct ciss_vpd_logical_volume_status,
1102 volume_status) + vpd->page_length;
1103 if (page_length < sizeof(*vpd))
1104 goto out;
1105
1106 volume_status = vpd->volume_status;
1107 volume_flags = get_unaligned_be32(&vpd->flags);
1108 volume_offline = (volume_flags & CISS_LV_FLAGS_NO_HOST_IO) != 0;
1109
1110out:
1111 kfree(vpd);
1112no_buffer:
1113 device->volume_status = volume_status;
1114 device->volume_offline = volume_offline;
1115}
1116
1117static int pqi_get_device_info(struct pqi_ctrl_info *ctrl_info,
1118 struct pqi_scsi_dev *device)
1119{
1120 int rc;
1121 u8 *buffer;
1122
1123 buffer = kmalloc(64, GFP_KERNEL);
1124 if (!buffer)
1125 return -ENOMEM;
1126
1127 /* Send an inquiry to the device to see what it is. */
1128 rc = pqi_scsi_inquiry(ctrl_info, device->scsi3addr, 0, buffer, 64);
1129 if (rc)
1130 goto out;
1131
1132 scsi_sanitize_inquiry_string(&buffer[8], 8);
1133 scsi_sanitize_inquiry_string(&buffer[16], 16);
1134
1135 device->devtype = buffer[0] & 0x1f;
1136 memcpy(device->vendor, &buffer[8],
1137 sizeof(device->vendor));
1138 memcpy(device->model, &buffer[16],
1139 sizeof(device->model));
1140
1141 if (pqi_is_logical_device(device) && device->devtype == TYPE_DISK) {
1142 pqi_get_raid_level(ctrl_info, device);
1143 pqi_get_offload_status(ctrl_info, device);
1144 pqi_get_volume_status(ctrl_info, device);
1145 }
1146
1147out:
1148 kfree(buffer);
1149
1150 return rc;
1151}
1152
1153static void pqi_get_physical_disk_info(struct pqi_ctrl_info *ctrl_info,
1154 struct pqi_scsi_dev *device,
1155 struct bmic_identify_physical_device *id_phys)
1156{
1157 int rc;
1158
1159 memset(id_phys, 0, sizeof(*id_phys));
1160
1161 rc = pqi_identify_physical_device(ctrl_info, device,
1162 id_phys, sizeof(*id_phys));
1163 if (rc) {
1164 device->queue_depth = PQI_PHYSICAL_DISK_DEFAULT_MAX_QUEUE_DEPTH;
1165 return;
1166 }
1167
1168 device->queue_depth =
1169 get_unaligned_le16(&id_phys->current_queue_depth_limit);
1170 device->device_type = id_phys->device_type;
1171 device->active_path_index = id_phys->active_path_number;
1172 device->path_map = id_phys->redundant_path_present_map;
1173 memcpy(&device->box,
1174 &id_phys->alternate_paths_phys_box_on_port,
1175 sizeof(device->box));
1176 memcpy(&device->phys_connector,
1177 &id_phys->alternate_paths_phys_connector,
1178 sizeof(device->phys_connector));
1179 device->bay = id_phys->phys_bay_in_box;
1180}
1181
1182static void pqi_show_volume_status(struct pqi_ctrl_info *ctrl_info,
1183 struct pqi_scsi_dev *device)
1184{
1185 char *status;
1186 static const char unknown_state_str[] =
1187 "Volume is in an unknown state (%u)";
1188 char unknown_state_buffer[sizeof(unknown_state_str) + 10];
1189
1190 switch (device->volume_status) {
1191 case CISS_LV_OK:
1192 status = "Volume online";
1193 break;
1194 case CISS_LV_FAILED:
1195 status = "Volume failed";
1196 break;
1197 case CISS_LV_NOT_CONFIGURED:
1198 status = "Volume not configured";
1199 break;
1200 case CISS_LV_DEGRADED:
1201 status = "Volume degraded";
1202 break;
1203 case CISS_LV_READY_FOR_RECOVERY:
1204 status = "Volume ready for recovery operation";
1205 break;
1206 case CISS_LV_UNDERGOING_RECOVERY:
1207 status = "Volume undergoing recovery";
1208 break;
1209 case CISS_LV_WRONG_PHYSICAL_DRIVE_REPLACED:
1210 status = "Wrong physical drive was replaced";
1211 break;
1212 case CISS_LV_PHYSICAL_DRIVE_CONNECTION_PROBLEM:
1213 status = "A physical drive not properly connected";
1214 break;
1215 case CISS_LV_HARDWARE_OVERHEATING:
1216 status = "Hardware is overheating";
1217 break;
1218 case CISS_LV_HARDWARE_HAS_OVERHEATED:
1219 status = "Hardware has overheated";
1220 break;
1221 case CISS_LV_UNDERGOING_EXPANSION:
1222 status = "Volume undergoing expansion";
1223 break;
1224 case CISS_LV_NOT_AVAILABLE:
1225 status = "Volume waiting for transforming volume";
1226 break;
1227 case CISS_LV_QUEUED_FOR_EXPANSION:
1228 status = "Volume queued for expansion";
1229 break;
1230 case CISS_LV_DISABLED_SCSI_ID_CONFLICT:
1231 status = "Volume disabled due to SCSI ID conflict";
1232 break;
1233 case CISS_LV_EJECTED:
1234 status = "Volume has been ejected";
1235 break;
1236 case CISS_LV_UNDERGOING_ERASE:
1237 status = "Volume undergoing background erase";
1238 break;
1239 case CISS_LV_READY_FOR_PREDICTIVE_SPARE_REBUILD:
1240 status = "Volume ready for predictive spare rebuild";
1241 break;
1242 case CISS_LV_UNDERGOING_RPI:
1243 status = "Volume undergoing rapid parity initialization";
1244 break;
1245 case CISS_LV_PENDING_RPI:
1246 status = "Volume queued for rapid parity initialization";
1247 break;
1248 case CISS_LV_ENCRYPTED_NO_KEY:
1249 status = "Encrypted volume inaccessible - key not present";
1250 break;
1251 case CISS_LV_UNDERGOING_ENCRYPTION:
1252 status = "Volume undergoing encryption process";
1253 break;
1254 case CISS_LV_UNDERGOING_ENCRYPTION_REKEYING:
1255 status = "Volume undergoing encryption re-keying process";
1256 break;
1257 case CISS_LV_ENCRYPTED_IN_NON_ENCRYPTED_CONTROLLER:
1258 status =
1259 "Encrypted volume inaccessible - disabled on ctrl";
1260 break;
1261 case CISS_LV_PENDING_ENCRYPTION:
1262 status = "Volume pending migration to encrypted state";
1263 break;
1264 case CISS_LV_PENDING_ENCRYPTION_REKEYING:
1265 status = "Volume pending encryption rekeying";
1266 break;
1267 case CISS_LV_NOT_SUPPORTED:
1268 status = "Volume not supported on this controller";
1269 break;
1270 case CISS_LV_STATUS_UNAVAILABLE:
1271 status = "Volume status not available";
1272 break;
1273 default:
1274 snprintf(unknown_state_buffer, sizeof(unknown_state_buffer),
1275 unknown_state_str, device->volume_status);
1276 status = unknown_state_buffer;
1277 break;
1278 }
1279
1280 dev_info(&ctrl_info->pci_dev->dev,
1281 "scsi %d:%d:%d:%d %s\n",
1282 ctrl_info->scsi_host->host_no,
1283 device->bus, device->target, device->lun, status);
1284}
1285
1286static struct pqi_scsi_dev *pqi_find_disk_by_aio_handle(
1287 struct pqi_ctrl_info *ctrl_info, u32 aio_handle)
1288{
1289 struct pqi_scsi_dev *device;
1290
1291 list_for_each_entry(device, &ctrl_info->scsi_device_list,
1292 scsi_device_list_entry) {
1293 if (device->devtype != TYPE_DISK && device->devtype != TYPE_ZBC)
1294 continue;
1295 if (pqi_is_logical_device(device))
1296 continue;
1297 if (device->aio_handle == aio_handle)
1298 return device;
1299 }
1300
1301 return NULL;
1302}
1303
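/*
 * Derive a logical drive's queue depth by summing the queue depths of the
 * physical disks referenced by its RAID map.  If any member disk cannot be
 * found, fall back to the default depth and disable AIO offload for the
 * volume.
 */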
1304static void pqi_update_logical_drive_queue_depth(
1305 struct pqi_ctrl_info *ctrl_info, struct pqi_scsi_dev *logical_drive)
1306{
1307 unsigned int i;
1308 struct raid_map *raid_map;
1309 struct raid_map_disk_data *disk_data;
1310 struct pqi_scsi_dev *phys_disk;
1311 unsigned int num_phys_disks;
1312 unsigned int num_raid_map_entries;
1313 unsigned int queue_depth;
1314
1315 logical_drive->queue_depth = PQI_LOGICAL_DRIVE_DEFAULT_MAX_QUEUE_DEPTH;
1316
1317 raid_map = logical_drive->raid_map;
1318 if (!raid_map)
1319 return;
1320
1321 disk_data = raid_map->disk_data;
1322 num_phys_disks = get_unaligned_le16(&raid_map->layout_map_count) *
1323 (get_unaligned_le16(&raid_map->data_disks_per_row) +
1324 get_unaligned_le16(&raid_map->metadata_disks_per_row));
1325 num_raid_map_entries = num_phys_disks *
1326 get_unaligned_le16(&raid_map->row_cnt);
1327
1328 queue_depth = 0;
1329 for (i = 0; i < num_raid_map_entries; i++) {
1330 phys_disk = pqi_find_disk_by_aio_handle(ctrl_info,
1331 disk_data[i].aio_handle);
1332
1333 if (!phys_disk) {
1334 dev_warn(&ctrl_info->pci_dev->dev,
1335 "failed to find physical disk for logical drive %016llx\n",
1336 get_unaligned_be64(logical_drive->scsi3addr));
1337 logical_drive->offload_enabled = false;
1338 logical_drive->offload_enabled_pending = false;
1339 kfree(raid_map);
1340 logical_drive->raid_map = NULL;
1341 return;
1342 }
1343
1344 queue_depth += phys_disk->queue_depth;
1345 }
1346
1347 logical_drive->queue_depth = queue_depth;
1348}
1349
1350static void pqi_update_all_logical_drive_queue_depths(
1351 struct pqi_ctrl_info *ctrl_info)
1352{
1353 struct pqi_scsi_dev *device;
1354
1355 list_for_each_entry(device, &ctrl_info->scsi_device_list,
1356 scsi_device_list_entry) {
1357 if (device->devtype != TYPE_DISK && device->devtype != TYPE_ZBC)
1358 continue;
1359 if (!pqi_is_logical_device(device))
1360 continue;
1361 pqi_update_logical_drive_queue_depth(ctrl_info, device);
1362 }
1363}
1364
1365static void pqi_rescan_worker(struct work_struct *work)
1366{
1367 struct pqi_ctrl_info *ctrl_info;
1368
1369 ctrl_info = container_of(to_delayed_work(work), struct pqi_ctrl_info,
1370 rescan_work);
1371
1372 pqi_scan_scsi_devices(ctrl_info);
1373}
1374
1375static int pqi_add_device(struct pqi_ctrl_info *ctrl_info,
1376 struct pqi_scsi_dev *device)
1377{
1378 int rc;
1379
1380 if (pqi_is_logical_device(device))
1381 rc = scsi_add_device(ctrl_info->scsi_host, device->bus,
1382 device->target, device->lun);
1383 else
1384 rc = pqi_add_sas_device(ctrl_info->sas_host, device);
1385
1386 return rc;
1387}
1388
1389static inline void pqi_remove_device(struct pqi_ctrl_info *ctrl_info,
1390 struct pqi_scsi_dev *device)
1391{
1392 if (pqi_is_logical_device(device))
1393 scsi_remove_device(device->sdev);
1394 else
1395 pqi_remove_sas_device(device);
1396}
1397
1398/* Assumes the SCSI device list lock is held. */
1399
1400static struct pqi_scsi_dev *pqi_find_scsi_dev(struct pqi_ctrl_info *ctrl_info,
1401 int bus, int target, int lun)
1402{
1403 struct pqi_scsi_dev *device;
1404
1405 list_for_each_entry(device, &ctrl_info->scsi_device_list,
1406 scsi_device_list_entry)
1407 if (device->bus == bus && device->target == target &&
1408 device->lun == lun)
1409 return device;
1410
1411 return NULL;
1412}
1413
1414static inline bool pqi_device_equal(struct pqi_scsi_dev *dev1,
1415 struct pqi_scsi_dev *dev2)
1416{
1417 if (dev1->is_physical_device != dev2->is_physical_device)
1418 return false;
1419
1420 if (dev1->is_physical_device)
1421 return dev1->wwid == dev2->wwid;
1422
1423 return memcmp(dev1->volume_id, dev2->volume_id,
1424 sizeof(dev1->volume_id)) == 0;
1425}
1426
1427enum pqi_find_result {
1428 DEVICE_NOT_FOUND,
1429 DEVICE_CHANGED,
1430 DEVICE_SAME,
1431};
1432
1433static enum pqi_find_result pqi_scsi_find_entry(struct pqi_ctrl_info *ctrl_info,
1434 struct pqi_scsi_dev *device_to_find,
1435 struct pqi_scsi_dev **matching_device)
1436{
1437 struct pqi_scsi_dev *device;
1438
1439 list_for_each_entry(device, &ctrl_info->scsi_device_list,
1440 scsi_device_list_entry) {
1441 if (pqi_scsi3addr_equal(device_to_find->scsi3addr,
1442 device->scsi3addr)) {
1443 *matching_device = device;
1444 if (pqi_device_equal(device_to_find, device)) {
1445 if (device_to_find->volume_offline)
1446 return DEVICE_CHANGED;
1447 return DEVICE_SAME;
1448 }
1449 return DEVICE_CHANGED;
1450 }
1451 }
1452
1453 return DEVICE_NOT_FOUND;
1454}
1455
1456static void pqi_dev_info(struct pqi_ctrl_info *ctrl_info,
1457 char *action, struct pqi_scsi_dev *device)
1458{
1459 dev_info(&ctrl_info->pci_dev->dev,
1460 "%s scsi %d:%d:%d:%d: %s %.8s %.16s %-12s SSDSmartPathCap%c En%c Exp%c qd=%d\n",
1461 action,
1462 ctrl_info->scsi_host->host_no,
1463 device->bus,
1464 device->target,
1465 device->lun,
1466 scsi_device_type(device->devtype),
1467 device->vendor,
1468 device->model,
1469 pqi_raid_level_to_string(device->raid_level),
1470 device->offload_configured ? '+' : '-',
1471 device->offload_enabled_pending ? '+' : '-',
1472 device->expose_device ? '+' : '-',
1473 device->queue_depth);
1474}
1475
1476/* Assumes the SCSI device list lock is held. */
1477
1478static void pqi_scsi_update_device(struct pqi_scsi_dev *existing_device,
1479 struct pqi_scsi_dev *new_device)
1480{
1481 existing_device->devtype = new_device->devtype;
1482 existing_device->device_type = new_device->device_type;
1483 existing_device->bus = new_device->bus;
1484 if (new_device->target_lun_valid) {
1485 existing_device->target = new_device->target;
1486 existing_device->lun = new_device->lun;
1487 existing_device->target_lun_valid = true;
1488 }
1489
1490 /* By definition, the scsi3addr and wwid fields are already the same. */
1491
1492 existing_device->is_physical_device = new_device->is_physical_device;
1493 existing_device->expose_device = new_device->expose_device;
1494 existing_device->no_uld_attach = new_device->no_uld_attach;
1495 existing_device->aio_enabled = new_device->aio_enabled;
1496 memcpy(existing_device->vendor, new_device->vendor,
1497 sizeof(existing_device->vendor));
1498 memcpy(existing_device->model, new_device->model,
1499 sizeof(existing_device->model));
1500 existing_device->sas_address = new_device->sas_address;
1501 existing_device->raid_level = new_device->raid_level;
1502 existing_device->queue_depth = new_device->queue_depth;
1503 existing_device->aio_handle = new_device->aio_handle;
1504 existing_device->volume_status = new_device->volume_status;
1505 existing_device->active_path_index = new_device->active_path_index;
1506 existing_device->path_map = new_device->path_map;
1507 existing_device->bay = new_device->bay;
1508 memcpy(existing_device->box, new_device->box,
1509 sizeof(existing_device->box));
1510 memcpy(existing_device->phys_connector, new_device->phys_connector,
1511 sizeof(existing_device->phys_connector));
1512 existing_device->offload_configured = new_device->offload_configured;
1513 existing_device->offload_enabled = false;
1514 existing_device->offload_enabled_pending =
1515 new_device->offload_enabled_pending;
1516 existing_device->offload_to_mirror = 0;
1517 kfree(existing_device->raid_map);
1518 existing_device->raid_map = new_device->raid_map;
1519
1520 /* To prevent this from being freed later. */
1521 new_device->raid_map = NULL;
1522}
1523
1524static inline void pqi_free_device(struct pqi_scsi_dev *device)
1525{
1526 if (device) {
1527 kfree(device->raid_map);
1528 kfree(device);
1529 }
1530}
1531
1532/*
1533 * Called when exposing a new device to the OS fails in order to re-adjust
1534 * our internal SCSI device list to match the SCSI ML's view.
1535 */
1536
1537static inline void pqi_fixup_botched_add(struct pqi_ctrl_info *ctrl_info,
1538 struct pqi_scsi_dev *device)
1539{
1540 unsigned long flags;
1541
1542 spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);
1543 list_del(&device->scsi_device_list_entry);
1544 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
1545
1546 /* Allow the device structure to be freed later. */
1547 device->keep_device = false;
1548}
1549
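/*
 * Reconcile the freshly discovered device list with the driver's internal
 * list.  With the device list spinlock held, every existing device is
 * provisionally marked gone, each new device is classified as
 * SAME/CHANGED/NOT_FOUND, and vanished or newly found devices are moved to
 * local delete/add lists.  The actual SCSI midlayer add/remove calls and
 * queue depth updates happen only after the lock is dropped.
 */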
1550static void pqi_update_device_list(struct pqi_ctrl_info *ctrl_info,
1551 struct pqi_scsi_dev *new_device_list[], unsigned int num_new_devices)
1552{
1553 int rc;
1554 unsigned int i;
1555 unsigned long flags;
1556 enum pqi_find_result find_result;
1557 struct pqi_scsi_dev *device;
1558 struct pqi_scsi_dev *next;
1559 struct pqi_scsi_dev *matching_device;
1560 struct list_head add_list;
1561 struct list_head delete_list;
1562
1563 INIT_LIST_HEAD(&add_list);
1564 INIT_LIST_HEAD(&delete_list);
1565
1566 /*
1567 * The idea here is to do as little work as possible while holding the
1568 * spinlock. That's why we go to great pains to defer anything other
1569 * than updating the internal device list until after we release the
1570 * spinlock.
1571 */
1572
1573 spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);
1574
1575 /* Assume that all devices in the existing list have gone away. */
1576 list_for_each_entry(device, &ctrl_info->scsi_device_list,
1577 scsi_device_list_entry)
1578 device->device_gone = true;
1579
1580 for (i = 0; i < num_new_devices; i++) {
1581 device = new_device_list[i];
1582
1583 find_result = pqi_scsi_find_entry(ctrl_info, device,
1584 &matching_device);
1585
1586 switch (find_result) {
1587 case DEVICE_SAME:
1588 /*
1589 * The newly found device is already in the existing
1590 * device list.
1591 */
1592 device->new_device = false;
1593 matching_device->device_gone = false;
1594 pqi_scsi_update_device(matching_device, device);
1595 break;
1596 case DEVICE_NOT_FOUND:
1597 /*
1598 * The newly found device is NOT in the existing device
1599 * list.
1600 */
1601 device->new_device = true;
1602 break;
1603 case DEVICE_CHANGED:
1604 /*
1605 * The original device has gone away and we need to add
1606 * the new device.
1607 */
1608 device->new_device = true;
1609 break;
1610 default:
1611 WARN_ON(find_result);
1612 break;
1613 }
1614 }
1615
1616 /* Process all devices that have gone away. */
1617 list_for_each_entry_safe(device, next, &ctrl_info->scsi_device_list,
1618 scsi_device_list_entry) {
1619 if (device->device_gone) {
1620 list_del(&device->scsi_device_list_entry);
1621 list_add_tail(&device->delete_list_entry, &delete_list);
1622 }
1623 }
1624
1625 /* Process all new devices. */
1626 for (i = 0; i < num_new_devices; i++) {
1627 device = new_device_list[i];
1628 if (!device->new_device)
1629 continue;
1630 if (device->volume_offline)
1631 continue;
1632 list_add_tail(&device->scsi_device_list_entry,
1633 &ctrl_info->scsi_device_list);
1634 list_add_tail(&device->add_list_entry, &add_list);
1635 /* To prevent this device structure from being freed later. */
1636 device->keep_device = true;
1637 }
1638
1639 pqi_update_all_logical_drive_queue_depths(ctrl_info);
1640
1641 list_for_each_entry(device, &ctrl_info->scsi_device_list,
1642 scsi_device_list_entry)
1643 device->offload_enabled =
1644 device->offload_enabled_pending;
1645
1646 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
1647
1648 /* Remove all devices that have gone away. */
1649 list_for_each_entry_safe(device, next, &delete_list,
1650 delete_list_entry) {
1651 if (device->sdev)
1652 pqi_remove_device(ctrl_info, device);
1653 if (device->volume_offline) {
1654 pqi_dev_info(ctrl_info, "offline", device);
1655 pqi_show_volume_status(ctrl_info, device);
1656 } else {
1657 pqi_dev_info(ctrl_info, "removed", device);
1658 }
1659 list_del(&device->delete_list_entry);
1660 pqi_free_device(device);
1661 }
1662
1663 /*
1664 * Notify the SCSI ML if the queue depth of any existing device has
1665 * changed.
1666 */
1667 list_for_each_entry(device, &ctrl_info->scsi_device_list,
1668 scsi_device_list_entry) {
1669 if (device->sdev && device->queue_depth !=
1670 device->advertised_queue_depth) {
1671 device->advertised_queue_depth = device->queue_depth;
1672 scsi_change_queue_depth(device->sdev,
1673 device->advertised_queue_depth);
1674 }
1675 }
1676
1677 /* Expose any new devices. */
1678 list_for_each_entry_safe(device, next, &add_list, add_list_entry) {
1679 if (device->expose_device && !device->sdev) {
1680 rc = pqi_add_device(ctrl_info, device);
1681 if (rc) {
1682 dev_warn(&ctrl_info->pci_dev->dev,
1683 "scsi %d:%d:%d:%d addition failed, device not added\n",
1684 ctrl_info->scsi_host->host_no,
1685 device->bus, device->target,
1686 device->lun);
1687 pqi_fixup_botched_add(ctrl_info, device);
1688 continue;
1689 }
1690 }
1691 pqi_dev_info(ctrl_info, "added", device);
1692 }
1693}
1694
1695static bool pqi_is_supported_device(struct pqi_scsi_dev *device)
1696{
1697 bool is_supported = false;
1698
1699 switch (device->devtype) {
1700 case TYPE_DISK:
1701 case TYPE_ZBC:
1702 case TYPE_TAPE:
1703 case TYPE_MEDIUM_CHANGER:
1704 case TYPE_ENCLOSURE:
1705 is_supported = true;
1706 break;
1707 case TYPE_RAID:
1708 /*
1709 * Only support the HBA controller itself as a RAID
1710 * controller. If it's a RAID controller other than
1711 * the HBA itself (an external RAID controller, MSA500
1712 * or similar), we don't support it.
1713 */
1714 if (pqi_is_hba_lunid(device->scsi3addr))
1715 is_supported = true;
1716 break;
1717 }
1718
1719 return is_supported;
1720}
1721
1722static inline bool pqi_skip_device(u8 *scsi3addr,
1723 struct report_phys_lun_extended_entry *phys_lun_ext_entry)
1724{
1725 u8 device_flags;
1726
1727 if (!MASKED_DEVICE(scsi3addr))
1728 return false;
1729
1730 /* The device is masked. */
1731
1732 device_flags = phys_lun_ext_entry->device_flags;
1733
1734 if (device_flags & REPORT_PHYS_LUN_DEV_FLAG_NON_DISK) {
1735 /*
1736 * It's a non-disk device. We ignore all devices of this type
1737 * when they're masked.
1738 */
1739 return true;
1740 }
1741
1742 return false;
1743}
1744
1745static inline bool pqi_expose_device(struct pqi_scsi_dev *device)
1746{
1747 /* Expose all devices except for physical devices that are masked. */
1748 if (device->is_physical_device && MASKED_DEVICE(device->scsi3addr))
1749 return false;
1750
1751 return true;
1752}
1753
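/*
 * Core of device discovery: fetch the extended physical and logical LUN
 * lists, allocate a pqi_scsi_dev for every reported entry, interrogate each
 * device (standard INQUIRY plus RAID level, offload and volume status for
 * logical volumes, BMIC identify for physical disks), and finally hand the
 * surviving entries to pqi_update_device_list().
 */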
1754static int pqi_update_scsi_devices(struct pqi_ctrl_info *ctrl_info)
1755{
1756 int i;
1757 int rc;
1758 struct list_head new_device_list_head;
1759 struct report_phys_lun_extended *physdev_list = NULL;
1760 struct report_log_lun_extended *logdev_list = NULL;
1761 struct report_phys_lun_extended_entry *phys_lun_ext_entry;
1762 struct report_log_lun_extended_entry *log_lun_ext_entry;
1763 struct bmic_identify_physical_device *id_phys = NULL;
1764 u32 num_physicals;
1765 u32 num_logicals;
1766 struct pqi_scsi_dev **new_device_list = NULL;
1767 struct pqi_scsi_dev *device;
1768 struct pqi_scsi_dev *next;
1769 unsigned int num_new_devices;
1770 unsigned int num_valid_devices;
1771 bool is_physical_device;
1772 u8 *scsi3addr;
1773 static char *out_of_memory_msg =
1774 "out of memory, device discovery stopped";
1775
1776 INIT_LIST_HEAD(&new_device_list_head);
1777
1778 rc = pqi_get_device_lists(ctrl_info, &physdev_list, &logdev_list);
1779 if (rc)
1780 goto out;
1781
1782 if (physdev_list)
1783 num_physicals =
1784 get_unaligned_be32(&physdev_list->header.list_length)
1785 / sizeof(physdev_list->lun_entries[0]);
1786 else
1787 num_physicals = 0;
1788
1789 if (logdev_list)
1790 num_logicals =
1791 get_unaligned_be32(&logdev_list->header.list_length)
1792 / sizeof(logdev_list->lun_entries[0]);
1793 else
1794 num_logicals = 0;
1795
1796 if (num_physicals) {
1797 /*
1798 * We need this buffer for calls to pqi_get_physical_disk_info()
1799 * below. We allocate it here instead of inside
1800 * pqi_get_physical_disk_info() because it's a fairly large
1801 * buffer.
1802 */
1803 id_phys = kmalloc(sizeof(*id_phys), GFP_KERNEL);
1804 if (!id_phys) {
1805 dev_warn(&ctrl_info->pci_dev->dev, "%s\n",
1806 out_of_memory_msg);
1807 rc = -ENOMEM;
1808 goto out;
1809 }
1810 }
1811
1812 num_new_devices = num_physicals + num_logicals;
1813
1814 new_device_list = kmalloc(sizeof(*new_device_list) *
1815 num_new_devices, GFP_KERNEL);
1816 if (!new_device_list) {
1817 dev_warn(&ctrl_info->pci_dev->dev, "%s\n", out_of_memory_msg);
1818 rc = -ENOMEM;
1819 goto out;
1820 }
1821
1822 for (i = 0; i < num_new_devices; i++) {
1823 device = kzalloc(sizeof(*device), GFP_KERNEL);
1824 if (!device) {
1825 dev_warn(&ctrl_info->pci_dev->dev, "%s\n",
1826 out_of_memory_msg);
1827 rc = -ENOMEM;
1828 goto out;
1829 }
1830 list_add_tail(&device->new_device_list_entry,
1831 &new_device_list_head);
1832 }
1833
1834 device = NULL;
1835 num_valid_devices = 0;
1836
1837 for (i = 0; i < num_new_devices; i++) {
1838
1839 if (i < num_physicals) {
1840 is_physical_device = true;
1841 phys_lun_ext_entry = &physdev_list->lun_entries[i];
1842 log_lun_ext_entry = NULL;
1843 scsi3addr = phys_lun_ext_entry->lunid;
1844 } else {
1845 is_physical_device = false;
1846 phys_lun_ext_entry = NULL;
1847 log_lun_ext_entry =
1848 &logdev_list->lun_entries[i - num_physicals];
1849 scsi3addr = log_lun_ext_entry->lunid;
1850 }
1851
1852 if (is_physical_device &&
1853 pqi_skip_device(scsi3addr, phys_lun_ext_entry))
1854 continue;
1855
1856 if (device)
1857 device = list_next_entry(device, new_device_list_entry);
1858 else
1859 device = list_first_entry(&new_device_list_head,
1860 struct pqi_scsi_dev, new_device_list_entry);
1861
1862 memcpy(device->scsi3addr, scsi3addr, sizeof(device->scsi3addr));
1863 device->is_physical_device = is_physical_device;
1864 device->raid_level = SA_RAID_UNKNOWN;
1865
1866 /* Gather information about the device. */
1867 rc = pqi_get_device_info(ctrl_info, device);
1868 if (rc == -ENOMEM) {
1869 dev_warn(&ctrl_info->pci_dev->dev, "%s\n",
1870 out_of_memory_msg);
1871 goto out;
1872 }
1873 if (rc) {
1874 dev_warn(&ctrl_info->pci_dev->dev,
1875 "obtaining device info failed, skipping device %016llx\n",
1876 get_unaligned_be64(device->scsi3addr));
1877 rc = 0;
1878 continue;
1879 }
1880
1881 if (!pqi_is_supported_device(device))
1882 continue;
1883
1884 pqi_assign_bus_target_lun(device);
1885
1886 device->expose_device = pqi_expose_device(device);
1887
1888 if (device->is_physical_device) {
1889 device->wwid = phys_lun_ext_entry->wwid;
1890 if ((phys_lun_ext_entry->device_flags &
1891 REPORT_PHYS_LUN_DEV_FLAG_AIO_ENABLED) &&
1892 phys_lun_ext_entry->aio_handle)
1893 device->aio_enabled = true;
1894 } else {
1895 memcpy(device->volume_id, log_lun_ext_entry->volume_id,
1896 sizeof(device->volume_id));
1897 }
1898
1899 switch (device->devtype) {
1900 case TYPE_DISK:
1901 case TYPE_ZBC:
1902 case TYPE_ENCLOSURE:
1903 if (device->is_physical_device) {
1904 device->sas_address =
1905 get_unaligned_be64(&device->wwid);
1906 if (device->devtype == TYPE_DISK ||
1907 device->devtype == TYPE_ZBC) {
1908 device->aio_handle =
1909 phys_lun_ext_entry->aio_handle;
1910 pqi_get_physical_disk_info(ctrl_info,
1911 device, id_phys);
1912 }
1913 }
1914 break;
1915 }
1916
1917 new_device_list[num_valid_devices++] = device;
1918 }
1919
1920 pqi_update_device_list(ctrl_info, new_device_list, num_valid_devices);
1921
1922out:
1923 list_for_each_entry_safe(device, next, &new_device_list_head,
1924 new_device_list_entry) {
1925 if (device->keep_device)
1926 continue;
1927 list_del(&device->new_device_list_entry);
1928 pqi_free_device(device);
1929 }
1930
1931 kfree(new_device_list);
1932 kfree(physdev_list);
1933 kfree(logdev_list);
1934 kfree(id_phys);
1935
1936 return rc;
1937}
1938
1939static void pqi_remove_all_scsi_devices(struct pqi_ctrl_info *ctrl_info)
1940{
1941 unsigned long flags;
1942 struct pqi_scsi_dev *device;
1943
1944 while (1) {
1945 spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);
1946
1947 device = list_first_entry_or_null(&ctrl_info->scsi_device_list,
1948 struct pqi_scsi_dev, scsi_device_list_entry);
1949 if (device)
1950 list_del(&device->scsi_device_list_entry);
1951
1952 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock,
1953 flags);
1954
1955 if (!device)
1956 break;
1957
1958 if (device->sdev)
1959 pqi_remove_device(ctrl_info, device);
1960 pqi_free_device(device);
1961 }
1962}
1963
1964static int pqi_scan_scsi_devices(struct pqi_ctrl_info *ctrl_info)
1965{
1966 int rc;
1967
1968 if (pqi_ctrl_offline(ctrl_info))
1969 return -ENXIO;
1970
1971 mutex_lock(&ctrl_info->scan_mutex);
1972
1973 rc = pqi_update_scsi_devices(ctrl_info);
1974 if (rc)
1975 pqi_schedule_rescan_worker(ctrl_info);
1976
1977 mutex_unlock(&ctrl_info->scan_mutex);
1978
1979 return rc;
1980}
1981
1982static void pqi_scan_start(struct Scsi_Host *shost)
1983{
1984 pqi_scan_scsi_devices(shost_to_hba(shost));
1985}
1986
1987/* Returns TRUE if scan is finished. */
1988
1989static int pqi_scan_finished(struct Scsi_Host *shost,
1990 unsigned long elapsed_time)
1991{
1992 struct pqi_ctrl_info *ctrl_info;
1993
1994 ctrl_info = shost_priv(shost);
1995
1996 return !mutex_is_locked(&ctrl_info->scan_mutex);
1997}
1998
1999static void pqi_wait_until_scan_finished(struct pqi_ctrl_info *ctrl_info)
2000{
2001 mutex_lock(&ctrl_info->scan_mutex);
2002 mutex_unlock(&ctrl_info->scan_mutex);
2003}
2004
2005static void pqi_wait_until_lun_reset_finished(struct pqi_ctrl_info *ctrl_info)
2006{
2007 mutex_lock(&ctrl_info->lun_reset_mutex);
2008 mutex_unlock(&ctrl_info->lun_reset_mutex);
2009}
2010
2011static inline void pqi_set_encryption_info(
2012 struct pqi_encryption_info *encryption_info, struct raid_map *raid_map,
2013 u64 first_block)
2014{
2015 u32 volume_blk_size;
2016
2017 /*
2018 * Set the encryption tweak values based on logical block address.
2019 * If the block size is 512, the tweak value is equal to the LBA.
2020 * For other block sizes, tweak value is (LBA * block size) / 512.
2021 */
2022 volume_blk_size = get_unaligned_le32(&raid_map->volume_blk_size);
2023 if (volume_blk_size != 512)
2024 first_block = (first_block * volume_blk_size) / 512;
2025
2026 encryption_info->data_encryption_key_index =
2027 get_unaligned_le16(&raid_map->data_encryption_key_index);
2028 encryption_info->encrypt_tweak_lower = lower_32_bits(first_block);
2029 encryption_info->encrypt_tweak_upper = upper_32_bits(first_block);
2030}
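For illustration, the same tweak scaling can be exercised in a standalone userspace sketch; the function name and the example LBAs/block sizes below are invented, and the real driver additionally splits the result into lower/upper 32-bit halves as shown above.

#include <stdint.h>
#include <stdio.h>

/* Same scaling as above: express the starting LBA in 512-byte units. */
static uint64_t encryption_tweak(uint64_t first_block, uint32_t volume_blk_size)
{
	if (volume_blk_size != 512)
		first_block = (first_block * volume_blk_size) / 512;
	return first_block;
}

int main(void)
{
	/* 512-byte blocks: the tweak is the LBA itself. */
	printf("%llu\n", (unsigned long long)encryption_tweak(10, 512));	/* 10 */
	/* 4096-byte blocks: LBA 10 becomes 10 * 4096 / 512 = 80. */
	printf("%llu\n", (unsigned long long)encryption_tweak(10, 4096));	/* 80 */
	return 0;
}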
2031
2032/*
2033 * Attempt to perform offload RAID mapping for a logical volume I/O.
2034 */
2035
2036#define PQI_RAID_BYPASS_INELIGIBLE 1
2037
2038static int pqi_raid_bypass_submit_scsi_cmd(struct pqi_ctrl_info *ctrl_info,
2039 struct pqi_scsi_dev *device, struct scsi_cmnd *scmd,
2040 struct pqi_queue_group *queue_group)
2041{
2042 struct raid_map *raid_map;
2043 bool is_write = false;
2044 u32 map_index;
2045 u64 first_block;
2046 u64 last_block;
2047 u32 block_cnt;
2048 u32 blocks_per_row;
2049 u64 first_row;
2050 u64 last_row;
2051 u32 first_row_offset;
2052 u32 last_row_offset;
2053 u32 first_column;
2054 u32 last_column;
2055 u64 r0_first_row;
2056 u64 r0_last_row;
2057 u32 r5or6_blocks_per_row;
2058 u64 r5or6_first_row;
2059 u64 r5or6_last_row;
2060 u32 r5or6_first_row_offset;
2061 u32 r5or6_last_row_offset;
2062 u32 r5or6_first_column;
2063 u32 r5or6_last_column;
2064 u16 data_disks_per_row;
2065 u32 total_disks_per_row;
2066 u16 layout_map_count;
2067 u32 stripesize;
2068 u16 strip_size;
2069 u32 first_group;
2070 u32 last_group;
2071 u32 current_group;
2072 u32 map_row;
2073 u32 aio_handle;
2074 u64 disk_block;
2075 u32 disk_block_cnt;
2076 u8 cdb[16];
2077 u8 cdb_length;
2078 int offload_to_mirror;
2079 struct pqi_encryption_info *encryption_info_ptr;
2080 struct pqi_encryption_info encryption_info;
2081#if BITS_PER_LONG == 32
2082 u64 tmpdiv;
2083#endif
2084
2085 /* Check for valid opcode, get LBA and block count. */
2086 switch (scmd->cmnd[0]) {
2087 case WRITE_6:
2088 is_write = true;
2089 /* fall through */
2090 case READ_6:
2091 first_block = (u64)(((scmd->cmnd[1] & 0x1f) << 16) |
2092 (scmd->cmnd[2] << 8) | scmd->cmnd[3]);
2093 block_cnt = (u32)scmd->cmnd[4];
2094 if (block_cnt == 0)
2095 block_cnt = 256;
2096 break;
2097 case WRITE_10:
2098 is_write = true;
2099 /* fall through */
2100 case READ_10:
2101 first_block = (u64)get_unaligned_be32(&scmd->cmnd[2]);
2102 block_cnt = (u32)get_unaligned_be16(&scmd->cmnd[7]);
2103 break;
2104 case WRITE_12:
2105 is_write = true;
2106 /* fall through */
2107 case READ_12:
2108 first_block = (u64)get_unaligned_be32(&scmd->cmnd[2]);
2109 block_cnt = get_unaligned_be32(&scmd->cmnd[6]);
2110 break;
2111 case WRITE_16:
2112 is_write = true;
2113 /* fall through */
2114 case READ_16:
2115 first_block = get_unaligned_be64(&scmd->cmnd[2]);
2116 block_cnt = get_unaligned_be32(&scmd->cmnd[10]);
2117 break;
2118 default:
2119 /* Process via normal I/O path. */
2120 return PQI_RAID_BYPASS_INELIGIBLE;
2121 }
2122
2123 /* Check for write to non-RAID-0. */
2124 if (is_write && device->raid_level != SA_RAID_0)
2125 return PQI_RAID_BYPASS_INELIGIBLE;
2126
2127 if (unlikely(block_cnt == 0))
2128 return PQI_RAID_BYPASS_INELIGIBLE;
2129
2130 last_block = first_block + block_cnt - 1;
2131 raid_map = device->raid_map;
2132
2133 /* Check for invalid block or wraparound. */
2134 if (last_block >= get_unaligned_le64(&raid_map->volume_blk_cnt) ||
2135 last_block < first_block)
2136 return PQI_RAID_BYPASS_INELIGIBLE;
2137
2138 data_disks_per_row = get_unaligned_le16(&raid_map->data_disks_per_row);
2139 strip_size = get_unaligned_le16(&raid_map->strip_size);
2140 layout_map_count = get_unaligned_le16(&raid_map->layout_map_count);
2141
2142 /* Calculate stripe information for the request. */
2143 blocks_per_row = data_disks_per_row * strip_size;
2144#if BITS_PER_LONG == 32
2145 tmpdiv = first_block;
2146 do_div(tmpdiv, blocks_per_row);
2147 first_row = tmpdiv;
2148 tmpdiv = last_block;
2149 do_div(tmpdiv, blocks_per_row);
2150 last_row = tmpdiv;
2151 first_row_offset = (u32)(first_block - (first_row * blocks_per_row));
2152 last_row_offset = (u32)(last_block - (last_row * blocks_per_row));
2153 tmpdiv = first_row_offset;
2154 do_div(tmpdiv, strip_size);
2155 first_column = tmpdiv;
2156 tmpdiv = last_row_offset;
2157 do_div(tmpdiv, strip_size);
2158 last_column = tmpdiv;
2159#else
2160 first_row = first_block / blocks_per_row;
2161 last_row = last_block / blocks_per_row;
2162 first_row_offset = (u32)(first_block - (first_row * blocks_per_row));
2163 last_row_offset = (u32)(last_block - (last_row * blocks_per_row));
2164 first_column = first_row_offset / strip_size;
2165 last_column = last_row_offset / strip_size;
2166#endif
2167
2168 /* If this isn't a single row/column request, hand it to the controller. */
2169 if (first_row != last_row || first_column != last_column)
2170 return PQI_RAID_BYPASS_INELIGIBLE;
2171
2172 /* Proceeding with driver mapping. */
2173 total_disks_per_row = data_disks_per_row +
2174 get_unaligned_le16(&raid_map->metadata_disks_per_row);
2175 map_row = ((u32)(first_row >> raid_map->parity_rotation_shift)) %
2176 get_unaligned_le16(&raid_map->row_cnt);
2177 map_index = (map_row * total_disks_per_row) + first_column;
2178
2179 /* RAID 1 */
2180 if (device->raid_level == SA_RAID_1) {
2181 if (device->offload_to_mirror)
2182 map_index += data_disks_per_row;
2183 device->offload_to_mirror = !device->offload_to_mirror;
2184 } else if (device->raid_level == SA_RAID_ADM) {
2185 /* RAID ADM */
2186 /*
2187 * Handles N-way mirrors (R1-ADM) and R10 with # of drives
2188 * divisible by 3.
2189 */
2190 offload_to_mirror = device->offload_to_mirror;
2191 if (offload_to_mirror == 0) {
2192 /* use physical disk in the first mirrored group. */
2193 map_index %= data_disks_per_row;
2194 } else {
2195 do {
2196 /*
2197 * Determine mirror group that map_index
2198 * indicates.
2199 */
2200 current_group = map_index / data_disks_per_row;
2201
2202 if (offload_to_mirror != current_group) {
2203 if (current_group <
2204 layout_map_count - 1) {
2205 /*
2206 * Select raid index from
2207 * next group.
2208 */
2209 map_index += data_disks_per_row;
2210 current_group++;
2211 } else {
2212 /*
2213 * Select raid index from first
2214 * group.
2215 */
2216 map_index %= data_disks_per_row;
2217 current_group = 0;
2218 }
2219 }
2220 } while (offload_to_mirror != current_group);
2221 }
2222
2223 /* Set mirror group to use next time. */
2224 offload_to_mirror =
2225 (offload_to_mirror >= layout_map_count - 1) ?
2226 0 : offload_to_mirror + 1;
2227 WARN_ON(offload_to_mirror >= layout_map_count);
2228 device->offload_to_mirror = offload_to_mirror;
2229 /*
2230 * Avoid direct use of device->offload_to_mirror within this
2231 * function since multiple threads might simultaneously
2232 * increment it beyond the range of device->layout_map_count - 1.
2233 */
2234 } else if ((device->raid_level == SA_RAID_5 ||
2235 device->raid_level == SA_RAID_6) && layout_map_count > 1) {
2236 /* RAID 50/60 */
2237 /* Verify first and last block are in same RAID group */
2238 r5or6_blocks_per_row = strip_size * data_disks_per_row;
2239 stripesize = r5or6_blocks_per_row * layout_map_count;
2240#if BITS_PER_LONG == 32
2241 tmpdiv = first_block;
2242 first_group = do_div(tmpdiv, stripesize);
2243 tmpdiv = first_group;
2244 do_div(tmpdiv, r5or6_blocks_per_row);
2245 first_group = tmpdiv;
2246 tmpdiv = last_block;
2247 last_group = do_div(tmpdiv, stripesize);
2248 tmpdiv = last_group;
2249 do_div(tmpdiv, r5or6_blocks_per_row);
2250 last_group = tmpdiv;
2251#else
2252 first_group = (first_block % stripesize) / r5or6_blocks_per_row;
2253 last_group = (last_block % stripesize) / r5or6_blocks_per_row;
2254#endif
2255 if (first_group != last_group)
2256 return PQI_RAID_BYPASS_INELIGIBLE;
2257
2258 /* Verify request is in a single row of RAID 5/6 */
2259#if BITS_PER_LONG == 32
2260 tmpdiv = first_block;
2261 do_div(tmpdiv, stripesize);
2262 first_row = r5or6_first_row = r0_first_row = tmpdiv;
2263 tmpdiv = last_block;
2264 do_div(tmpdiv, stripesize);
2265 r5or6_last_row = r0_last_row = tmpdiv;
2266#else
2267 first_row = r5or6_first_row = r0_first_row =
2268 first_block / stripesize;
2269 r5or6_last_row = r0_last_row = last_block / stripesize;
2270#endif
2271 if (r5or6_first_row != r5or6_last_row)
2272 return PQI_RAID_BYPASS_INELIGIBLE;
2273
2274 /* Verify request is in a single column */
2275#if BITS_PER_LONG == 32
2276 tmpdiv = first_block;
2277 first_row_offset = do_div(tmpdiv, stripesize);
2278 tmpdiv = first_row_offset;
2279 first_row_offset = (u32)do_div(tmpdiv, r5or6_blocks_per_row);
2280 r5or6_first_row_offset = first_row_offset;
2281 tmpdiv = last_block;
2282 r5or6_last_row_offset = do_div(tmpdiv, stripesize);
2283 tmpdiv = r5or6_last_row_offset;
2284 r5or6_last_row_offset = do_div(tmpdiv, r5or6_blocks_per_row);
2285 tmpdiv = r5or6_first_row_offset;
2286 do_div(tmpdiv, strip_size);
2287 first_column = r5or6_first_column = tmpdiv;
2288 tmpdiv = r5or6_last_row_offset;
2289 do_div(tmpdiv, strip_size);
2290 r5or6_last_column = tmpdiv;
2291#else
2292 first_row_offset = r5or6_first_row_offset =
2293 (u32)((first_block % stripesize) %
2294 r5or6_blocks_per_row);
2295
2296 r5or6_last_row_offset =
2297 (u32)((last_block % stripesize) %
2298 r5or6_blocks_per_row);
2299
2300 first_column = r5or6_first_row_offset / strip_size;
2301 r5or6_first_column = first_column;
2302 r5or6_last_column = r5or6_last_row_offset / strip_size;
2303#endif
2304 if (r5or6_first_column != r5or6_last_column)
2305 return PQI_RAID_BYPASS_INELIGIBLE;
2306
2307 /* Request is eligible */
2308 map_row =
2309 ((u32)(first_row >> raid_map->parity_rotation_shift)) %
2310 get_unaligned_le16(&raid_map->row_cnt);
2311
2312 map_index = (first_group *
2313 (get_unaligned_le16(&raid_map->row_cnt) *
2314 total_disks_per_row)) +
2315 (map_row * total_disks_per_row) + first_column;
2316 }
2317
2318 if (unlikely(map_index >= RAID_MAP_MAX_ENTRIES))
2319 return PQI_RAID_BYPASS_INELIGIBLE;
2320
2321 aio_handle = raid_map->disk_data[map_index].aio_handle;
2322 disk_block = get_unaligned_le64(&raid_map->disk_starting_blk) +
2323 first_row * strip_size +
2324 (first_row_offset - first_column * strip_size);
2325 disk_block_cnt = block_cnt;
2326
2327 /* Handle differing logical/physical block sizes. */
2328 if (raid_map->phys_blk_shift) {
2329 disk_block <<= raid_map->phys_blk_shift;
2330 disk_block_cnt <<= raid_map->phys_blk_shift;
2331 }
2332
2333 if (unlikely(disk_block_cnt > 0xffff))
2334 return PQI_RAID_BYPASS_INELIGIBLE;
2335
2336 /* Build the new CDB for the physical disk I/O. */
2337 if (disk_block > 0xffffffff) {
2338 cdb[0] = is_write ? WRITE_16 : READ_16;
2339 cdb[1] = 0;
2340 put_unaligned_be64(disk_block, &cdb[2]);
2341 put_unaligned_be32(disk_block_cnt, &cdb[10]);
2342 cdb[14] = 0;
2343 cdb[15] = 0;
2344 cdb_length = 16;
2345 } else {
2346 cdb[0] = is_write ? WRITE_10 : READ_10;
2347 cdb[1] = 0;
2348 put_unaligned_be32((u32)disk_block, &cdb[2]);
2349 cdb[6] = 0;
2350 put_unaligned_be16((u16)disk_block_cnt, &cdb[7]);
2351 cdb[9] = 0;
2352 cdb_length = 10;
2353 }
2354
2355 if (get_unaligned_le16(&raid_map->flags) &
2356 RAID_MAP_ENCRYPTION_ENABLED) {
2357 pqi_set_encryption_info(&encryption_info, raid_map,
2358 first_block);
2359 encryption_info_ptr = &encryption_info;
2360 } else {
2361 encryption_info_ptr = NULL;
2362 }
2363
2364 return pqi_aio_submit_io(ctrl_info, scmd, aio_handle,
2365 cdb, cdb_length, queue_group, encryption_info_ptr);
2366}
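As a rough worked example of the geometry math above, the following standalone sketch traces one small RAID-0 read through the row/offset/column calculation; the disk count, strip size, and LBA are invented, and the sketch ignores parity rotation, mirror selection, and the disk_starting_blk offset handled by the real code.

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint16_t data_disks_per_row = 4;	/* invented geometry */
	uint16_t strip_size = 128;		/* blocks per strip */
	uint64_t first_block = 1000;		/* request LBA */

	uint32_t blocks_per_row = data_disks_per_row * strip_size;	/* 512 */
	uint64_t first_row = first_block / blocks_per_row;		/* 1 */
	uint32_t first_row_offset =
		(uint32_t)(first_block - first_row * blocks_per_row);	/* 488 */
	uint32_t first_column = first_row_offset / strip_size;		/* 3 */
	/* Block number on the member disk, before disk_starting_blk is added. */
	uint64_t disk_block = first_row * strip_size +
		(first_row_offset - first_column * strip_size);		/* 232 */

	printf("row %llu, column %u, disk block %llu\n",
	       (unsigned long long)first_row, first_column,
	       (unsigned long long)disk_block);
	return 0;
}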
2367
2368#define PQI_STATUS_IDLE 0x0
2369
2370#define PQI_CREATE_ADMIN_QUEUE_PAIR 1
2371#define PQI_DELETE_ADMIN_QUEUE_PAIR 2
2372
2373#define PQI_DEVICE_STATE_POWER_ON_AND_RESET 0x0
2374#define PQI_DEVICE_STATE_STATUS_AVAILABLE 0x1
2375#define PQI_DEVICE_STATE_ALL_REGISTERS_READY 0x2
2376#define PQI_DEVICE_STATE_ADMIN_QUEUE_PAIR_READY 0x3
2377#define PQI_DEVICE_STATE_ERROR 0x4
2378
2379#define PQI_MODE_READY_TIMEOUT_SECS 30
2380#define PQI_MODE_READY_POLL_INTERVAL_MSECS 1
2381
2382static int pqi_wait_for_pqi_mode_ready(struct pqi_ctrl_info *ctrl_info)
2383{
2384 struct pqi_device_registers __iomem *pqi_registers;
2385 unsigned long timeout;
2386 u64 signature;
2387 u8 status;
2388
2389 pqi_registers = ctrl_info->pqi_registers;
2390 timeout = (PQI_MODE_READY_TIMEOUT_SECS * HZ) + jiffies;
2391
2392 while (1) {
2393 signature = readq(&pqi_registers->signature);
2394 if (memcmp(&signature, PQI_DEVICE_SIGNATURE,
2395 sizeof(signature)) == 0)
2396 break;
2397 if (time_after(jiffies, timeout)) {
2398 dev_err(&ctrl_info->pci_dev->dev,
2399 "timed out waiting for PQI signature\n");
2400 return -ETIMEDOUT;
2401 }
2402 msleep(PQI_MODE_READY_POLL_INTERVAL_MSECS);
2403 }
2404
2405 while (1) {
2406 status = readb(&pqi_registers->function_and_status_code);
2407 if (status == PQI_STATUS_IDLE)
2408 break;
2409 if (time_after(jiffies, timeout)) {
2410 dev_err(&ctrl_info->pci_dev->dev,
2411 "timed out waiting for PQI IDLE\n");
2412 return -ETIMEDOUT;
2413 }
2414 msleep(PQI_MODE_READY_POLL_INTERVAL_MSECS);
2415 }
2416
2417 while (1) {
2418 if (readl(&pqi_registers->device_status) ==
2419 PQI_DEVICE_STATE_ALL_REGISTERS_READY)
2420 break;
2421 if (time_after(jiffies, timeout)) {
2422 dev_err(&ctrl_info->pci_dev->dev,
2423 "timed out waiting for PQI all registers ready\n");
2424 return -ETIMEDOUT;
2425 }
2426 msleep(PQI_MODE_READY_POLL_INTERVAL_MSECS);
2427 }
2428
2429 return 0;
2430}
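pqi_wait_for_pqi_mode_ready() above is a bounded poll: read a register, stop on the expected value, sleep briefly, and give up after a deadline. A minimal userspace analogue of that pattern is sketched below; check_ready() is a placeholder standing in for the register read, and CLOCK_MONOTONIC/nanosleep replace jiffies/msleep.

#define _POSIX_C_SOURCE 199309L
#include <stdbool.h>
#include <stdio.h>
#include <time.h>

/* Placeholder for "read the status register"; pretends the device becomes
 * ready on the fourth poll. */
static bool check_ready(void)
{
	static int polls;
	return ++polls > 3;
}

static int wait_until_ready(unsigned int timeout_secs)
{
	struct timespec start, now;
	struct timespec delay = { .tv_sec = 0, .tv_nsec = 1000000 };	/* ~msleep(1) */

	clock_gettime(CLOCK_MONOTONIC, &start);
	while (!check_ready()) {
		clock_gettime(CLOCK_MONOTONIC, &now);
		if (now.tv_sec - start.tv_sec >= (time_t)timeout_secs)
			return -1;	/* give up, like -ETIMEDOUT above */
		nanosleep(&delay, NULL);
	}
	return 0;
}

int main(void)
{
	printf("wait_until_ready: %d\n", wait_until_ready(30));
	return 0;
}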
2431
2432static inline void pqi_aio_path_disabled(struct pqi_io_request *io_request)
2433{
2434 struct pqi_scsi_dev *device;
2435
2436 device = io_request->scmd->device->hostdata;
2437 device->offload_enabled = false;
2438}
2439
2440static inline void pqi_take_device_offline(struct scsi_device *sdev)
2441{
2442 struct pqi_ctrl_info *ctrl_info;
e58081a7 2443 struct pqi_scsi_dev *device;
2444
2445 if (scsi_device_online(sdev)) {
2446 scsi_device_set_state(sdev, SDEV_OFFLINE);
2447 ctrl_info = shost_to_hba(sdev->host);
2448 schedule_delayed_work(&ctrl_info->rescan_work, 0);
2449 device = sdev->hostdata;
2450 dev_err(&ctrl_info->pci_dev->dev, "offlined scsi %d:%d:%d:%d\n",
2451 ctrl_info->scsi_host->host_no, device->bus,
2452 device->target, device->lun);
2453 }
2454}
2455
2456static void pqi_process_raid_io_error(struct pqi_io_request *io_request)
2457{
2458 u8 scsi_status;
2459 u8 host_byte;
2460 struct scsi_cmnd *scmd;
2461 struct pqi_raid_error_info *error_info;
2462 size_t sense_data_length;
2463 int residual_count;
2464 int xfer_count;
2465 struct scsi_sense_hdr sshdr;
2466
2467 scmd = io_request->scmd;
2468 if (!scmd)
2469 return;
2470
2471 error_info = io_request->error_info;
2472 scsi_status = error_info->status;
2473 host_byte = DID_OK;
2474
2475 if (error_info->data_out_result == PQI_DATA_IN_OUT_UNDERFLOW) {
2476 xfer_count =
2477 get_unaligned_le32(&error_info->data_out_transferred);
2478 residual_count = scsi_bufflen(scmd) - xfer_count;
2479 scsi_set_resid(scmd, residual_count);
2480 if (xfer_count < scmd->underflow)
2481 host_byte = DID_SOFT_ERROR;
2482 }
2483
2484 sense_data_length = get_unaligned_le16(&error_info->sense_data_length);
2485 if (sense_data_length == 0)
2486 sense_data_length =
2487 get_unaligned_le16(&error_info->response_data_length);
2488 if (sense_data_length) {
2489 if (sense_data_length > sizeof(error_info->data))
2490 sense_data_length = sizeof(error_info->data);
2491
2492 if (scsi_status == SAM_STAT_CHECK_CONDITION &&
2493 scsi_normalize_sense(error_info->data,
2494 sense_data_length, &sshdr) &&
2495 sshdr.sense_key == HARDWARE_ERROR &&
2496 sshdr.asc == 0x3e &&
2497 sshdr.ascq == 0x1) {
2498 pqi_take_device_offline(scmd->device);
2499 host_byte = DID_NO_CONNECT;
2500 }
2501
2502 if (sense_data_length > SCSI_SENSE_BUFFERSIZE)
2503 sense_data_length = SCSI_SENSE_BUFFERSIZE;
2504 memcpy(scmd->sense_buffer, error_info->data,
2505 sense_data_length);
2506 }
2507
2508 scmd->result = scsi_status;
2509 set_host_byte(scmd, host_byte);
2510}
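The underflow handling above amounts to two small calculations: the residual is the requested length minus the bytes actually moved, and the transfer is flagged as a soft error only when fewer bytes than the command's underflow threshold arrived. A tiny sketch with invented byte counts:

#include <stdio.h>

int main(void)
{
	unsigned int bufflen = 4096;	/* bytes the command asked for */
	unsigned int xfer_count = 512;	/* bytes the controller reported moving */
	unsigned int underflow = 1024;	/* smallest acceptable transfer */

	unsigned int residual_count = bufflen - xfer_count;

	printf("residual = %u bytes\n", residual_count);	/* 3584 */
	if (xfer_count < underflow)
		printf("short transfer below underflow -> soft error\n");
	return 0;
}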
2511
2512static void pqi_process_aio_io_error(struct pqi_io_request *io_request)
2513{
2514 u8 scsi_status;
2515 u8 host_byte;
2516 struct scsi_cmnd *scmd;
2517 struct pqi_aio_error_info *error_info;
2518 size_t sense_data_length;
2519 int residual_count;
2520 int xfer_count;
2521 bool device_offline;
2522
2523 scmd = io_request->scmd;
2524 error_info = io_request->error_info;
2525 host_byte = DID_OK;
2526 sense_data_length = 0;
2527 device_offline = false;
2528
2529 switch (error_info->service_response) {
2530 case PQI_AIO_SERV_RESPONSE_COMPLETE:
2531 scsi_status = error_info->status;
2532 break;
2533 case PQI_AIO_SERV_RESPONSE_FAILURE:
2534 switch (error_info->status) {
2535 case PQI_AIO_STATUS_IO_ABORTED:
2536 scsi_status = SAM_STAT_TASK_ABORTED;
2537 break;
2538 case PQI_AIO_STATUS_UNDERRUN:
2539 scsi_status = SAM_STAT_GOOD;
2540 residual_count = get_unaligned_le32(
2541 &error_info->residual_count);
2542 scsi_set_resid(scmd, residual_count);
2543 xfer_count = scsi_bufflen(scmd) - residual_count;
2544 if (xfer_count < scmd->underflow)
2545 host_byte = DID_SOFT_ERROR;
2546 break;
2547 case PQI_AIO_STATUS_OVERRUN:
2548 scsi_status = SAM_STAT_GOOD;
2549 break;
2550 case PQI_AIO_STATUS_AIO_PATH_DISABLED:
2551 pqi_aio_path_disabled(io_request);
2552 scsi_status = SAM_STAT_GOOD;
2553 io_request->status = -EAGAIN;
2554 break;
2555 case PQI_AIO_STATUS_NO_PATH_TO_DEVICE:
2556 case PQI_AIO_STATUS_INVALID_DEVICE:
2557 device_offline = true;
2558 pqi_take_device_offline(scmd->device);
2559 host_byte = DID_NO_CONNECT;
2560 scsi_status = SAM_STAT_CHECK_CONDITION;
2561 break;
2562 case PQI_AIO_STATUS_IO_ERROR:
2563 default:
2564 scsi_status = SAM_STAT_CHECK_CONDITION;
2565 break;
2566 }
2567 break;
2568 case PQI_AIO_SERV_RESPONSE_TMF_COMPLETE:
2569 case PQI_AIO_SERV_RESPONSE_TMF_SUCCEEDED:
2570 scsi_status = SAM_STAT_GOOD;
2571 break;
2572 case PQI_AIO_SERV_RESPONSE_TMF_REJECTED:
2573 case PQI_AIO_SERV_RESPONSE_TMF_INCORRECT_LUN:
2574 default:
2575 scsi_status = SAM_STAT_CHECK_CONDITION;
2576 break;
2577 }
2578
2579 if (error_info->data_present) {
2580 sense_data_length =
2581 get_unaligned_le16(&error_info->data_length);
2582 if (sense_data_length) {
2583 if (sense_data_length > sizeof(error_info->data))
2584 sense_data_length = sizeof(error_info->data);
2585 if (sense_data_length > SCSI_SENSE_BUFFERSIZE)
2586 sense_data_length = SCSI_SENSE_BUFFERSIZE;
2587 memcpy(scmd->sense_buffer, error_info->data,
2588 sense_data_length);
2589 }
2590 }
2591
2592 if (device_offline && sense_data_length == 0)
2593 scsi_build_sense_buffer(0, scmd->sense_buffer, HARDWARE_ERROR,
2594 0x3e, 0x1);
2595
2596 scmd->result = scsi_status;
2597 set_host_byte(scmd, host_byte);
2598}
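Both error paths clamp the reported sense length twice, first to the controller's error-buffer field and then to the kernel's sense buffer, before copying. The sketch below shows only that double clamp; the 256-byte and 96-byte sizes are assumptions chosen for illustration.

#include <stdio.h>
#include <string.h>

#define ERROR_DATA_SIZE		256	/* assumed size of the error buffer field */
#define SENSE_BUFFERSIZE	96	/* stands in for SCSI_SENSE_BUFFERSIZE */

int main(void)
{
	unsigned char error_data[ERROR_DATA_SIZE] = { 0x70, 0x00, 0x04 };
	unsigned char sense_buffer[SENSE_BUFFERSIZE];
	size_t sense_data_length = 300;	/* oversized length reported by firmware */

	if (sense_data_length > sizeof(error_data))
		sense_data_length = sizeof(error_data);
	if (sense_data_length > sizeof(sense_buffer))
		sense_data_length = sizeof(sense_buffer);

	memcpy(sense_buffer, error_data, sense_data_length);
	printf("copied %zu bytes of sense data\n", sense_data_length);	/* 96 */
	return 0;
}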
2599
2600static void pqi_process_io_error(unsigned int iu_type,
2601 struct pqi_io_request *io_request)
2602{
2603 switch (iu_type) {
2604 case PQI_RESPONSE_IU_RAID_PATH_IO_ERROR:
2605 pqi_process_raid_io_error(io_request);
2606 break;
2607 case PQI_RESPONSE_IU_AIO_PATH_IO_ERROR:
2608 pqi_process_aio_io_error(io_request);
2609 break;
2610 }
2611}
2612
2613static int pqi_interpret_task_management_response(
2614 struct pqi_task_management_response *response)
2615{
2616 int rc;
2617
2618 switch (response->response_code) {
2619 case SOP_TMF_COMPLETE:
2620 case SOP_TMF_FUNCTION_SUCCEEDED:
2621 rc = 0;
2622 break;
2623 default:
2624 rc = -EIO;
2625 break;
2626 }
2627
2628 return rc;
2629}
2630
2631static unsigned int pqi_process_io_intr(struct pqi_ctrl_info *ctrl_info,
2632 struct pqi_queue_group *queue_group)
2633{
2634 unsigned int num_responses;
2635 pqi_index_t oq_pi;
2636 pqi_index_t oq_ci;
2637 struct pqi_io_request *io_request;
2638 struct pqi_io_response *response;
2639 u16 request_id;
2640
2641 num_responses = 0;
2642 oq_ci = queue_group->oq_ci_copy;
2643
2644 while (1) {
2645 oq_pi = *queue_group->oq_pi;
2646 if (oq_pi == oq_ci)
2647 break;
2648
2649 num_responses++;
2650 response = queue_group->oq_element_array +
2651 (oq_ci * PQI_OPERATIONAL_OQ_ELEMENT_LENGTH);
2652
2653 request_id = get_unaligned_le16(&response->request_id);
2654 WARN_ON(request_id >= ctrl_info->max_io_slots);
2655
2656 io_request = &ctrl_info->io_request_pool[request_id];
2657 WARN_ON(atomic_read(&io_request->refcount) == 0);
2658
2659 switch (response->header.iu_type) {
2660 case PQI_RESPONSE_IU_RAID_PATH_IO_SUCCESS:
2661 case PQI_RESPONSE_IU_AIO_PATH_IO_SUCCESS:
2662 case PQI_RESPONSE_IU_GENERAL_MANAGEMENT:
2663 break;
2664 case PQI_RESPONSE_IU_TASK_MANAGEMENT:
2665 io_request->status =
2666 pqi_interpret_task_management_response(
2667 (void *)response);
2668 break;
2669 case PQI_RESPONSE_IU_AIO_PATH_DISABLED:
2670 pqi_aio_path_disabled(io_request);
2671 io_request->status = -EAGAIN;
2672 break;
2673 case PQI_RESPONSE_IU_RAID_PATH_IO_ERROR:
2674 case PQI_RESPONSE_IU_AIO_PATH_IO_ERROR:
2675 io_request->error_info = ctrl_info->error_buffer +
2676 (get_unaligned_le16(&response->error_index) *
2677 PQI_ERROR_BUFFER_ELEMENT_LENGTH);
2678 pqi_process_io_error(response->header.iu_type,
2679 io_request);
2680 break;
2681 default:
2682 dev_err(&ctrl_info->pci_dev->dev,
2683 "unexpected IU type: 0x%x\n",
2684 response->header.iu_type);
2685 WARN_ON(response->header.iu_type);
2686 break;
2687 }
2688
2689 io_request->io_complete_callback(io_request,
2690 io_request->context);
2691
2692 /*
2693 * Note that the I/O request structure CANNOT BE TOUCHED after
2694 * returning from the I/O completion callback!
2695 */
2696
2697 oq_ci = (oq_ci + 1) % ctrl_info->num_elements_per_oq;
2698 }
2699
2700 if (num_responses) {
2701 queue_group->oq_ci_copy = oq_ci;
2702 writel(oq_ci, queue_group->oq_ci);
2703 }
2704
2705 return num_responses;
2706}
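The response loop above is a single-consumer ring: keep consuming while the producer index written by the controller differs from the local consumer-index copy, wrap modulo the queue depth, and publish the new consumer index once at the end. A stripped-down userspace model (an int array standing in for response IUs, no hardware or locking) follows.

#include <stdio.h>

#define NUM_ELEMENTS 8

static unsigned int drain_ring(const int *ring, unsigned int oq_pi,
	unsigned int *oq_ci_copy)
{
	unsigned int oq_ci = *oq_ci_copy;
	unsigned int num_responses = 0;

	while (oq_pi != oq_ci) {
		printf("consumed element %u: %d\n", oq_ci, ring[oq_ci]);
		oq_ci = (oq_ci + 1) % NUM_ELEMENTS;
		num_responses++;
	}

	if (num_responses)
		*oq_ci_copy = oq_ci;	/* the driver also writes this back to oq_ci */
	return num_responses;
}

int main(void)
{
	int ring[NUM_ELEMENTS] = { 10, 11, 12, 13, 14, 15, 16, 17 };
	unsigned int ci = 6;

	/* Producer has wrapped: pi == 2, ci == 6, so elements 6, 7, 0, 1 are pending. */
	printf("handled %u responses\n", drain_ring(ring, 2, &ci));
	return 0;
}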
2707
2708static inline unsigned int pqi_num_elements_free(unsigned int pi,
df7a1fcf 2709 unsigned int ci, unsigned int elements_in_queue)
2710{
2711 unsigned int num_elements_used;
2712
2713 if (pi >= ci)
2714 num_elements_used = pi - ci;
2715 else
2716 num_elements_used = elements_in_queue - ci + pi;
2717
2718 return elements_in_queue - num_elements_used - 1;
2719}
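pqi_num_elements_free() leaves one slot unused so that pi == ci can only mean "empty". The standalone sketch below replays that arithmetic for a 16-element queue, including the wrapped case where the queue is completely full.

#include <stdio.h>

static unsigned int num_elements_free(unsigned int pi, unsigned int ci,
	unsigned int elements_in_queue)
{
	unsigned int used = (pi >= ci) ? pi - ci : elements_in_queue - ci + pi;

	return elements_in_queue - used - 1;
}

int main(void)
{
	printf("%u\n", num_elements_free(4, 4, 16));	/* 15: pi == ci means empty */
	printf("%u\n", num_elements_free(3, 4, 16));	/* 0: producer wrapped, queue full */
	return 0;
}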
2720
98f87667 2721static void pqi_send_event_ack(struct pqi_ctrl_info *ctrl_info,
2722 struct pqi_event_acknowledge_request *iu, size_t iu_length)
2723{
2724 pqi_index_t iq_pi;
2725 pqi_index_t iq_ci;
2726 unsigned long flags;
2727 void *next_element;
2728 struct pqi_queue_group *queue_group;
2729
2730 queue_group = &ctrl_info->queue_groups[PQI_DEFAULT_QUEUE_GROUP];
2731 put_unaligned_le16(queue_group->oq_id, &iu->header.response_queue_id);
2732
2733 while (1) {
2734 spin_lock_irqsave(&queue_group->submit_lock[RAID_PATH], flags);
2735
2736 iq_pi = queue_group->iq_pi_copy[RAID_PATH];
2737 iq_ci = *queue_group->iq_ci[RAID_PATH];
2738
2739 if (pqi_num_elements_free(iq_pi, iq_ci,
2740 ctrl_info->num_elements_per_iq))
2741 break;
2742
2743 spin_unlock_irqrestore(
2744 &queue_group->submit_lock[RAID_PATH], flags);
2745
98f87667 2746 if (pqi_ctrl_offline(ctrl_info))
6c223761 2747 return;
2748 }
2749
2750 next_element = queue_group->iq_element_array[RAID_PATH] +
2751 (iq_pi * PQI_OPERATIONAL_IQ_ELEMENT_LENGTH);
2752
2753 memcpy(next_element, iu, iu_length);
2754
2755 iq_pi = (iq_pi + 1) % ctrl_info->num_elements_per_iq;
2756 queue_group->iq_pi_copy[RAID_PATH] = iq_pi;
2757
2758 /*
2759 * This write notifies the controller that an IU is available to be
2760 * processed.
2761 */
2762 writel(iq_pi, queue_group->iq_pi[RAID_PATH]);
2763
2764 spin_unlock_irqrestore(&queue_group->submit_lock[RAID_PATH], flags);
2765}
2766
2767static void pqi_acknowledge_event(struct pqi_ctrl_info *ctrl_info,
2768 struct pqi_event *event)
2769{
2770 struct pqi_event_acknowledge_request request;
2771
2772 memset(&request, 0, sizeof(request));
2773
2774 request.header.iu_type = PQI_REQUEST_IU_ACKNOWLEDGE_VENDOR_EVENT;
2775 put_unaligned_le16(sizeof(request) - PQI_REQUEST_HEADER_LENGTH,
2776 &request.header.iu_length);
2777 request.event_type = event->event_type;
2778 request.event_id = event->event_id;
2779 request.additional_event_id = event->additional_event_id;
2780
98f87667 2781 pqi_send_event_ack(ctrl_info, &request, sizeof(request));
2782}
2783
2784static void pqi_event_worker(struct work_struct *work)
2785{
2786 unsigned int i;
2787 struct pqi_ctrl_info *ctrl_info;
6a50d6ad 2788 struct pqi_event *event;
2789
2790 ctrl_info = container_of(work, struct pqi_ctrl_info, event_work);
2791
2792 pqi_ctrl_busy(ctrl_info);
2793 pqi_wait_if_ctrl_blocked(ctrl_info, NO_TIMEOUT);
2794
6a50d6ad 2795 event = ctrl_info->events;
6c223761 2796 for (i = 0; i < PQI_NUM_SUPPORTED_EVENTS; i++) {
2797 if (event->pending) {
2798 event->pending = false;
2799 pqi_acknowledge_event(ctrl_info, event);
6c223761 2800 }
6a50d6ad 2801 event++;
2802 }
2803
2804 pqi_ctrl_unbusy(ctrl_info);
2805
2806 pqi_schedule_rescan_worker(ctrl_info);
2807}
2808
2809static void pqi_take_ctrl_offline(struct pqi_ctrl_info *ctrl_info)
2810{
2811 unsigned int i;
2812 unsigned int path;
2813 struct pqi_queue_group *queue_group;
2814 unsigned long flags;
2815 struct pqi_io_request *io_request;
2816 struct pqi_io_request *next;
2817 struct scsi_cmnd *scmd;
2818
2819 ctrl_info->controller_online = false;
2820 dev_err(&ctrl_info->pci_dev->dev, "controller offline\n");
5b0fba0f 2821 sis_shutdown_ctrl(ctrl_info);
2822
2823 for (i = 0; i < ctrl_info->num_queue_groups; i++) {
2824 queue_group = &ctrl_info->queue_groups[i];
2825
2826 for (path = 0; path < 2; path++) {
2827 spin_lock_irqsave(
2828 &queue_group->submit_lock[path], flags);
2829
2830 list_for_each_entry_safe(io_request, next,
2831 &queue_group->request_list[path],
2832 request_list_entry) {
2833
2834 scmd = io_request->scmd;
2835 if (scmd) {
2836 set_host_byte(scmd, DID_NO_CONNECT);
2837 pqi_scsi_done(scmd);
2838 }
2839
2840 list_del(&io_request->request_list_entry);
2841 }
2842
2843 spin_unlock_irqrestore(
2844 &queue_group->submit_lock[path], flags);
2845 }
2846 }
2847}
2848
98f87667 2849#define PQI_HEARTBEAT_TIMER_INTERVAL (10 * HZ)
2850
2851static void pqi_heartbeat_timer_handler(unsigned long data)
2852{
2853 int num_interrupts;
98f87667 2854 u32 heartbeat_count;
2855 struct pqi_ctrl_info *ctrl_info = (struct pqi_ctrl_info *)data;
2856
2857 pqi_check_ctrl_health(ctrl_info);
2858 if (pqi_ctrl_offline(ctrl_info))
2859 return;
2860
6c223761 2861 num_interrupts = atomic_read(&ctrl_info->num_interrupts);
98f87667 2862 heartbeat_count = pqi_read_heartbeat_counter(ctrl_info);
2863
2864 if (num_interrupts == ctrl_info->previous_num_interrupts) {
2865 if (heartbeat_count == ctrl_info->previous_heartbeat_count) {
2866 dev_err(&ctrl_info->pci_dev->dev,
2867 "no heartbeat detected - last heartbeat count: %u\n",
2868 heartbeat_count);
2869 pqi_take_ctrl_offline(ctrl_info);
2870 return;
2871 }
6c223761 2872 } else {
98f87667 2873 ctrl_info->previous_num_interrupts = num_interrupts;
2874 }
2875
98f87667 2876 ctrl_info->previous_heartbeat_count = heartbeat_count;
2877 mod_timer(&ctrl_info->heartbeat_timer,
2878 jiffies + PQI_HEARTBEAT_TIMER_INTERVAL);
2879}
2880
2881static void pqi_start_heartbeat_timer(struct pqi_ctrl_info *ctrl_info)
2882{
2883 if (!ctrl_info->heartbeat_counter)
2884 return;
2885
2886 ctrl_info->previous_num_interrupts =
2887 atomic_read(&ctrl_info->num_interrupts);
2888 ctrl_info->previous_heartbeat_count =
2889 pqi_read_heartbeat_counter(ctrl_info);
2890
2891 ctrl_info->heartbeat_timer.expires =
2892 jiffies + PQI_HEARTBEAT_TIMER_INTERVAL;
2893 ctrl_info->heartbeat_timer.data = (unsigned long)ctrl_info;
2894 ctrl_info->heartbeat_timer.function = pqi_heartbeat_timer_handler;
061ef06a 2895 add_timer(&ctrl_info->heartbeat_timer);
2896}
2897
2898static inline void pqi_stop_heartbeat_timer(struct pqi_ctrl_info *ctrl_info)
2899{
98f87667 2900 del_timer_sync(&ctrl_info->heartbeat_timer);
2901}
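The heartbeat timer above only declares the controller dead when both signals stall between ticks: the interrupt count and the firmware heartbeat counter. The sketch below models that decision with plain integers in place of atomics and register reads; the counter values are invented.

#include <stdbool.h>
#include <stdio.h>

struct ctrl_state {
	int previous_num_interrupts;
	unsigned int previous_heartbeat_count;
};

/* Returns true when the controller should be taken offline. */
static bool heartbeat_tick(struct ctrl_state *s, int num_interrupts,
	unsigned int heartbeat_count)
{
	bool dead = false;

	if (num_interrupts == s->previous_num_interrupts) {
		if (heartbeat_count == s->previous_heartbeat_count)
			dead = true;
	} else {
		s->previous_num_interrupts = num_interrupts;
	}
	s->previous_heartbeat_count = heartbeat_count;

	return dead;
}

int main(void)
{
	struct ctrl_state s = { .previous_num_interrupts = 5,
				.previous_heartbeat_count = 100 };

	printf("%d\n", heartbeat_tick(&s, 9, 100));	/* 0: interrupts still arriving */
	printf("%d\n", heartbeat_tick(&s, 9, 101));	/* 0: firmware still beating */
	printf("%d\n", heartbeat_tick(&s, 9, 101));	/* 1: both counters stalled */
	return 0;
}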
2902
6a50d6ad 2903static inline int pqi_event_type_to_event_index(unsigned int event_type)
2904{
2905 int index;
2906
2907 for (index = 0; index < ARRAY_SIZE(pqi_supported_event_types); index++)
2908 if (event_type == pqi_supported_event_types[index])
2909 return index;
2910
2911 return -1;
2912}
2913
2914static inline bool pqi_is_supported_event(unsigned int event_type)
2915{
2916 return pqi_event_type_to_event_index(event_type) != -1;
2917}
2918
2919static unsigned int pqi_process_event_intr(struct pqi_ctrl_info *ctrl_info)
2920{
2921 unsigned int num_events;
2922 pqi_index_t oq_pi;
2923 pqi_index_t oq_ci;
2924 struct pqi_event_queue *event_queue;
2925 struct pqi_event_response *response;
6a50d6ad 2926 struct pqi_event *event;
2927 int event_index;
2928
2929 event_queue = &ctrl_info->event_queue;
2930 num_events = 0;
2931 oq_ci = event_queue->oq_ci_copy;
2932
2933 while (1) {
2934 oq_pi = *event_queue->oq_pi;
2935 if (oq_pi == oq_ci)
2936 break;
2937
2938 num_events++;
2939 response = event_queue->oq_element_array +
2940 (oq_ci * PQI_EVENT_OQ_ELEMENT_LENGTH);
2941
2942 event_index =
2943 pqi_event_type_to_event_index(response->event_type);
2944
2945 if (event_index >= 0) {
2946 if (response->request_acknowlege) {
2947 event = &ctrl_info->events[event_index];
2948 event->pending = true;
2949 event->event_type = response->event_type;
2950 event->event_id = response->event_id;
2951 event->additional_event_id =
6c223761 2952 response->additional_event_id;
2953 }
2954 }
2955
2956 oq_ci = (oq_ci + 1) % PQI_NUM_EVENT_QUEUE_ELEMENTS;
2957 }
2958
2959 if (num_events) {
2960 event_queue->oq_ci_copy = oq_ci;
2961 writel(oq_ci, event_queue->oq_ci);
98f87667 2962 schedule_work(&ctrl_info->event_work);
2963 }
2964
2965 return num_events;
2966}
2967
2968#define PQI_LEGACY_INTX_MASK 0x1
2969
2970static inline void pqi_configure_legacy_intx(struct pqi_ctrl_info *ctrl_info,
2971 bool enable_intx)
2972{
2973 u32 intx_mask;
2974 struct pqi_device_registers __iomem *pqi_registers;
2975 volatile void __iomem *register_addr;
2976
2977 pqi_registers = ctrl_info->pqi_registers;
2978
2979 if (enable_intx)
2980 register_addr = &pqi_registers->legacy_intx_mask_clear;
2981 else
2982 register_addr = &pqi_registers->legacy_intx_mask_set;
2983
2984 intx_mask = readl(register_addr);
2985 intx_mask |= PQI_LEGACY_INTX_MASK;
2986 writel(intx_mask, register_addr);
2987}
2988
2989static void pqi_change_irq_mode(struct pqi_ctrl_info *ctrl_info,
2990 enum pqi_irq_mode new_mode)
2991{
2992 switch (ctrl_info->irq_mode) {
2993 case IRQ_MODE_MSIX:
2994 switch (new_mode) {
2995 case IRQ_MODE_MSIX:
2996 break;
2997 case IRQ_MODE_INTX:
2998 pqi_configure_legacy_intx(ctrl_info, true);
2999 sis_disable_msix(ctrl_info);
3000 sis_enable_intx(ctrl_info);
3001 break;
3002 case IRQ_MODE_NONE:
3003 sis_disable_msix(ctrl_info);
3004 break;
3005 }
3006 break;
3007 case IRQ_MODE_INTX:
3008 switch (new_mode) {
3009 case IRQ_MODE_MSIX:
3010 pqi_configure_legacy_intx(ctrl_info, false);
3011 sis_disable_intx(ctrl_info);
3012 sis_enable_msix(ctrl_info);
3013 break;
3014 case IRQ_MODE_INTX:
3015 break;
3016 case IRQ_MODE_NONE:
3017 pqi_configure_legacy_intx(ctrl_info, false);
3018 sis_disable_intx(ctrl_info);
3019 break;
3020 }
3021 break;
3022 case IRQ_MODE_NONE:
3023 switch (new_mode) {
3024 case IRQ_MODE_MSIX:
3025 sis_enable_msix(ctrl_info);
3026 break;
3027 case IRQ_MODE_INTX:
3028 pqi_configure_legacy_intx(ctrl_info, true);
3029 sis_enable_intx(ctrl_info);
3030 break;
3031 case IRQ_MODE_NONE:
3032 break;
3033 }
3034 break;
3035 }
3036
3037 ctrl_info->irq_mode = new_mode;
3038}
3039
3040#define PQI_LEGACY_INTX_PENDING 0x1
3041
3042static inline bool pqi_is_valid_irq(struct pqi_ctrl_info *ctrl_info)
3043{
3044 bool valid_irq;
3045 u32 intx_status;
3046
3047 switch (ctrl_info->irq_mode) {
3048 case IRQ_MODE_MSIX:
3049 valid_irq = true;
3050 break;
3051 case IRQ_MODE_INTX:
3052 intx_status =
3053 readl(&ctrl_info->pqi_registers->legacy_intx_status);
3054 if (intx_status & PQI_LEGACY_INTX_PENDING)
3055 valid_irq = true;
3056 else
3057 valid_irq = false;
3058 break;
3059 case IRQ_MODE_NONE:
3060 default:
3061 valid_irq = false;
3062 break;
3063 }
3064
3065 return valid_irq;
3066}
3067
3068static irqreturn_t pqi_irq_handler(int irq, void *data)
3069{
3070 struct pqi_ctrl_info *ctrl_info;
3071 struct pqi_queue_group *queue_group;
3072 unsigned int num_responses_handled;
3073
3074 queue_group = data;
3075 ctrl_info = queue_group->ctrl_info;
3076
061ef06a 3077 if (!pqi_is_valid_irq(ctrl_info))
3078 return IRQ_NONE;
3079
3080 num_responses_handled = pqi_process_io_intr(ctrl_info, queue_group);
3081
3082 if (irq == ctrl_info->event_irq)
3083 num_responses_handled += pqi_process_event_intr(ctrl_info);
3084
3085 if (num_responses_handled)
3086 atomic_inc(&ctrl_info->num_interrupts);
3087
3088 pqi_start_io(ctrl_info, queue_group, RAID_PATH, NULL);
3089 pqi_start_io(ctrl_info, queue_group, AIO_PATH, NULL);
3090
3091 return IRQ_HANDLED;
3092}
3093
3094static int pqi_request_irqs(struct pqi_ctrl_info *ctrl_info)
3095{
52198226 3096 struct pci_dev *pdev = ctrl_info->pci_dev;
3097 int i;
3098 int rc;
3099
52198226 3100 ctrl_info->event_irq = pci_irq_vector(pdev, 0);
3101
3102 for (i = 0; i < ctrl_info->num_msix_vectors_enabled; i++) {
3103 rc = request_irq(pci_irq_vector(pdev, i), pqi_irq_handler, 0,
3104 DRIVER_NAME_SHORT, &ctrl_info->queue_groups[i]);
6c223761 3105 if (rc) {
52198226 3106 dev_err(&pdev->dev,
6c223761 3107 "irq %u init failed with error %d\n",
52198226 3108 pci_irq_vector(pdev, i), rc);
3109 return rc;
3110 }
3111 ctrl_info->num_msix_vectors_initialized++;
3112 }
3113
3114 return 0;
3115}
3116
3117static void pqi_free_irqs(struct pqi_ctrl_info *ctrl_info)
3118{
3119 int i;
3120
3121 for (i = 0; i < ctrl_info->num_msix_vectors_initialized; i++)
3122 free_irq(pci_irq_vector(ctrl_info->pci_dev, i),
3123 &ctrl_info->queue_groups[i]);
3124
3125 ctrl_info->num_msix_vectors_initialized = 0;
3126}
3127
3128static int pqi_enable_msix_interrupts(struct pqi_ctrl_info *ctrl_info)
3129{
98bf061b 3130 int num_vectors_enabled;
6c223761 3131
98bf061b 3132 num_vectors_enabled = pci_alloc_irq_vectors(ctrl_info->pci_dev,
3133 PQI_MIN_MSIX_VECTORS, ctrl_info->num_queue_groups,
3134 PCI_IRQ_MSIX | PCI_IRQ_AFFINITY);
98bf061b 3135 if (num_vectors_enabled < 0) {
6c223761 3136 dev_err(&ctrl_info->pci_dev->dev,
3137 "MSI-X init failed with error %d\n",
3138 num_vectors_enabled);
3139 return num_vectors_enabled;
3140 }
3141
98bf061b 3142 ctrl_info->num_msix_vectors_enabled = num_vectors_enabled;
061ef06a 3143 ctrl_info->irq_mode = IRQ_MODE_MSIX;
3144 return 0;
3145}
3146
3147static void pqi_disable_msix_interrupts(struct pqi_ctrl_info *ctrl_info)
3148{
3149 if (ctrl_info->num_msix_vectors_enabled) {
3150 pci_free_irq_vectors(ctrl_info->pci_dev);
3151 ctrl_info->num_msix_vectors_enabled = 0;
3152 }
3153}
3154
3155static int pqi_alloc_operational_queues(struct pqi_ctrl_info *ctrl_info)
3156{
3157 unsigned int i;
3158 size_t alloc_length;
3159 size_t element_array_length_per_iq;
3160 size_t element_array_length_per_oq;
3161 void *element_array;
3162 void *next_queue_index;
3163 void *aligned_pointer;
3164 unsigned int num_inbound_queues;
3165 unsigned int num_outbound_queues;
3166 unsigned int num_queue_indexes;
3167 struct pqi_queue_group *queue_group;
3168
3169 element_array_length_per_iq =
3170 PQI_OPERATIONAL_IQ_ELEMENT_LENGTH *
3171 ctrl_info->num_elements_per_iq;
3172 element_array_length_per_oq =
3173 PQI_OPERATIONAL_OQ_ELEMENT_LENGTH *
3174 ctrl_info->num_elements_per_oq;
3175 num_inbound_queues = ctrl_info->num_queue_groups * 2;
3176 num_outbound_queues = ctrl_info->num_queue_groups;
3177 num_queue_indexes = (ctrl_info->num_queue_groups * 3) + 1;
3178
3179 aligned_pointer = NULL;
3180
3181 for (i = 0; i < num_inbound_queues; i++) {
3182 aligned_pointer = PTR_ALIGN(aligned_pointer,
3183 PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT);
3184 aligned_pointer += element_array_length_per_iq;
3185 }
3186
3187 for (i = 0; i < num_outbound_queues; i++) {
3188 aligned_pointer = PTR_ALIGN(aligned_pointer,
3189 PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT);
3190 aligned_pointer += element_array_length_per_oq;
3191 }
3192
3193 aligned_pointer = PTR_ALIGN(aligned_pointer,
3194 PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT);
3195 aligned_pointer += PQI_NUM_EVENT_QUEUE_ELEMENTS *
3196 PQI_EVENT_OQ_ELEMENT_LENGTH;
3197
3198 for (i = 0; i < num_queue_indexes; i++) {
3199 aligned_pointer = PTR_ALIGN(aligned_pointer,
3200 PQI_OPERATIONAL_INDEX_ALIGNMENT);
3201 aligned_pointer += sizeof(pqi_index_t);
3202 }
3203
3204 alloc_length = (size_t)aligned_pointer +
3205 PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT;
3206
3207 alloc_length += PQI_EXTRA_SGL_MEMORY;
3208
3209 ctrl_info->queue_memory_base =
3210 dma_zalloc_coherent(&ctrl_info->pci_dev->dev,
3211 alloc_length,
3212 &ctrl_info->queue_memory_base_dma_handle, GFP_KERNEL);
3213
3214 if (!ctrl_info->queue_memory_base) {
3215 dev_err(&ctrl_info->pci_dev->dev,
98f87667 3216 "unable to allocate memory for PQI admin queues\n");
3217 return -ENOMEM;
3218 }
3219
3220 ctrl_info->queue_memory_length = alloc_length;
3221
3222 element_array = PTR_ALIGN(ctrl_info->queue_memory_base,
3223 PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT);
3224
3225 for (i = 0; i < ctrl_info->num_queue_groups; i++) {
3226 queue_group = &ctrl_info->queue_groups[i];
3227 queue_group->iq_element_array[RAID_PATH] = element_array;
3228 queue_group->iq_element_array_bus_addr[RAID_PATH] =
3229 ctrl_info->queue_memory_base_dma_handle +
3230 (element_array - ctrl_info->queue_memory_base);
3231 element_array += element_array_length_per_iq;
3232 element_array = PTR_ALIGN(element_array,
3233 PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT);
3234 queue_group->iq_element_array[AIO_PATH] = element_array;
3235 queue_group->iq_element_array_bus_addr[AIO_PATH] =
3236 ctrl_info->queue_memory_base_dma_handle +
3237 (element_array - ctrl_info->queue_memory_base);
3238 element_array += element_array_length_per_iq;
3239 element_array = PTR_ALIGN(element_array,
3240 PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT);
3241 }
3242
3243 for (i = 0; i < ctrl_info->num_queue_groups; i++) {
3244 queue_group = &ctrl_info->queue_groups[i];
3245 queue_group->oq_element_array = element_array;
3246 queue_group->oq_element_array_bus_addr =
3247 ctrl_info->queue_memory_base_dma_handle +
3248 (element_array - ctrl_info->queue_memory_base);
3249 element_array += element_array_length_per_oq;
3250 element_array = PTR_ALIGN(element_array,
3251 PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT);
3252 }
3253
3254 ctrl_info->event_queue.oq_element_array = element_array;
3255 ctrl_info->event_queue.oq_element_array_bus_addr =
3256 ctrl_info->queue_memory_base_dma_handle +
3257 (element_array - ctrl_info->queue_memory_base);
3258 element_array += PQI_NUM_EVENT_QUEUE_ELEMENTS *
3259 PQI_EVENT_OQ_ELEMENT_LENGTH;
3260
3261 next_queue_index = PTR_ALIGN(element_array,
3262 PQI_OPERATIONAL_INDEX_ALIGNMENT);
3263
3264 for (i = 0; i < ctrl_info->num_queue_groups; i++) {
3265 queue_group = &ctrl_info->queue_groups[i];
3266 queue_group->iq_ci[RAID_PATH] = next_queue_index;
3267 queue_group->iq_ci_bus_addr[RAID_PATH] =
3268 ctrl_info->queue_memory_base_dma_handle +
3269 (next_queue_index - ctrl_info->queue_memory_base);
3270 next_queue_index += sizeof(pqi_index_t);
3271 next_queue_index = PTR_ALIGN(next_queue_index,
3272 PQI_OPERATIONAL_INDEX_ALIGNMENT);
3273 queue_group->iq_ci[AIO_PATH] = next_queue_index;
3274 queue_group->iq_ci_bus_addr[AIO_PATH] =
3275 ctrl_info->queue_memory_base_dma_handle +
3276 (next_queue_index - ctrl_info->queue_memory_base);
3277 next_queue_index += sizeof(pqi_index_t);
3278 next_queue_index = PTR_ALIGN(next_queue_index,
3279 PQI_OPERATIONAL_INDEX_ALIGNMENT);
3280 queue_group->oq_pi = next_queue_index;
3281 queue_group->oq_pi_bus_addr =
3282 ctrl_info->queue_memory_base_dma_handle +
3283 (next_queue_index - ctrl_info->queue_memory_base);
3284 next_queue_index += sizeof(pqi_index_t);
3285 next_queue_index = PTR_ALIGN(next_queue_index,
3286 PQI_OPERATIONAL_INDEX_ALIGNMENT);
3287 }
3288
3289 ctrl_info->event_queue.oq_pi = next_queue_index;
3290 ctrl_info->event_queue.oq_pi_bus_addr =
3291 ctrl_info->queue_memory_base_dma_handle +
3292 (next_queue_index - ctrl_info->queue_memory_base);
3293
3294 return 0;
3295}
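pqi_alloc_operational_queues() sizes one large DMA buffer by walking a null-based cursor through every element array and queue index with PTR_ALIGN, then repeats the identical walk over the real allocation to assign addresses. The userspace sketch below shows that two-pass pattern for an invented layout of two arrays with different alignments; align_up() mirrors what PTR_ALIGN does for power-of-two alignments.

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

/* Round p up to the next multiple of align (align must be a power of two). */
static uintptr_t align_up(uintptr_t p, uintptr_t align)
{
	return (p + align - 1) & ~(align - 1);
}

int main(void)
{
	const size_t array_a_len = 100, array_b_len = 37;	/* invented sizes */
	const uintptr_t align_a = 64, align_b = 4;		/* invented alignments */
	uintptr_t cursor = 0, base, a_addr, b_addr;
	size_t total;

	/* Pass 1: walk a zero-based cursor to learn the total size needed. */
	cursor = align_up(cursor, align_a) + array_a_len;
	cursor = align_up(cursor, align_b) + array_b_len;
	total = (size_t)(cursor + align_a);	/* slack so the first array can be aligned */

	/* Pass 2: walk the real allocation the same way to place each piece. */
	base = (uintptr_t)malloc(total);
	if (!base)
		return 1;
	a_addr = align_up(base, align_a);
	b_addr = align_up(a_addr + array_a_len, align_b);

	printf("total %zu, A at +%zu, B at +%zu\n", total,
	       (size_t)(a_addr - base), (size_t)(b_addr - base));
	free((void *)base);
	return 0;
}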
3296
3297static void pqi_init_operational_queues(struct pqi_ctrl_info *ctrl_info)
3298{
3299 unsigned int i;
3300 u16 next_iq_id = PQI_MIN_OPERATIONAL_QUEUE_ID;
3301 u16 next_oq_id = PQI_MIN_OPERATIONAL_QUEUE_ID;
3302
3303 /*
3304 * Initialize the backpointers to the controller structure in
3305 * each operational queue group structure.
3306 */
3307 for (i = 0; i < ctrl_info->num_queue_groups; i++)
3308 ctrl_info->queue_groups[i].ctrl_info = ctrl_info;
3309
3310 /*
3311 * Assign IDs to all operational queues. Note that the IDs
3312 * assigned to operational IQs are independent of the IDs
3313 * assigned to operational OQs.
3314 */
3315 ctrl_info->event_queue.oq_id = next_oq_id++;
3316 for (i = 0; i < ctrl_info->num_queue_groups; i++) {
3317 ctrl_info->queue_groups[i].iq_id[RAID_PATH] = next_iq_id++;
3318 ctrl_info->queue_groups[i].iq_id[AIO_PATH] = next_iq_id++;
3319 ctrl_info->queue_groups[i].oq_id = next_oq_id++;
3320 }
3321
3322 /*
3323 * Assign MSI-X table entry indexes to all queues. Note that the
3324 * interrupt for the event queue is shared with the first queue group.
3325 */
3326 ctrl_info->event_queue.int_msg_num = 0;
3327 for (i = 0; i < ctrl_info->num_queue_groups; i++)
3328 ctrl_info->queue_groups[i].int_msg_num = i;
3329
3330 for (i = 0; i < ctrl_info->num_queue_groups; i++) {
3331 spin_lock_init(&ctrl_info->queue_groups[i].submit_lock[0]);
3332 spin_lock_init(&ctrl_info->queue_groups[i].submit_lock[1]);
3333 INIT_LIST_HEAD(&ctrl_info->queue_groups[i].request_list[0]);
3334 INIT_LIST_HEAD(&ctrl_info->queue_groups[i].request_list[1]);
3335 }
3336}
3337
3338static int pqi_alloc_admin_queues(struct pqi_ctrl_info *ctrl_info)
3339{
3340 size_t alloc_length;
3341 struct pqi_admin_queues_aligned *admin_queues_aligned;
3342 struct pqi_admin_queues *admin_queues;
3343
3344 alloc_length = sizeof(struct pqi_admin_queues_aligned) +
3345 PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT;
3346
3347 ctrl_info->admin_queue_memory_base =
3348 dma_zalloc_coherent(&ctrl_info->pci_dev->dev,
3349 alloc_length,
3350 &ctrl_info->admin_queue_memory_base_dma_handle,
3351 GFP_KERNEL);
3352
3353 if (!ctrl_info->admin_queue_memory_base)
3354 return -ENOMEM;
3355
3356 ctrl_info->admin_queue_memory_length = alloc_length;
3357
3358 admin_queues = &ctrl_info->admin_queues;
3359 admin_queues_aligned = PTR_ALIGN(ctrl_info->admin_queue_memory_base,
3360 PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT);
3361 admin_queues->iq_element_array =
3362 &admin_queues_aligned->iq_element_array;
3363 admin_queues->oq_element_array =
3364 &admin_queues_aligned->oq_element_array;
3365 admin_queues->iq_ci = &admin_queues_aligned->iq_ci;
3366 admin_queues->oq_pi = &admin_queues_aligned->oq_pi;
3367
3368 admin_queues->iq_element_array_bus_addr =
3369 ctrl_info->admin_queue_memory_base_dma_handle +
3370 (admin_queues->iq_element_array -
3371 ctrl_info->admin_queue_memory_base);
3372 admin_queues->oq_element_array_bus_addr =
3373 ctrl_info->admin_queue_memory_base_dma_handle +
3374 (admin_queues->oq_element_array -
3375 ctrl_info->admin_queue_memory_base);
3376 admin_queues->iq_ci_bus_addr =
3377 ctrl_info->admin_queue_memory_base_dma_handle +
3378 ((void *)admin_queues->iq_ci -
3379 ctrl_info->admin_queue_memory_base);
3380 admin_queues->oq_pi_bus_addr =
3381 ctrl_info->admin_queue_memory_base_dma_handle +
3382 ((void *)admin_queues->oq_pi -
3383 ctrl_info->admin_queue_memory_base);
3384
3385 return 0;
3386}
3387
3388#define PQI_ADMIN_QUEUE_CREATE_TIMEOUT_JIFFIES HZ
3389#define PQI_ADMIN_QUEUE_CREATE_POLL_INTERVAL_MSECS 1
3390
3391static int pqi_create_admin_queues(struct pqi_ctrl_info *ctrl_info)
3392{
3393 struct pqi_device_registers __iomem *pqi_registers;
3394 struct pqi_admin_queues *admin_queues;
3395 unsigned long timeout;
3396 u8 status;
3397 u32 reg;
3398
3399 pqi_registers = ctrl_info->pqi_registers;
3400 admin_queues = &ctrl_info->admin_queues;
3401
3402 writeq((u64)admin_queues->iq_element_array_bus_addr,
3403 &pqi_registers->admin_iq_element_array_addr);
3404 writeq((u64)admin_queues->oq_element_array_bus_addr,
3405 &pqi_registers->admin_oq_element_array_addr);
3406 writeq((u64)admin_queues->iq_ci_bus_addr,
3407 &pqi_registers->admin_iq_ci_addr);
3408 writeq((u64)admin_queues->oq_pi_bus_addr,
3409 &pqi_registers->admin_oq_pi_addr);
3410
3411 reg = PQI_ADMIN_IQ_NUM_ELEMENTS |
3412 (PQI_ADMIN_OQ_NUM_ELEMENTS) << 8 |
3413 (admin_queues->int_msg_num << 16);
3414 writel(reg, &pqi_registers->admin_iq_num_elements);
3415 writel(PQI_CREATE_ADMIN_QUEUE_PAIR,
3416 &pqi_registers->function_and_status_code);
3417
3418 timeout = PQI_ADMIN_QUEUE_CREATE_TIMEOUT_JIFFIES + jiffies;
3419 while (1) {
3420 status = readb(&pqi_registers->function_and_status_code);
3421 if (status == PQI_STATUS_IDLE)
3422 break;
3423 if (time_after(jiffies, timeout))
3424 return -ETIMEDOUT;
3425 msleep(PQI_ADMIN_QUEUE_CREATE_POLL_INTERVAL_MSECS);
3426 }
3427
3428 /*
3429 * The offset registers are not initialized to the correct
3430 * offsets until *after* the create admin queue pair command
3431 * completes successfully.
3432 */
3433 admin_queues->iq_pi = ctrl_info->iomem_base +
3434 PQI_DEVICE_REGISTERS_OFFSET +
3435 readq(&pqi_registers->admin_iq_pi_offset);
3436 admin_queues->oq_ci = ctrl_info->iomem_base +
3437 PQI_DEVICE_REGISTERS_OFFSET +
3438 readq(&pqi_registers->admin_oq_ci_offset);
3439
3440 return 0;
3441}
3442
3443static void pqi_submit_admin_request(struct pqi_ctrl_info *ctrl_info,
3444 struct pqi_general_admin_request *request)
3445{
3446 struct pqi_admin_queues *admin_queues;
3447 void *next_element;
3448 pqi_index_t iq_pi;
3449
3450 admin_queues = &ctrl_info->admin_queues;
3451 iq_pi = admin_queues->iq_pi_copy;
3452
3453 next_element = admin_queues->iq_element_array +
3454 (iq_pi * PQI_ADMIN_IQ_ELEMENT_LENGTH);
3455
3456 memcpy(next_element, request, sizeof(*request));
3457
3458 iq_pi = (iq_pi + 1) % PQI_ADMIN_IQ_NUM_ELEMENTS;
3459 admin_queues->iq_pi_copy = iq_pi;
3460
3461 /*
3462 * This write notifies the controller that an IU is available to be
3463 * processed.
3464 */
3465 writel(iq_pi, admin_queues->iq_pi);
3466}
3467
3468static int pqi_poll_for_admin_response(struct pqi_ctrl_info *ctrl_info,
3469 struct pqi_general_admin_response *response)
3470{
3471 struct pqi_admin_queues *admin_queues;
3472 pqi_index_t oq_pi;
3473 pqi_index_t oq_ci;
3474 unsigned long timeout;
3475
3476 admin_queues = &ctrl_info->admin_queues;
3477 oq_ci = admin_queues->oq_ci_copy;
3478
3479 timeout = (3 * HZ) + jiffies;
3480
3481 while (1) {
3482 oq_pi = *admin_queues->oq_pi;
3483 if (oq_pi != oq_ci)
3484 break;
3485 if (time_after(jiffies, timeout)) {
3486 dev_err(&ctrl_info->pci_dev->dev,
3487 "timed out waiting for admin response\n");
3488 return -ETIMEDOUT;
3489 }
3490 usleep_range(1000, 2000);
3491 }
3492
3493 memcpy(response, admin_queues->oq_element_array +
3494 (oq_ci * PQI_ADMIN_OQ_ELEMENT_LENGTH), sizeof(*response));
3495
3496 oq_ci = (oq_ci + 1) % PQI_ADMIN_OQ_NUM_ELEMENTS;
3497 admin_queues->oq_ci_copy = oq_ci;
3498 writel(oq_ci, admin_queues->oq_ci);
3499
3500 return 0;
3501}
3502
3503static void pqi_start_io(struct pqi_ctrl_info *ctrl_info,
3504 struct pqi_queue_group *queue_group, enum pqi_io_path path,
3505 struct pqi_io_request *io_request)
3506{
3507 struct pqi_io_request *next;
3508 void *next_element;
3509 pqi_index_t iq_pi;
3510 pqi_index_t iq_ci;
3511 size_t iu_length;
3512 unsigned long flags;
3513 unsigned int num_elements_needed;
3514 unsigned int num_elements_to_end_of_queue;
3515 size_t copy_count;
3516 struct pqi_iu_header *request;
3517
3518 spin_lock_irqsave(&queue_group->submit_lock[path], flags);
3519
3520 if (io_request)
3521 list_add_tail(&io_request->request_list_entry,
3522 &queue_group->request_list[path]);
3523
3524 iq_pi = queue_group->iq_pi_copy[path];
3525
3526 list_for_each_entry_safe(io_request, next,
3527 &queue_group->request_list[path], request_list_entry) {
3528
3529 request = io_request->iu;
3530
3531 iu_length = get_unaligned_le16(&request->iu_length) +
3532 PQI_REQUEST_HEADER_LENGTH;
3533 num_elements_needed =
3534 DIV_ROUND_UP(iu_length,
3535 PQI_OPERATIONAL_IQ_ELEMENT_LENGTH);
3536
3537 iq_ci = *queue_group->iq_ci[path];
3538
3539 if (num_elements_needed > pqi_num_elements_free(iq_pi, iq_ci,
3540 ctrl_info->num_elements_per_iq))
3541 break;
3542
3543 put_unaligned_le16(queue_group->oq_id,
3544 &request->response_queue_id);
3545
3546 next_element = queue_group->iq_element_array[path] +
3547 (iq_pi * PQI_OPERATIONAL_IQ_ELEMENT_LENGTH);
3548
3549 num_elements_to_end_of_queue =
3550 ctrl_info->num_elements_per_iq - iq_pi;
3551
3552 if (num_elements_needed <= num_elements_to_end_of_queue) {
3553 memcpy(next_element, request, iu_length);
3554 } else {
3555 copy_count = num_elements_to_end_of_queue *
3556 PQI_OPERATIONAL_IQ_ELEMENT_LENGTH;
3557 memcpy(next_element, request, copy_count);
3558 memcpy(queue_group->iq_element_array[path],
3559 (u8 *)request + copy_count,
3560 iu_length - copy_count);
3561 }
3562
3563 iq_pi = (iq_pi + num_elements_needed) %
3564 ctrl_info->num_elements_per_iq;
3565
3566 list_del(&io_request->request_list_entry);
3567 }
3568
3569 if (iq_pi != queue_group->iq_pi_copy[path]) {
3570 queue_group->iq_pi_copy[path] = iq_pi;
3571 /*
3572 * This write notifies the controller that one or more IUs are
3573 * available to be processed.
3574 */
3575 writel(iq_pi, queue_group->iq_pi[path]);
3576 }
3577
3578 spin_unlock_irqrestore(&queue_group->submit_lock[path], flags);
3579}
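When an IU spans the end of the inbound element array, pqi_start_io() splits the copy: one memcpy fills the remaining elements at the tail and a second restarts at element zero. The sketch below reproduces just that wraparound copy on a small byte ring; the element size, queue depth, and payload are invented.

#include <stdio.h>
#include <string.h>

#define ELEMENT_LENGTH	4
#define NUM_ELEMENTS	8

int main(void)
{
	unsigned char ring[NUM_ELEMENTS * ELEMENT_LENGTH];
	const char *iu = "ABCDEFGHIJ";		/* 10-byte IU: needs 3 elements */
	size_t iu_length = 10;
	unsigned int iq_pi = 6;			/* only 2 elements left before the end */
	size_t copy_count = (NUM_ELEMENTS - iq_pi) * ELEMENT_LENGTH;	/* 8 bytes */

	memset(ring, '.', sizeof(ring));
	if (iu_length <= copy_count) {
		memcpy(&ring[iq_pi * ELEMENT_LENGTH], iu, iu_length);
	} else {
		memcpy(&ring[iq_pi * ELEMENT_LENGTH], iu, copy_count);
		memcpy(ring, iu + copy_count, iu_length - copy_count);	/* wrapped tail */
	}

	/* Prints "IJ", 22 dots, then "ABCDEFGH": the tail wrapped to element 0. */
	printf("%.*s\n", (int)sizeof(ring), ring);
	return 0;
}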
3580
3581static void pqi_raid_synchronous_complete(struct pqi_io_request *io_request,
3582 void *context)
3583{
3584 struct completion *waiting = context;
3585
3586 complete(waiting);
3587}
3588
3589static int pqi_submit_raid_request_synchronous_with_io_request(
3590 struct pqi_ctrl_info *ctrl_info, struct pqi_io_request *io_request,
3591 unsigned long timeout_msecs)
3592{
3593 int rc = 0;
3594 DECLARE_COMPLETION_ONSTACK(wait);
3595
3596 io_request->io_complete_callback = pqi_raid_synchronous_complete;
3597 io_request->context = &wait;
3598
3599 pqi_start_io(ctrl_info,
3600 &ctrl_info->queue_groups[PQI_DEFAULT_QUEUE_GROUP], RAID_PATH,
3601 io_request);
3602
3603 if (timeout_msecs == NO_TIMEOUT) {
3604 wait_for_completion_io(&wait);
3605 } else {
3606 if (!wait_for_completion_io_timeout(&wait,
3607 msecs_to_jiffies(timeout_msecs))) {
3608 dev_warn(&ctrl_info->pci_dev->dev,
3609 "command timed out\n");
3610 rc = -ETIMEDOUT;
3611 }
3612 }
3613
3614 return rc;
3615}
3616
3617static int pqi_submit_raid_request_synchronous(struct pqi_ctrl_info *ctrl_info,
3618 struct pqi_iu_header *request, unsigned int flags,
3619 struct pqi_raid_error_info *error_info, unsigned long timeout_msecs)
3620{
3621 int rc;
3622 struct pqi_io_request *io_request;
3623 unsigned long start_jiffies;
3624 unsigned long msecs_blocked;
3625 size_t iu_length;
3626
3627 /*
3628 * Note that PQI_SYNC_FLAGS_INTERRUPTABLE and a timeout value are
3629 * mutually exclusive.
3630 */
3631
3632 if (flags & PQI_SYNC_FLAGS_INTERRUPTABLE) {
3633 if (down_interruptible(&ctrl_info->sync_request_sem))
3634 return -ERESTARTSYS;
3635 } else {
3636 if (timeout_msecs == NO_TIMEOUT) {
3637 down(&ctrl_info->sync_request_sem);
3638 } else {
3639 start_jiffies = jiffies;
3640 if (down_timeout(&ctrl_info->sync_request_sem,
3641 msecs_to_jiffies(timeout_msecs)))
3642 return -ETIMEDOUT;
3643 msecs_blocked =
3644 jiffies_to_msecs(jiffies - start_jiffies);
3645 if (msecs_blocked >= timeout_msecs)
3646 return -ETIMEDOUT;
3647 timeout_msecs -= msecs_blocked;
3648 }
3649 }
3650
3651 pqi_ctrl_busy(ctrl_info);
3652 timeout_msecs = pqi_wait_if_ctrl_blocked(ctrl_info, timeout_msecs);
3653 if (timeout_msecs == 0) {
3654 rc = -ETIMEDOUT;
3655 goto out;
3656 }
3657
3658 io_request = pqi_alloc_io_request(ctrl_info);
3659
3660 put_unaligned_le16(io_request->index,
3661 &(((struct pqi_raid_path_request *)request)->request_id));
3662
3663 if (request->iu_type == PQI_REQUEST_IU_RAID_PATH_IO)
3664 ((struct pqi_raid_path_request *)request)->error_index =
3665 ((struct pqi_raid_path_request *)request)->request_id;
3666
3667 iu_length = get_unaligned_le16(&request->iu_length) +
3668 PQI_REQUEST_HEADER_LENGTH;
3669 memcpy(io_request->iu, request, iu_length);
3670
3671 rc = pqi_submit_raid_request_synchronous_with_io_request(ctrl_info,
3672 io_request, timeout_msecs);
3673
3674 if (error_info) {
3675 if (io_request->error_info)
3676 memcpy(error_info, io_request->error_info,
3677 sizeof(*error_info));
3678 else
3679 memset(error_info, 0, sizeof(*error_info));
3680 } else if (rc == 0 && io_request->error_info) {
3681 u8 scsi_status;
3682 struct pqi_raid_error_info *raid_error_info;
3683
3684 raid_error_info = io_request->error_info;
3685 scsi_status = raid_error_info->status;
3686
3687 if (scsi_status == SAM_STAT_CHECK_CONDITION &&
3688 raid_error_info->data_out_result ==
3689 PQI_DATA_IN_OUT_UNDERFLOW)
3690 scsi_status = SAM_STAT_GOOD;
3691
3692 if (scsi_status != SAM_STAT_GOOD)
3693 rc = -EIO;
3694 }
3695
3696 pqi_free_io_request(io_request);
3697
3698out:
3699 pqi_ctrl_unbusy(ctrl_info);
3700 up(&ctrl_info->sync_request_sem);
3701
3702 return rc;
3703}
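Before issuing the request, the synchronous path above charges any time spent waiting on the semaphore against the caller's timeout and fails early if the budget is already gone. A minimal sketch of that bookkeeping, with invented millisecond values:

#include <stdio.h>

/* Returns the timeout left after blocking, or -1 if the budget is spent. */
static long remaining_timeout(unsigned long timeout_msecs,
	unsigned long msecs_blocked)
{
	if (msecs_blocked >= timeout_msecs)
		return -1;	/* analogous to returning -ETIMEDOUT above */
	return (long)(timeout_msecs - msecs_blocked);
}

int main(void)
{
	printf("%ld\n", remaining_timeout(30000, 1200));	/* 28800 ms remain */
	printf("%ld\n", remaining_timeout(30000, 31000));	/* -1: already expired */
	return 0;
}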
3704
3705static int pqi_validate_admin_response(
3706 struct pqi_general_admin_response *response, u8 expected_function_code)
3707{
3708 if (response->header.iu_type != PQI_RESPONSE_IU_GENERAL_ADMIN)
3709 return -EINVAL;
3710
3711 if (get_unaligned_le16(&response->header.iu_length) !=
3712 PQI_GENERAL_ADMIN_IU_LENGTH)
3713 return -EINVAL;
3714
3715 if (response->function_code != expected_function_code)
3716 return -EINVAL;
3717
3718 if (response->status != PQI_GENERAL_ADMIN_STATUS_SUCCESS)
3719 return -EINVAL;
3720
3721 return 0;
3722}
3723
3724static int pqi_submit_admin_request_synchronous(
3725 struct pqi_ctrl_info *ctrl_info,
3726 struct pqi_general_admin_request *request,
3727 struct pqi_general_admin_response *response)
3728{
3729 int rc;
3730
3731 pqi_submit_admin_request(ctrl_info, request);
3732
3733 rc = pqi_poll_for_admin_response(ctrl_info, response);
3734
3735 if (rc == 0)
3736 rc = pqi_validate_admin_response(response,
3737 request->function_code);
3738
3739 return rc;
3740}
3741
3742static int pqi_report_device_capability(struct pqi_ctrl_info *ctrl_info)
3743{
3744 int rc;
3745 struct pqi_general_admin_request request;
3746 struct pqi_general_admin_response response;
3747 struct pqi_device_capability *capability;
3748 struct pqi_iu_layer_descriptor *sop_iu_layer_descriptor;
3749
3750 capability = kmalloc(sizeof(*capability), GFP_KERNEL);
3751 if (!capability)
3752 return -ENOMEM;
3753
3754 memset(&request, 0, sizeof(request));
3755
3756 request.header.iu_type = PQI_REQUEST_IU_GENERAL_ADMIN;
3757 put_unaligned_le16(PQI_GENERAL_ADMIN_IU_LENGTH,
3758 &request.header.iu_length);
3759 request.function_code =
3760 PQI_GENERAL_ADMIN_FUNCTION_REPORT_DEVICE_CAPABILITY;
3761 put_unaligned_le32(sizeof(*capability),
3762 &request.data.report_device_capability.buffer_length);
3763
3764 rc = pqi_map_single(ctrl_info->pci_dev,
3765 &request.data.report_device_capability.sg_descriptor,
3766 capability, sizeof(*capability),
3767 PCI_DMA_FROMDEVICE);
3768 if (rc)
3769 goto out;
3770
3771 rc = pqi_submit_admin_request_synchronous(ctrl_info, &request,
3772 &response);
3773
3774 pqi_pci_unmap(ctrl_info->pci_dev,
3775 &request.data.report_device_capability.sg_descriptor, 1,
3776 PCI_DMA_FROMDEVICE);
3777
3778 if (rc)
3779 goto out;
3780
3781 if (response.status != PQI_GENERAL_ADMIN_STATUS_SUCCESS) {
3782 rc = -EIO;
3783 goto out;
3784 }
3785
3786 ctrl_info->max_inbound_queues =
3787 get_unaligned_le16(&capability->max_inbound_queues);
3788 ctrl_info->max_elements_per_iq =
3789 get_unaligned_le16(&capability->max_elements_per_iq);
3790 ctrl_info->max_iq_element_length =
3791 get_unaligned_le16(&capability->max_iq_element_length)
3792 * 16;
3793 ctrl_info->max_outbound_queues =
3794 get_unaligned_le16(&capability->max_outbound_queues);
3795 ctrl_info->max_elements_per_oq =
3796 get_unaligned_le16(&capability->max_elements_per_oq);
3797 ctrl_info->max_oq_element_length =
3798 get_unaligned_le16(&capability->max_oq_element_length)
3799 * 16;
3800
3801 sop_iu_layer_descriptor =
3802 &capability->iu_layer_descriptors[PQI_PROTOCOL_SOP];
3803
3804 ctrl_info->max_inbound_iu_length_per_firmware =
3805 get_unaligned_le16(
3806 &sop_iu_layer_descriptor->max_inbound_iu_length);
3807 ctrl_info->inbound_spanning_supported =
3808 sop_iu_layer_descriptor->inbound_spanning_supported;
3809 ctrl_info->outbound_spanning_supported =
3810 sop_iu_layer_descriptor->outbound_spanning_supported;
3811
3812out:
3813 kfree(capability);
3814
3815 return rc;
3816}
3817
3818static int pqi_validate_device_capability(struct pqi_ctrl_info *ctrl_info)
3819{
3820 if (ctrl_info->max_iq_element_length <
3821 PQI_OPERATIONAL_IQ_ELEMENT_LENGTH) {
3822 dev_err(&ctrl_info->pci_dev->dev,
3823 "max. inbound queue element length of %d is less than the required length of %d\n",
3824 ctrl_info->max_iq_element_length,
3825 PQI_OPERATIONAL_IQ_ELEMENT_LENGTH);
3826 return -EINVAL;
3827 }
3828
3829 if (ctrl_info->max_oq_element_length <
3830 PQI_OPERATIONAL_OQ_ELEMENT_LENGTH) {
3831 dev_err(&ctrl_info->pci_dev->dev,
3832 "max. outbound queue element length of %d is less than the required length of %d\n",
3833 ctrl_info->max_oq_element_length,
3834 PQI_OPERATIONAL_OQ_ELEMENT_LENGTH);
3835 return -EINVAL;
3836 }
3837
3838 if (ctrl_info->max_inbound_iu_length_per_firmware <
3839 PQI_OPERATIONAL_IQ_ELEMENT_LENGTH) {
3840 dev_err(&ctrl_info->pci_dev->dev,
3841 "max. inbound IU length of %u is less than the min. required length of %d\n",
3842 ctrl_info->max_inbound_iu_length_per_firmware,
3843 PQI_OPERATIONAL_IQ_ELEMENT_LENGTH);
3844 return -EINVAL;
3845 }
3846
3847 if (!ctrl_info->inbound_spanning_supported) {
3848 dev_err(&ctrl_info->pci_dev->dev,
3849 "the controller does not support inbound spanning\n");
3850 return -EINVAL;
3851 }
3852
3853 if (ctrl_info->outbound_spanning_supported) {
3854 dev_err(&ctrl_info->pci_dev->dev,
3855 "the controller supports outbound spanning but this driver does not\n");
3856 return -EINVAL;
3857 }
3858
3859 return 0;
3860}
3861
3862static int pqi_delete_operational_queue(struct pqi_ctrl_info *ctrl_info,
3863 bool inbound_queue, u16 queue_id)
3864{
3865 struct pqi_general_admin_request request;
3866 struct pqi_general_admin_response response;
3867
3868 memset(&request, 0, sizeof(request));
3869 request.header.iu_type = PQI_REQUEST_IU_GENERAL_ADMIN;
3870 put_unaligned_le16(PQI_GENERAL_ADMIN_IU_LENGTH,
3871 &request.header.iu_length);
3872 if (inbound_queue)
3873 request.function_code =
3874 PQI_GENERAL_ADMIN_FUNCTION_DELETE_IQ;
3875 else
3876 request.function_code =
3877 PQI_GENERAL_ADMIN_FUNCTION_DELETE_OQ;
3878 put_unaligned_le16(queue_id,
3879 &request.data.delete_operational_queue.queue_id);
3880
3881 return pqi_submit_admin_request_synchronous(ctrl_info, &request,
3882 &response);
3883}
3884
3885static int pqi_create_event_queue(struct pqi_ctrl_info *ctrl_info)
3886{
3887 int rc;
3888 struct pqi_event_queue *event_queue;
3889 struct pqi_general_admin_request request;
3890 struct pqi_general_admin_response response;
3891
3892 event_queue = &ctrl_info->event_queue;
3893
3894 /*
3895 * Create OQ (Outbound Queue - device to host queue) to dedicate
3896 * to events.
3897 */
3898 memset(&request, 0, sizeof(request));
3899 request.header.iu_type = PQI_REQUEST_IU_GENERAL_ADMIN;
3900 put_unaligned_le16(PQI_GENERAL_ADMIN_IU_LENGTH,
3901 &request.header.iu_length);
3902 request.function_code = PQI_GENERAL_ADMIN_FUNCTION_CREATE_OQ;
3903 put_unaligned_le16(event_queue->oq_id,
3904 &request.data.create_operational_oq.queue_id);
3905 put_unaligned_le64((u64)event_queue->oq_element_array_bus_addr,
3906 &request.data.create_operational_oq.element_array_addr);
3907 put_unaligned_le64((u64)event_queue->oq_pi_bus_addr,
3908 &request.data.create_operational_oq.pi_addr);
3909 put_unaligned_le16(PQI_NUM_EVENT_QUEUE_ELEMENTS,
3910 &request.data.create_operational_oq.num_elements);
3911 put_unaligned_le16(PQI_EVENT_OQ_ELEMENT_LENGTH / 16,
3912 &request.data.create_operational_oq.element_length);
3913 request.data.create_operational_oq.queue_protocol = PQI_PROTOCOL_SOP;
3914 put_unaligned_le16(event_queue->int_msg_num,
3915 &request.data.create_operational_oq.int_msg_num);
3916
3917 rc = pqi_submit_admin_request_synchronous(ctrl_info, &request,
3918 &response);
3919 if (rc)
3920 return rc;
3921
3922 event_queue->oq_ci = ctrl_info->iomem_base +
3923 PQI_DEVICE_REGISTERS_OFFSET +
3924 get_unaligned_le64(
3925 &response.data.create_operational_oq.oq_ci_offset);
3926
3927 return 0;
3928}
3929
3930static int pqi_create_queue_group(struct pqi_ctrl_info *ctrl_info,
3931 unsigned int group_number)
3932{
3933 int rc;
3934 struct pqi_queue_group *queue_group;
3935 struct pqi_general_admin_request request;
3936 struct pqi_general_admin_response response;
3937
3938 queue_group = &ctrl_info->queue_groups[group_number];
3939
3940 /*
3941 * Create IQ (Inbound Queue - host to device queue) for
3942 * RAID path.
3943 */
3944 memset(&request, 0, sizeof(request));
3945 request.header.iu_type = PQI_REQUEST_IU_GENERAL_ADMIN;
3946 put_unaligned_le16(PQI_GENERAL_ADMIN_IU_LENGTH,
3947 &request.header.iu_length);
3948 request.function_code = PQI_GENERAL_ADMIN_FUNCTION_CREATE_IQ;
3949 put_unaligned_le16(queue_group->iq_id[RAID_PATH],
3950 &request.data.create_operational_iq.queue_id);
3951 put_unaligned_le64(
3952 (u64)queue_group->iq_element_array_bus_addr[RAID_PATH],
3953 &request.data.create_operational_iq.element_array_addr);
3954 put_unaligned_le64((u64)queue_group->iq_ci_bus_addr[RAID_PATH],
3955 &request.data.create_operational_iq.ci_addr);
3956 put_unaligned_le16(ctrl_info->num_elements_per_iq,
3957 &request.data.create_operational_iq.num_elements);
3958 put_unaligned_le16(PQI_OPERATIONAL_IQ_ELEMENT_LENGTH / 16,
3959 &request.data.create_operational_iq.element_length);
3960 request.data.create_operational_iq.queue_protocol = PQI_PROTOCOL_SOP;
3961
3962 rc = pqi_submit_admin_request_synchronous(ctrl_info, &request,
3963 &response);
3964 if (rc) {
3965 dev_err(&ctrl_info->pci_dev->dev,
3966 "error creating inbound RAID queue\n");
3967 return rc;
3968 }
3969
3970 queue_group->iq_pi[RAID_PATH] = ctrl_info->iomem_base +
3971 PQI_DEVICE_REGISTERS_OFFSET +
3972 get_unaligned_le64(
3973 &response.data.create_operational_iq.iq_pi_offset);
3974
3975 /*
3976 * Create IQ (Inbound Queue - host to device queue) for
3977 * Advanced I/O (AIO) path.
3978 */
3979 memset(&request, 0, sizeof(request));
3980 request.header.iu_type = PQI_REQUEST_IU_GENERAL_ADMIN;
3981 put_unaligned_le16(PQI_GENERAL_ADMIN_IU_LENGTH,
3982 &request.header.iu_length);
3983 request.function_code = PQI_GENERAL_ADMIN_FUNCTION_CREATE_IQ;
3984 put_unaligned_le16(queue_group->iq_id[AIO_PATH],
3985 &request.data.create_operational_iq.queue_id);
3986 put_unaligned_le64((u64)queue_group->
3987 iq_element_array_bus_addr[AIO_PATH],
3988 &request.data.create_operational_iq.element_array_addr);
3989 put_unaligned_le64((u64)queue_group->iq_ci_bus_addr[AIO_PATH],
3990 &request.data.create_operational_iq.ci_addr);
3991 put_unaligned_le16(ctrl_info->num_elements_per_iq,
3992 &request.data.create_operational_iq.num_elements);
3993 put_unaligned_le16(PQI_OPERATIONAL_IQ_ELEMENT_LENGTH / 16,
3994 &request.data.create_operational_iq.element_length);
3995 request.data.create_operational_iq.queue_protocol = PQI_PROTOCOL_SOP;
3996
3997 rc = pqi_submit_admin_request_synchronous(ctrl_info, &request,
3998 &response);
3999 if (rc) {
4000 dev_err(&ctrl_info->pci_dev->dev,
4001 "error creating inbound AIO queue\n");
4002 goto delete_inbound_queue_raid;
4003 }
4004
4005 queue_group->iq_pi[AIO_PATH] = ctrl_info->iomem_base +
4006 PQI_DEVICE_REGISTERS_OFFSET +
4007 get_unaligned_le64(
4008 &response.data.create_operational_iq.iq_pi_offset);
4009
4010 /*
4011 * Designate the 2nd IQ as the AIO path. By default, all IQs are
4012 * assumed to be for RAID path I/O unless we change the queue's
4013 * property.
4014 */
4015 memset(&request, 0, sizeof(request));
4016 request.header.iu_type = PQI_REQUEST_IU_GENERAL_ADMIN;
4017 put_unaligned_le16(PQI_GENERAL_ADMIN_IU_LENGTH,
4018 &request.header.iu_length);
4019 request.function_code = PQI_GENERAL_ADMIN_FUNCTION_CHANGE_IQ_PROPERTY;
4020 put_unaligned_le16(queue_group->iq_id[AIO_PATH],
4021 &request.data.change_operational_iq_properties.queue_id);
4022 put_unaligned_le32(PQI_IQ_PROPERTY_IS_AIO_QUEUE,
4023 &request.data.change_operational_iq_properties.vendor_specific);
4024
4025 rc = pqi_submit_admin_request_synchronous(ctrl_info, &request,
4026 &response);
4027 if (rc) {
4028 dev_err(&ctrl_info->pci_dev->dev,
4029 "error changing queue property\n");
4030 goto delete_inbound_queue_aio;
4031 }
4032
4033 /*
4034 * Create OQ (Outbound Queue - device to host queue).
4035 */
4036 memset(&request, 0, sizeof(request));
4037 request.header.iu_type = PQI_REQUEST_IU_GENERAL_ADMIN;
4038 put_unaligned_le16(PQI_GENERAL_ADMIN_IU_LENGTH,
4039 &request.header.iu_length);
4040 request.function_code = PQI_GENERAL_ADMIN_FUNCTION_CREATE_OQ;
4041 put_unaligned_le16(queue_group->oq_id,
4042 &request.data.create_operational_oq.queue_id);
4043 put_unaligned_le64((u64)queue_group->oq_element_array_bus_addr,
4044 &request.data.create_operational_oq.element_array_addr);
4045 put_unaligned_le64((u64)queue_group->oq_pi_bus_addr,
4046 &request.data.create_operational_oq.pi_addr);
4047 put_unaligned_le16(ctrl_info->num_elements_per_oq,
4048 &request.data.create_operational_oq.num_elements);
4049 put_unaligned_le16(PQI_OPERATIONAL_OQ_ELEMENT_LENGTH / 16,
4050 &request.data.create_operational_oq.element_length);
4051 request.data.create_operational_oq.queue_protocol = PQI_PROTOCOL_SOP;
4052 put_unaligned_le16(queue_group->int_msg_num,
4053 &request.data.create_operational_oq.int_msg_num);
4054
4055 rc = pqi_submit_admin_request_synchronous(ctrl_info, &request,
4056 &response);
4057 if (rc) {
4058 dev_err(&ctrl_info->pci_dev->dev,
4059 "error creating outbound queue\n");
4060 goto delete_inbound_queue_aio;
4061 }
4062
4063 queue_group->oq_ci = ctrl_info->iomem_base +
4064 PQI_DEVICE_REGISTERS_OFFSET +
4065 get_unaligned_le64(
4066 &response.data.create_operational_oq.oq_ci_offset);
4067
4068 return 0;
4069
4070delete_inbound_queue_aio:
4071 pqi_delete_operational_queue(ctrl_info, true,
4072 queue_group->iq_id[AIO_PATH]);
4073
4074delete_inbound_queue_raid:
4075 pqi_delete_operational_queue(ctrl_info, true,
4076 queue_group->iq_id[RAID_PATH]);
4077
4078 return rc;
4079}
4080
4081static int pqi_create_queues(struct pqi_ctrl_info *ctrl_info)
4082{
4083 int rc;
4084 unsigned int i;
4085
4086 rc = pqi_create_event_queue(ctrl_info);
4087 if (rc) {
4088 dev_err(&ctrl_info->pci_dev->dev,
4089 "error creating event queue\n");
4090 return rc;
4091 }
4092
4093 for (i = 0; i < ctrl_info->num_queue_groups; i++) {
4094 rc = pqi_create_queue_group(ctrl_info, i);
4095 if (rc) {
4096 dev_err(&ctrl_info->pci_dev->dev,
4097 "error creating queue group number %u/%u\n",
4098 i, ctrl_info->num_queue_groups);
4099 return rc;
4100 }
4101 }
4102
4103 return 0;
4104}
4105
4106#define PQI_REPORT_EVENT_CONFIG_BUFFER_LENGTH \
4107 (offsetof(struct pqi_event_config, descriptors) + \
4108 (PQI_MAX_EVENT_DESCRIPTORS * sizeof(struct pqi_event_descriptor)))
4109
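/*
 * PQI_REPORT_EVENT_CONFIG_BUFFER_LENGTH above sizes the buffer to hold the
 * fixed event-config header plus one descriptor per possible event type.
 * As a purely illustrative example (the real sizes come from smartpqi.h):
 * a 4-byte header followed by 24 four-byte descriptors would need
 * 4 + (24 * 4) = 100 bytes.
 */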
4110static int pqi_configure_events(struct pqi_ctrl_info *ctrl_info,
4111 bool enable_events)
4112{
4113 int rc;
4114 unsigned int i;
4115 struct pqi_event_config *event_config;
4116 struct pqi_event_descriptor *event_descriptor;
4117 struct pqi_general_management_request request;
4118
4119 event_config = kmalloc(PQI_REPORT_EVENT_CONFIG_BUFFER_LENGTH,
4120 GFP_KERNEL);
4121 if (!event_config)
4122 return -ENOMEM;
4123
4124 memset(&request, 0, sizeof(request));
4125
4126 request.header.iu_type = PQI_REQUEST_IU_REPORT_VENDOR_EVENT_CONFIG;
4127 put_unaligned_le16(offsetof(struct pqi_general_management_request,
4128 data.report_event_configuration.sg_descriptors[1]) -
4129 PQI_REQUEST_HEADER_LENGTH, &request.header.iu_length);
4130 put_unaligned_le32(PQI_REPORT_EVENT_CONFIG_BUFFER_LENGTH,
4131 &request.data.report_event_configuration.buffer_length);
4132
4133 rc = pqi_map_single(ctrl_info->pci_dev,
4134 request.data.report_event_configuration.sg_descriptors,
4135 event_config, PQI_REPORT_EVENT_CONFIG_BUFFER_LENGTH,
4136 PCI_DMA_FROMDEVICE);
4137 if (rc)
4138 goto out;
4139
4140 rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header,
4141 0, NULL, NO_TIMEOUT);
4142
4143 pqi_pci_unmap(ctrl_info->pci_dev,
4144 request.data.report_event_configuration.sg_descriptors, 1,
4145 PCI_DMA_FROMDEVICE);
4146
4147 if (rc)
4148 goto out;
4149
4150 for (i = 0; i < event_config->num_event_descriptors; i++) {
4151 event_descriptor = &event_config->descriptors[i];
4152 if (enable_events &&
4153 pqi_is_supported_event(event_descriptor->event_type))
4154 put_unaligned_le16(ctrl_info->event_queue.oq_id,
4155 &event_descriptor->oq_id);
4156 else
4157 put_unaligned_le16(0, &event_descriptor->oq_id);
4158 }
4159
4160 memset(&request, 0, sizeof(request));
4161
4162 request.header.iu_type = PQI_REQUEST_IU_SET_VENDOR_EVENT_CONFIG;
4163 put_unaligned_le16(offsetof(struct pqi_general_management_request,
4164 data.report_event_configuration.sg_descriptors[1]) -
4165 PQI_REQUEST_HEADER_LENGTH, &request.header.iu_length);
4166 put_unaligned_le32(PQI_REPORT_EVENT_CONFIG_BUFFER_LENGTH,
4167 &request.data.report_event_configuration.buffer_length);
4168
4169 rc = pqi_map_single(ctrl_info->pci_dev,
4170 request.data.report_event_configuration.sg_descriptors,
4171 event_config, PQI_REPORT_EVENT_CONFIG_BUFFER_LENGTH,
4172 PCI_DMA_TODEVICE);
4173 if (rc)
4174 goto out;
4175
4176 rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header, 0,
4177 NULL, NO_TIMEOUT);
4178
4179 pqi_pci_unmap(ctrl_info->pci_dev,
4180 request.data.report_event_configuration.sg_descriptors, 1,
4181 PCI_DMA_TODEVICE);
4182
4183out:
4184 kfree(event_config);
4185
4186 return rc;
4187}
4188
4189static inline int pqi_enable_events(struct pqi_ctrl_info *ctrl_info)
4190{
4191 return pqi_configure_events(ctrl_info, true);
4192}
4193
4194static inline int pqi_disable_events(struct pqi_ctrl_info *ctrl_info)
4195{
4196 return pqi_configure_events(ctrl_info, false);
4197}
4198
4199static void pqi_free_all_io_requests(struct pqi_ctrl_info *ctrl_info)
4200{
4201 unsigned int i;
4202 struct device *dev;
4203 size_t sg_chain_buffer_length;
4204 struct pqi_io_request *io_request;
4205
4206 if (!ctrl_info->io_request_pool)
4207 return;
4208
4209 dev = &ctrl_info->pci_dev->dev;
4210 sg_chain_buffer_length = ctrl_info->sg_chain_buffer_length;
4211 io_request = ctrl_info->io_request_pool;
4212
4213 for (i = 0; i < ctrl_info->max_io_slots; i++) {
4214 kfree(io_request->iu);
4215 if (!io_request->sg_chain_buffer)
4216 break;
4217 dma_free_coherent(dev, sg_chain_buffer_length,
4218 io_request->sg_chain_buffer,
4219 io_request->sg_chain_buffer_dma_handle);
4220 io_request++;
4221 }
4222
4223 kfree(ctrl_info->io_request_pool);
4224 ctrl_info->io_request_pool = NULL;
4225}
4226
4227static inline int pqi_alloc_error_buffer(struct pqi_ctrl_info *ctrl_info)
4228{
4229 ctrl_info->error_buffer = dma_zalloc_coherent(&ctrl_info->pci_dev->dev,
4230 ctrl_info->error_buffer_length,
4231 &ctrl_info->error_buffer_dma_handle, GFP_KERNEL);
4232
4233 if (!ctrl_info->error_buffer)
4234 return -ENOMEM;
4235
4236 return 0;
4237}
4238
4239static int pqi_alloc_io_resources(struct pqi_ctrl_info *ctrl_info)
4240{
4241 unsigned int i;
4242 void *sg_chain_buffer;
4243 size_t sg_chain_buffer_length;
4244 dma_addr_t sg_chain_buffer_dma_handle;
4245 struct device *dev;
4246 struct pqi_io_request *io_request;
4247
4248 ctrl_info->io_request_pool = kzalloc(ctrl_info->max_io_slots *
4249 sizeof(ctrl_info->io_request_pool[0]), GFP_KERNEL);
4250
4251 if (!ctrl_info->io_request_pool) {
4252 dev_err(&ctrl_info->pci_dev->dev,
4253 "failed to allocate I/O request pool\n");
4254 goto error;
4255 }
4256
4257 dev = &ctrl_info->pci_dev->dev;
4258 sg_chain_buffer_length = ctrl_info->sg_chain_buffer_length;
4259 io_request = ctrl_info->io_request_pool;
4260
4261 for (i = 0; i < ctrl_info->max_io_slots; i++) {
4262 io_request->iu =
4263 kmalloc(ctrl_info->max_inbound_iu_length, GFP_KERNEL);
4264
4265 if (!io_request->iu) {
4266 dev_err(&ctrl_info->pci_dev->dev,
4267 "failed to allocate IU buffers\n");
4268 goto error;
4269 }
4270
4271 sg_chain_buffer = dma_alloc_coherent(dev,
4272 sg_chain_buffer_length, &sg_chain_buffer_dma_handle,
4273 GFP_KERNEL);
4274
4275 if (!sg_chain_buffer) {
4276 dev_err(&ctrl_info->pci_dev->dev,
4277 "failed to allocate PQI scatter-gather chain buffers\n");
4278 goto error;
4279 }
4280
4281 io_request->index = i;
4282 io_request->sg_chain_buffer = sg_chain_buffer;
4283 io_request->sg_chain_buffer_dma_handle =
4284 sg_chain_buffer_dma_handle;
4285 io_request++;
4286 }
4287
4288 return 0;
4289
4290error:
4291 pqi_free_all_io_requests(ctrl_info);
4292
4293 return -ENOMEM;
4294}
4295
4296/*
4297 * Calculate required resources that are sized based on max. outstanding
4298 * requests and max. transfer size.
4299 */
4300
4301static void pqi_calculate_io_resources(struct pqi_ctrl_info *ctrl_info)
4302{
4303 u32 max_transfer_size;
4304 u32 max_sg_entries;
4305
4306 ctrl_info->scsi_ml_can_queue =
4307 ctrl_info->max_outstanding_requests - PQI_RESERVED_IO_SLOTS;
4308 ctrl_info->max_io_slots = ctrl_info->max_outstanding_requests;
4309
4310 ctrl_info->error_buffer_length =
4311 ctrl_info->max_io_slots * PQI_ERROR_BUFFER_ELEMENT_LENGTH;
4312
4313 max_transfer_size =
4314 min(ctrl_info->max_transfer_size, PQI_MAX_TRANSFER_SIZE);
4315
4316 max_sg_entries = max_transfer_size / PAGE_SIZE;
4317
4318 /* +1 to cover when the buffer is not page-aligned. */
4319 max_sg_entries++;
4320
4321 max_sg_entries = min(ctrl_info->max_sg_entries, max_sg_entries);
4322
4323 max_transfer_size = (max_sg_entries - 1) * PAGE_SIZE;
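 /*
  * Worked example with illustrative numbers: for 4 KiB pages and a 1 MiB
  * max transfer, max_sg_entries starts at 256, becomes 257 to cover an
  * unaligned buffer, and (if the controller supports that many entries)
  * max_transfer_size settles back to 256 * 4 KiB = 1 MiB.
  */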
4324
4325 ctrl_info->sg_chain_buffer_length =
4326 (max_sg_entries * sizeof(struct pqi_sg_descriptor)) +
4327 PQI_EXTRA_SGL_MEMORY;
4328 ctrl_info->sg_tablesize = max_sg_entries;
4329 ctrl_info->max_sectors = max_transfer_size / 512;
4330}
4331
4332static void pqi_calculate_queue_resources(struct pqi_ctrl_info *ctrl_info)
4333{
4334 int num_cpus;
4335 int max_queue_groups;
4336 int num_queue_groups;
4337 u16 num_elements_per_iq;
4338 u16 num_elements_per_oq;
4339
4340 max_queue_groups = min(ctrl_info->max_inbound_queues / 2,
4341 ctrl_info->max_outbound_queues - 1);
4342 max_queue_groups = min(max_queue_groups, PQI_MAX_QUEUE_GROUPS);
4343
4344 num_cpus = num_online_cpus();
4345 num_queue_groups = min(num_cpus, ctrl_info->max_msix_vectors);
4346 num_queue_groups = min(num_queue_groups, max_queue_groups);
4347
4348 ctrl_info->num_queue_groups = num_queue_groups;
4349 ctrl_info->max_hw_queue_index = num_queue_groups - 1;
4350
4351 /*
4352 * Make sure that the max. inbound IU length is an even multiple
4353 * of our inbound element length.
4354 */
4355 ctrl_info->max_inbound_iu_length =
4356 (ctrl_info->max_inbound_iu_length_per_firmware /
4357 PQI_OPERATIONAL_IQ_ELEMENT_LENGTH) *
4358 PQI_OPERATIONAL_IQ_ELEMENT_LENGTH;
4359
4360 num_elements_per_iq =
4361 (ctrl_info->max_inbound_iu_length /
4362 PQI_OPERATIONAL_IQ_ELEMENT_LENGTH);
4363
4364 /* Add one because one element in each queue is unusable. */
4365 num_elements_per_iq++;
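 /*
  * (A PQI circular queue is empty when PI == CI, so one element always
  * stays unused to tell "full" from "empty"; the increment restores the
  * requested usable depth.)
  */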
4366
4367 num_elements_per_iq = min(num_elements_per_iq,
4368 ctrl_info->max_elements_per_iq);
4369
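 /*
  * Each queue group has two inbound queues (RAID and AIO paths) but only
  * one outbound queue, so the OQ below is sized to absorb completions
  * from both IQs, plus its own unusable element.
  */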
4370 num_elements_per_oq = ((num_elements_per_iq - 1) * 2) + 1;
4371 num_elements_per_oq = min(num_elements_per_oq,
4372 ctrl_info->max_elements_per_oq);
4373
4374 ctrl_info->num_elements_per_iq = num_elements_per_iq;
4375 ctrl_info->num_elements_per_oq = num_elements_per_oq;
4376
4377 ctrl_info->max_sg_per_iu =
4378 ((ctrl_info->max_inbound_iu_length -
4379 PQI_OPERATIONAL_IQ_ELEMENT_LENGTH) /
4380 sizeof(struct pqi_sg_descriptor)) +
4381 PQI_MAX_EMBEDDED_SG_DESCRIPTORS;
4382}
4383
4384static inline void pqi_set_sg_descriptor(
4385 struct pqi_sg_descriptor *sg_descriptor, struct scatterlist *sg)
4386{
4387 u64 address = (u64)sg_dma_address(sg);
4388 unsigned int length = sg_dma_len(sg);
4389
4390 put_unaligned_le64(address, &sg_descriptor->address);
4391 put_unaligned_le32(length, &sg_descriptor->length);
4392 put_unaligned_le32(0, &sg_descriptor->flags);
4393}
4394
4395static int pqi_build_raid_sg_list(struct pqi_ctrl_info *ctrl_info,
4396 struct pqi_raid_path_request *request, struct scsi_cmnd *scmd,
4397 struct pqi_io_request *io_request)
4398{
4399 int i;
4400 u16 iu_length;
4401 int sg_count;
4402 bool chained;
4403 unsigned int num_sg_in_iu;
4404 unsigned int max_sg_per_iu;
4405 struct scatterlist *sg;
4406 struct pqi_sg_descriptor *sg_descriptor;
4407
4408 sg_count = scsi_dma_map(scmd);
4409 if (sg_count < 0)
4410 return sg_count;
4411
4412 iu_length = offsetof(struct pqi_raid_path_request, sg_descriptors) -
4413 PQI_REQUEST_HEADER_LENGTH;
4414
4415 if (sg_count == 0)
4416 goto out;
4417
4418 sg = scsi_sglist(scmd);
4419 sg_descriptor = request->sg_descriptors;
4420 max_sg_per_iu = ctrl_info->max_sg_per_iu - 1;
4421 chained = false;
4422 num_sg_in_iu = 0;
4423 i = 0;
4424
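 /*
  * Walk the scatterlist, filling in the embedded descriptors.  When the
  * IU runs out of room, the last embedded slot is converted into a
  * CISS_SG_CHAIN descriptor pointing at this request's pre-allocated
  * chain buffer, and the remaining entries continue there.
  */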
4425 while (1) {
4426 pqi_set_sg_descriptor(sg_descriptor, sg);
4427 if (!chained)
4428 num_sg_in_iu++;
4429 i++;
4430 if (i == sg_count)
4431 break;
4432 sg_descriptor++;
4433 if (i == max_sg_per_iu) {
4434 put_unaligned_le64(
4435 (u64)io_request->sg_chain_buffer_dma_handle,
4436 &sg_descriptor->address);
4437 put_unaligned_le32((sg_count - num_sg_in_iu)
4438 * sizeof(*sg_descriptor),
4439 &sg_descriptor->length);
4440 put_unaligned_le32(CISS_SG_CHAIN,
4441 &sg_descriptor->flags);
4442 chained = true;
4443 num_sg_in_iu++;
4444 sg_descriptor = io_request->sg_chain_buffer;
4445 }
4446 sg = sg_next(sg);
4447 }
4448
4449 put_unaligned_le32(CISS_SG_LAST, &sg_descriptor->flags);
4450 request->partial = chained;
4451 iu_length += num_sg_in_iu * sizeof(*sg_descriptor);
4452
4453out:
4454 put_unaligned_le16(iu_length, &request->header.iu_length);
4455
4456 return 0;
4457}
4458
4459static int pqi_build_aio_sg_list(struct pqi_ctrl_info *ctrl_info,
4460 struct pqi_aio_path_request *request, struct scsi_cmnd *scmd,
4461 struct pqi_io_request *io_request)
4462{
4463 int i;
4464 u16 iu_length;
4465 int sg_count;
4466 bool chained;
4467 unsigned int num_sg_in_iu;
4468 unsigned int max_sg_per_iu;
4469 struct scatterlist *sg;
4470 struct pqi_sg_descriptor *sg_descriptor;
4471
4472 sg_count = scsi_dma_map(scmd);
4473 if (sg_count < 0)
4474 return sg_count;
4475
4476 iu_length = offsetof(struct pqi_aio_path_request, sg_descriptors) -
4477 PQI_REQUEST_HEADER_LENGTH;
4478 num_sg_in_iu = 0;
4479
4480 if (sg_count == 0)
4481 goto out;
4482
4483 sg = scsi_sglist(scmd);
4484 sg_descriptor = request->sg_descriptors;
4485 max_sg_per_iu = ctrl_info->max_sg_per_iu - 1;
4486 chained = false;
4487 i = 0;
4488
4489 while (1) {
4490 pqi_set_sg_descriptor(sg_descriptor, sg);
4491 if (!chained)
4492 num_sg_in_iu++;
4493 i++;
4494 if (i == sg_count)
4495 break;
4496 sg_descriptor++;
4497 if (i == max_sg_per_iu) {
4498 put_unaligned_le64(
4499 (u64)io_request->sg_chain_buffer_dma_handle,
4500 &sg_descriptor->address);
4501 put_unaligned_le32((sg_count - num_sg_in_iu)
4502 * sizeof(*sg_descriptor),
4503 &sg_descriptor->length);
4504 put_unaligned_le32(CISS_SG_CHAIN,
4505 &sg_descriptor->flags);
4506 chained = true;
4507 num_sg_in_iu++;
4508 sg_descriptor = io_request->sg_chain_buffer;
4509 }
4510 sg = sg_next(sg);
4511 }
4512
4513 put_unaligned_le32(CISS_SG_LAST, &sg_descriptor->flags);
4514 request->partial = chained;
4515 iu_length += num_sg_in_iu * sizeof(*sg_descriptor);
4516
4517out:
4518 put_unaligned_le16(iu_length, &request->header.iu_length);
4519 request->num_sg_descriptors = num_sg_in_iu;
4520
4521 return 0;
4522}
4523
4524static void pqi_raid_io_complete(struct pqi_io_request *io_request,
4525 void *context)
4526{
4527 struct scsi_cmnd *scmd;
4528
4529 scmd = io_request->scmd;
4530 pqi_free_io_request(io_request);
4531 scsi_dma_unmap(scmd);
4532 pqi_scsi_done(scmd);
4533}
4534
4535static int pqi_raid_submit_scsi_cmd(struct pqi_ctrl_info *ctrl_info,
4536 struct pqi_scsi_dev *device, struct scsi_cmnd *scmd,
4537 struct pqi_queue_group *queue_group)
4538{
4539 int rc;
4540 size_t cdb_length;
4541 struct pqi_io_request *io_request;
4542 struct pqi_raid_path_request *request;
4543
4544 io_request = pqi_alloc_io_request(ctrl_info);
4545 io_request->io_complete_callback = pqi_raid_io_complete;
4546 io_request->scmd = scmd;
4547
4548 scmd->host_scribble = (unsigned char *)io_request;
4549
4550 request = io_request->iu;
4551 memset(request, 0,
4552 offsetof(struct pqi_raid_path_request, sg_descriptors));
4553
4554 request->header.iu_type = PQI_REQUEST_IU_RAID_PATH_IO;
4555 put_unaligned_le32(scsi_bufflen(scmd), &request->buffer_length);
4556 request->task_attribute = SOP_TASK_ATTRIBUTE_SIMPLE;
4557 put_unaligned_le16(io_request->index, &request->request_id);
4558 request->error_index = request->request_id;
4559 memcpy(request->lun_number, device->scsi3addr,
4560 sizeof(request->lun_number));
4561
4562 cdb_length = min_t(size_t, scmd->cmd_len, sizeof(request->cdb));
4563 memcpy(request->cdb, scmd->cmnd, cdb_length);
4564
4565 switch (cdb_length) {
4566 case 6:
4567 case 10:
4568 case 12:
4569 case 16:
4570 /* No bytes in the Additional CDB bytes field */
4571 request->additional_cdb_bytes_usage =
4572 SOP_ADDITIONAL_CDB_BYTES_0;
4573 break;
4574 case 20:
4575 /* 4 bytes in the Additional CDB bytes field */
4576 request->additional_cdb_bytes_usage =
4577 SOP_ADDITIONAL_CDB_BYTES_4;
4578 break;
4579 case 24:
4580 /* 8 bytes in the Additional CDB bytes field */
4581 request->additional_cdb_bytes_usage =
4582 SOP_ADDITIONAL_CDB_BYTES_8;
4583 break;
4584 case 28:
4585 /* 12 bytes in the Additional CDB bytes field */
4586 request->additional_cdb_bytes_usage =
4587 SOP_ADDITIONAL_CDB_BYTES_12;
4588 break;
4589 case 32:
4590 default:
4591 /* 16 bytes in the Additional CDB bytes field */
4592 request->additional_cdb_bytes_usage =
4593 SOP_ADDITIONAL_CDB_BYTES_16;
4594 break;
4595 }
4596
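 /*
  * The SOP data-direction flags below describe the controller's side of
  * the transfer: a DMA_TO_DEVICE command is a controller "read" from
  * host memory, and DMA_FROM_DEVICE is a controller "write" to it.
  */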
4597 switch (scmd->sc_data_direction) {
4598 case DMA_TO_DEVICE:
4599 request->data_direction = SOP_READ_FLAG;
4600 break;
4601 case DMA_FROM_DEVICE:
4602 request->data_direction = SOP_WRITE_FLAG;
4603 break;
4604 case DMA_NONE:
4605 request->data_direction = SOP_NO_DIRECTION_FLAG;
4606 break;
4607 case DMA_BIDIRECTIONAL:
4608 request->data_direction = SOP_BIDIRECTIONAL;
4609 break;
4610 default:
4611 dev_err(&ctrl_info->pci_dev->dev,
4612 "unknown data direction: %d\n",
4613 scmd->sc_data_direction);
4614 WARN_ON(scmd->sc_data_direction);
4615 break;
4616 }
4617
4618 rc = pqi_build_raid_sg_list(ctrl_info, request, scmd, io_request);
4619 if (rc) {
4620 pqi_free_io_request(io_request);
4621 return SCSI_MLQUEUE_HOST_BUSY;
4622 }
4623
4624 pqi_start_io(ctrl_info, queue_group, RAID_PATH, io_request);
4625
4626 return 0;
4627}
4628
4629static void pqi_aio_io_complete(struct pqi_io_request *io_request,
4630 void *context)
4631{
4632 struct scsi_cmnd *scmd;
4633
4634 scmd = io_request->scmd;
4635 scsi_dma_unmap(scmd);
4636 if (io_request->status == -EAGAIN)
4637 set_host_byte(scmd, DID_IMM_RETRY);
4638 pqi_free_io_request(io_request);
4639 pqi_scsi_done(scmd);
4640}
4641
4642static inline int pqi_aio_submit_scsi_cmd(struct pqi_ctrl_info *ctrl_info,
4643 struct pqi_scsi_dev *device, struct scsi_cmnd *scmd,
4644 struct pqi_queue_group *queue_group)
4645{
4646 return pqi_aio_submit_io(ctrl_info, scmd, device->aio_handle,
4647 scmd->cmnd, scmd->cmd_len, queue_group, NULL);
4648}
4649
4650static int pqi_aio_submit_io(struct pqi_ctrl_info *ctrl_info,
4651 struct scsi_cmnd *scmd, u32 aio_handle, u8 *cdb,
4652 unsigned int cdb_length, struct pqi_queue_group *queue_group,
4653 struct pqi_encryption_info *encryption_info)
4654{
4655 int rc;
4656 struct pqi_io_request *io_request;
4657 struct pqi_aio_path_request *request;
4658
4659 io_request = pqi_alloc_io_request(ctrl_info);
4660 io_request->io_complete_callback = pqi_aio_io_complete;
4661 io_request->scmd = scmd;
4662
4663 scmd->host_scribble = (unsigned char *)io_request;
4664
4665 request = io_request->iu;
4666 memset(request, 0,
4667 offsetof(struct pqi_raid_path_request, sg_descriptors));
4668
4669 request->header.iu_type = PQI_REQUEST_IU_AIO_PATH_IO;
4670 put_unaligned_le32(aio_handle, &request->nexus_id);
4671 put_unaligned_le32(scsi_bufflen(scmd), &request->buffer_length);
4672 request->task_attribute = SOP_TASK_ATTRIBUTE_SIMPLE;
4673 put_unaligned_le16(io_request->index, &request->request_id);
4674 request->error_index = request->request_id;
4675 if (cdb_length > sizeof(request->cdb))
4676 cdb_length = sizeof(request->cdb);
4677 request->cdb_length = cdb_length;
4678 memcpy(request->cdb, cdb, cdb_length);
4679
4680 switch (scmd->sc_data_direction) {
4681 case DMA_TO_DEVICE:
4682 request->data_direction = SOP_READ_FLAG;
4683 break;
4684 case DMA_FROM_DEVICE:
4685 request->data_direction = SOP_WRITE_FLAG;
4686 break;
4687 case DMA_NONE:
4688 request->data_direction = SOP_NO_DIRECTION_FLAG;
4689 break;
4690 case DMA_BIDIRECTIONAL:
4691 request->data_direction = SOP_BIDIRECTIONAL;
4692 break;
4693 default:
4694 dev_err(&ctrl_info->pci_dev->dev,
4695 "unknown data direction: %d\n",
4696 scmd->sc_data_direction);
4697 WARN_ON(scmd->sc_data_direction);
4698 break;
4699 }
4700
4701 if (encryption_info) {
4702 request->encryption_enable = true;
4703 put_unaligned_le16(encryption_info->data_encryption_key_index,
4704 &request->data_encryption_key_index);
4705 put_unaligned_le32(encryption_info->encrypt_tweak_lower,
4706 &request->encrypt_tweak_lower);
4707 put_unaligned_le32(encryption_info->encrypt_tweak_upper,
4708 &request->encrypt_tweak_upper);
4709 }
4710
4711 rc = pqi_build_aio_sg_list(ctrl_info, request, scmd, io_request);
4712 if (rc) {
4713 pqi_free_io_request(io_request);
4714 return SCSI_MLQUEUE_HOST_BUSY;
4715 }
4716
4717 pqi_start_io(ctrl_info, queue_group, AIO_PATH, io_request);
4718
4719 return 0;
4720}
4721
4721
4722static inline u16 pqi_get_hw_queue(struct pqi_ctrl_info *ctrl_info,
4723 struct scsi_cmnd *scmd)
4724{
4725 u16 hw_queue;
4726
4727 hw_queue = blk_mq_unique_tag_to_hwq(blk_mq_unique_tag(scmd->request));
4728 if (hw_queue > ctrl_info->max_hw_queue_index)
4729 hw_queue = 0;
4730
4731 return hw_queue;
4732}
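/*
 * The host is registered with nr_hw_queues == num_queue_groups (see
 * pqi_register_scsi() further down), so the blk-mq tag already encodes the
 * queue group a command was issued on; blk_mq_unique_tag_to_hwq() recovers
 * that index, and anything out of range falls back to group 0.
 */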
4733
4734/*
4735 * This function gets called just before we hand the completed SCSI request
4736 * back to the SML.
4737 */
4738
4739void pqi_prep_for_scsi_done(struct scsi_cmnd *scmd)
4740{
4741 struct pqi_scsi_dev *device;
4742
4743 device = scmd->device->hostdata;
4744 atomic_dec(&device->scsi_cmds_outstanding);
4745}
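/*
 * scsi_cmds_outstanding is incremented in pqi_scsi_queue_command() and
 * dropped either here at completion time or in the queuecommand error path,
 * which is what lets pqi_device_wait_for_pending_io() poll a device to
 * quiescence.
 */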
4746
4747static int pqi_scsi_queue_command(struct Scsi_Host *shost,
4748 struct scsi_cmnd *scmd)
4749{
4750 int rc;
4751 struct pqi_ctrl_info *ctrl_info;
4752 struct pqi_scsi_dev *device;
4753 u16 hw_queue;
4754 struct pqi_queue_group *queue_group;
4755 bool raid_bypassed;
4756
4757 device = scmd->device->hostdata;
4758 ctrl_info = shost_to_hba(shost);
4759
4760 atomic_inc(&device->scsi_cmds_outstanding);
4761
4762 if (pqi_ctrl_offline(ctrl_info)) {
4763 set_host_byte(scmd, DID_NO_CONNECT);
4764 pqi_scsi_done(scmd);
4765 return 0;
4766 }
4767
4768 pqi_ctrl_busy(ctrl_info);
4769 if (pqi_ctrl_blocked(ctrl_info) || pqi_device_in_reset(device)) {
4770 rc = SCSI_MLQUEUE_HOST_BUSY;
4771 goto out;
4772 }
4773
4774 /*
4775 * This is necessary because the SML doesn't zero out this field during
4776 * error recovery.
4777 */
4778 scmd->result = 0;
4779
4780 hw_queue = pqi_get_hw_queue(ctrl_info, scmd);
4781 queue_group = &ctrl_info->queue_groups[hw_queue];
4782
4783 if (pqi_is_logical_device(device)) {
4784 raid_bypassed = false;
4785 if (device->offload_enabled &&
4786 !blk_rq_is_passthrough(scmd->request)) {
4787 rc = pqi_raid_bypass_submit_scsi_cmd(ctrl_info, device,
4788 scmd, queue_group);
4789 if (rc == 0 ||
4790 rc == SCSI_MLQUEUE_HOST_BUSY ||
4791 rc == SAM_STAT_CHECK_CONDITION ||
4792 rc == SAM_STAT_RESERVATION_CONFLICT)
4793 raid_bypassed = true;
4794 }
4795 if (!raid_bypassed)
4796 rc = pqi_raid_submit_scsi_cmd(ctrl_info, device, scmd,
4797 queue_group);
4798 } else {
4799 if (device->aio_enabled)
4800 rc = pqi_aio_submit_scsi_cmd(ctrl_info, device, scmd,
4801 queue_group);
4802 else
4803 rc = pqi_raid_submit_scsi_cmd(ctrl_info, device, scmd,
4804 queue_group);
4805 }
4806
4807out:
4808 pqi_ctrl_unbusy(ctrl_info);
4809 if (rc)
4810 atomic_dec(&device->scsi_cmds_outstanding);
4811
4812 return rc;
4813}
4814
4815static int pqi_wait_until_queued_io_drained(struct pqi_ctrl_info *ctrl_info,
4816 struct pqi_queue_group *queue_group)
4817{
4818 unsigned int path;
4819 unsigned long flags;
4820 bool list_is_empty;
4821
4822 for (path = 0; path < 2; path++) {
4823 while (1) {
4824 spin_lock_irqsave(
4825 &queue_group->submit_lock[path], flags);
4826 list_is_empty =
4827 list_empty(&queue_group->request_list[path]);
4828 spin_unlock_irqrestore(
4829 &queue_group->submit_lock[path], flags);
4830 if (list_is_empty)
4831 break;
4832 pqi_check_ctrl_health(ctrl_info);
4833 if (pqi_ctrl_offline(ctrl_info))
4834 return -ENXIO;
4835 usleep_range(1000, 2000);
4836 }
4837 }
4838
4839 return 0;
4840}
4841
4842static int pqi_wait_until_inbound_queues_empty(struct pqi_ctrl_info *ctrl_info)
4843{
4844 int rc;
4845 unsigned int i;
4846 unsigned int path;
4847 struct pqi_queue_group *queue_group;
4848 pqi_index_t iq_pi;
4849 pqi_index_t iq_ci;
4850
4851 for (i = 0; i < ctrl_info->num_queue_groups; i++) {
4852 queue_group = &ctrl_info->queue_groups[i];
4853
4854 rc = pqi_wait_until_queued_io_drained(ctrl_info, queue_group);
4855 if (rc)
4856 return rc;
4857
4858 for (path = 0; path < 2; path++) {
4859 iq_pi = queue_group->iq_pi_copy[path];
4860
4861 while (1) {
4862 iq_ci = *queue_group->iq_ci[path];
4863 if (iq_ci == iq_pi)
4864 break;
4865 pqi_check_ctrl_health(ctrl_info);
4866 if (pqi_ctrl_offline(ctrl_info))
4867 return -ENXIO;
4868 usleep_range(1000, 2000);
4869 }
4870 }
4871 }
4872
4873 return 0;
4874}
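/*
 * An inbound queue is considered drained once the driver's cached producer
 * index (iq_pi_copy) matches the consumer index the controller writes back
 * to host memory (*iq_ci), i.e. the firmware has consumed every element the
 * driver has posted.
 */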
4875
4876static void pqi_fail_io_queued_for_device(struct pqi_ctrl_info *ctrl_info,
4877 struct pqi_scsi_dev *device)
4878{
4879 unsigned int i;
4880 unsigned int path;
4881 struct pqi_queue_group *queue_group;
4882 unsigned long flags;
4883 struct pqi_io_request *io_request;
4884 struct pqi_io_request *next;
4885 struct scsi_cmnd *scmd;
4886 struct pqi_scsi_dev *scsi_device;
4887
4888 for (i = 0; i < ctrl_info->num_queue_groups; i++) {
4889 queue_group = &ctrl_info->queue_groups[i];
4890
4891 for (path = 0; path < 2; path++) {
4892 spin_lock_irqsave(
4893 &queue_group->submit_lock[path], flags);
4894
4895 list_for_each_entry_safe(io_request, next,
4896 &queue_group->request_list[path],
4897 request_list_entry) {
4898 scmd = io_request->scmd;
4899 if (!scmd)
4900 continue;
4901
4902 scsi_device = scmd->device->hostdata;
4903 if (scsi_device != device)
4904 continue;
4905
4906 list_del(&io_request->request_list_entry);
4907 set_host_byte(scmd, DID_RESET);
4908 pqi_scsi_done(scmd);
4909 }
4910
4911 spin_unlock_irqrestore(
4912 &queue_group->submit_lock[path], flags);
4913 }
4914 }
4915}
4916
4916
4917static int pqi_device_wait_for_pending_io(struct pqi_ctrl_info *ctrl_info,
4918 struct pqi_scsi_dev *device)
4919{
4920 while (atomic_read(&device->scsi_cmds_outstanding)) {
4921 pqi_check_ctrl_health(ctrl_info);
4922 if (pqi_ctrl_offline(ctrl_info))
4923 return -ENXIO;
4924 usleep_range(1000, 2000);
4925 }
4926
4927 return 0;
4928}
4929
4930static int pqi_ctrl_wait_for_pending_io(struct pqi_ctrl_info *ctrl_info)
4931{
4932 bool io_pending;
4933 unsigned long flags;
4934 struct pqi_scsi_dev *device;
4935
4936 while (1) {
4937 io_pending = false;
4938
4939 spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);
4940 list_for_each_entry(device, &ctrl_info->scsi_device_list,
4941 scsi_device_list_entry) {
4942 if (atomic_read(&device->scsi_cmds_outstanding)) {
4943 io_pending = true;
4944 break;
4945 }
4946 }
4947 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock,
4948 flags);
4949
4950 if (!io_pending)
4951 break;
4952
4953 pqi_check_ctrl_health(ctrl_info);
4954 if (pqi_ctrl_offline(ctrl_info))
4955 return -ENXIO;
4956
4957 usleep_range(1000, 2000);
4958 }
4959
4960 return 0;
4961}
4962
4963static void pqi_lun_reset_complete(struct pqi_io_request *io_request,
4964 void *context)
4965{
4966 struct completion *waiting = context;
4967
4968 complete(waiting);
4969}
4970
4971#define PQI_LUN_RESET_TIMEOUT_SECS 10
4972
4973static int pqi_wait_for_lun_reset_completion(struct pqi_ctrl_info *ctrl_info,
4974 struct pqi_scsi_dev *device, struct completion *wait)
4975{
4976 int rc;
4977
4978 while (1) {
4979 if (wait_for_completion_io_timeout(wait,
4980 PQI_LUN_RESET_TIMEOUT_SECS * HZ)) {
4981 rc = 0;
4982 break;
4983 }
4984
4985 pqi_check_ctrl_health(ctrl_info);
4986 if (pqi_ctrl_offline(ctrl_info)) {
4987 rc = -ETIMEDOUT;
4988 break;
4989 }
4990 }
4991
4992 return rc;
4993}
4994
4995static int pqi_lun_reset(struct pqi_ctrl_info *ctrl_info,
4996 struct pqi_scsi_dev *device)
4997{
4998 int rc;
4999 struct pqi_io_request *io_request;
5000 DECLARE_COMPLETION_ONSTACK(wait);
5001 struct pqi_task_management_request *request;
5002
5003 io_request = pqi_alloc_io_request(ctrl_info);
5004 io_request->io_complete_callback = pqi_lun_reset_complete;
5005 io_request->context = &wait;
5006
5007 request = io_request->iu;
5008 memset(request, 0, sizeof(*request));
5009
5010 request->header.iu_type = PQI_REQUEST_IU_TASK_MANAGEMENT;
5011 put_unaligned_le16(sizeof(*request) - PQI_REQUEST_HEADER_LENGTH,
5012 &request->header.iu_length);
5013 put_unaligned_le16(io_request->index, &request->request_id);
5014 memcpy(request->lun_number, device->scsi3addr,
5015 sizeof(request->lun_number));
5016 request->task_management_function = SOP_TASK_MANAGEMENT_LUN_RESET;
5017
5018 pqi_start_io(ctrl_info,
5019 &ctrl_info->queue_groups[PQI_DEFAULT_QUEUE_GROUP], RAID_PATH,
5020 io_request);
5021
5022 rc = pqi_wait_for_lun_reset_completion(ctrl_info, device, &wait);
5023 if (rc == 0)
5024 rc = io_request->status;
5025
5026 pqi_free_io_request(io_request);
5027
5028 return rc;
5029}
5030
5031/* Performs a reset at the LUN level. */
5032
5033static int pqi_device_reset(struct pqi_ctrl_info *ctrl_info,
5034 struct pqi_scsi_dev *device)
5035{
5036 int rc;
5037
5038 rc = pqi_lun_reset(ctrl_info, device);
5039 if (rc == 0)
5040 rc = pqi_device_wait_for_pending_io(ctrl_info, device);
5041
5042 return rc == 0 ? SUCCESS : FAILED;
5043}
5044
5045static int pqi_eh_device_reset_handler(struct scsi_cmnd *scmd)
5046{
5047 int rc;
5048 struct Scsi_Host *shost;
5049 struct pqi_ctrl_info *ctrl_info;
5050 struct pqi_scsi_dev *device;
5051
5052 shost = scmd->device->host;
5053 ctrl_info = shost_to_hba(shost);
5054 device = scmd->device->hostdata;
5055
5056 dev_err(&ctrl_info->pci_dev->dev,
5057 "resetting scsi %d:%d:%d:%d\n",
5058 shost->host_no, device->bus, device->target, device->lun);
5059
5060 pqi_check_ctrl_health(ctrl_info);
5061 if (pqi_ctrl_offline(ctrl_info)) {
5062 rc = FAILED;
5063 goto out;
5064 }
5065
5066 mutex_lock(&ctrl_info->lun_reset_mutex);
5067
5068 pqi_ctrl_block_requests(ctrl_info);
5069 pqi_ctrl_wait_until_quiesced(ctrl_info);
5070 pqi_fail_io_queued_for_device(ctrl_info, device);
5071 rc = pqi_wait_until_inbound_queues_empty(ctrl_info);
5072 pqi_device_reset_start(device);
5073 pqi_ctrl_unblock_requests(ctrl_info);
5074
5075 if (rc)
5076 rc = FAILED;
5077 else
5078 rc = pqi_device_reset(ctrl_info, device);
5079
5080 pqi_device_reset_done(device);
5081
5082 mutex_unlock(&ctrl_info->lun_reset_mutex);
5083
5084out:
5085 dev_err(&ctrl_info->pci_dev->dev,
5086 "reset of scsi %d:%d:%d:%d: %s\n",
5087 shost->host_no, device->bus, device->target, device->lun,
5088 rc == SUCCESS ? "SUCCESS" : "FAILED");
5089
5090 return rc;
5091}
5092
5093static int pqi_slave_alloc(struct scsi_device *sdev)
5094{
5095 struct pqi_scsi_dev *device;
5096 unsigned long flags;
5097 struct pqi_ctrl_info *ctrl_info;
5098 struct scsi_target *starget;
5099 struct sas_rphy *rphy;
5100
5101 ctrl_info = shost_to_hba(sdev->host);
5102
5103 spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);
5104
5105 if (sdev_channel(sdev) == PQI_PHYSICAL_DEVICE_BUS) {
5106 starget = scsi_target(sdev);
5107 rphy = target_to_rphy(starget);
5108 device = pqi_find_device_by_sas_rphy(ctrl_info, rphy);
5109 if (device) {
5110 device->target = sdev_id(sdev);
5111 device->lun = sdev->lun;
5112 device->target_lun_valid = true;
5113 }
5114 } else {
5115 device = pqi_find_scsi_dev(ctrl_info, sdev_channel(sdev),
5116 sdev_id(sdev), sdev->lun);
5117 }
5118
5119 if (device && device->expose_device) {
5120 sdev->hostdata = device;
5121 device->sdev = sdev;
5122 if (device->queue_depth) {
5123 device->advertised_queue_depth = device->queue_depth;
5124 scsi_change_queue_depth(sdev,
5125 device->advertised_queue_depth);
5126 }
5127 }
5128
5129 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
5130
5131 return 0;
5132}
5133
5134static int pqi_slave_configure(struct scsi_device *sdev)
5135{
5136 struct pqi_scsi_dev *device;
5137
5138 device = sdev->hostdata;
5139 if (!device->expose_device)
5140 sdev->no_uld_attach = true;
5141
5142 return 0;
5143}
5144
5145static int pqi_map_queues(struct Scsi_Host *shost)
5146{
5147 struct pqi_ctrl_info *ctrl_info = shost_to_hba(shost);
5148
5149 return blk_mq_pci_map_queues(&shost->tag_set, ctrl_info->pci_dev);
5150}
5151
5152static int pqi_getpciinfo_ioctl(struct pqi_ctrl_info *ctrl_info,
5153 void __user *arg)
5154{
5155 struct pci_dev *pci_dev;
5156 u32 subsystem_vendor;
5157 u32 subsystem_device;
5158 cciss_pci_info_struct pciinfo;
5159
5160 if (!arg)
5161 return -EINVAL;
5162
5163 pci_dev = ctrl_info->pci_dev;
5164
5165 pciinfo.domain = pci_domain_nr(pci_dev->bus);
5166 pciinfo.bus = pci_dev->bus->number;
5167 pciinfo.dev_fn = pci_dev->devfn;
5168 subsystem_vendor = pci_dev->subsystem_vendor;
5169 subsystem_device = pci_dev->subsystem_device;
5170 pciinfo.board_id = ((subsystem_device << 16) & 0xffff0000) |
5171 subsystem_vendor;
5172
5173 if (copy_to_user(arg, &pciinfo, sizeof(pciinfo)))
5174 return -EFAULT;
5175
5176 return 0;
5177}
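/*
 * board_id packs the PCI subsystem IDs into a single 32-bit value: the
 * subsystem device ID occupies the upper 16 bits and the subsystem vendor
 * ID the lower 16.  With purely illustrative values of 0x028f and 0x9005,
 * for example, board_id would be 0x028f9005.
 */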
5178
5179static int pqi_getdrivver_ioctl(void __user *arg)
5180{
5181 u32 version;
5182
5183 if (!arg)
5184 return -EINVAL;
5185
5186 version = (DRIVER_MAJOR << 28) | (DRIVER_MINOR << 24) |
5187 (DRIVER_RELEASE << 16) | DRIVER_REVISION;
5188
5189 if (copy_to_user(arg, &version, sizeof(version)))
5190 return -EFAULT;
5191
5192 return 0;
5193}
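/*
 * The version reported above is packed, high bits to low, as major:4,
 * minor:4, release:8, revision:16; hypothetical values of 1.2.3-4 would
 * encode to 0x12030004.  A stand-alone sketch of the reverse mapping
 * (helper name and placement are illustrative only, not part of the
 * driver):
 */
#if 0
static void pqi_unpack_drivver(u32 version, unsigned int *major,
	unsigned int *minor, unsigned int *release, unsigned int *revision)
{
	*major = (version >> 28) & 0xf;
	*minor = (version >> 24) & 0xf;
	*release = (version >> 16) & 0xff;
	*revision = version & 0xffff;
}
#endif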
5194
5195struct ciss_error_info {
5196 u8 scsi_status;
5197 int command_status;
5198 size_t sense_data_length;
5199};
5200
5201static void pqi_error_info_to_ciss(struct pqi_raid_error_info *pqi_error_info,
5202 struct ciss_error_info *ciss_error_info)
5203{
5204 int ciss_cmd_status;
5205 size_t sense_data_length;
5206
5207 switch (pqi_error_info->data_out_result) {
5208 case PQI_DATA_IN_OUT_GOOD:
5209 ciss_cmd_status = CISS_CMD_STATUS_SUCCESS;
5210 break;
5211 case PQI_DATA_IN_OUT_UNDERFLOW:
5212 ciss_cmd_status = CISS_CMD_STATUS_DATA_UNDERRUN;
5213 break;
5214 case PQI_DATA_IN_OUT_BUFFER_OVERFLOW:
5215 ciss_cmd_status = CISS_CMD_STATUS_DATA_OVERRUN;
5216 break;
5217 case PQI_DATA_IN_OUT_PROTOCOL_ERROR:
5218 case PQI_DATA_IN_OUT_BUFFER_ERROR:
5219 case PQI_DATA_IN_OUT_BUFFER_OVERFLOW_DESCRIPTOR_AREA:
5220 case PQI_DATA_IN_OUT_BUFFER_OVERFLOW_BRIDGE:
5221 case PQI_DATA_IN_OUT_ERROR:
5222 ciss_cmd_status = CISS_CMD_STATUS_PROTOCOL_ERROR;
5223 break;
5224 case PQI_DATA_IN_OUT_HARDWARE_ERROR:
5225 case PQI_DATA_IN_OUT_PCIE_FABRIC_ERROR:
5226 case PQI_DATA_IN_OUT_PCIE_COMPLETION_TIMEOUT:
5227 case PQI_DATA_IN_OUT_PCIE_COMPLETER_ABORT_RECEIVED:
5228 case PQI_DATA_IN_OUT_PCIE_UNSUPPORTED_REQUEST_RECEIVED:
5229 case PQI_DATA_IN_OUT_PCIE_ECRC_CHECK_FAILED:
5230 case PQI_DATA_IN_OUT_PCIE_UNSUPPORTED_REQUEST:
5231 case PQI_DATA_IN_OUT_PCIE_ACS_VIOLATION:
5232 case PQI_DATA_IN_OUT_PCIE_TLP_PREFIX_BLOCKED:
5233 case PQI_DATA_IN_OUT_PCIE_POISONED_MEMORY_READ:
5234 ciss_cmd_status = CISS_CMD_STATUS_HARDWARE_ERROR;
5235 break;
5236 case PQI_DATA_IN_OUT_UNSOLICITED_ABORT:
5237 ciss_cmd_status = CISS_CMD_STATUS_UNSOLICITED_ABORT;
5238 break;
5239 case PQI_DATA_IN_OUT_ABORTED:
5240 ciss_cmd_status = CISS_CMD_STATUS_ABORTED;
5241 break;
5242 case PQI_DATA_IN_OUT_TIMEOUT:
5243 ciss_cmd_status = CISS_CMD_STATUS_TIMEOUT;
5244 break;
5245 default:
5246 ciss_cmd_status = CISS_CMD_STATUS_TARGET_STATUS;
5247 break;
5248 }
5249
5250 sense_data_length =
5251 get_unaligned_le16(&pqi_error_info->sense_data_length);
5252 if (sense_data_length == 0)
5253 sense_data_length =
5254 get_unaligned_le16(&pqi_error_info->response_data_length);
5255 if (sense_data_length)
5256 if (sense_data_length > sizeof(pqi_error_info->data))
5257 sense_data_length = sizeof(pqi_error_info->data);
5258
5259 ciss_error_info->scsi_status = pqi_error_info->status;
5260 ciss_error_info->command_status = ciss_cmd_status;
5261 ciss_error_info->sense_data_length = sense_data_length;
5262}
5263
5264static int pqi_passthru_ioctl(struct pqi_ctrl_info *ctrl_info, void __user *arg)
5265{
5266 int rc;
5267 char *kernel_buffer = NULL;
5268 u16 iu_length;
5269 size_t sense_data_length;
5270 IOCTL_Command_struct iocommand;
5271 struct pqi_raid_path_request request;
5272 struct pqi_raid_error_info pqi_error_info;
5273 struct ciss_error_info ciss_error_info;
5274
5275 if (pqi_ctrl_offline(ctrl_info))
5276 return -ENXIO;
5277 if (!arg)
5278 return -EINVAL;
5279 if (!capable(CAP_SYS_RAWIO))
5280 return -EPERM;
5281 if (copy_from_user(&iocommand, arg, sizeof(iocommand)))
5282 return -EFAULT;
5283 if (iocommand.buf_size < 1 &&
5284 iocommand.Request.Type.Direction != XFER_NONE)
5285 return -EINVAL;
5286 if (iocommand.Request.CDBLen > sizeof(request.cdb))
5287 return -EINVAL;
5288 if (iocommand.Request.Type.Type != TYPE_CMD)
5289 return -EINVAL;
5290
5291 switch (iocommand.Request.Type.Direction) {
5292 case XFER_NONE:
5293 case XFER_WRITE:
5294 case XFER_READ:
5295 break;
5296 default:
5297 return -EINVAL;
5298 }
5299
5300 if (iocommand.buf_size > 0) {
5301 kernel_buffer = kmalloc(iocommand.buf_size, GFP_KERNEL);
5302 if (!kernel_buffer)
5303 return -ENOMEM;
5304 if (iocommand.Request.Type.Direction & XFER_WRITE) {
5305 if (copy_from_user(kernel_buffer, iocommand.buf,
5306 iocommand.buf_size)) {
5307 rc = -EFAULT;
5308 goto out;
5309 }
5310 } else {
5311 memset(kernel_buffer, 0, iocommand.buf_size);
5312 }
5313 }
5314
5315 memset(&request, 0, sizeof(request));
5316
5317 request.header.iu_type = PQI_REQUEST_IU_RAID_PATH_IO;
5318 iu_length = offsetof(struct pqi_raid_path_request, sg_descriptors) -
5319 PQI_REQUEST_HEADER_LENGTH;
5320 memcpy(request.lun_number, iocommand.LUN_info.LunAddrBytes,
5321 sizeof(request.lun_number));
5322 memcpy(request.cdb, iocommand.Request.CDB, iocommand.Request.CDBLen);
5323 request.additional_cdb_bytes_usage = SOP_ADDITIONAL_CDB_BYTES_0;
5324
5325 switch (iocommand.Request.Type.Direction) {
5326 case XFER_NONE:
5327 request.data_direction = SOP_NO_DIRECTION_FLAG;
5328 break;
5329 case XFER_WRITE:
5330 request.data_direction = SOP_WRITE_FLAG;
5331 break;
5332 case XFER_READ:
5333 request.data_direction = SOP_READ_FLAG;
5334 break;
5335 }
5336
5337 request.task_attribute = SOP_TASK_ATTRIBUTE_SIMPLE;
5338
5339 if (iocommand.buf_size > 0) {
5340 put_unaligned_le32(iocommand.buf_size, &request.buffer_length);
5341
5342 rc = pqi_map_single(ctrl_info->pci_dev,
5343 &request.sg_descriptors[0], kernel_buffer,
5344 iocommand.buf_size, PCI_DMA_BIDIRECTIONAL);
5345 if (rc)
5346 goto out;
5347
5348 iu_length += sizeof(request.sg_descriptors[0]);
5349 }
5350
5351 put_unaligned_le16(iu_length, &request.header.iu_length);
5352
5353 rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header,
5354 PQI_SYNC_FLAGS_INTERRUPTABLE, &pqi_error_info, NO_TIMEOUT);
5355
5356 if (iocommand.buf_size > 0)
5357 pqi_pci_unmap(ctrl_info->pci_dev, request.sg_descriptors, 1,
5358 PCI_DMA_BIDIRECTIONAL);
5359
5360 memset(&iocommand.error_info, 0, sizeof(iocommand.error_info));
5361
5362 if (rc == 0) {
5363 pqi_error_info_to_ciss(&pqi_error_info, &ciss_error_info);
5364 iocommand.error_info.ScsiStatus = ciss_error_info.scsi_status;
5365 iocommand.error_info.CommandStatus =
5366 ciss_error_info.command_status;
5367 sense_data_length = ciss_error_info.sense_data_length;
5368 if (sense_data_length) {
5369 if (sense_data_length >
5370 sizeof(iocommand.error_info.SenseInfo))
5371 sense_data_length =
5372 sizeof(iocommand.error_info.SenseInfo);
5373 memcpy(iocommand.error_info.SenseInfo,
5374 pqi_error_info.data, sense_data_length);
5375 iocommand.error_info.SenseLen = sense_data_length;
5376 }
5377 }
5378
5379 if (copy_to_user(arg, &iocommand, sizeof(iocommand))) {
5380 rc = -EFAULT;
5381 goto out;
5382 }
5383
5384 if (rc == 0 && iocommand.buf_size > 0 &&
5385 (iocommand.Request.Type.Direction & XFER_READ)) {
5386 if (copy_to_user(iocommand.buf, kernel_buffer,
5387 iocommand.buf_size)) {
5388 rc = -EFAULT;
5389 }
5390 }
5391
5392out:
5393 kfree(kernel_buffer);
5394
5395 return rc;
5396}
5397
5398static int pqi_ioctl(struct scsi_device *sdev, int cmd, void __user *arg)
5399{
5400 int rc;
5401 struct pqi_ctrl_info *ctrl_info;
5402
5403 ctrl_info = shost_to_hba(sdev->host);
5404
5405 switch (cmd) {
5406 case CCISS_DEREGDISK:
5407 case CCISS_REGNEWDISK:
5408 case CCISS_REGNEWD:
5409 rc = pqi_scan_scsi_devices(ctrl_info);
5410 break;
5411 case CCISS_GETPCIINFO:
5412 rc = pqi_getpciinfo_ioctl(ctrl_info, arg);
5413 break;
5414 case CCISS_GETDRIVVER:
5415 rc = pqi_getdrivver_ioctl(arg);
5416 break;
5417 case CCISS_PASSTHRU:
5418 rc = pqi_passthru_ioctl(ctrl_info, arg);
5419 break;
5420 default:
5421 rc = -EINVAL;
5422 break;
5423 }
5424
5425 return rc;
5426}
5427
5428static ssize_t pqi_version_show(struct device *dev,
5429 struct device_attribute *attr, char *buffer)
5430{
5431 ssize_t count = 0;
5432 struct Scsi_Host *shost;
5433 struct pqi_ctrl_info *ctrl_info;
5434
5435 shost = class_to_shost(dev);
5436 ctrl_info = shost_to_hba(shost);
5437
5438 count += snprintf(buffer + count, PAGE_SIZE - count,
5439 " driver: %s\n", DRIVER_VERSION BUILD_TIMESTAMP);
5440
5441 count += snprintf(buffer + count, PAGE_SIZE - count,
5442 "firmware: %s\n", ctrl_info->firmware_version);
5443
5444 return count;
5445}
5446
5447static ssize_t pqi_host_rescan_store(struct device *dev,
5448 struct device_attribute *attr, const char *buffer, size_t count)
5449{
5450 struct Scsi_Host *shost = class_to_shost(dev);
5451
5452 pqi_scan_start(shost);
5453
5454 return count;
5455}
5456
5457static DEVICE_ATTR(version, S_IRUGO, pqi_version_show, NULL);
5458static DEVICE_ATTR(rescan, S_IWUSR, NULL, pqi_host_rescan_store);
5459
5460static struct device_attribute *pqi_shost_attrs[] = {
5461 &dev_attr_version,
5462 &dev_attr_rescan,
5463 NULL
5464};
5465
5466static ssize_t pqi_sas_address_show(struct device *dev,
5467 struct device_attribute *attr, char *buffer)
5468{
5469 struct pqi_ctrl_info *ctrl_info;
5470 struct scsi_device *sdev;
5471 struct pqi_scsi_dev *device;
5472 unsigned long flags;
5473 u64 sas_address;
5474
5475 sdev = to_scsi_device(dev);
5476 ctrl_info = shost_to_hba(sdev->host);
5477
5478 spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);
5479
5480 device = sdev->hostdata;
5481 if (pqi_is_logical_device(device)) {
5482 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock,
5483 flags);
5484 return -ENODEV;
5485 }
5486 sas_address = device->sas_address;
5487
5488 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
5489
5490 return snprintf(buffer, PAGE_SIZE, "0x%016llx\n", sas_address);
5491}
5492
5493static ssize_t pqi_ssd_smart_path_enabled_show(struct device *dev,
5494 struct device_attribute *attr, char *buffer)
5495{
5496 struct pqi_ctrl_info *ctrl_info;
5497 struct scsi_device *sdev;
5498 struct pqi_scsi_dev *device;
5499 unsigned long flags;
5500
5501 sdev = to_scsi_device(dev);
5502 ctrl_info = shost_to_hba(sdev->host);
5503
5504 spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);
5505
5506 device = sdev->hostdata;
5507 buffer[0] = device->offload_enabled ? '1' : '0';
5508 buffer[1] = '\n';
5509 buffer[2] = '\0';
5510
5511 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
5512
5513 return 2;
5514}
5515
5516static DEVICE_ATTR(sas_address, S_IRUGO, pqi_sas_address_show, NULL);
5517static DEVICE_ATTR(ssd_smart_path_enabled, S_IRUGO,
5518 pqi_ssd_smart_path_enabled_show, NULL);
5519
5520static struct device_attribute *pqi_sdev_attrs[] = {
5521 &dev_attr_sas_address,
5522 &dev_attr_ssd_smart_path_enabled,
5523 NULL
5524};
5525
5526static struct scsi_host_template pqi_driver_template = {
5527 .module = THIS_MODULE,
5528 .name = DRIVER_NAME_SHORT,
5529 .proc_name = DRIVER_NAME_SHORT,
5530 .queuecommand = pqi_scsi_queue_command,
5531 .scan_start = pqi_scan_start,
5532 .scan_finished = pqi_scan_finished,
5533 .this_id = -1,
5534 .use_clustering = ENABLE_CLUSTERING,
5535 .eh_device_reset_handler = pqi_eh_device_reset_handler,
5536 .ioctl = pqi_ioctl,
5537 .slave_alloc = pqi_slave_alloc,
5538 .slave_configure = pqi_slave_configure,
5539 .map_queues = pqi_map_queues,
5540 .sdev_attrs = pqi_sdev_attrs,
5541 .shost_attrs = pqi_shost_attrs,
5542};
5543
5544static int pqi_register_scsi(struct pqi_ctrl_info *ctrl_info)
5545{
5546 int rc;
5547 struct Scsi_Host *shost;
5548
5549 shost = scsi_host_alloc(&pqi_driver_template, sizeof(ctrl_info));
5550 if (!shost) {
5551 dev_err(&ctrl_info->pci_dev->dev,
5552 "scsi_host_alloc failed for controller %u\n",
5553 ctrl_info->ctrl_id);
5554 return -ENOMEM;
5555 }
5556
5557 shost->io_port = 0;
5558 shost->n_io_port = 0;
5559 shost->this_id = -1;
5560 shost->max_channel = PQI_MAX_BUS;
5561 shost->max_cmd_len = MAX_COMMAND_SIZE;
5562 shost->max_lun = ~0;
5563 shost->max_id = ~0;
5564 shost->max_sectors = ctrl_info->max_sectors;
5565 shost->can_queue = ctrl_info->scsi_ml_can_queue;
5566 shost->cmd_per_lun = shost->can_queue;
5567 shost->sg_tablesize = ctrl_info->sg_tablesize;
5568 shost->transportt = pqi_sas_transport_template;
5569 shost->irq = pci_irq_vector(ctrl_info->pci_dev, 0);
5570 shost->unique_id = shost->irq;
5571 shost->nr_hw_queues = ctrl_info->num_queue_groups;
5572 shost->hostdata[0] = (unsigned long)ctrl_info;
5573
5574 rc = scsi_add_host(shost, &ctrl_info->pci_dev->dev);
5575 if (rc) {
5576 dev_err(&ctrl_info->pci_dev->dev,
5577 "scsi_add_host failed for controller %u\n",
5578 ctrl_info->ctrl_id);
5579 goto free_host;
5580 }
5581
5582 rc = pqi_add_sas_host(shost, ctrl_info);
5583 if (rc) {
5584 dev_err(&ctrl_info->pci_dev->dev,
5585 "add SAS host failed for controller %u\n",
5586 ctrl_info->ctrl_id);
5587 goto remove_host;
5588 }
5589
5590 ctrl_info->scsi_host = shost;
5591
5592 return 0;
5593
5594remove_host:
5595 scsi_remove_host(shost);
5596free_host:
5597 scsi_host_put(shost);
5598
5599 return rc;
5600}
5601
5602static void pqi_unregister_scsi(struct pqi_ctrl_info *ctrl_info)
5603{
5604 struct Scsi_Host *shost;
5605
5606 pqi_delete_sas_host(ctrl_info);
5607
5608 shost = ctrl_info->scsi_host;
5609 if (!shost)
5610 return;
5611
5612 scsi_remove_host(shost);
5613 scsi_host_put(shost);
5614}
5615
5616#define PQI_RESET_ACTION_RESET 0x1
5617
5618#define PQI_RESET_TYPE_NO_RESET 0x0
5619#define PQI_RESET_TYPE_SOFT_RESET 0x1
5620#define PQI_RESET_TYPE_FIRM_RESET 0x2
5621#define PQI_RESET_TYPE_HARD_RESET 0x3
5622
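/*
 * Issue a PQI hard reset: the reset action goes into bits 7:5 and the
 * reset type into the low bits of the device_reset register, then wait
 * via pqi_wait_for_pqi_mode_ready() for the controller to come back.
 */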
5623static int pqi_reset(struct pqi_ctrl_info *ctrl_info)
5624{
5625 int rc;
5626 u32 reset_params;
5627
5628 reset_params = (PQI_RESET_ACTION_RESET << 5) |
5629 PQI_RESET_TYPE_HARD_RESET;
5630
5631 writel(reset_params,
5632 &ctrl_info->pqi_registers->device_reset);
5633
5634 rc = pqi_wait_for_pqi_mode_ready(ctrl_info);
5635 if (rc)
5636 dev_err(&ctrl_info->pci_dev->dev,
5637 "PQI reset failed\n");
5638
5639 return rc;
5640}
5641
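/*
 * Retrieve the firmware version via a BMIC IDENTIFY CONTROLLER command
 * and cache it in ctrl_info->firmware_version as "<version>-<build>",
 * which is what the sysfs 'version' attribute reports.
 */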
5642static int pqi_get_ctrl_firmware_version(struct pqi_ctrl_info *ctrl_info)
5643{
5644 int rc;
5645 struct bmic_identify_controller *identify;
5646
5647 identify = kmalloc(sizeof(*identify), GFP_KERNEL);
5648 if (!identify)
5649 return -ENOMEM;
5650
5651 rc = pqi_identify_controller(ctrl_info, identify);
5652 if (rc)
5653 goto out;
5654
5655 memcpy(ctrl_info->firmware_version, identify->firmware_version,
5656 sizeof(identify->firmware_version));
5657 ctrl_info->firmware_version[sizeof(identify->firmware_version)] = '\0';
5658 snprintf(ctrl_info->firmware_version +
5659 strlen(ctrl_info->firmware_version),
5660 sizeof(ctrl_info->firmware_version),
5661 "-%u", get_unaligned_le16(&identify->firmware_build_number));
5662
5663out:
5664 kfree(identify);
5665
5666 return rc;
5667}
5668
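/*
 * Copy the PQI configuration table out of BAR memory, walk its sections,
 * and remember the I/O-mapped address of the heartbeat counter (if the
 * heartbeat section is present) so it can be polled later.
 */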
5669static int pqi_process_config_table(struct pqi_ctrl_info *ctrl_info)
5670{
5671 u32 table_length;
5672 u32 section_offset;
5673 void __iomem *table_iomem_addr;
5674 struct pqi_config_table *config_table;
5675 struct pqi_config_table_section_header *section;
5676
5677 table_length = ctrl_info->config_table_length;
5678
5679 config_table = kmalloc(table_length, GFP_KERNEL);
5680 if (!config_table) {
5681 dev_err(&ctrl_info->pci_dev->dev,
5682 "unable to allocate memory for PQI configuration table\n");
5683 return -ENOMEM;
5684 }
5685
5686 /*
5687 * Copy the config table contents from I/O memory space into the
5688 * temporary buffer.
5689 */
5690 table_iomem_addr = ctrl_info->iomem_base +
5691 ctrl_info->config_table_offset;
5692 memcpy_fromio(config_table, table_iomem_addr, table_length);
5693
5694 section_offset =
5695 get_unaligned_le32(&config_table->first_section_offset);
5696
5697 while (section_offset) {
5698 section = (void *)config_table + section_offset;
5699
5700 switch (get_unaligned_le16(&section->section_id)) {
5701 case PQI_CONFIG_TABLE_SECTION_HEARTBEAT:
5702 ctrl_info->heartbeat_counter = table_iomem_addr +
5703 section_offset +
5704 offsetof(struct pqi_config_table_heartbeat,
5705 heartbeat_counter);
5706 break;
5707 }
5708
5709 section_offset =
5710 get_unaligned_le16(&section->next_section_offset);
5711 }
5712
5713 kfree(config_table);
5714
5715 return 0;
5716}
5717
5718/* Switches the controller from PQI mode back into SIS mode. */
5719
5720static int pqi_revert_to_sis_mode(struct pqi_ctrl_info *ctrl_info)
5721{
5722 int rc;
5723
5724 pqi_change_irq_mode(ctrl_info, IRQ_MODE_NONE);
5725 rc = pqi_reset(ctrl_info);
5726 if (rc)
5727 return rc;
5728 sis_reenable_sis_mode(ctrl_info);
5729 pqi_save_ctrl_mode(ctrl_info, SIS_MODE);
5730
5731 return 0;
5732}
5733
5734/*
5735 * If the controller isn't already in SIS mode, this function forces it into
5736 * SIS mode.
5737 */
5738
5739static int pqi_force_sis_mode(struct pqi_ctrl_info *ctrl_info)
5740{
5741 if (!sis_is_firmware_running(ctrl_info))
5742 return -ENXIO;
5743
5744 if (pqi_get_ctrl_mode(ctrl_info) == SIS_MODE)
5745 return 0;
5746
5747 if (sis_is_kernel_up(ctrl_info)) {
5748 pqi_save_ctrl_mode(ctrl_info, SIS_MODE);
5749 return 0;
5750 }
5751
5752 return pqi_revert_to_sis_mode(ctrl_info);
5753}
5754
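/*
 * Main controller bring-up: force SIS mode, query SIS properties and PQI
 * capabilities, transition the controller into PQI mode, create the admin
 * and operational queues, enable MSI-X interrupts and events, and finally
 * register with the SCSI midlayer and start a device scan.
 */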
5755static int pqi_ctrl_init(struct pqi_ctrl_info *ctrl_info)
5756{
5757 int rc;
5758
5759 rc = pqi_force_sis_mode(ctrl_info);
5760 if (rc)
5761 return rc;
5762
5763 /*
5764 * Wait until the controller is ready to start accepting SIS
5765 * commands.
5766 */
5767 rc = sis_wait_for_ctrl_ready(ctrl_info);
5768 if (rc) {
5769 dev_err(&ctrl_info->pci_dev->dev,
5770 "error initializing SIS interface\n");
5771 return rc;
5772 }
5773
5774 /*
5775 * Get the controller properties. This allows us to determine
5776 * whether or not it supports PQI mode.
5777 */
5778 rc = sis_get_ctrl_properties(ctrl_info);
5779 if (rc) {
5780 dev_err(&ctrl_info->pci_dev->dev,
5781 "error obtaining controller properties\n");
5782 return rc;
5783 }
5784
5785 rc = sis_get_pqi_capabilities(ctrl_info);
5786 if (rc) {
5787 dev_err(&ctrl_info->pci_dev->dev,
5788 "error obtaining controller capabilities\n");
5789 return rc;
5790 }
5791
5792 if (ctrl_info->max_outstanding_requests > PQI_MAX_OUTSTANDING_REQUESTS)
5793 ctrl_info->max_outstanding_requests =
5794 PQI_MAX_OUTSTANDING_REQUESTS;
5795
5796 pqi_calculate_io_resources(ctrl_info);
5797
5798 rc = pqi_alloc_error_buffer(ctrl_info);
5799 if (rc) {
5800 dev_err(&ctrl_info->pci_dev->dev,
5801 "failed to allocate PQI error buffer\n");
5802 return rc;
5803 }
5804
5805 /*
5806 * If the function we are about to call succeeds, the
5807 * controller will transition from legacy SIS mode
5808 * into PQI mode.
5809 */
5810 rc = sis_init_base_struct_addr(ctrl_info);
5811 if (rc) {
5812 dev_err(&ctrl_info->pci_dev->dev,
5813 "error initializing PQI mode\n");
5814 return rc;
5815 }
5816
5817 /* Wait for the controller to complete the SIS -> PQI transition. */
5818 rc = pqi_wait_for_pqi_mode_ready(ctrl_info);
5819 if (rc) {
5820 dev_err(&ctrl_info->pci_dev->dev,
5821 "transition to PQI mode failed\n");
5822 return rc;
5823 }
5824
5825 /* From here on, we are running in PQI mode. */
5826 ctrl_info->pqi_mode_enabled = true;
5827 pqi_save_ctrl_mode(ctrl_info, PQI_MODE);
5828
5829 rc = pqi_process_config_table(ctrl_info);
5830 if (rc)
5831 return rc;
5832
5833 rc = pqi_alloc_admin_queues(ctrl_info);
5834 if (rc) {
5835 dev_err(&ctrl_info->pci_dev->dev,
5836 "error allocating admin queues\n");
5837 return rc;
5838 }
5839
5840 rc = pqi_create_admin_queues(ctrl_info);
5841 if (rc) {
5842 dev_err(&ctrl_info->pci_dev->dev,
5843 "error creating admin queues\n");
5844 return rc;
5845 }
5846
5847 rc = pqi_report_device_capability(ctrl_info);
5848 if (rc) {
5849 dev_err(&ctrl_info->pci_dev->dev,
5850 "obtaining device capability failed\n");
5851 return rc;
5852 }
5853
5854 rc = pqi_validate_device_capability(ctrl_info);
5855 if (rc)
5856 return rc;
5857
5858 pqi_calculate_queue_resources(ctrl_info);
5859
5860 rc = pqi_enable_msix_interrupts(ctrl_info);
5861 if (rc)
5862 return rc;
5863
5864 if (ctrl_info->num_msix_vectors_enabled < ctrl_info->num_queue_groups) {
5865 ctrl_info->max_msix_vectors =
5866 ctrl_info->num_msix_vectors_enabled;
5867 pqi_calculate_queue_resources(ctrl_info);
5868 }
5869
5870 rc = pqi_alloc_io_resources(ctrl_info);
5871 if (rc)
5872 return rc;
5873
5874 rc = pqi_alloc_operational_queues(ctrl_info);
5875 if (rc)
5876 return rc;
5877
5878 pqi_init_operational_queues(ctrl_info);
5879
5880 rc = pqi_request_irqs(ctrl_info);
5881 if (rc)
5882 return rc;
5883
5884 rc = pqi_create_queues(ctrl_info);
5885 if (rc)
5886 return rc;
5887
5888 pqi_change_irq_mode(ctrl_info, IRQ_MODE_MSIX);
5889
5890 ctrl_info->controller_online = true;
5891 pqi_start_heartbeat_timer(ctrl_info);
5892
5893 rc = pqi_enable_events(ctrl_info);
5894 if (rc) {
5895 dev_err(&ctrl_info->pci_dev->dev,
5896 "error enabling events\n");
5897 return rc;
5898 }
5899
5900 /* Register with the SCSI subsystem. */
5901 rc = pqi_register_scsi(ctrl_info);
5902 if (rc)
5903 return rc;
5904
5905 rc = pqi_get_ctrl_firmware_version(ctrl_info);
5906 if (rc) {
5907 dev_err(&ctrl_info->pci_dev->dev,
5908 "error obtaining firmware version\n");
5909 return rc;
5910 }
5911
5912 rc = pqi_write_driver_version_to_host_wellness(ctrl_info);
5913 if (rc) {
5914 dev_err(&ctrl_info->pci_dev->dev,
5915 "error updating host wellness\n");
5916 return rc;
5917 }
5918
5919 pqi_schedule_update_time_worker(ctrl_info);
5920
5921 pqi_scan_scsi_devices(ctrl_info);
5922
5923 return 0;
5924}
5925
5926#if defined(CONFIG_PM)
5927
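/*
 * Reset the driver's cached queue indices and the index copies the
 * controller reads from host memory so the admin, operational, and event
 * queues can be re-created cleanly on resume.
 */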
5928static void pqi_reinit_queues(struct pqi_ctrl_info *ctrl_info)
5929{
5930 unsigned int i;
5931 struct pqi_admin_queues *admin_queues;
5932 struct pqi_event_queue *event_queue;
5933
5934 admin_queues = &ctrl_info->admin_queues;
5935 admin_queues->iq_pi_copy = 0;
5936 admin_queues->oq_ci_copy = 0;
5937 *admin_queues->oq_pi = 0;
5938
5939 for (i = 0; i < ctrl_info->num_queue_groups; i++) {
5940 ctrl_info->queue_groups[i].iq_pi_copy[RAID_PATH] = 0;
5941 ctrl_info->queue_groups[i].iq_pi_copy[AIO_PATH] = 0;
5942 ctrl_info->queue_groups[i].oq_ci_copy = 0;
5943
5944 *ctrl_info->queue_groups[i].iq_ci[RAID_PATH] = 0;
5945 *ctrl_info->queue_groups[i].iq_ci[AIO_PATH] = 0;
5946 *ctrl_info->queue_groups[i].oq_pi = 0;
5947 }
5948
5949 event_queue = &ctrl_info->event_queue;
5950 *event_queue->oq_pi = 0;
5951 event_queue->oq_ci_copy = 0;
5952}
5953
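/*
 * Lighter-weight re-initialization used on resume: queue memory is
 * retained, so only the PQI mode transition, queue creation, and event
 * configuration are redone.
 */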
5954static int pqi_ctrl_init_resume(struct pqi_ctrl_info *ctrl_info)
5955{
5956 int rc;
5957
5958 rc = pqi_force_sis_mode(ctrl_info);
5959 if (rc)
5960 return rc;
5961
5962 /*
5963 * Wait until the controller is ready to start accepting SIS
5964 * commands.
5965 */
5966 rc = sis_wait_for_ctrl_ready_resume(ctrl_info);
5967 if (rc)
5968 return rc;
5969
5970 /*
5971 * If the function we are about to call succeeds, the
5972 * controller will transition from legacy SIS mode
5973 * into PQI mode.
5974 */
5975 rc = sis_init_base_struct_addr(ctrl_info);
5976 if (rc) {
5977 dev_err(&ctrl_info->pci_dev->dev,
5978 "error initializing PQI mode\n");
5979 return rc;
5980 }
5981
5982 /* Wait for the controller to complete the SIS -> PQI transition. */
5983 rc = pqi_wait_for_pqi_mode_ready(ctrl_info);
5984 if (rc) {
5985 dev_err(&ctrl_info->pci_dev->dev,
5986 "transition to PQI mode failed\n");
5987 return rc;
5988 }
5989
5990 /* From here on, we are running in PQI mode. */
5991 ctrl_info->pqi_mode_enabled = true;
5992 pqi_save_ctrl_mode(ctrl_info, PQI_MODE);
5993
5994 pqi_reinit_queues(ctrl_info);
5995
5996 rc = pqi_create_admin_queues(ctrl_info);
5997 if (rc) {
5998 dev_err(&ctrl_info->pci_dev->dev,
5999 "error creating admin queues\n");
6000 return rc;
6001 }
6002
6003 rc = pqi_create_queues(ctrl_info);
6004 if (rc)
6005 return rc;
6006
6007 pqi_change_irq_mode(ctrl_info, IRQ_MODE_MSIX);
6008
6009 ctrl_info->controller_online = true;
6010 pqi_start_heartbeat_timer(ctrl_info);
6011 pqi_ctrl_unblock_requests(ctrl_info);
6012
6013 rc = pqi_enable_events(ctrl_info);
6014 if (rc) {
6015 dev_err(&ctrl_info->pci_dev->dev,
6016 "error configuring events\n");
6017 return rc;
6018 }
6019
6020 rc = pqi_write_driver_version_to_host_wellness(ctrl_info);
6021 if (rc) {
6022 dev_err(&ctrl_info->pci_dev->dev,
6023 "error updating host wellness\n");
6024 return rc;
6025 }
6026
6027 pqi_schedule_update_time_worker(ctrl_info);
6028
6029 pqi_scan_scsi_devices(ctrl_info);
6030
6031 return 0;
6032}
6033
6034#endif /* CONFIG_PM */
6035
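/*
 * Adjust the completion timeout range in the PCIe Device Control 2
 * register; pqi_pci_init() below uses this to select the 65 ms - 210 ms
 * range (encoded value 0x6).
 */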
6036static inline int pqi_set_pcie_completion_timeout(struct pci_dev *pci_dev,
6037 u16 timeout)
6038{
6039 return pcie_capability_clear_and_set_word(pci_dev, PCI_EXP_DEVCTL2,
6040 PCI_EXP_DEVCTL2_COMP_TIMEOUT, timeout);
6041}
6042
6043static int pqi_pci_init(struct pqi_ctrl_info *ctrl_info)
6044{
6045 int rc;
6046 u64 mask;
6047
6048 rc = pci_enable_device(ctrl_info->pci_dev);
6049 if (rc) {
6050 dev_err(&ctrl_info->pci_dev->dev,
6051 "failed to enable PCI device\n");
6052 return rc;
6053 }
6054
6055 if (sizeof(dma_addr_t) > 4)
6056 mask = DMA_BIT_MASK(64);
6057 else
6058 mask = DMA_BIT_MASK(32);
6059
6060 rc = dma_set_mask(&ctrl_info->pci_dev->dev, mask);
6061 if (rc) {
6062 dev_err(&ctrl_info->pci_dev->dev, "failed to set DMA mask\n");
6063 goto disable_device;
6064 }
6065
6066 rc = pci_request_regions(ctrl_info->pci_dev, DRIVER_NAME_SHORT);
6067 if (rc) {
6068 dev_err(&ctrl_info->pci_dev->dev,
6069 "failed to obtain PCI resources\n");
6070 goto disable_device;
6071 }
6072
6073 ctrl_info->iomem_base = ioremap_nocache(pci_resource_start(
6074 ctrl_info->pci_dev, 0),
6075 sizeof(struct pqi_ctrl_registers));
6076 if (!ctrl_info->iomem_base) {
6077 dev_err(&ctrl_info->pci_dev->dev,
6078 "failed to map memory for controller registers\n");
6079 rc = -ENOMEM;
6080 goto release_regions;
6081 }
6082
6083 ctrl_info->registers = ctrl_info->iomem_base;
6084 ctrl_info->pqi_registers = &ctrl_info->registers->pqi_registers;
6085
6086#define PCI_EXP_COMP_TIMEOUT_65_TO_210_MS 0x6
6087
6088 /* Increase the PCIe completion timeout. */
6089 rc = pqi_set_pcie_completion_timeout(ctrl_info->pci_dev,
6090 PCI_EXP_COMP_TIMEOUT_65_TO_210_MS);
6091 if (rc) {
6092 dev_err(&ctrl_info->pci_dev->dev,
6093 "failed to set PCIe completion timeout\n");
6094 goto release_regions;
6095 }
6096
6097 /* Enable bus mastering. */
6098 pci_set_master(ctrl_info->pci_dev);
6099
6100 pci_set_drvdata(ctrl_info->pci_dev, ctrl_info);
6101
6102 return 0;
6103
6104release_regions:
6105 pci_release_regions(ctrl_info->pci_dev);
6106disable_device:
6107 pci_disable_device(ctrl_info->pci_dev);
6108
6109 return rc;
6110}
6111
6112static void pqi_cleanup_pci_init(struct pqi_ctrl_info *ctrl_info)
6113{
6114 iounmap(ctrl_info->iomem_base);
6115 pci_release_regions(ctrl_info->pci_dev);
6116 pci_disable_device(ctrl_info->pci_dev);
6117 pci_set_drvdata(ctrl_info->pci_dev, NULL);
6118}
6119
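/*
 * Allocate the per-controller state on the requested NUMA node and
 * initialize its locks, work items, heartbeat timer, synchronous-request
 * semaphore, and default IRQ mode.
 */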
6120static struct pqi_ctrl_info *pqi_alloc_ctrl_info(int numa_node)
6121{
6122 struct pqi_ctrl_info *ctrl_info;
6123
6124 ctrl_info = kzalloc_node(sizeof(struct pqi_ctrl_info),
6125 GFP_KERNEL, numa_node);
6126 if (!ctrl_info)
6127 return NULL;
6128
6129 mutex_init(&ctrl_info->scan_mutex);
6130 mutex_init(&ctrl_info->lun_reset_mutex);
6131
6132 INIT_LIST_HEAD(&ctrl_info->scsi_device_list);
6133 spin_lock_init(&ctrl_info->scsi_device_list_lock);
6134
6135 INIT_WORK(&ctrl_info->event_work, pqi_event_worker);
6136 atomic_set(&ctrl_info->num_interrupts, 0);
6137
6138 INIT_DELAYED_WORK(&ctrl_info->rescan_work, pqi_rescan_worker);
6139 INIT_DELAYED_WORK(&ctrl_info->update_time_work, pqi_update_time_worker);
6140
6141 init_timer(&ctrl_info->heartbeat_timer);
6142
6143 sema_init(&ctrl_info->sync_request_sem,
6144 PQI_RESERVED_IO_SLOTS_SYNCHRONOUS_REQUESTS);
6145 init_waitqueue_head(&ctrl_info->block_requests_wait);
6146
6147 ctrl_info->ctrl_id = atomic_inc_return(&pqi_controller_count) - 1;
6148 ctrl_info->irq_mode = IRQ_MODE_NONE;
6149 ctrl_info->max_msix_vectors = PQI_MAX_MSIX_VECTORS;
6150
6151 return ctrl_info;
6152}
6153
6154static inline void pqi_free_ctrl_info(struct pqi_ctrl_info *ctrl_info)
6155{
6156 kfree(ctrl_info);
6157}
6158
6159static void pqi_free_interrupts(struct pqi_ctrl_info *ctrl_info)
6160{
6161 pqi_free_irqs(ctrl_info);
6162 pqi_disable_msix_interrupts(ctrl_info);
6163}
6164
6165static void pqi_free_ctrl_resources(struct pqi_ctrl_info *ctrl_info)
6166{
6167 pqi_stop_heartbeat_timer(ctrl_info);
6168 pqi_free_interrupts(ctrl_info);
6169 if (ctrl_info->queue_memory_base)
6170 dma_free_coherent(&ctrl_info->pci_dev->dev,
6171 ctrl_info->queue_memory_length,
6172 ctrl_info->queue_memory_base,
6173 ctrl_info->queue_memory_base_dma_handle);
6174 if (ctrl_info->admin_queue_memory_base)
6175 dma_free_coherent(&ctrl_info->pci_dev->dev,
6176 ctrl_info->admin_queue_memory_length,
6177 ctrl_info->admin_queue_memory_base,
6178 ctrl_info->admin_queue_memory_base_dma_handle);
6179 pqi_free_all_io_requests(ctrl_info);
6180 if (ctrl_info->error_buffer)
6181 dma_free_coherent(&ctrl_info->pci_dev->dev,
6182 ctrl_info->error_buffer_length,
6183 ctrl_info->error_buffer,
6184 ctrl_info->error_buffer_dma_handle);
6185 if (ctrl_info->iomem_base)
6186 pqi_cleanup_pci_init(ctrl_info);
6187 pqi_free_ctrl_info(ctrl_info);
6188}
6189
6190static void pqi_remove_ctrl(struct pqi_ctrl_info *ctrl_info)
6191{
6192 pqi_cancel_rescan_worker(ctrl_info);
6193 pqi_cancel_update_time_worker(ctrl_info);
6194 pqi_remove_all_scsi_devices(ctrl_info);
6195 pqi_unregister_scsi(ctrl_info);
6196 if (ctrl_info->pqi_mode_enabled)
6197 pqi_revert_to_sis_mode(ctrl_info);
6198 pqi_free_ctrl_resources(ctrl_info);
6199}
6200
6201static void pqi_print_ctrl_info(struct pci_dev *pdev,
6202 const struct pci_device_id *id)
6203{
6204 char *ctrl_description;
6205
6206 if (id->driver_data) {
6207 ctrl_description = (char *)id->driver_data;
6208 } else {
6209 switch (id->subvendor) {
6210 case PCI_VENDOR_ID_HP:
6211 ctrl_description = hpe_branded_controller;
6212 break;
6213 case PCI_VENDOR_ID_ADAPTEC2:
6214 default:
6215 ctrl_description = microsemi_branded_controller;
6216 break;
6217 }
6218 }
6219
6220 dev_info(&pdev->dev, "%s found\n", ctrl_description);
6221}
6222
6223static int pqi_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
6224{
6225 int rc;
6226 int node;
6227 struct pqi_ctrl_info *ctrl_info;
6228
6229 pqi_print_ctrl_info(pdev, id);
6230
6231 if (pqi_disable_device_id_wildcards &&
6232 id->subvendor == PCI_ANY_ID &&
6233 id->subdevice == PCI_ANY_ID) {
6234 dev_warn(&pdev->dev,
6235 "controller not probed because device ID wildcards are disabled\n");
6236 return -ENODEV;
6237 }
6238
6239 if (id->subvendor == PCI_ANY_ID || id->subdevice == PCI_ANY_ID)
6240 dev_warn(&pdev->dev,
6241 "controller device ID matched using wildcards\n");
6242
6243 node = dev_to_node(&pdev->dev);
6244 if (node == NUMA_NO_NODE)
6245 set_dev_node(&pdev->dev, 0);
6246
6247 ctrl_info = pqi_alloc_ctrl_info(node);
6248 if (!ctrl_info) {
6249 dev_err(&pdev->dev,
6250 "failed to allocate controller info block\n");
6251 return -ENOMEM;
6252 }
6253
6254 ctrl_info->pci_dev = pdev;
6255
6256 rc = pqi_pci_init(ctrl_info);
6257 if (rc)
6258 goto error;
6259
6260 rc = pqi_ctrl_init(ctrl_info);
6261 if (rc)
6262 goto error;
6263
6264 return 0;
6265
6266error:
6267 pqi_remove_ctrl(ctrl_info);
6268
6269 return rc;
6270}
6271
6272static void pqi_pci_remove(struct pci_dev *pdev)
6273{
6274 struct pqi_ctrl_info *ctrl_info;
6275
6276 ctrl_info = pci_get_drvdata(pdev);
6277 if (!ctrl_info)
6278 return;
6279
6280 pqi_remove_ctrl(ctrl_info);
6281}
6282
6283static void pqi_shutdown(struct pci_dev *pdev)
6284{
6285 int rc;
6286 struct pqi_ctrl_info *ctrl_info;
6287
6288 ctrl_info = pci_get_drvdata(pdev);
6289 if (!ctrl_info)
6290 goto error;
6291
6292 /*
6293 * Write all data in the controller's battery-backed cache to
6294 * storage.
6295 */
6296 rc = pqi_flush_cache(ctrl_info);
6297 if (rc == 0)
6298 return;
6299
6300error:
6301 dev_warn(&pdev->dev,
6302 "unable to flush controller cache\n");
6303}
6304
6305#if defined(CONFIG_PM)
6306
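/*
 * Power-management suspend: quiesce the controller (disable events,
 * cancel workers, wait for scans, resets, and outstanding I/O, flush the
 * write cache, stop the heartbeat timer) and, unless this is a freeze,
 * save PCI state and enter the requested low-power state.
 */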
6307static int pqi_suspend(struct pci_dev *pci_dev, pm_message_t state)
6308{
6309 struct pqi_ctrl_info *ctrl_info;
6310
6311 ctrl_info = pci_get_drvdata(pci_dev);
6312
6313 pqi_disable_events(ctrl_info);
6314 pqi_cancel_update_time_worker(ctrl_info);
6315 pqi_cancel_rescan_worker(ctrl_info);
6316 pqi_wait_until_scan_finished(ctrl_info);
6317 pqi_wait_until_lun_reset_finished(ctrl_info);
6318 pqi_flush_cache(ctrl_info);
6319 pqi_ctrl_block_requests(ctrl_info);
6320 pqi_ctrl_wait_until_quiesced(ctrl_info);
6321 pqi_wait_until_inbound_queues_empty(ctrl_info);
6322 pqi_ctrl_wait_for_pending_io(ctrl_info);
6323 pqi_stop_heartbeat_timer(ctrl_info);
6324
6325 if (state.event == PM_EVENT_FREEZE)
6326 return 0;
6327
6328 pci_save_state(pci_dev);
6329 pci_set_power_state(pci_dev, pci_choose_state(pci_dev, state));
6330
6331 ctrl_info->controller_online = false;
6332 ctrl_info->pqi_mode_enabled = false;
6333
6334 return 0;
6335}
6336
6337static int pqi_resume(struct pci_dev *pci_dev)
6338{
6339 int rc;
6340 struct pqi_ctrl_info *ctrl_info;
6341
6342 ctrl_info = pci_get_drvdata(pci_dev);
6343
6344 if (pci_dev->current_state != PCI_D0) {
6345 ctrl_info->max_hw_queue_index = 0;
6346 pqi_free_interrupts(ctrl_info);
6347 pqi_change_irq_mode(ctrl_info, IRQ_MODE_INTX);
6348 rc = request_irq(pci_irq_vector(pci_dev, 0), pqi_irq_handler,
6349 IRQF_SHARED, DRIVER_NAME_SHORT,
6350 &ctrl_info->queue_groups[0]);
6351 if (rc) {
6352 dev_err(&ctrl_info->pci_dev->dev,
6353 "irq %u init failed with error %d\n",
6354 pci_dev->irq, rc);
6355 return rc;
6356 }
6357 pqi_start_heartbeat_timer(ctrl_info);
6358 pqi_ctrl_unblock_requests(ctrl_info);
6359 return 0;
6360 }
6361
6362 pci_set_power_state(pci_dev, PCI_D0);
6363 pci_restore_state(pci_dev);
6364
6365 return pqi_ctrl_init_resume(ctrl_info);
6366}
6367
6368#endif /* CONFIG_PM */
6369
6370/* Define the PCI IDs for the controllers that we support. */
6371static const struct pci_device_id pqi_pci_id_table[] = {
6372 {
6373 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
6374 PCI_VENDOR_ID_ADAPTEC2, 0x0110)
6375 },
6376 {
6377 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
6378 PCI_VENDOR_ID_HP, 0x0600)
6379 },
6380 {
6381 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
6382 PCI_VENDOR_ID_HP, 0x0601)
6383 },
6384 {
6385 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
6386 PCI_VENDOR_ID_HP, 0x0602)
6387 },
6388 {
6389 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
6390 PCI_VENDOR_ID_HP, 0x0603)
6391 },
6392 {
6393 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
6394 PCI_VENDOR_ID_HP, 0x0650)
6395 },
6396 {
6397 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
6398 PCI_VENDOR_ID_HP, 0x0651)
6399 },
6400 {
6401 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
6402 PCI_VENDOR_ID_HP, 0x0652)
6403 },
6404 {
6405 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
6406 PCI_VENDOR_ID_HP, 0x0653)
6407 },
6408 {
6409 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
6410 PCI_VENDOR_ID_HP, 0x0654)
6411 },
6412 {
6413 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
6414 PCI_VENDOR_ID_HP, 0x0655)
6415 },
6416 {
6417 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
6418 PCI_VENDOR_ID_HP, 0x0700)
6419 },
6420 {
6421 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
6422 PCI_VENDOR_ID_HP, 0x0701)
6423 },
6424 {
6425 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
6426 PCI_VENDOR_ID_ADAPTEC2, 0x0800)
6427 },
6428 {
6429 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
6430 PCI_VENDOR_ID_ADAPTEC2, 0x0801)
6431 },
6432 {
6433 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
6434 PCI_VENDOR_ID_ADAPTEC2, 0x0802)
6435 },
6436 {
6437 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
6438 PCI_VENDOR_ID_ADAPTEC2, 0x0803)
6439 },
6440 {
6441 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
6442 PCI_VENDOR_ID_ADAPTEC2, 0x0804)
6443 },
6444 {
6445 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
6446 PCI_VENDOR_ID_ADAPTEC2, 0x0805)
6447 },
6448 {
6449 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
6450 PCI_VENDOR_ID_ADAPTEC2, 0x0900)
6451 },
6452 {
6453 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
6454 PCI_VENDOR_ID_ADAPTEC2, 0x0901)
6455 },
6456 {
6457 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
6458 PCI_VENDOR_ID_ADAPTEC2, 0x0902)
6459 },
6460 {
6461 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
6462 PCI_VENDOR_ID_ADAPTEC2, 0x0903)
6463 },
6464 {
6465 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
6466 PCI_VENDOR_ID_ADAPTEC2, 0x0904)
6467 },
6468 {
6469 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
6470 PCI_VENDOR_ID_ADAPTEC2, 0x0905)
6471 },
6472 {
6473 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
6474 PCI_VENDOR_ID_ADAPTEC2, 0x0906)
6475 },
6476 {
6477 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
6478 PCI_VENDOR_ID_HP, 0x1001)
6479 },
6480 {
6481 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
6482 PCI_VENDOR_ID_HP, 0x1100)
6483 },
6484 {
6485 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
6486 PCI_VENDOR_ID_HP, 0x1101)
6487 },
6488 {
6489 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
6490 PCI_VENDOR_ID_HP, 0x1102)
6491 },
6492 {
6493 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
6494 PCI_VENDOR_ID_HP, 0x1150)
6495 },
6496 {
6497 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
6498 PCI_ANY_ID, PCI_ANY_ID)
6499 },
6500 { 0 }
6501};
6502
6503MODULE_DEVICE_TABLE(pci, pqi_pci_id_table);
6504
6505static struct pci_driver pqi_pci_driver = {
6506 .name = DRIVER_NAME_SHORT,
6507 .id_table = pqi_pci_id_table,
6508 .probe = pqi_pci_probe,
6509 .remove = pqi_pci_remove,
6510 .shutdown = pqi_shutdown,
6511#if defined(CONFIG_PM)
6512 .suspend = pqi_suspend,
6513 .resume = pqi_resume,
6514#endif
6515};
6516
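/*
 * Module init: attach the SAS transport template first, then register
 * the PCI driver; if PCI registration fails, release the transport again.
 */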
6517static int __init pqi_init(void)
6518{
6519 int rc;
6520
6521 pr_info(DRIVER_NAME "\n");
6522
6523 pqi_sas_transport_template =
6524 sas_attach_transport(&pqi_sas_transport_functions);
6525 if (!pqi_sas_transport_template)
6526 return -ENODEV;
6527
6528 rc = pci_register_driver(&pqi_pci_driver);
6529 if (rc)
6530 sas_release_transport(pqi_sas_transport_template);
6531
6532 return rc;
6533}
6534
6535static void __exit pqi_cleanup(void)
6536{
6537 pci_unregister_driver(&pqi_pci_driver);
6538 sas_release_transport(pqi_sas_transport_template);
6539}
6540
6541module_init(pqi_init);
6542module_exit(pqi_cleanup);
6543
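/*
 * Compile-time layout checks: BUILD_BUG_ON() verifies that structure
 * member offsets and sizes match what the SIS/BMIC/PQI interfaces expect,
 * so an accidental packing or padding change fails the build.
 */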
6544static void __attribute__((unused)) verify_structures(void)
6545{
6546 BUILD_BUG_ON(offsetof(struct pqi_ctrl_registers,
6547 sis_host_to_ctrl_doorbell) != 0x20);
6548 BUILD_BUG_ON(offsetof(struct pqi_ctrl_registers,
6549 sis_interrupt_mask) != 0x34);
6550 BUILD_BUG_ON(offsetof(struct pqi_ctrl_registers,
6551 sis_ctrl_to_host_doorbell) != 0x9c);
6552 BUILD_BUG_ON(offsetof(struct pqi_ctrl_registers,
6553 sis_ctrl_to_host_doorbell_clear) != 0xa0);
6554 BUILD_BUG_ON(offsetof(struct pqi_ctrl_registers,
6555 sis_driver_scratch) != 0xb0);
6556 BUILD_BUG_ON(offsetof(struct pqi_ctrl_registers,
6557 sis_firmware_status) != 0xbc);
6558 BUILD_BUG_ON(offsetof(struct pqi_ctrl_registers,
6559 sis_mailbox) != 0x1000);
6560 BUILD_BUG_ON(offsetof(struct pqi_ctrl_registers,
6561 pqi_registers) != 0x4000);
6562
6563 BUILD_BUG_ON(offsetof(struct pqi_iu_header,
6564 iu_type) != 0x0);
6565 BUILD_BUG_ON(offsetof(struct pqi_iu_header,
6566 iu_length) != 0x2);
6567 BUILD_BUG_ON(offsetof(struct pqi_iu_header,
6568 response_queue_id) != 0x4);
6569 BUILD_BUG_ON(offsetof(struct pqi_iu_header,
6570 work_area) != 0x6);
6571 BUILD_BUG_ON(sizeof(struct pqi_iu_header) != 0x8);
6572
6573 BUILD_BUG_ON(offsetof(struct pqi_aio_error_info,
6574 status) != 0x0);
6575 BUILD_BUG_ON(offsetof(struct pqi_aio_error_info,
6576 service_response) != 0x1);
6577 BUILD_BUG_ON(offsetof(struct pqi_aio_error_info,
6578 data_present) != 0x2);
6579 BUILD_BUG_ON(offsetof(struct pqi_aio_error_info,
6580 reserved) != 0x3);
6581 BUILD_BUG_ON(offsetof(struct pqi_aio_error_info,
6582 residual_count) != 0x4);
6583 BUILD_BUG_ON(offsetof(struct pqi_aio_error_info,
6584 data_length) != 0x8);
6585 BUILD_BUG_ON(offsetof(struct pqi_aio_error_info,
6586 reserved1) != 0xa);
6587 BUILD_BUG_ON(offsetof(struct pqi_aio_error_info,
6588 data) != 0xc);
6589 BUILD_BUG_ON(sizeof(struct pqi_aio_error_info) != 0x10c);
6590
6591 BUILD_BUG_ON(offsetof(struct pqi_raid_error_info,
6592 data_in_result) != 0x0);
6593 BUILD_BUG_ON(offsetof(struct pqi_raid_error_info,
6594 data_out_result) != 0x1);
6595 BUILD_BUG_ON(offsetof(struct pqi_raid_error_info,
6596 reserved) != 0x2);
6597 BUILD_BUG_ON(offsetof(struct pqi_raid_error_info,
6598 status) != 0x5);
6599 BUILD_BUG_ON(offsetof(struct pqi_raid_error_info,
6600 status_qualifier) != 0x6);
6601 BUILD_BUG_ON(offsetof(struct pqi_raid_error_info,
6602 sense_data_length) != 0x8);
6603 BUILD_BUG_ON(offsetof(struct pqi_raid_error_info,
6604 response_data_length) != 0xa);
6605 BUILD_BUG_ON(offsetof(struct pqi_raid_error_info,
6606 data_in_transferred) != 0xc);
6607 BUILD_BUG_ON(offsetof(struct pqi_raid_error_info,
6608 data_out_transferred) != 0x10);
6609 BUILD_BUG_ON(offsetof(struct pqi_raid_error_info,
6610 data) != 0x14);
6611 BUILD_BUG_ON(sizeof(struct pqi_raid_error_info) != 0x114);
6612
6613 BUILD_BUG_ON(offsetof(struct pqi_device_registers,
6614 signature) != 0x0);
6615 BUILD_BUG_ON(offsetof(struct pqi_device_registers,
6616 function_and_status_code) != 0x8);
6617 BUILD_BUG_ON(offsetof(struct pqi_device_registers,
6618 max_admin_iq_elements) != 0x10);
6619 BUILD_BUG_ON(offsetof(struct pqi_device_registers,
6620 max_admin_oq_elements) != 0x11);
6621 BUILD_BUG_ON(offsetof(struct pqi_device_registers,
6622 admin_iq_element_length) != 0x12);
6623 BUILD_BUG_ON(offsetof(struct pqi_device_registers,
6624 admin_oq_element_length) != 0x13);
6625 BUILD_BUG_ON(offsetof(struct pqi_device_registers,
6626 max_reset_timeout) != 0x14);
6627 BUILD_BUG_ON(offsetof(struct pqi_device_registers,
6628 legacy_intx_status) != 0x18);
6629 BUILD_BUG_ON(offsetof(struct pqi_device_registers,
6630 legacy_intx_mask_set) != 0x1c);
6631 BUILD_BUG_ON(offsetof(struct pqi_device_registers,
6632 legacy_intx_mask_clear) != 0x20);
6633 BUILD_BUG_ON(offsetof(struct pqi_device_registers,
6634 device_status) != 0x40);
6635 BUILD_BUG_ON(offsetof(struct pqi_device_registers,
6636 admin_iq_pi_offset) != 0x48);
6637 BUILD_BUG_ON(offsetof(struct pqi_device_registers,
6638 admin_oq_ci_offset) != 0x50);
6639 BUILD_BUG_ON(offsetof(struct pqi_device_registers,
6640 admin_iq_element_array_addr) != 0x58);
6641 BUILD_BUG_ON(offsetof(struct pqi_device_registers,
6642 admin_oq_element_array_addr) != 0x60);
6643 BUILD_BUG_ON(offsetof(struct pqi_device_registers,
6644 admin_iq_ci_addr) != 0x68);
6645 BUILD_BUG_ON(offsetof(struct pqi_device_registers,
6646 admin_oq_pi_addr) != 0x70);
6647 BUILD_BUG_ON(offsetof(struct pqi_device_registers,
6648 admin_iq_num_elements) != 0x78);
6649 BUILD_BUG_ON(offsetof(struct pqi_device_registers,
6650 admin_oq_num_elements) != 0x79);
6651 BUILD_BUG_ON(offsetof(struct pqi_device_registers,
6652 admin_queue_int_msg_num) != 0x7a);
6653 BUILD_BUG_ON(offsetof(struct pqi_device_registers,
6654 device_error) != 0x80);
6655 BUILD_BUG_ON(offsetof(struct pqi_device_registers,
6656 error_details) != 0x88);
6657 BUILD_BUG_ON(offsetof(struct pqi_device_registers,
6658 device_reset) != 0x90);
6659 BUILD_BUG_ON(offsetof(struct pqi_device_registers,
6660 power_action) != 0x94);
6661 BUILD_BUG_ON(sizeof(struct pqi_device_registers) != 0x100);
6662
6663 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
6664 header.iu_type) != 0);
6665 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
6666 header.iu_length) != 2);
6667 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
6668 header.work_area) != 6);
6669 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
6670 request_id) != 8);
6671 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
6672 function_code) != 10);
6673 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
6674 data.report_device_capability.buffer_length) != 44);
6675 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
6676 data.report_device_capability.sg_descriptor) != 48);
6677 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
6678 data.create_operational_iq.queue_id) != 12);
6679 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
6680 data.create_operational_iq.element_array_addr) != 16);
6681 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
6682 data.create_operational_iq.ci_addr) != 24);
6683 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
6684 data.create_operational_iq.num_elements) != 32);
6685 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
6686 data.create_operational_iq.element_length) != 34);
6687 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
6688 data.create_operational_iq.queue_protocol) != 36);
6689 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
6690 data.create_operational_oq.queue_id) != 12);
6691 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
6692 data.create_operational_oq.element_array_addr) != 16);
6693 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
6694 data.create_operational_oq.pi_addr) != 24);
6695 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
6696 data.create_operational_oq.num_elements) != 32);
6697 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
6698 data.create_operational_oq.element_length) != 34);
6699 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
6700 data.create_operational_oq.queue_protocol) != 36);
6701 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
6702 data.create_operational_oq.int_msg_num) != 40);
6703 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
6704 data.create_operational_oq.coalescing_count) != 42);
6705 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
6706 data.create_operational_oq.min_coalescing_time) != 44);
6707 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
6708 data.create_operational_oq.max_coalescing_time) != 48);
6709 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
6710 data.delete_operational_queue.queue_id) != 12);
6711 BUILD_BUG_ON(sizeof(struct pqi_general_admin_request) != 64);
6712 BUILD_BUG_ON(FIELD_SIZEOF(struct pqi_general_admin_request,
6713 data.create_operational_iq) != 64 - 11);
6714 BUILD_BUG_ON(FIELD_SIZEOF(struct pqi_general_admin_request,
6715 data.create_operational_oq) != 64 - 11);
6716 BUILD_BUG_ON(FIELD_SIZEOF(struct pqi_general_admin_request,
6717 data.delete_operational_queue) != 64 - 11);
6718
6719 BUILD_BUG_ON(offsetof(struct pqi_general_admin_response,
6720 header.iu_type) != 0);
6721 BUILD_BUG_ON(offsetof(struct pqi_general_admin_response,
6722 header.iu_length) != 2);
6723 BUILD_BUG_ON(offsetof(struct pqi_general_admin_response,
6724 header.work_area) != 6);
6725 BUILD_BUG_ON(offsetof(struct pqi_general_admin_response,
6726 request_id) != 8);
6727 BUILD_BUG_ON(offsetof(struct pqi_general_admin_response,
6728 function_code) != 10);
6729 BUILD_BUG_ON(offsetof(struct pqi_general_admin_response,
6730 status) != 11);
6731 BUILD_BUG_ON(offsetof(struct pqi_general_admin_response,
6732 data.create_operational_iq.status_descriptor) != 12);
6733 BUILD_BUG_ON(offsetof(struct pqi_general_admin_response,
6734 data.create_operational_iq.iq_pi_offset) != 16);
6735 BUILD_BUG_ON(offsetof(struct pqi_general_admin_response,
6736 data.create_operational_oq.status_descriptor) != 12);
6737 BUILD_BUG_ON(offsetof(struct pqi_general_admin_response,
6738 data.create_operational_oq.oq_ci_offset) != 16);
6739 BUILD_BUG_ON(sizeof(struct pqi_general_admin_response) != 64);
6740
6741 BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
6742 header.iu_type) != 0);
6743 BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
6744 header.iu_length) != 2);
6745 BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
6746 header.response_queue_id) != 4);
6747 BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
6748 header.work_area) != 6);
6749 BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
6750 request_id) != 8);
6751 BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
6752 nexus_id) != 10);
6753 BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
6754 buffer_length) != 12);
6755 BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
6756 lun_number) != 16);
6757 BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
6758 protocol_specific) != 24);
6759 BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
6760 error_index) != 27);
6761 BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
6762 cdb) != 32);
6763 BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
6764 sg_descriptors) != 64);
6765 BUILD_BUG_ON(sizeof(struct pqi_raid_path_request) !=
6766 PQI_OPERATIONAL_IQ_ELEMENT_LENGTH);
6767
6768 BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
6769 header.iu_type) != 0);
6770 BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
6771 header.iu_length) != 2);
6772 BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
6773 header.response_queue_id) != 4);
6774 BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
6775 header.work_area) != 6);
6776 BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
6777 request_id) != 8);
6778 BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
6779 nexus_id) != 12);
6780 BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
6781 buffer_length) != 16);
6782 BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
6783 data_encryption_key_index) != 22);
6784 BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
6785 encrypt_tweak_lower) != 24);
6786 BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
6787 encrypt_tweak_upper) != 28);
6788 BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
6789 cdb) != 32);
6790 BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
6791 error_index) != 48);
6792 BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
6793 num_sg_descriptors) != 50);
6794 BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
6795 cdb_length) != 51);
6796 BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
6797 lun_number) != 52);
6798 BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
6799 sg_descriptors) != 64);
6800 BUILD_BUG_ON(sizeof(struct pqi_aio_path_request) !=
6801 PQI_OPERATIONAL_IQ_ELEMENT_LENGTH);
6802
6803 BUILD_BUG_ON(offsetof(struct pqi_io_response,
6804 header.iu_type) != 0);
6805 BUILD_BUG_ON(offsetof(struct pqi_io_response,
6806 header.iu_length) != 2);
6807 BUILD_BUG_ON(offsetof(struct pqi_io_response,
6808 request_id) != 8);
6809 BUILD_BUG_ON(offsetof(struct pqi_io_response,
6810 error_index) != 10);
6811
6812 BUILD_BUG_ON(offsetof(struct pqi_general_management_request,
6813 header.iu_type) != 0);
6814 BUILD_BUG_ON(offsetof(struct pqi_general_management_request,
6815 header.iu_length) != 2);
6816 BUILD_BUG_ON(offsetof(struct pqi_general_management_request,
6817 header.response_queue_id) != 4);
6818 BUILD_BUG_ON(offsetof(struct pqi_general_management_request,
6819 request_id) != 8);
6820 BUILD_BUG_ON(offsetof(struct pqi_general_management_request,
6821 data.report_event_configuration.buffer_length) != 12);
6822 BUILD_BUG_ON(offsetof(struct pqi_general_management_request,
6823 data.report_event_configuration.sg_descriptors) != 16);
6824 BUILD_BUG_ON(offsetof(struct pqi_general_management_request,
6825 data.set_event_configuration.global_event_oq_id) != 10);
6826 BUILD_BUG_ON(offsetof(struct pqi_general_management_request,
6827 data.set_event_configuration.buffer_length) != 12);
6828 BUILD_BUG_ON(offsetof(struct pqi_general_management_request,
6829 data.set_event_configuration.sg_descriptors) != 16);
6830
6831 BUILD_BUG_ON(offsetof(struct pqi_iu_layer_descriptor,
6832 max_inbound_iu_length) != 6);
6833 BUILD_BUG_ON(offsetof(struct pqi_iu_layer_descriptor,
6834 max_outbound_iu_length) != 14);
6835 BUILD_BUG_ON(sizeof(struct pqi_iu_layer_descriptor) != 16);
6836
6837 BUILD_BUG_ON(offsetof(struct pqi_device_capability,
6838 data_length) != 0);
6839 BUILD_BUG_ON(offsetof(struct pqi_device_capability,
6840 iq_arbitration_priority_support_bitmask) != 8);
6841 BUILD_BUG_ON(offsetof(struct pqi_device_capability,
6842 maximum_aw_a) != 9);
6843 BUILD_BUG_ON(offsetof(struct pqi_device_capability,
6844 maximum_aw_b) != 10);
6845 BUILD_BUG_ON(offsetof(struct pqi_device_capability,
6846 maximum_aw_c) != 11);
6847 BUILD_BUG_ON(offsetof(struct pqi_device_capability,
6848 max_inbound_queues) != 16);
6849 BUILD_BUG_ON(offsetof(struct pqi_device_capability,
6850 max_elements_per_iq) != 18);
6851 BUILD_BUG_ON(offsetof(struct pqi_device_capability,
6852 max_iq_element_length) != 24);
6853 BUILD_BUG_ON(offsetof(struct pqi_device_capability,
6854 min_iq_element_length) != 26);
6855 BUILD_BUG_ON(offsetof(struct pqi_device_capability,
6856 max_outbound_queues) != 30);
6857 BUILD_BUG_ON(offsetof(struct pqi_device_capability,
6858 max_elements_per_oq) != 32);
6859 BUILD_BUG_ON(offsetof(struct pqi_device_capability,
6860 intr_coalescing_time_granularity) != 34);
6861 BUILD_BUG_ON(offsetof(struct pqi_device_capability,
6862 max_oq_element_length) != 36);
6863 BUILD_BUG_ON(offsetof(struct pqi_device_capability,
6864 min_oq_element_length) != 38);
6865 BUILD_BUG_ON(offsetof(struct pqi_device_capability,
6866 iu_layer_descriptors) != 64);
6867 BUILD_BUG_ON(sizeof(struct pqi_device_capability) != 576);
6868
6869 BUILD_BUG_ON(offsetof(struct pqi_event_descriptor,
6870 event_type) != 0);
6871 BUILD_BUG_ON(offsetof(struct pqi_event_descriptor,
6872 oq_id) != 2);
6873 BUILD_BUG_ON(sizeof(struct pqi_event_descriptor) != 4);
6874
6875 BUILD_BUG_ON(offsetof(struct pqi_event_config,
6876 num_event_descriptors) != 2);
6877 BUILD_BUG_ON(offsetof(struct pqi_event_config,
6878 descriptors) != 4);
6879
6880 BUILD_BUG_ON(PQI_NUM_SUPPORTED_EVENTS !=
6881 ARRAY_SIZE(pqi_supported_event_types));
6882
6883 BUILD_BUG_ON(offsetof(struct pqi_event_response,
6884 header.iu_type) != 0);
6885 BUILD_BUG_ON(offsetof(struct pqi_event_response,
6886 header.iu_length) != 2);
6887 BUILD_BUG_ON(offsetof(struct pqi_event_response,
6888 event_type) != 8);
6889 BUILD_BUG_ON(offsetof(struct pqi_event_response,
6890 event_id) != 10);
6891 BUILD_BUG_ON(offsetof(struct pqi_event_response,
6892 additional_event_id) != 12);
6893 BUILD_BUG_ON(offsetof(struct pqi_event_response,
6894 data) != 16);
6895 BUILD_BUG_ON(sizeof(struct pqi_event_response) != 32);
6896
6897 BUILD_BUG_ON(offsetof(struct pqi_event_acknowledge_request,
6898 header.iu_type) != 0);
6899 BUILD_BUG_ON(offsetof(struct pqi_event_acknowledge_request,
6900 header.iu_length) != 2);
6901 BUILD_BUG_ON(offsetof(struct pqi_event_acknowledge_request,
6902 event_type) != 8);
6903 BUILD_BUG_ON(offsetof(struct pqi_event_acknowledge_request,
6904 event_id) != 10);
6905 BUILD_BUG_ON(offsetof(struct pqi_event_acknowledge_request,
6906 additional_event_id) != 12);
6907 BUILD_BUG_ON(sizeof(struct pqi_event_acknowledge_request) != 16);
6908
6909 BUILD_BUG_ON(offsetof(struct pqi_task_management_request,
6910 header.iu_type) != 0);
6911 BUILD_BUG_ON(offsetof(struct pqi_task_management_request,
6912 header.iu_length) != 2);
6913 BUILD_BUG_ON(offsetof(struct pqi_task_management_request,
6914 request_id) != 8);
6915 BUILD_BUG_ON(offsetof(struct pqi_task_management_request,
6916 nexus_id) != 10);
6917 BUILD_BUG_ON(offsetof(struct pqi_task_management_request,
6918 lun_number) != 16);
6919 BUILD_BUG_ON(offsetof(struct pqi_task_management_request,
6920 protocol_specific) != 24);
6921 BUILD_BUG_ON(offsetof(struct pqi_task_management_request,
6922 outbound_queue_id_to_manage) != 26);
6923 BUILD_BUG_ON(offsetof(struct pqi_task_management_request,
6924 request_id_to_manage) != 28);
6925 BUILD_BUG_ON(offsetof(struct pqi_task_management_request,
6926 task_management_function) != 30);
6927 BUILD_BUG_ON(sizeof(struct pqi_task_management_request) != 32);
6928
6929 BUILD_BUG_ON(offsetof(struct pqi_task_management_response,
6930 header.iu_type) != 0);
6931 BUILD_BUG_ON(offsetof(struct pqi_task_management_response,
6932 header.iu_length) != 2);
6933 BUILD_BUG_ON(offsetof(struct pqi_task_management_response,
6934 request_id) != 8);
6935 BUILD_BUG_ON(offsetof(struct pqi_task_management_response,
6936 nexus_id) != 10);
6937 BUILD_BUG_ON(offsetof(struct pqi_task_management_response,
6938 additional_response_info) != 12);
6939 BUILD_BUG_ON(offsetof(struct pqi_task_management_response,
6940 response_code) != 15);
6941 BUILD_BUG_ON(sizeof(struct pqi_task_management_response) != 16);
6942
6943 BUILD_BUG_ON(offsetof(struct bmic_identify_controller,
6944 configured_logical_drive_count) != 0);
6945 BUILD_BUG_ON(offsetof(struct bmic_identify_controller,
6946 configuration_signature) != 1);
6947 BUILD_BUG_ON(offsetof(struct bmic_identify_controller,
6948 firmware_version) != 5);
6949 BUILD_BUG_ON(offsetof(struct bmic_identify_controller,
6950 extended_logical_unit_count) != 154);
6951 BUILD_BUG_ON(offsetof(struct bmic_identify_controller,
6952 firmware_build_number) != 190);
6953 BUILD_BUG_ON(offsetof(struct bmic_identify_controller,
6954 controller_mode) != 292);
6955
6956 BUILD_BUG_ON(PQI_ADMIN_IQ_NUM_ELEMENTS > 255);
6957 BUILD_BUG_ON(PQI_ADMIN_OQ_NUM_ELEMENTS > 255);
6958 BUILD_BUG_ON(PQI_ADMIN_IQ_ELEMENT_LENGTH %
6959 PQI_QUEUE_ELEMENT_LENGTH_ALIGNMENT != 0);
6960 BUILD_BUG_ON(PQI_ADMIN_OQ_ELEMENT_LENGTH %
6961 PQI_QUEUE_ELEMENT_LENGTH_ALIGNMENT != 0);
6962 BUILD_BUG_ON(PQI_OPERATIONAL_IQ_ELEMENT_LENGTH > 1048560);
6963 BUILD_BUG_ON(PQI_OPERATIONAL_IQ_ELEMENT_LENGTH %
6964 PQI_QUEUE_ELEMENT_LENGTH_ALIGNMENT != 0);
6965 BUILD_BUG_ON(PQI_OPERATIONAL_OQ_ELEMENT_LENGTH > 1048560);
6966 BUILD_BUG_ON(PQI_OPERATIONAL_OQ_ELEMENT_LENGTH %
6967 PQI_QUEUE_ELEMENT_LENGTH_ALIGNMENT != 0);
6968
6969 BUILD_BUG_ON(PQI_RESERVED_IO_SLOTS >= PQI_MAX_OUTSTANDING_REQUESTS);
6970}