/*
 * driver for Microsemi PQI-based storage controllers
 * Copyright (c) 2016-2017 Microsemi Corporation
 * Copyright (c) 2016 PMC-Sierra, Inc.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; version 2 of the License.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 * NON INFRINGEMENT.  See the GNU General Public License for more details.
 *
 * Questions/Comments/Bugfixes to esc.storagedev@microsemi.com
 *
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/pci.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/sched.h>
#include <linux/rtc.h>
#include <linux/bcd.h>
#include <linux/reboot.h>
#include <linux/cciss_ioctl.h>
#include <linux/blk-mq-pci.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_eh.h>
#include <scsi/scsi_transport_sas.h>
#include <asm/unaligned.h>
#include "smartpqi.h"
#include "smartpqi_sis.h"

#if !defined(BUILD_TIMESTAMP)
#define BUILD_TIMESTAMP
#endif

#define DRIVER_VERSION		"0.9.13-370"
#define DRIVER_MAJOR		0
#define DRIVER_MINOR		9
#define DRIVER_RELEASE		13
#define DRIVER_REVISION		370

#define DRIVER_NAME		"Microsemi PQI Driver (v" DRIVER_VERSION ")"
#define DRIVER_NAME_SHORT	"smartpqi"

#define PQI_EXTRA_SGL_MEMORY	(12 * sizeof(struct pqi_sg_descriptor))

MODULE_AUTHOR("Microsemi");
MODULE_DESCRIPTION("Driver for Microsemi Smart Family Controller version "
	DRIVER_VERSION);
MODULE_SUPPORTED_DEVICE("Microsemi Smart Family Controllers");
MODULE_VERSION(DRIVER_VERSION);
MODULE_LICENSE("GPL");

static char *hpe_branded_controller = "HPE Smart Array Controller";
static char *microsemi_branded_controller = "Microsemi Smart Family Controller";

static void pqi_take_ctrl_offline(struct pqi_ctrl_info *ctrl_info);
static void pqi_ctrl_offline_worker(struct work_struct *work);
static void pqi_retry_raid_bypass_requests(struct pqi_ctrl_info *ctrl_info);
static int pqi_scan_scsi_devices(struct pqi_ctrl_info *ctrl_info);
static void pqi_scan_start(struct Scsi_Host *shost);
static void pqi_start_io(struct pqi_ctrl_info *ctrl_info,
	struct pqi_queue_group *queue_group, enum pqi_io_path path,
	struct pqi_io_request *io_request);
static int pqi_submit_raid_request_synchronous(struct pqi_ctrl_info *ctrl_info,
	struct pqi_iu_header *request, unsigned int flags,
	struct pqi_raid_error_info *error_info, unsigned long timeout_msecs);
static int pqi_aio_submit_io(struct pqi_ctrl_info *ctrl_info,
	struct scsi_cmnd *scmd, u32 aio_handle, u8 *cdb,
	unsigned int cdb_length, struct pqi_queue_group *queue_group,
	struct pqi_encryption_info *encryption_info, bool raid_bypass);

/* for flags argument to pqi_submit_raid_request_synchronous() */
#define PQI_SYNC_FLAGS_INTERRUPTABLE	0x1

static struct scsi_transport_template *pqi_sas_transport_template;

static atomic_t pqi_controller_count = ATOMIC_INIT(0);

enum pqi_lockup_action {
	NONE,
	REBOOT,
	PANIC
};

static enum pqi_lockup_action pqi_lockup_action = NONE;

static struct {
	enum pqi_lockup_action	action;
	char			*name;
} pqi_lockup_actions[] = {
	{
		.action = NONE,
		.name = "none",
	},
	{
		.action = REBOOT,
		.name = "reboot",
	},
	{
		.action = PANIC,
		.name = "panic",
	},
};

static unsigned int pqi_supported_event_types[] = {
	PQI_EVENT_TYPE_HOTPLUG,
	PQI_EVENT_TYPE_HARDWARE,
	PQI_EVENT_TYPE_PHYSICAL_DEVICE,
	PQI_EVENT_TYPE_LOGICAL_DEVICE,
	PQI_EVENT_TYPE_AIO_STATE_CHANGE,
	PQI_EVENT_TYPE_AIO_CONFIG_CHANGE,
};

static int pqi_disable_device_id_wildcards;
module_param_named(disable_device_id_wildcards,
	pqi_disable_device_id_wildcards, int, 0644);
MODULE_PARM_DESC(disable_device_id_wildcards,
	"Disable device ID wildcards.");

static char *pqi_lockup_action_param;
module_param_named(lockup_action,
	pqi_lockup_action_param, charp, 0644);
MODULE_PARM_DESC(lockup_action, "Action to take when controller locked up.\n"
	"\t\tSupported: none, reboot, panic\n"
	"\t\tDefault: none");

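/*
 * Illustrative usage (not part of the driver itself): the lockup action
 * above is selected with the standard module-parameter syntax, e.g.
 *
 *	modprobe smartpqi lockup_action=reboot
 *
 * or smartpqi.lockup_action=reboot on the kernel command line.
 */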
static char *raid_levels[] = {
	"RAID-0",
	"RAID-4",
	"RAID-1(1+0)",
	"RAID-5",
	"RAID-5+1",
	"RAID-ADG",
	"RAID-1(ADM)",
};

static char *pqi_raid_level_to_string(u8 raid_level)
{
	if (raid_level < ARRAY_SIZE(raid_levels))
		return raid_levels[raid_level];

	return "";
}

#define SA_RAID_0		0
#define SA_RAID_4		1
#define SA_RAID_1		2	/* also used for RAID 10 */
#define SA_RAID_5		3	/* also used for RAID 50 */
#define SA_RAID_51		4
#define SA_RAID_6		5	/* also used for RAID 60 */
#define SA_RAID_ADM		6	/* also used for RAID 1+0 ADM */
#define SA_RAID_MAX		SA_RAID_ADM
#define SA_RAID_UNKNOWN		0xff

static inline void pqi_scsi_done(struct scsi_cmnd *scmd)
{
	pqi_prep_for_scsi_done(scmd);
	scmd->scsi_done(scmd);
}

static inline bool pqi_scsi3addr_equal(u8 *scsi3addr1, u8 *scsi3addr2)
{
	return memcmp(scsi3addr1, scsi3addr2, 8) == 0;
}

static inline struct pqi_ctrl_info *shost_to_hba(struct Scsi_Host *shost)
{
	void *hostdata = shost_priv(shost);

	return *((struct pqi_ctrl_info **)hostdata);
}

static inline bool pqi_is_logical_device(struct pqi_scsi_dev *device)
{
	return !device->is_physical_device;
}

static inline bool pqi_is_external_raid_addr(u8 *scsi3addr)
{
	return scsi3addr[2] != 0;
}

static inline bool pqi_ctrl_offline(struct pqi_ctrl_info *ctrl_info)
{
	return !ctrl_info->controller_online;
}

static inline void pqi_check_ctrl_health(struct pqi_ctrl_info *ctrl_info)
{
	if (ctrl_info->controller_online)
		if (!sis_is_firmware_running(ctrl_info))
			pqi_take_ctrl_offline(ctrl_info);
}

static inline bool pqi_is_hba_lunid(u8 *scsi3addr)
{
	return pqi_scsi3addr_equal(scsi3addr, RAID_CTLR_LUNID);
}

static inline enum pqi_ctrl_mode pqi_get_ctrl_mode(
	struct pqi_ctrl_info *ctrl_info)
{
	return sis_read_driver_scratch(ctrl_info);
}

static inline void pqi_save_ctrl_mode(struct pqi_ctrl_info *ctrl_info,
	enum pqi_ctrl_mode mode)
{
	sis_write_driver_scratch(ctrl_info, mode);
}

static inline void pqi_ctrl_block_requests(struct pqi_ctrl_info *ctrl_info)
{
	ctrl_info->block_requests = true;
	scsi_block_requests(ctrl_info->scsi_host);
}

static inline void pqi_ctrl_unblock_requests(struct pqi_ctrl_info *ctrl_info)
{
	ctrl_info->block_requests = false;
	wake_up_all(&ctrl_info->block_requests_wait);
	pqi_retry_raid_bypass_requests(ctrl_info);
	scsi_unblock_requests(ctrl_info->scsi_host);
}

static inline bool pqi_ctrl_blocked(struct pqi_ctrl_info *ctrl_info)
{
	return ctrl_info->block_requests;
}

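/*
 * Wait for the controller to leave the "blocked requests" state.  Returns
 * however much of the timeout_msecs budget is left; callers that pass
 * NO_TIMEOUT sleep until unblocked and get NO_TIMEOUT back unchanged.
 */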
static unsigned long pqi_wait_if_ctrl_blocked(struct pqi_ctrl_info *ctrl_info,
	unsigned long timeout_msecs)
{
	unsigned long remaining_msecs;

	if (!pqi_ctrl_blocked(ctrl_info))
		return timeout_msecs;

	atomic_inc(&ctrl_info->num_blocked_threads);

	if (timeout_msecs == NO_TIMEOUT) {
		wait_event(ctrl_info->block_requests_wait,
			!pqi_ctrl_blocked(ctrl_info));
		remaining_msecs = timeout_msecs;
	} else {
		unsigned long remaining_jiffies;

		remaining_jiffies =
			wait_event_timeout(ctrl_info->block_requests_wait,
				!pqi_ctrl_blocked(ctrl_info),
				msecs_to_jiffies(timeout_msecs));
		remaining_msecs = jiffies_to_msecs(remaining_jiffies);
	}

	atomic_dec(&ctrl_info->num_blocked_threads);

	return remaining_msecs;
}

static inline void pqi_ctrl_busy(struct pqi_ctrl_info *ctrl_info)
{
	atomic_inc(&ctrl_info->num_busy_threads);
}

static inline void pqi_ctrl_unbusy(struct pqi_ctrl_info *ctrl_info)
{
	atomic_dec(&ctrl_info->num_busy_threads);
}

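/*
 * A busy thread counts as quiesced once it is parked in
 * pqi_wait_if_ctrl_blocked() (which bumps num_blocked_threads), so spin
 * here until every busy thread is also a blocked one.
 */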
static inline void pqi_ctrl_wait_until_quiesced(struct pqi_ctrl_info *ctrl_info)
{
	while (atomic_read(&ctrl_info->num_busy_threads) >
		atomic_read(&ctrl_info->num_blocked_threads))
		usleep_range(1000, 2000);
}

static inline bool pqi_device_offline(struct pqi_scsi_dev *device)
{
	return device->device_offline;
}

static inline void pqi_device_reset_start(struct pqi_scsi_dev *device)
{
	device->in_reset = true;
}

static inline void pqi_device_reset_done(struct pqi_scsi_dev *device)
{
	device->in_reset = false;
}

static inline bool pqi_device_in_reset(struct pqi_scsi_dev *device)
{
	return device->in_reset;
}

static inline void pqi_schedule_rescan_worker_with_delay(
	struct pqi_ctrl_info *ctrl_info, unsigned long delay)
{
	if (pqi_ctrl_offline(ctrl_info))
		return;

	schedule_delayed_work(&ctrl_info->rescan_work, delay);
}

static inline void pqi_schedule_rescan_worker(struct pqi_ctrl_info *ctrl_info)
{
	pqi_schedule_rescan_worker_with_delay(ctrl_info, 0);
}

#define PQI_RESCAN_WORK_DELAY	(10 * HZ)

static inline void pqi_schedule_rescan_worker_delayed(
	struct pqi_ctrl_info *ctrl_info)
{
	pqi_schedule_rescan_worker_with_delay(ctrl_info, PQI_RESCAN_WORK_DELAY);
}

static inline void pqi_cancel_rescan_worker(struct pqi_ctrl_info *ctrl_info)
{
	cancel_delayed_work_sync(&ctrl_info->rescan_work);
}

static inline u32 pqi_read_heartbeat_counter(struct pqi_ctrl_info *ctrl_info)
{
	if (!ctrl_info->heartbeat_counter)
		return 0;

	return readl(ctrl_info->heartbeat_counter);
}

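/*
 * Map a single data buffer for DMA and describe it with one CISS-style
 * scatter-gather descriptor (flagged CISS_SG_LAST).  The matching unmap
 * helper below walks an array of such descriptors.
 */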
static int pqi_map_single(struct pci_dev *pci_dev,
	struct pqi_sg_descriptor *sg_descriptor, void *buffer,
	size_t buffer_length, int data_direction)
{
	dma_addr_t bus_address;

	if (!buffer || buffer_length == 0 || data_direction == PCI_DMA_NONE)
		return 0;

	bus_address = pci_map_single(pci_dev, buffer, buffer_length,
		data_direction);
	if (pci_dma_mapping_error(pci_dev, bus_address))
		return -ENOMEM;

	put_unaligned_le64((u64)bus_address, &sg_descriptor->address);
	put_unaligned_le32(buffer_length, &sg_descriptor->length);
	put_unaligned_le32(CISS_SG_LAST, &sg_descriptor->flags);

	return 0;
}

static void pqi_pci_unmap(struct pci_dev *pci_dev,
	struct pqi_sg_descriptor *descriptors, int num_descriptors,
	int data_direction)
{
	int i;

	if (data_direction == PCI_DMA_NONE)
		return;

	for (i = 0; i < num_descriptors; i++)
		pci_unmap_single(pci_dev,
			(dma_addr_t)get_unaligned_le64(&descriptors[i].address),
			get_unaligned_le32(&descriptors[i].length),
			data_direction);
}

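/*
 * Build a RAID-path request IU for one of the driver-internal CISS/BMIC
 * commands, fill in the matching CDB, and DMA-map the caller's buffer into
 * sg_descriptors[0].  The chosen PCI DMA direction is returned through
 * *pci_direction so the caller can unmap with pqi_pci_unmap() afterwards.
 */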
static int pqi_build_raid_path_request(struct pqi_ctrl_info *ctrl_info,
	struct pqi_raid_path_request *request, u8 cmd,
	u8 *scsi3addr, void *buffer, size_t buffer_length,
	u16 vpd_page, int *pci_direction)
{
	u8 *cdb;
	int pci_dir;

	memset(request, 0, sizeof(*request));

	request->header.iu_type = PQI_REQUEST_IU_RAID_PATH_IO;
	put_unaligned_le16(offsetof(struct pqi_raid_path_request,
		sg_descriptors[1]) - PQI_REQUEST_HEADER_LENGTH,
		&request->header.iu_length);
	put_unaligned_le32(buffer_length, &request->buffer_length);
	memcpy(request->lun_number, scsi3addr, sizeof(request->lun_number));
	request->task_attribute = SOP_TASK_ATTRIBUTE_SIMPLE;
	request->additional_cdb_bytes_usage = SOP_ADDITIONAL_CDB_BYTES_0;

	cdb = request->cdb;

	switch (cmd) {
	case INQUIRY:
		request->data_direction = SOP_READ_FLAG;
		cdb[0] = INQUIRY;
		if (vpd_page & VPD_PAGE) {
			cdb[1] = 0x1;
			cdb[2] = (u8)vpd_page;
		}
		cdb[4] = (u8)buffer_length;
		break;
	case CISS_REPORT_LOG:
	case CISS_REPORT_PHYS:
		request->data_direction = SOP_READ_FLAG;
		cdb[0] = cmd;
		if (cmd == CISS_REPORT_PHYS)
			cdb[1] = CISS_REPORT_PHYS_EXTENDED;
		else
			cdb[1] = CISS_REPORT_LOG_EXTENDED;
		put_unaligned_be32(buffer_length, &cdb[6]);
		break;
	case CISS_GET_RAID_MAP:
		request->data_direction = SOP_READ_FLAG;
		cdb[0] = CISS_READ;
		cdb[1] = CISS_GET_RAID_MAP;
		put_unaligned_be32(buffer_length, &cdb[6]);
		break;
	case SA_CACHE_FLUSH:
		request->data_direction = SOP_WRITE_FLAG;
		cdb[0] = BMIC_WRITE;
		cdb[6] = BMIC_CACHE_FLUSH;
		put_unaligned_be16(buffer_length, &cdb[7]);
		break;
	case BMIC_IDENTIFY_CONTROLLER:
	case BMIC_IDENTIFY_PHYSICAL_DEVICE:
		request->data_direction = SOP_READ_FLAG;
		cdb[0] = BMIC_READ;
		cdb[6] = cmd;
		put_unaligned_be16(buffer_length, &cdb[7]);
		break;
	case BMIC_WRITE_HOST_WELLNESS:
		request->data_direction = SOP_WRITE_FLAG;
		cdb[0] = BMIC_WRITE;
		cdb[6] = cmd;
		put_unaligned_be16(buffer_length, &cdb[7]);
		break;
	default:
		dev_err(&ctrl_info->pci_dev->dev, "unknown command 0x%x\n",
			cmd);
		break;
	}

	switch (request->data_direction) {
	case SOP_READ_FLAG:
		pci_dir = PCI_DMA_FROMDEVICE;
		break;
	case SOP_WRITE_FLAG:
		pci_dir = PCI_DMA_TODEVICE;
		break;
	case SOP_NO_DIRECTION_FLAG:
		pci_dir = PCI_DMA_NONE;
		break;
	default:
		pci_dir = PCI_DMA_BIDIRECTIONAL;
		break;
	}

	*pci_direction = pci_dir;

	return pqi_map_single(ctrl_info->pci_dev, &request->sg_descriptors[0],
		buffer, buffer_length, pci_dir);
}

static inline void pqi_reinit_io_request(struct pqi_io_request *io_request)
{
	io_request->scmd = NULL;
	io_request->status = 0;
	io_request->error_info = NULL;
	io_request->raid_bypass = false;
}

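/*
 * Claim a free slot from the pre-allocated I/O request pool without taking
 * a lock: scan forward from the last hint, and the first slot whose
 * refcount increments from zero to one belongs to the caller.  The hint
 * itself is updated racily, which is harmless -- it only seeds the next
 * scan.
 */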
static struct pqi_io_request *pqi_alloc_io_request(
	struct pqi_ctrl_info *ctrl_info)
{
	struct pqi_io_request *io_request;
	u16 i = ctrl_info->next_io_request_slot;	/* benignly racy */

	while (1) {
		io_request = &ctrl_info->io_request_pool[i];
		if (atomic_inc_return(&io_request->refcount) == 1)
			break;
		atomic_dec(&io_request->refcount);
		i = (i + 1) % ctrl_info->max_io_slots;
	}

	/* benignly racy */
	ctrl_info->next_io_request_slot = (i + 1) % ctrl_info->max_io_slots;

	pqi_reinit_io_request(io_request);

	return io_request;
}

static void pqi_free_io_request(struct pqi_io_request *io_request)
{
	atomic_dec(&io_request->refcount);
}

static int pqi_identify_controller(struct pqi_ctrl_info *ctrl_info,
	struct bmic_identify_controller *buffer)
{
	int rc;
	int pci_direction;
	struct pqi_raid_path_request request;

	rc = pqi_build_raid_path_request(ctrl_info, &request,
		BMIC_IDENTIFY_CONTROLLER, RAID_CTLR_LUNID, buffer,
		sizeof(*buffer), 0, &pci_direction);
	if (rc)
		return rc;

	rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header, 0,
		NULL, NO_TIMEOUT);

	pqi_pci_unmap(ctrl_info->pci_dev, request.sg_descriptors, 1,
		pci_direction);

	return rc;
}

static int pqi_scsi_inquiry(struct pqi_ctrl_info *ctrl_info,
	u8 *scsi3addr, u16 vpd_page, void *buffer, size_t buffer_length)
{
	int rc;
	int pci_direction;
	struct pqi_raid_path_request request;

	rc = pqi_build_raid_path_request(ctrl_info, &request,
		INQUIRY, scsi3addr, buffer, buffer_length, vpd_page,
		&pci_direction);
	if (rc)
		return rc;

	rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header, 0,
		NULL, NO_TIMEOUT);

	pqi_pci_unmap(ctrl_info->pci_dev, request.sg_descriptors, 1,
		pci_direction);

	return rc;
}

static int pqi_identify_physical_device(struct pqi_ctrl_info *ctrl_info,
	struct pqi_scsi_dev *device,
	struct bmic_identify_physical_device *buffer,
	size_t buffer_length)
{
	int rc;
	int pci_direction;
	u16 bmic_device_index;
	struct pqi_raid_path_request request;

	rc = pqi_build_raid_path_request(ctrl_info, &request,
		BMIC_IDENTIFY_PHYSICAL_DEVICE, RAID_CTLR_LUNID, buffer,
		buffer_length, 0, &pci_direction);
	if (rc)
		return rc;

	bmic_device_index = CISS_GET_DRIVE_NUMBER(device->scsi3addr);
	request.cdb[2] = (u8)bmic_device_index;
	request.cdb[9] = (u8)(bmic_device_index >> 8);

	rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header,
		0, NULL, NO_TIMEOUT);

	pqi_pci_unmap(ctrl_info->pci_dev, request.sg_descriptors, 1,
		pci_direction);

	return rc;
}

#define SA_CACHE_FLUSH_BUFFER_LENGTH	4

static int pqi_flush_cache(struct pqi_ctrl_info *ctrl_info)
{
	int rc;
	struct pqi_raid_path_request request;
	int pci_direction;
	u8 *buffer;

	/*
	 * Don't bother trying to flush the cache if the controller is
	 * locked up.
	 */
	if (pqi_ctrl_offline(ctrl_info))
		return -ENXIO;

	buffer = kzalloc(SA_CACHE_FLUSH_BUFFER_LENGTH, GFP_KERNEL);
	if (!buffer)
		return -ENOMEM;

	rc = pqi_build_raid_path_request(ctrl_info, &request,
		SA_CACHE_FLUSH, RAID_CTLR_LUNID, buffer,
		SA_CACHE_FLUSH_BUFFER_LENGTH, 0, &pci_direction);
	if (rc)
		goto out;

	rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header,
		0, NULL, NO_TIMEOUT);

	pqi_pci_unmap(ctrl_info->pci_dev, request.sg_descriptors, 1,
		pci_direction);

out:
	kfree(buffer);

	return rc;
}

static int pqi_write_host_wellness(struct pqi_ctrl_info *ctrl_info,
	void *buffer, size_t buffer_length)
{
	int rc;
	struct pqi_raid_path_request request;
	int pci_direction;

	rc = pqi_build_raid_path_request(ctrl_info, &request,
		BMIC_WRITE_HOST_WELLNESS, RAID_CTLR_LUNID, buffer,
		buffer_length, 0, &pci_direction);
	if (rc)
		return rc;

	rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header,
		0, NULL, NO_TIMEOUT);

	pqi_pci_unmap(ctrl_info->pci_dev, request.sg_descriptors, 1,
		pci_direction);

	return rc;
}

#pragma pack(1)

struct bmic_host_wellness_driver_version {
	u8	start_tag[4];
	u8	driver_version_tag[2];
	__le16	driver_version_length;
	char	driver_version[32];
	u8	end_tag[2];
};

#pragma pack()

static int pqi_write_driver_version_to_host_wellness(
	struct pqi_ctrl_info *ctrl_info)
{
	int rc;
	struct bmic_host_wellness_driver_version *buffer;
	size_t buffer_length;

	buffer_length = sizeof(*buffer);

	buffer = kmalloc(buffer_length, GFP_KERNEL);
	if (!buffer)
		return -ENOMEM;

	buffer->start_tag[0] = '<';
	buffer->start_tag[1] = 'H';
	buffer->start_tag[2] = 'W';
	buffer->start_tag[3] = '>';
	buffer->driver_version_tag[0] = 'D';
	buffer->driver_version_tag[1] = 'V';
	put_unaligned_le16(sizeof(buffer->driver_version),
		&buffer->driver_version_length);
	strncpy(buffer->driver_version, "Linux " DRIVER_VERSION,
		sizeof(buffer->driver_version) - 1);
	buffer->driver_version[sizeof(buffer->driver_version) - 1] = '\0';
	buffer->end_tag[0] = 'Z';
	buffer->end_tag[1] = 'Z';

	rc = pqi_write_host_wellness(ctrl_info, buffer, buffer_length);

	kfree(buffer);

	return rc;
}

#pragma pack(1)

struct bmic_host_wellness_time {
	u8	start_tag[4];
	u8	time_tag[2];
	__le16	time_length;
	u8	time[8];
	u8	dont_write_tag[2];
	u8	end_tag[2];
};

#pragma pack()

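/*
 * The wellness time[] field is BCD-encoded, as built below:
 * time[0] hour, time[1] minute, time[2] second, time[3] reserved (0),
 * time[4] month, time[5] day, time[6] century, time[7] year within century.
 */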
static int pqi_write_current_time_to_host_wellness(
	struct pqi_ctrl_info *ctrl_info)
{
	int rc;
	struct bmic_host_wellness_time *buffer;
	size_t buffer_length;
	time64_t local_time;
	unsigned int year;
	struct tm tm;

	buffer_length = sizeof(*buffer);

	buffer = kmalloc(buffer_length, GFP_KERNEL);
	if (!buffer)
		return -ENOMEM;

	buffer->start_tag[0] = '<';
	buffer->start_tag[1] = 'H';
	buffer->start_tag[2] = 'W';
	buffer->start_tag[3] = '>';
	buffer->time_tag[0] = 'T';
	buffer->time_tag[1] = 'D';
	put_unaligned_le16(sizeof(buffer->time),
		&buffer->time_length);

	local_time = ktime_get_real_seconds();
	time64_to_tm(local_time, -sys_tz.tz_minuteswest * 60, &tm);
	year = tm.tm_year + 1900;

	buffer->time[0] = bin2bcd(tm.tm_hour);
	buffer->time[1] = bin2bcd(tm.tm_min);
	buffer->time[2] = bin2bcd(tm.tm_sec);
	buffer->time[3] = 0;
	buffer->time[4] = bin2bcd(tm.tm_mon + 1);
	buffer->time[5] = bin2bcd(tm.tm_mday);
	buffer->time[6] = bin2bcd(year / 100);
	buffer->time[7] = bin2bcd(year % 100);

	buffer->dont_write_tag[0] = 'D';
	buffer->dont_write_tag[1] = 'W';
	buffer->end_tag[0] = 'Z';
	buffer->end_tag[1] = 'Z';

	rc = pqi_write_host_wellness(ctrl_info, buffer, buffer_length);

	kfree(buffer);

	return rc;
}

#define PQI_UPDATE_TIME_WORK_INTERVAL	(24UL * 60 * 60 * HZ)

static void pqi_update_time_worker(struct work_struct *work)
{
	int rc;
	struct pqi_ctrl_info *ctrl_info;

	ctrl_info = container_of(to_delayed_work(work), struct pqi_ctrl_info,
		update_time_work);

	if (pqi_ctrl_offline(ctrl_info))
		return;

	rc = pqi_write_current_time_to_host_wellness(ctrl_info);
	if (rc)
		dev_warn(&ctrl_info->pci_dev->dev,
			"error updating time on controller\n");

	schedule_delayed_work(&ctrl_info->update_time_work,
		PQI_UPDATE_TIME_WORK_INTERVAL);
}

static inline void pqi_schedule_update_time_worker(
	struct pqi_ctrl_info *ctrl_info)
{
	schedule_delayed_work(&ctrl_info->update_time_work, 0);
}

static inline void pqi_cancel_update_time_worker(
	struct pqi_ctrl_info *ctrl_info)
{
	cancel_delayed_work_sync(&ctrl_info->update_time_work);
}

static int pqi_report_luns(struct pqi_ctrl_info *ctrl_info, u8 cmd,
	void *buffer, size_t buffer_length)
{
	int rc;
	int pci_direction;
	struct pqi_raid_path_request request;

	rc = pqi_build_raid_path_request(ctrl_info, &request,
		cmd, RAID_CTLR_LUNID, buffer, buffer_length, 0, &pci_direction);
	if (rc)
		return rc;

	rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header, 0,
		NULL, NO_TIMEOUT);

	pqi_pci_unmap(ctrl_info->pci_dev, request.sg_descriptors, 1,
		pci_direction);

	return rc;
}

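/*
 * Fetch a complete LUN list: first read just the report header to learn the
 * list length, then allocate and read the full list.  If the reported
 * length has grown between the two reads (devices arrived in the meantime),
 * throw the buffer away and try again with the larger size.
 */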
static int pqi_report_phys_logical_luns(struct pqi_ctrl_info *ctrl_info, u8 cmd,
	void **buffer)
{
	int rc;
	size_t lun_list_length;
	size_t lun_data_length;
	size_t new_lun_list_length;
	void *lun_data = NULL;
	struct report_lun_header *report_lun_header;

	report_lun_header = kmalloc(sizeof(*report_lun_header), GFP_KERNEL);
	if (!report_lun_header) {
		rc = -ENOMEM;
		goto out;
	}

	rc = pqi_report_luns(ctrl_info, cmd, report_lun_header,
		sizeof(*report_lun_header));
	if (rc)
		goto out;

	lun_list_length = get_unaligned_be32(&report_lun_header->list_length);

again:
	lun_data_length = sizeof(struct report_lun_header) + lun_list_length;

	lun_data = kmalloc(lun_data_length, GFP_KERNEL);
	if (!lun_data) {
		rc = -ENOMEM;
		goto out;
	}

	if (lun_list_length == 0) {
		memcpy(lun_data, report_lun_header, sizeof(*report_lun_header));
		goto out;
	}

	rc = pqi_report_luns(ctrl_info, cmd, lun_data, lun_data_length);
	if (rc)
		goto out;

	new_lun_list_length = get_unaligned_be32(
		&((struct report_lun_header *)lun_data)->list_length);

	if (new_lun_list_length > lun_list_length) {
		lun_list_length = new_lun_list_length;
		kfree(lun_data);
		goto again;
	}

out:
	kfree(report_lun_header);

	if (rc) {
		kfree(lun_data);
		lun_data = NULL;
	}

	*buffer = lun_data;

	return rc;
}

static inline int pqi_report_phys_luns(struct pqi_ctrl_info *ctrl_info,
	void **buffer)
{
	return pqi_report_phys_logical_luns(ctrl_info, CISS_REPORT_PHYS,
		buffer);
}

static inline int pqi_report_logical_luns(struct pqi_ctrl_info *ctrl_info,
	void **buffer)
{
	return pqi_report_phys_logical_luns(ctrl_info, CISS_REPORT_LOG, buffer);
}

static int pqi_get_device_lists(struct pqi_ctrl_info *ctrl_info,
	struct report_phys_lun_extended **physdev_list,
	struct report_log_lun_extended **logdev_list)
{
	int rc;
	size_t logdev_list_length;
	size_t logdev_data_length;
	struct report_log_lun_extended *internal_logdev_list;
	struct report_log_lun_extended *logdev_data;
	struct report_lun_header report_lun_header;

	rc = pqi_report_phys_luns(ctrl_info, (void **)physdev_list);
	if (rc)
		dev_err(&ctrl_info->pci_dev->dev,
			"report physical LUNs failed\n");

	rc = pqi_report_logical_luns(ctrl_info, (void **)logdev_list);
	if (rc)
		dev_err(&ctrl_info->pci_dev->dev,
			"report logical LUNs failed\n");

	/*
	 * Tack the controller itself onto the end of the logical device list.
	 */

	logdev_data = *logdev_list;

	if (logdev_data) {
		logdev_list_length =
			get_unaligned_be32(&logdev_data->header.list_length);
	} else {
		memset(&report_lun_header, 0, sizeof(report_lun_header));
		logdev_data =
			(struct report_log_lun_extended *)&report_lun_header;
		logdev_list_length = 0;
	}

	logdev_data_length = sizeof(struct report_lun_header) +
		logdev_list_length;

	internal_logdev_list = kmalloc(logdev_data_length +
		sizeof(struct report_log_lun_extended), GFP_KERNEL);
	if (!internal_logdev_list) {
		kfree(*logdev_list);
		*logdev_list = NULL;
		return -ENOMEM;
	}

	memcpy(internal_logdev_list, logdev_data, logdev_data_length);
	memset((u8 *)internal_logdev_list + logdev_data_length, 0,
		sizeof(struct report_log_lun_extended_entry));
	put_unaligned_be32(logdev_list_length +
		sizeof(struct report_log_lun_extended_entry),
		&internal_logdev_list->header.list_length);

	kfree(*logdev_list);
	*logdev_list = internal_logdev_list;

	return 0;
}

static inline void pqi_set_bus_target_lun(struct pqi_scsi_dev *device,
	int bus, int target, int lun)
{
	device->bus = bus;
	device->target = target;
	device->lun = lun;
}

static void pqi_assign_bus_target_lun(struct pqi_scsi_dev *device)
{
	u8 *scsi3addr;
	u32 lunid;
	int bus;
	int target;
	int lun;

	scsi3addr = device->scsi3addr;
	lunid = get_unaligned_le32(scsi3addr);

	if (pqi_is_hba_lunid(scsi3addr)) {
		/* The specified device is the controller. */
		pqi_set_bus_target_lun(device, PQI_HBA_BUS, 0, lunid & 0x3fff);
		device->target_lun_valid = true;
		return;
	}

	if (pqi_is_logical_device(device)) {
		if (device->is_external_raid_device) {
			bus = PQI_EXTERNAL_RAID_VOLUME_BUS;
			target = (lunid >> 16) & 0x3fff;
			lun = lunid & 0xff;
		} else {
			bus = PQI_RAID_VOLUME_BUS;
			target = 0;
			lun = lunid & 0x3fff;
		}
		pqi_set_bus_target_lun(device, bus, target, lun);
		device->target_lun_valid = true;
		return;
	}

	/*
	 * Defer target and LUN assignment for non-controller physical devices
	 * because the SAS transport layer will make these assignments later.
	 */
	pqi_set_bus_target_lun(device, PQI_PHYSICAL_DEVICE_BUS, 0, 0);
}

static void pqi_get_raid_level(struct pqi_ctrl_info *ctrl_info,
	struct pqi_scsi_dev *device)
{
	int rc;
	u8 raid_level;
	u8 *buffer;

	raid_level = SA_RAID_UNKNOWN;

	buffer = kmalloc(64, GFP_KERNEL);
	if (buffer) {
		rc = pqi_scsi_inquiry(ctrl_info, device->scsi3addr,
			VPD_PAGE | CISS_VPD_LV_DEVICE_GEOMETRY, buffer, 64);
		if (rc == 0) {
			raid_level = buffer[8];
			if (raid_level > SA_RAID_MAX)
				raid_level = SA_RAID_UNKNOWN;
		}
		kfree(buffer);
	}

	device->raid_level = raid_level;
}

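/*
 * Sanity-check a RAID map before it is used for RAID bypass: the structure
 * size must be plausible, the entry count must fit RAID_MAP_MAX_ENTRIES,
 * and the layout counts must match what the volume's RAID level implies.
 */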
static int pqi_validate_raid_map(struct pqi_ctrl_info *ctrl_info,
	struct pqi_scsi_dev *device, struct raid_map *raid_map)
{
	char *err_msg;
	u32 raid_map_size;
	u32 r5or6_blocks_per_row;
	unsigned int num_phys_disks;
	unsigned int num_raid_map_entries;

	raid_map_size = get_unaligned_le32(&raid_map->structure_size);

	if (raid_map_size < offsetof(struct raid_map, disk_data)) {
		err_msg = "RAID map too small";
		goto bad_raid_map;
	}

	if (raid_map_size > sizeof(*raid_map)) {
		err_msg = "RAID map too large";
		goto bad_raid_map;
	}

	num_phys_disks = get_unaligned_le16(&raid_map->layout_map_count) *
		(get_unaligned_le16(&raid_map->data_disks_per_row) +
		get_unaligned_le16(&raid_map->metadata_disks_per_row));
	num_raid_map_entries = num_phys_disks *
		get_unaligned_le16(&raid_map->row_cnt);

	if (num_raid_map_entries > RAID_MAP_MAX_ENTRIES) {
		err_msg = "invalid number of map entries in RAID map";
		goto bad_raid_map;
	}

	if (device->raid_level == SA_RAID_1) {
		if (get_unaligned_le16(&raid_map->layout_map_count) != 2) {
			err_msg = "invalid RAID-1 map";
			goto bad_raid_map;
		}
	} else if (device->raid_level == SA_RAID_ADM) {
		if (get_unaligned_le16(&raid_map->layout_map_count) != 3) {
			err_msg = "invalid RAID-1(ADM) map";
			goto bad_raid_map;
		}
	} else if ((device->raid_level == SA_RAID_5 ||
		device->raid_level == SA_RAID_6) &&
		get_unaligned_le16(&raid_map->layout_map_count) > 1) {
		/* RAID 50/60 */
		r5or6_blocks_per_row =
			get_unaligned_le16(&raid_map->strip_size) *
			get_unaligned_le16(&raid_map->data_disks_per_row);
		if (r5or6_blocks_per_row == 0) {
			err_msg = "invalid RAID-5 or RAID-6 map";
			goto bad_raid_map;
		}
	}

	return 0;

bad_raid_map:
	dev_warn(&ctrl_info->pci_dev->dev,
		"scsi %d:%d:%d:%d %s\n",
		ctrl_info->scsi_host->host_no,
		device->bus, device->target, device->lun, err_msg);

	return -EINVAL;
}

static int pqi_get_raid_map(struct pqi_ctrl_info *ctrl_info,
	struct pqi_scsi_dev *device)
{
	int rc;
	int pci_direction;
	struct pqi_raid_path_request request;
	struct raid_map *raid_map;

	raid_map = kmalloc(sizeof(*raid_map), GFP_KERNEL);
	if (!raid_map)
		return -ENOMEM;

	rc = pqi_build_raid_path_request(ctrl_info, &request,
		CISS_GET_RAID_MAP, device->scsi3addr, raid_map,
		sizeof(*raid_map), 0, &pci_direction);
	if (rc)
		goto error;

	rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header, 0,
		NULL, NO_TIMEOUT);

	pqi_pci_unmap(ctrl_info->pci_dev, request.sg_descriptors, 1,
		pci_direction);

	if (rc)
		goto error;

	rc = pqi_validate_raid_map(ctrl_info, device, raid_map);
	if (rc)
		goto error;

	device->raid_map = raid_map;

	return 0;

error:
	kfree(raid_map);

	return rc;
}

static void pqi_get_offload_status(struct pqi_ctrl_info *ctrl_info,
	struct pqi_scsi_dev *device)
{
	int rc;
	u8 *buffer;
	u8 offload_status;

	buffer = kmalloc(64, GFP_KERNEL);
	if (!buffer)
		return;

	rc = pqi_scsi_inquiry(ctrl_info, device->scsi3addr,
		VPD_PAGE | CISS_VPD_LV_OFFLOAD_STATUS, buffer, 64);
	if (rc)
		goto out;

#define OFFLOAD_STATUS_BYTE	4
#define OFFLOAD_CONFIGURED_BIT	0x1
#define OFFLOAD_ENABLED_BIT	0x2

	offload_status = buffer[OFFLOAD_STATUS_BYTE];
	device->offload_configured =
		!!(offload_status & OFFLOAD_CONFIGURED_BIT);
	if (device->offload_configured) {
		device->offload_enabled_pending =
			!!(offload_status & OFFLOAD_ENABLED_BIT);
		if (pqi_get_raid_map(ctrl_info, device))
			device->offload_enabled_pending = false;
	}

out:
	kfree(buffer);
}

/*
 * Use vendor-specific VPD to determine online/offline status of a volume.
 */

static void pqi_get_volume_status(struct pqi_ctrl_info *ctrl_info,
	struct pqi_scsi_dev *device)
{
	int rc;
	size_t page_length;
	u8 volume_status = CISS_LV_STATUS_UNAVAILABLE;
	bool volume_offline = true;
	u32 volume_flags;
	struct ciss_vpd_logical_volume_status *vpd;

	vpd = kmalloc(sizeof(*vpd), GFP_KERNEL);
	if (!vpd)
		goto no_buffer;

	rc = pqi_scsi_inquiry(ctrl_info, device->scsi3addr,
		VPD_PAGE | CISS_VPD_LV_STATUS, vpd, sizeof(*vpd));
	if (rc)
		goto out;

	page_length = offsetof(struct ciss_vpd_logical_volume_status,
		volume_status) + vpd->page_length;
	if (page_length < sizeof(*vpd))
		goto out;

	volume_status = vpd->volume_status;
	volume_flags = get_unaligned_be32(&vpd->flags);
	volume_offline = (volume_flags & CISS_LV_FLAGS_NO_HOST_IO) != 0;

out:
	kfree(vpd);
no_buffer:
	device->volume_status = volume_status;
	device->volume_offline = volume_offline;
}

static int pqi_get_device_info(struct pqi_ctrl_info *ctrl_info,
	struct pqi_scsi_dev *device)
{
	int rc;
	u8 *buffer;

	buffer = kmalloc(64, GFP_KERNEL);
	if (!buffer)
		return -ENOMEM;

	/* Send an inquiry to the device to see what it is. */
	rc = pqi_scsi_inquiry(ctrl_info, device->scsi3addr, 0, buffer, 64);
	if (rc)
		goto out;

	scsi_sanitize_inquiry_string(&buffer[8], 8);
	scsi_sanitize_inquiry_string(&buffer[16], 16);

	device->devtype = buffer[0] & 0x1f;
	memcpy(device->vendor, &buffer[8], sizeof(device->vendor));
	memcpy(device->model, &buffer[16], sizeof(device->model));

	if (pqi_is_logical_device(device) && device->devtype == TYPE_DISK) {
		if (device->is_external_raid_device) {
			device->raid_level = SA_RAID_UNKNOWN;
			device->volume_status = CISS_LV_OK;
			device->volume_offline = false;
		} else {
			pqi_get_raid_level(ctrl_info, device);
			pqi_get_offload_status(ctrl_info, device);
			pqi_get_volume_status(ctrl_info, device);
		}
	}

out:
	kfree(buffer);

	return rc;
}

static void pqi_get_physical_disk_info(struct pqi_ctrl_info *ctrl_info,
	struct pqi_scsi_dev *device,
	struct bmic_identify_physical_device *id_phys)
{
	int rc;

	memset(id_phys, 0, sizeof(*id_phys));

	rc = pqi_identify_physical_device(ctrl_info, device,
		id_phys, sizeof(*id_phys));
	if (rc) {
		device->queue_depth = PQI_PHYSICAL_DISK_DEFAULT_MAX_QUEUE_DEPTH;
		return;
	}

	device->queue_depth =
		get_unaligned_le16(&id_phys->current_queue_depth_limit);
	device->device_type = id_phys->device_type;
	device->active_path_index = id_phys->active_path_number;
	device->path_map = id_phys->redundant_path_present_map;
	memcpy(&device->box,
		&id_phys->alternate_paths_phys_box_on_port,
		sizeof(device->box));
	memcpy(&device->phys_connector,
		&id_phys->alternate_paths_phys_connector,
		sizeof(device->phys_connector));
	device->bay = id_phys->phys_bay_in_box;
}

static void pqi_show_volume_status(struct pqi_ctrl_info *ctrl_info,
	struct pqi_scsi_dev *device)
{
	char *status;
	static const char unknown_state_str[] =
		"Volume is in an unknown state (%u)";
	char unknown_state_buffer[sizeof(unknown_state_str) + 10];

	switch (device->volume_status) {
	case CISS_LV_OK:
		status = "Volume online";
		break;
	case CISS_LV_FAILED:
		status = "Volume failed";
		break;
	case CISS_LV_NOT_CONFIGURED:
		status = "Volume not configured";
		break;
	case CISS_LV_DEGRADED:
		status = "Volume degraded";
		break;
	case CISS_LV_READY_FOR_RECOVERY:
		status = "Volume ready for recovery operation";
		break;
	case CISS_LV_UNDERGOING_RECOVERY:
		status = "Volume undergoing recovery";
		break;
	case CISS_LV_WRONG_PHYSICAL_DRIVE_REPLACED:
		status = "Wrong physical drive was replaced";
		break;
	case CISS_LV_PHYSICAL_DRIVE_CONNECTION_PROBLEM:
		status = "A physical drive not properly connected";
		break;
	case CISS_LV_HARDWARE_OVERHEATING:
		status = "Hardware is overheating";
		break;
	case CISS_LV_HARDWARE_HAS_OVERHEATED:
		status = "Hardware has overheated";
		break;
	case CISS_LV_UNDERGOING_EXPANSION:
		status = "Volume undergoing expansion";
		break;
	case CISS_LV_NOT_AVAILABLE:
		status = "Volume waiting for transforming volume";
		break;
	case CISS_LV_QUEUED_FOR_EXPANSION:
		status = "Volume queued for expansion";
		break;
	case CISS_LV_DISABLED_SCSI_ID_CONFLICT:
		status = "Volume disabled due to SCSI ID conflict";
		break;
	case CISS_LV_EJECTED:
		status = "Volume has been ejected";
		break;
	case CISS_LV_UNDERGOING_ERASE:
		status = "Volume undergoing background erase";
		break;
	case CISS_LV_READY_FOR_PREDICTIVE_SPARE_REBUILD:
		status = "Volume ready for predictive spare rebuild";
		break;
	case CISS_LV_UNDERGOING_RPI:
		status = "Volume undergoing rapid parity initialization";
		break;
	case CISS_LV_PENDING_RPI:
		status = "Volume queued for rapid parity initialization";
		break;
	case CISS_LV_ENCRYPTED_NO_KEY:
		status = "Encrypted volume inaccessible - key not present";
		break;
	case CISS_LV_UNDERGOING_ENCRYPTION:
		status = "Volume undergoing encryption process";
		break;
	case CISS_LV_UNDERGOING_ENCRYPTION_REKEYING:
		status = "Volume undergoing encryption re-keying process";
		break;
	case CISS_LV_ENCRYPTED_IN_NON_ENCRYPTED_CONTROLLER:
		status = "Volume encrypted but encryption is disabled";
		break;
	case CISS_LV_PENDING_ENCRYPTION:
		status = "Volume pending migration to encrypted state";
		break;
	case CISS_LV_PENDING_ENCRYPTION_REKEYING:
		status = "Volume pending encryption rekeying";
		break;
	case CISS_LV_NOT_SUPPORTED:
		status = "Volume not supported on this controller";
		break;
	case CISS_LV_STATUS_UNAVAILABLE:
		status = "Volume status not available";
		break;
	default:
		snprintf(unknown_state_buffer, sizeof(unknown_state_buffer),
			unknown_state_str, device->volume_status);
		status = unknown_state_buffer;
		break;
	}

	dev_info(&ctrl_info->pci_dev->dev,
		"scsi %d:%d:%d:%d %s\n",
		ctrl_info->scsi_host->host_no,
		device->bus, device->target, device->lun, status);
}

static void pqi_rescan_worker(struct work_struct *work)
{
	struct pqi_ctrl_info *ctrl_info;

	ctrl_info = container_of(to_delayed_work(work), struct pqi_ctrl_info,
		rescan_work);

	pqi_scan_scsi_devices(ctrl_info);
}

static int pqi_add_device(struct pqi_ctrl_info *ctrl_info,
	struct pqi_scsi_dev *device)
{
	int rc;

	if (pqi_is_logical_device(device))
		rc = scsi_add_device(ctrl_info->scsi_host, device->bus,
			device->target, device->lun);
	else
		rc = pqi_add_sas_device(ctrl_info->sas_host, device);

	return rc;
}

static inline void pqi_remove_device(struct pqi_ctrl_info *ctrl_info,
	struct pqi_scsi_dev *device)
{
	if (pqi_is_logical_device(device))
		scsi_remove_device(device->sdev);
	else
		pqi_remove_sas_device(device);
}

/* Assumes the SCSI device list lock is held. */

static struct pqi_scsi_dev *pqi_find_scsi_dev(struct pqi_ctrl_info *ctrl_info,
	int bus, int target, int lun)
{
	struct pqi_scsi_dev *device;

	list_for_each_entry(device, &ctrl_info->scsi_device_list,
		scsi_device_list_entry)
		if (device->bus == bus && device->target == target &&
			device->lun == lun)
			return device;

	return NULL;
}

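/*
 * Two discovered devices count as the same device only if their stable
 * identities match: the WWID for physical devices, the volume ID for
 * logical volumes.  A matching scsi3addr with a different identity means
 * the address has been reused, so the device is treated as changed.
 */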
static inline bool pqi_device_equal(struct pqi_scsi_dev *dev1,
	struct pqi_scsi_dev *dev2)
{
	if (dev1->is_physical_device != dev2->is_physical_device)
		return false;

	if (dev1->is_physical_device)
		return dev1->wwid == dev2->wwid;

	return memcmp(dev1->volume_id, dev2->volume_id,
		sizeof(dev1->volume_id)) == 0;
}

enum pqi_find_result {
	DEVICE_NOT_FOUND,
	DEVICE_CHANGED,
	DEVICE_SAME,
};

static enum pqi_find_result pqi_scsi_find_entry(struct pqi_ctrl_info *ctrl_info,
	struct pqi_scsi_dev *device_to_find,
	struct pqi_scsi_dev **matching_device)
{
	struct pqi_scsi_dev *device;

	list_for_each_entry(device, &ctrl_info->scsi_device_list,
		scsi_device_list_entry) {
		if (pqi_scsi3addr_equal(device_to_find->scsi3addr,
			device->scsi3addr)) {
			*matching_device = device;
			if (pqi_device_equal(device_to_find, device)) {
				if (device_to_find->volume_offline)
					return DEVICE_CHANGED;
				return DEVICE_SAME;
			}
			return DEVICE_CHANGED;
		}
	}

	return DEVICE_NOT_FOUND;
}

static void pqi_dev_info(struct pqi_ctrl_info *ctrl_info,
	char *action, struct pqi_scsi_dev *device)
{
	dev_info(&ctrl_info->pci_dev->dev,
		"%s scsi %d:%d:%d:%d: %s %.8s %.16s %-12s SSDSmartPathCap%c En%c qd=%d\n",
		action,
		ctrl_info->scsi_host->host_no,
		device->bus,
		device->target,
		device->lun,
		scsi_device_type(device->devtype),
		device->vendor,
		device->model,
		pqi_is_logical_device(device) ?
			pqi_raid_level_to_string(device->raid_level) : "",
		device->offload_configured ? '+' : '-',
		device->offload_enabled_pending ? '+' : '-',
		device->queue_depth);
}

/* Assumes the SCSI device list lock is held. */

static void pqi_scsi_update_device(struct pqi_scsi_dev *existing_device,
	struct pqi_scsi_dev *new_device)
{
	existing_device->devtype = new_device->devtype;
	existing_device->device_type = new_device->device_type;
	existing_device->bus = new_device->bus;
	if (new_device->target_lun_valid) {
		existing_device->target = new_device->target;
		existing_device->lun = new_device->lun;
		existing_device->target_lun_valid = true;
	}

	/* By definition, the scsi3addr and wwid fields are already the same. */

	existing_device->is_physical_device = new_device->is_physical_device;
	existing_device->is_external_raid_device =
		new_device->is_external_raid_device;
	existing_device->aio_enabled = new_device->aio_enabled;
	memcpy(existing_device->vendor, new_device->vendor,
		sizeof(existing_device->vendor));
	memcpy(existing_device->model, new_device->model,
		sizeof(existing_device->model));
	existing_device->sas_address = new_device->sas_address;
	existing_device->raid_level = new_device->raid_level;
	existing_device->queue_depth = new_device->queue_depth;
	existing_device->aio_handle = new_device->aio_handle;
	existing_device->volume_status = new_device->volume_status;
	existing_device->active_path_index = new_device->active_path_index;
	existing_device->path_map = new_device->path_map;
	existing_device->bay = new_device->bay;
	memcpy(existing_device->box, new_device->box,
		sizeof(existing_device->box));
	memcpy(existing_device->phys_connector, new_device->phys_connector,
		sizeof(existing_device->phys_connector));
	existing_device->offload_configured = new_device->offload_configured;
	existing_device->offload_enabled = false;
	existing_device->offload_enabled_pending =
		new_device->offload_enabled_pending;
	existing_device->offload_to_mirror = 0;
	kfree(existing_device->raid_map);
	existing_device->raid_map = new_device->raid_map;

	/* To prevent this from being freed later. */
	new_device->raid_map = NULL;
}

static inline void pqi_free_device(struct pqi_scsi_dev *device)
{
	if (device) {
		kfree(device->raid_map);
		kfree(device);
	}
}

/*
 * Called when exposing a new device to the OS fails in order to re-adjust
 * our internal SCSI device list to match the SCSI ML's view.
 */

static inline void pqi_fixup_botched_add(struct pqi_ctrl_info *ctrl_info,
	struct pqi_scsi_dev *device)
{
	unsigned long flags;

	spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);
	list_del(&device->scsi_device_list_entry);
	spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);

	/* Allow the device structure to be freed later. */
	device->keep_device = false;
}

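/*
 * Reconcile the driver's device list with a freshly discovered one in three
 * passes: under the spinlock, mark every known device gone, then match the
 * new list against it (clearing the mark or queueing additions); afterwards,
 * outside the lock, remove departed devices and expose the new ones.
 */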
1548 | static void pqi_update_device_list(struct pqi_ctrl_info *ctrl_info, | |
1549 | struct pqi_scsi_dev *new_device_list[], unsigned int num_new_devices) | |
1550 | { | |
1551 | int rc; | |
1552 | unsigned int i; | |
1553 | unsigned long flags; | |
1554 | enum pqi_find_result find_result; | |
1555 | struct pqi_scsi_dev *device; | |
1556 | struct pqi_scsi_dev *next; | |
1557 | struct pqi_scsi_dev *matching_device; | |
1558 | struct list_head add_list; | |
1559 | struct list_head delete_list; | |
1560 | ||
1561 | INIT_LIST_HEAD(&add_list); | |
1562 | INIT_LIST_HEAD(&delete_list); | |
1563 | ||
1564 | /* | |
1565 | * The idea here is to do as little work as possible while holding the | |
1566 | * spinlock. That's why we go to great pains to defer anything other | |
1567 | * than updating the internal device list until after we release the | |
1568 | * spinlock. | |
1569 | */ | |
1570 | ||
1571 | spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags); | |
1572 | ||
1573 | /* Assume that all devices in the existing list have gone away. */ | |
1574 | list_for_each_entry(device, &ctrl_info->scsi_device_list, | |
1575 | scsi_device_list_entry) | |
1576 | device->device_gone = true; | |
1577 | ||
1578 | for (i = 0; i < num_new_devices; i++) { | |
1579 | device = new_device_list[i]; | |
1580 | ||
1581 | find_result = pqi_scsi_find_entry(ctrl_info, device, | |
1582 | &matching_device); | |
1583 | ||
1584 | switch (find_result) { | |
1585 | case DEVICE_SAME: | |
1586 | /* | |
1587 | * The newly found device is already in the existing | |
1588 | * device list. | |
1589 | */ | |
1590 | device->new_device = false; | |
1591 | matching_device->device_gone = false; | |
1592 | pqi_scsi_update_device(matching_device, device); | |
1593 | break; | |
1594 | case DEVICE_NOT_FOUND: | |
1595 | /* | |
1596 | * The newly found device is NOT in the existing device | |
1597 | * list. | |
1598 | */ | |
1599 | device->new_device = true; | |
1600 | break; | |
1601 | case DEVICE_CHANGED: | |
1602 | /* | |
1603 | * The original device has gone away and we need to add | |
1604 | * the new device. | |
1605 | */ | |
1606 | device->new_device = true; | |
1607 | break; | |
6c223761 KB |
1608 | } |
1609 | } | |
1610 | ||
1611 | /* Process all devices that have gone away. */ | |
1612 | list_for_each_entry_safe(device, next, &ctrl_info->scsi_device_list, | |
1613 | scsi_device_list_entry) { | |
1614 | if (device->device_gone) { | |
1615 | list_del(&device->scsi_device_list_entry); | |
1616 | list_add_tail(&device->delete_list_entry, &delete_list); | |
1617 | } | |
1618 | } | |
1619 | ||
1620 | /* Process all new devices. */ | |
1621 | for (i = 0; i < num_new_devices; i++) { | |
1622 | device = new_device_list[i]; | |
1623 | if (!device->new_device) | |
1624 | continue; | |
1625 | if (device->volume_offline) | |
1626 | continue; | |
1627 | list_add_tail(&device->scsi_device_list_entry, | |
1628 | &ctrl_info->scsi_device_list); | |
1629 | list_add_tail(&device->add_list_entry, &add_list); | |
1630 | /* To prevent this device structure from being freed later. */ | |
1631 | device->keep_device = true; | |
1632 | } | |
1633 | ||
6c223761 KB |
1634 | list_for_each_entry(device, &ctrl_info->scsi_device_list, |
1635 | scsi_device_list_entry) | |
1636 | device->offload_enabled = | |
1637 | device->offload_enabled_pending; | |
1638 | ||
1639 | spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags); | |
1640 | ||
1641 | /* Remove all devices that have gone away. */ | |
1642 | list_for_each_entry_safe(device, next, &delete_list, | |
1643 | delete_list_entry) { | |
1644 | if (device->sdev) | |
1645 | pqi_remove_device(ctrl_info, device); | |
1646 | if (device->volume_offline) { | |
1647 | pqi_dev_info(ctrl_info, "offline", device); | |
1648 | pqi_show_volume_status(ctrl_info, device); | |
1649 | } else { | |
1650 | pqi_dev_info(ctrl_info, "removed", device); | |
1651 | } | |
1652 | list_del(&device->delete_list_entry); | |
1653 | pqi_free_device(device); | |
1654 | } | |
1655 | ||
1656 | /* | |
1657 | * Notify the SCSI ML if the queue depth of any existing device has | |
1658 | * changed. | |
1659 | */ | |
1660 | list_for_each_entry(device, &ctrl_info->scsi_device_list, | |
1661 | scsi_device_list_entry) { | |
1662 | if (device->sdev && device->queue_depth != | |
1663 | device->advertised_queue_depth) { | |
1664 | device->advertised_queue_depth = device->queue_depth; | |
1665 | scsi_change_queue_depth(device->sdev, | |
1666 | device->advertised_queue_depth); | |
1667 | } | |
1668 | } | |
1669 | ||
1670 | /* Expose any new devices. */ | |
1671 | list_for_each_entry_safe(device, next, &add_list, add_list_entry) { | |
94086f5b | 1672 | if (!device->sdev) { |
6c223761 KB |
1673 | rc = pqi_add_device(ctrl_info, device); |
1674 | if (rc) { | |
1675 | dev_warn(&ctrl_info->pci_dev->dev, | |
1676 | "scsi %d:%d:%d:%d addition failed, device not added\n", | |
1677 | ctrl_info->scsi_host->host_no, | |
1678 | device->bus, device->target, | |
1679 | device->lun); | |
1680 | pqi_fixup_botched_add(ctrl_info, device); | |
1681 | continue; | |
1682 | } | |
1683 | } | |
1684 | pqi_dev_info(ctrl_info, "added", device); | |
1685 | } | |
1686 | } | |
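/*
 * In brief, the update above runs in three phases:
 * 1. Under the spinlock: mark every known device as gone, then walk the
 *    newly reported list - matches are refreshed in place, the rest are
 *    flagged as new.
 * 2. Still under the spinlock: move vanished devices onto a private
 *    delete list and splice genuinely new devices into the main list.
 * 3. After dropping the lock: do the slow work - tear down removed
 *    devices, adjust queue depths, and expose additions to the SCSI ML.
 */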
1687 | ||
1688 | static bool pqi_is_supported_device(struct pqi_scsi_dev *device) | |
1689 | { | |
1690 | bool is_supported = false; | |
1691 | ||
1692 | switch (device->devtype) { | |
1693 | case TYPE_DISK: | |
1694 | case TYPE_ZBC: | |
1695 | case TYPE_TAPE: | |
1696 | case TYPE_MEDIUM_CHANGER: | |
1697 | case TYPE_ENCLOSURE: | |
1698 | is_supported = true; | |
1699 | break; | |
1700 | case TYPE_RAID: | |
1701 | /* | |
1702 | * Only support the HBA controller itself as a RAID | |
1703 | * controller. If it's a RAID controller other than | |
376fb880 KB |
1704 | * the HBA itself (an external RAID controller, for |
1705 | * example), we don't support it. | |
6c223761 KB |
1706 | */ |
1707 | if (pqi_is_hba_lunid(device->scsi3addr)) | |
1708 | is_supported = true; | |
1709 | break; | |
1710 | } | |
1711 | ||
1712 | return is_supported; | |
1713 | } | |
1714 | ||
94086f5b | 1715 | static inline bool pqi_skip_device(u8 *scsi3addr) |
6c223761 | 1716 | { |
94086f5b KB |
1717 | /* Ignore all masked devices. */ |
1718 | if (MASKED_DEVICE(scsi3addr)) | |
6c223761 | 1719 | return true; |
6c223761 KB |
1720 | |
1721 | return false; | |
1722 | } | |
1723 | ||
6c223761 KB |
1724 | static int pqi_update_scsi_devices(struct pqi_ctrl_info *ctrl_info) |
1725 | { | |
1726 | int i; | |
1727 | int rc; | |
1728 | struct list_head new_device_list_head; | |
1729 | struct report_phys_lun_extended *physdev_list = NULL; | |
1730 | struct report_log_lun_extended *logdev_list = NULL; | |
1731 | struct report_phys_lun_extended_entry *phys_lun_ext_entry; | |
1732 | struct report_log_lun_extended_entry *log_lun_ext_entry; | |
1733 | struct bmic_identify_physical_device *id_phys = NULL; | |
1734 | u32 num_physicals; | |
1735 | u32 num_logicals; | |
1736 | struct pqi_scsi_dev **new_device_list = NULL; | |
1737 | struct pqi_scsi_dev *device; | |
1738 | struct pqi_scsi_dev *next; | |
1739 | unsigned int num_new_devices; | |
1740 | unsigned int num_valid_devices; | |
1741 | bool is_physical_device; | |
1742 | u8 *scsi3addr; | |
1743 | static char *out_of_memory_msg = | |
1744 | "out of memory, device discovery stopped"; | |
1745 | ||
1746 | INIT_LIST_HEAD(&new_device_list_head); | |
1747 | ||
1748 | rc = pqi_get_device_lists(ctrl_info, &physdev_list, &logdev_list); | |
1749 | if (rc) | |
1750 | goto out; | |
1751 | ||
1752 | if (physdev_list) | |
1753 | num_physicals = | |
1754 | get_unaligned_be32(&physdev_list->header.list_length) | |
1755 | / sizeof(physdev_list->lun_entries[0]); | |
1756 | else | |
1757 | num_physicals = 0; | |
1758 | ||
1759 | if (logdev_list) | |
1760 | num_logicals = | |
1761 | get_unaligned_be32(&logdev_list->header.list_length) | |
1762 | / sizeof(logdev_list->lun_entries[0]); | |
1763 | else | |
1764 | num_logicals = 0; | |
1765 | ||
1766 | if (num_physicals) { | |
1767 | /* | |
1768 | * We need this buffer for calls to pqi_get_physical_disk_info() | |
1769 | * below. We allocate it here instead of inside | |
1770 | * pqi_get_physical_disk_info() because it's a fairly large | |
1771 | * buffer. | |
1772 | */ | |
1773 | id_phys = kmalloc(sizeof(*id_phys), GFP_KERNEL); | |
1774 | if (!id_phys) { | |
1775 | dev_warn(&ctrl_info->pci_dev->dev, "%s\n", | |
1776 | out_of_memory_msg); | |
1777 | rc = -ENOMEM; | |
1778 | goto out; | |
1779 | } | |
1780 | } | |
1781 | ||
1782 | num_new_devices = num_physicals + num_logicals; | |
1783 | ||
1784 | new_device_list = kmalloc(sizeof(*new_device_list) * | |
1785 | num_new_devices, GFP_KERNEL); | |
1786 | if (!new_device_list) { | |
1787 | dev_warn(&ctrl_info->pci_dev->dev, "%s\n", out_of_memory_msg); | |
1788 | rc = -ENOMEM; | |
1789 | goto out; | |
1790 | } | |
1791 | ||
1792 | for (i = 0; i < num_new_devices; i++) { | |
1793 | device = kzalloc(sizeof(*device), GFP_KERNEL); | |
1794 | if (!device) { | |
1795 | dev_warn(&ctrl_info->pci_dev->dev, "%s\n", | |
1796 | out_of_memory_msg); | |
1797 | rc = -ENOMEM; | |
1798 | goto out; | |
1799 | } | |
1800 | list_add_tail(&device->new_device_list_entry, | |
1801 | &new_device_list_head); | |
1802 | } | |
1803 | ||
1804 | device = NULL; | |
1805 | num_valid_devices = 0; | |
1806 | ||
1807 | for (i = 0; i < num_new_devices; i++) { | |
1808 | ||
1809 | if (i < num_physicals) { | |
1810 | is_physical_device = true; | |
1811 | phys_lun_ext_entry = &physdev_list->lun_entries[i]; | |
1812 | log_lun_ext_entry = NULL; | |
1813 | scsi3addr = phys_lun_ext_entry->lunid; | |
1814 | } else { | |
1815 | is_physical_device = false; | |
1816 | phys_lun_ext_entry = NULL; | |
1817 | log_lun_ext_entry = | |
1818 | &logdev_list->lun_entries[i - num_physicals]; | |
1819 | scsi3addr = log_lun_ext_entry->lunid; | |
1820 | } | |
1821 | ||
94086f5b | 1822 | if (is_physical_device && pqi_skip_device(scsi3addr)) |
6c223761 KB |
1823 | continue; |
1824 | ||
1825 | if (device) | |
1826 | device = list_next_entry(device, new_device_list_entry); | |
1827 | else | |
1828 | device = list_first_entry(&new_device_list_head, | |
1829 | struct pqi_scsi_dev, new_device_list_entry); | |
1830 | ||
1831 | memcpy(device->scsi3addr, scsi3addr, sizeof(device->scsi3addr)); | |
1832 | device->is_physical_device = is_physical_device; | |
bd10cf0b KB |
1833 | if (!is_physical_device) |
1834 | device->is_external_raid_device = | |
1835 | pqi_is_external_raid_addr(scsi3addr); | |
6c223761 KB |
1836 | |
1837 | /* Gather information about the device. */ | |
1838 | rc = pqi_get_device_info(ctrl_info, device); | |
1839 | if (rc == -ENOMEM) { | |
1840 | dev_warn(&ctrl_info->pci_dev->dev, "%s\n", | |
1841 | out_of_memory_msg); | |
1842 | goto out; | |
1843 | } | |
1844 | if (rc) { | |
1845 | dev_warn(&ctrl_info->pci_dev->dev, | |
1846 | "obtaining device info failed, skipping device %016llx\n", | |
1847 | get_unaligned_be64(device->scsi3addr)); | |
1848 | rc = 0; | |
1849 | continue; | |
1850 | } | |
1851 | ||
1852 | if (!pqi_is_supported_device(device)) | |
1853 | continue; | |
1854 | ||
1855 | pqi_assign_bus_target_lun(device); | |
1856 | ||
6c223761 KB |
1857 | if (device->is_physical_device) { |
1858 | device->wwid = phys_lun_ext_entry->wwid; | |
1859 | if ((phys_lun_ext_entry->device_flags & | |
1860 | REPORT_PHYS_LUN_DEV_FLAG_AIO_ENABLED) && | |
1861 | phys_lun_ext_entry->aio_handle) | |
1862 | device->aio_enabled = true; | |
1863 | } else { | |
1864 | memcpy(device->volume_id, log_lun_ext_entry->volume_id, | |
1865 | sizeof(device->volume_id)); | |
1866 | } | |
1867 | ||
1868 | switch (device->devtype) { | |
1869 | case TYPE_DISK: | |
1870 | case TYPE_ZBC: | |
1871 | case TYPE_ENCLOSURE: | |
1872 | if (device->is_physical_device) { | |
1873 | device->sas_address = | |
1874 | get_unaligned_be64(&device->wwid); | |
1875 | if (device->devtype == TYPE_DISK || | |
1876 | device->devtype == TYPE_ZBC) { | |
1877 | device->aio_handle = | |
1878 | phys_lun_ext_entry->aio_handle; | |
1879 | pqi_get_physical_disk_info(ctrl_info, | |
1880 | device, id_phys); | |
1881 | } | |
1882 | } | |
1883 | break; | |
1884 | } | |
1885 | ||
1886 | new_device_list[num_valid_devices++] = device; | |
1887 | } | |
1888 | ||
1889 | pqi_update_device_list(ctrl_info, new_device_list, num_valid_devices); | |
1890 | ||
1891 | out: | |
1892 | list_for_each_entry_safe(device, next, &new_device_list_head, | |
1893 | new_device_list_entry) { | |
1894 | if (device->keep_device) | |
1895 | continue; | |
1896 | list_del(&device->new_device_list_entry); | |
1897 | pqi_free_device(device); | |
1898 | } | |
1899 | ||
1900 | kfree(new_device_list); | |
1901 | kfree(physdev_list); | |
1902 | kfree(logdev_list); | |
1903 | kfree(id_phys); | |
1904 | ||
1905 | return rc; | |
1906 | } | |
1907 | ||
1908 | static void pqi_remove_all_scsi_devices(struct pqi_ctrl_info *ctrl_info) | |
1909 | { | |
1910 | unsigned long flags; | |
1911 | struct pqi_scsi_dev *device; | |
6c223761 | 1912 | |
a37ef745 KB |
1913 | while (1) { |
1914 | spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags); | |
1915 | ||
1916 | device = list_first_entry_or_null(&ctrl_info->scsi_device_list, | |
1917 | struct pqi_scsi_dev, scsi_device_list_entry); | |
1918 | if (device) | |
1919 | list_del(&device->scsi_device_list_entry); | |
1920 | ||
1921 | spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, | |
1922 | flags); | |
1923 | ||
1924 | if (!device) | |
1925 | break; | |
6c223761 | 1926 | |
6c223761 KB |
1927 | if (device->sdev) |
1928 | pqi_remove_device(ctrl_info, device); | |
6c223761 KB |
1929 | pqi_free_device(device); |
1930 | } | |
6c223761 KB |
1931 | } |
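/*
 * Devices are popped one at a time above so that the list spinlock is
 * never held across pqi_remove_device(), which may sleep.
 */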
1932 | ||
1933 | static int pqi_scan_scsi_devices(struct pqi_ctrl_info *ctrl_info) | |
1934 | { | |
1935 | int rc; | |
1936 | ||
1937 | if (pqi_ctrl_offline(ctrl_info)) | |
1938 | return -ENXIO; | |
1939 | ||
1940 | mutex_lock(&ctrl_info->scan_mutex); | |
1941 | ||
1942 | rc = pqi_update_scsi_devices(ctrl_info); | |
1943 | if (rc) | |
5f310425 | 1944 | pqi_schedule_rescan_worker_delayed(ctrl_info); |
6c223761 KB |
1945 | |
1946 | mutex_unlock(&ctrl_info->scan_mutex); | |
1947 | ||
1948 | return rc; | |
1949 | } | |
1950 | ||
1951 | static void pqi_scan_start(struct Scsi_Host *shost) | |
1952 | { | |
1953 | pqi_scan_scsi_devices(shost_to_hba(shost)); | |
1954 | } | |
1955 | ||
1956 | /* Returns TRUE if scan is finished. */ | |
1957 | ||
1958 | static int pqi_scan_finished(struct Scsi_Host *shost, | |
1959 | unsigned long elapsed_time) | |
1960 | { | |
1961 | struct pqi_ctrl_info *ctrl_info; | |
1962 | ||
1963 | ctrl_info = shost_priv(shost); | |
1964 | ||
1965 | return !mutex_is_locked(&ctrl_info->scan_mutex); | |
1966 | } | |
1967 | ||
061ef06a KB |
1968 | static void pqi_wait_until_scan_finished(struct pqi_ctrl_info *ctrl_info) |
1969 | { | |
1970 | mutex_lock(&ctrl_info->scan_mutex); | |
1971 | mutex_unlock(&ctrl_info->scan_mutex); | |
1972 | } | |
1973 | ||
1974 | static void pqi_wait_until_lun_reset_finished(struct pqi_ctrl_info *ctrl_info) | |
1975 | { | |
1976 | mutex_lock(&ctrl_info->lun_reset_mutex); | |
1977 | mutex_unlock(&ctrl_info->lun_reset_mutex); | |
1978 | } | |
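/*
 * The empty lock/unlock pairs above are a barrier idiom: acquiring the
 * mutex blocks the caller until any in-flight scan or LUN reset (which
 * holds the corresponding mutex for its duration) has drained, and the
 * immediate release avoids stalling the next operation.
 */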
1979 | ||
6c223761 KB |
1980 | static inline void pqi_set_encryption_info( |
1981 | struct pqi_encryption_info *encryption_info, struct raid_map *raid_map, | |
1982 | u64 first_block) | |
1983 | { | |
1984 | u32 volume_blk_size; | |
1985 | ||
1986 | /* | |
1987 | * Set the encryption tweak values based on the logical block address. | |
1988 | * If the block size is 512, the tweak value is equal to the LBA. | |
1989 | * For other block sizes, the tweak value is (LBA * block size) / 512. | |
1990 | */ | |
1991 | volume_blk_size = get_unaligned_le32(&raid_map->volume_blk_size); | |
1992 | if (volume_blk_size != 512) | |
1993 | first_block = (first_block * volume_blk_size) / 512; | |
1994 | ||
1995 | encryption_info->data_encryption_key_index = | |
1996 | get_unaligned_le16(&raid_map->data_encryption_key_index); | |
1997 | encryption_info->encrypt_tweak_lower = lower_32_bits(first_block); | |
1998 | encryption_info->encrypt_tweak_upper = upper_32_bits(first_block); | |
1999 | } | |
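/*
 * Worked example (illustrative values): for a volume formatted with
 * 4096-byte blocks, an I/O starting at LBA 100 gets a tweak of
 * (100 * 4096) / 512 = 800; with 512-byte blocks the tweak is simply
 * the LBA itself.
 */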
2000 | ||
2001 | /* | |
2002 | * Attempt to perform offload RAID mapping for a logical volume I/O. | |
2003 | */ | |
2004 | ||
2005 | #define PQI_RAID_BYPASS_INELIGIBLE 1 | |
2006 | ||
2007 | static int pqi_raid_bypass_submit_scsi_cmd(struct pqi_ctrl_info *ctrl_info, | |
2008 | struct pqi_scsi_dev *device, struct scsi_cmnd *scmd, | |
2009 | struct pqi_queue_group *queue_group) | |
2010 | { | |
2011 | struct raid_map *raid_map; | |
2012 | bool is_write = false; | |
2013 | u32 map_index; | |
2014 | u64 first_block; | |
2015 | u64 last_block; | |
2016 | u32 block_cnt; | |
2017 | u32 blocks_per_row; | |
2018 | u64 first_row; | |
2019 | u64 last_row; | |
2020 | u32 first_row_offset; | |
2021 | u32 last_row_offset; | |
2022 | u32 first_column; | |
2023 | u32 last_column; | |
2024 | u64 r0_first_row; | |
2025 | u64 r0_last_row; | |
2026 | u32 r5or6_blocks_per_row; | |
2027 | u64 r5or6_first_row; | |
2028 | u64 r5or6_last_row; | |
2029 | u32 r5or6_first_row_offset; | |
2030 | u32 r5or6_last_row_offset; | |
2031 | u32 r5or6_first_column; | |
2032 | u32 r5or6_last_column; | |
2033 | u16 data_disks_per_row; | |
2034 | u32 total_disks_per_row; | |
2035 | u16 layout_map_count; | |
2036 | u32 stripesize; | |
2037 | u16 strip_size; | |
2038 | u32 first_group; | |
2039 | u32 last_group; | |
2040 | u32 current_group; | |
2041 | u32 map_row; | |
2042 | u32 aio_handle; | |
2043 | u64 disk_block; | |
2044 | u32 disk_block_cnt; | |
2045 | u8 cdb[16]; | |
2046 | u8 cdb_length; | |
2047 | int offload_to_mirror; | |
2048 | struct pqi_encryption_info *encryption_info_ptr; | |
2049 | struct pqi_encryption_info encryption_info; | |
2050 | #if BITS_PER_LONG == 32 | |
2051 | u64 tmpdiv; | |
2052 | #endif | |
2053 | ||
2054 | /* Check for valid opcode, get LBA and block count. */ | |
2055 | switch (scmd->cmnd[0]) { | |
2056 | case WRITE_6: | |
2057 | is_write = true; | |
2058 | /* fall through */ | |
2059 | case READ_6: | |
e018ef57 B |
2060 | first_block = (u64)(((scmd->cmnd[1] & 0x1f) << 16) | |
2061 | (scmd->cmnd[2] << 8) | scmd->cmnd[3]); | |
6c223761 KB |
2062 | block_cnt = (u32)scmd->cmnd[4]; |
2063 | if (block_cnt == 0) | |
2064 | block_cnt = 256; | |
2065 | break; | |
2066 | case WRITE_10: | |
2067 | is_write = true; | |
2068 | /* fall through */ | |
2069 | case READ_10: | |
2070 | first_block = (u64)get_unaligned_be32(&scmd->cmnd[2]); | |
2071 | block_cnt = (u32)get_unaligned_be16(&scmd->cmnd[7]); | |
2072 | break; | |
2073 | case WRITE_12: | |
2074 | is_write = true; | |
2075 | /* fall through */ | |
2076 | case READ_12: | |
2077 | first_block = (u64)get_unaligned_be32(&scmd->cmnd[2]); | |
2078 | block_cnt = get_unaligned_be32(&scmd->cmnd[6]); | |
2079 | break; | |
2080 | case WRITE_16: | |
2081 | is_write = true; | |
2082 | /* fall through */ | |
2083 | case READ_16: | |
2084 | first_block = get_unaligned_be64(&scmd->cmnd[2]); | |
2085 | block_cnt = get_unaligned_be32(&scmd->cmnd[10]); | |
2086 | break; | |
2087 | default: | |
2088 | /* Process via normal I/O path. */ | |
2089 | return PQI_RAID_BYPASS_INELIGIBLE; | |
2090 | } | |
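/*
 * Decode example (illustrative bytes): a READ_6 CDB of
 * 08 01 02 03 10 00 yields LBA 0x010203 and 16 blocks. Per SBC, a
 * READ_6/WRITE_6 transfer length of 0 means 256 blocks, hence the
 * block_cnt promotion above.
 */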
2091 | ||
2092 | /* Check for write to non-RAID-0. */ | |
2093 | if (is_write && device->raid_level != SA_RAID_0) | |
2094 | return PQI_RAID_BYPASS_INELIGIBLE; | |
2095 | ||
2096 | if (unlikely(block_cnt == 0)) | |
2097 | return PQI_RAID_BYPASS_INELIGIBLE; | |
2098 | ||
2099 | last_block = first_block + block_cnt - 1; | |
2100 | raid_map = device->raid_map; | |
2101 | ||
2102 | /* Check for invalid block or wraparound. */ | |
2103 | if (last_block >= get_unaligned_le64(&raid_map->volume_blk_cnt) || | |
2104 | last_block < first_block) | |
2105 | return PQI_RAID_BYPASS_INELIGIBLE; | |
2106 | ||
2107 | data_disks_per_row = get_unaligned_le16(&raid_map->data_disks_per_row); | |
2108 | strip_size = get_unaligned_le16(&raid_map->strip_size); | |
2109 | layout_map_count = get_unaligned_le16(&raid_map->layout_map_count); | |
2110 | ||
2111 | /* Calculate stripe information for the request. */ | |
2112 | blocks_per_row = data_disks_per_row * strip_size; | |
2113 | #if BITS_PER_LONG == 32 | |
2114 | tmpdiv = first_block; | |
2115 | do_div(tmpdiv, blocks_per_row); | |
2116 | first_row = tmpdiv; | |
2117 | tmpdiv = last_block; | |
2118 | do_div(tmpdiv, blocks_per_row); | |
2119 | last_row = tmpdiv; | |
2120 | first_row_offset = (u32)(first_block - (first_row * blocks_per_row)); | |
2121 | last_row_offset = (u32)(last_block - (last_row * blocks_per_row)); | |
2122 | tmpdiv = first_row_offset; | |
2123 | do_div(tmpdiv, strip_size); | |
2124 | first_column = tmpdiv; | |
2125 | tmpdiv = last_row_offset; | |
2126 | do_div(tmpdiv, strip_size); | |
2127 | last_column = tmpdiv; | |
2128 | #else | |
2129 | first_row = first_block / blocks_per_row; | |
2130 | last_row = last_block / blocks_per_row; | |
2131 | first_row_offset = (u32)(first_block - (first_row * blocks_per_row)); | |
2132 | last_row_offset = (u32)(last_block - (last_row * blocks_per_row)); | |
2133 | first_column = first_row_offset / strip_size; | |
2134 | last_column = last_row_offset / strip_size; | |
2135 | #endif | |
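/*
 * Worked example (illustrative values): with strip_size = 128 and
 * data_disks_per_row = 4, blocks_per_row = 512. An 8-block read at
 * LBA 1000 lands in row 1 (1000 / 512), row offset 488, column 3
 * (488 / 128); it ends at LBA 1007 in the same row and column, so it
 * remains bypass-eligible.
 */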
2136 | ||
2137 | /* If this isn't a single row/column then give it to the controller. */ | |
2138 | if (first_row != last_row || first_column != last_column) | |
2139 | return PQI_RAID_BYPASS_INELIGIBLE; | |
2140 | ||
2141 | /* Proceeding with driver mapping. */ | |
2142 | total_disks_per_row = data_disks_per_row + | |
2143 | get_unaligned_le16(&raid_map->metadata_disks_per_row); | |
2144 | map_row = ((u32)(first_row >> raid_map->parity_rotation_shift)) % | |
2145 | get_unaligned_le16(&raid_map->row_cnt); | |
2146 | map_index = (map_row * total_disks_per_row) + first_column; | |
2147 | ||
2148 | /* RAID 1 */ | |
2149 | if (device->raid_level == SA_RAID_1) { | |
2150 | if (device->offload_to_mirror) | |
2151 | map_index += data_disks_per_row; | |
2152 | device->offload_to_mirror = !device->offload_to_mirror; | |
2153 | } else if (device->raid_level == SA_RAID_ADM) { | |
2154 | /* RAID ADM */ | |
2155 | /* | |
2156 | * Handles N-way mirrors (R1-ADM) and R10 with # of drives | |
2157 | * divisible by 3. | |
2158 | */ | |
2159 | offload_to_mirror = device->offload_to_mirror; | |
2160 | if (offload_to_mirror == 0) { | |
2161 | /* Use the physical disk in the first mirrored group. */ | |
2162 | map_index %= data_disks_per_row; | |
2163 | } else { | |
2164 | do { | |
2165 | /* | |
2166 | * Determine mirror group that map_index | |
2167 | * indicates. | |
2168 | */ | |
2169 | current_group = map_index / data_disks_per_row; | |
2170 | ||
2171 | if (offload_to_mirror != current_group) { | |
2172 | if (current_group < | |
2173 | layout_map_count - 1) { | |
2174 | /* | |
2175 | * Select raid index from | |
2176 | * next group. | |
2177 | */ | |
2178 | map_index += data_disks_per_row; | |
2179 | current_group++; | |
2180 | } else { | |
2181 | /* | |
2182 | * Select raid index from first | |
2183 | * group. | |
2184 | */ | |
2185 | map_index %= data_disks_per_row; | |
2186 | current_group = 0; | |
2187 | } | |
2188 | } | |
2189 | } while (offload_to_mirror != current_group); | |
2190 | } | |
2191 | ||
2192 | /* Set mirror group to use next time. */ | |
2193 | offload_to_mirror = | |
2194 | (offload_to_mirror >= layout_map_count - 1) ? | |
2195 | 0 : offload_to_mirror + 1; | |
2196 | WARN_ON(offload_to_mirror >= layout_map_count); | |
2197 | device->offload_to_mirror = offload_to_mirror; | |
2198 | /* | |
2199 | * Avoid direct use of device->offload_to_mirror within this | |
2200 | * function since multiple threads might simultaneously | |
2201 | * increment it beyond the range of device->layout_map_count - 1. | |
2202 | */ | |
2203 | } else if ((device->raid_level == SA_RAID_5 || | |
2204 | device->raid_level == SA_RAID_6) && layout_map_count > 1) { | |
2205 | /* RAID 50/60 */ | |
2206 | /* Verify first and last block are in the same RAID group */ | |
2207 | r5or6_blocks_per_row = strip_size * data_disks_per_row; | |
2208 | stripesize = r5or6_blocks_per_row * layout_map_count; | |
2209 | #if BITS_PER_LONG == 32 | |
2210 | tmpdiv = first_block; | |
2211 | first_group = do_div(tmpdiv, stripesize); | |
2212 | tmpdiv = first_group; | |
2213 | do_div(tmpdiv, r5or6_blocks_per_row); | |
2214 | first_group = tmpdiv; | |
2215 | tmpdiv = last_block; | |
2216 | last_group = do_div(tmpdiv, stripesize); | |
2217 | tmpdiv = last_group; | |
2218 | do_div(tmpdiv, r5or6_blocks_per_row); | |
2219 | last_group = tmpdiv; | |
2220 | #else | |
2221 | first_group = (first_block % stripesize) / r5or6_blocks_per_row; | |
2222 | last_group = (last_block % stripesize) / r5or6_blocks_per_row; | |
2223 | #endif | |
2224 | if (first_group != last_group) | |
2225 | return PQI_RAID_BYPASS_INELIGIBLE; | |
2226 | ||
2227 | /* Verify request is in a single row of RAID 5/6 */ | |
2228 | #if BITS_PER_LONG == 32 | |
2229 | tmpdiv = first_block; | |
2230 | do_div(tmpdiv, stripesize); | |
2231 | first_row = r5or6_first_row = r0_first_row = tmpdiv; | |
2232 | tmpdiv = last_block; | |
2233 | do_div(tmpdiv, stripesize); | |
2234 | r5or6_last_row = r0_last_row = tmpdiv; | |
2235 | #else | |
2236 | first_row = r5or6_first_row = r0_first_row = | |
2237 | first_block / stripesize; | |
2238 | r5or6_last_row = r0_last_row = last_block / stripesize; | |
2239 | #endif | |
2240 | if (r5or6_first_row != r5or6_last_row) | |
2241 | return PQI_RAID_BYPASS_INELIGIBLE; | |
2242 | ||
2243 | /* Verify request is in a single column */ | |
2244 | #if BITS_PER_LONG == 32 | |
2245 | tmpdiv = first_block; | |
2246 | first_row_offset = do_div(tmpdiv, stripesize); | |
2247 | tmpdiv = first_row_offset; | |
2248 | first_row_offset = (u32)do_div(tmpdiv, r5or6_blocks_per_row); | |
2249 | r5or6_first_row_offset = first_row_offset; | |
2250 | tmpdiv = last_block; | |
2251 | r5or6_last_row_offset = do_div(tmpdiv, stripesize); | |
2252 | tmpdiv = r5or6_last_row_offset; | |
2253 | r5or6_last_row_offset = do_div(tmpdiv, r5or6_blocks_per_row); | |
2254 | tmpdiv = r5or6_first_row_offset; | |
2255 | do_div(tmpdiv, strip_size); | |
2256 | first_column = r5or6_first_column = tmpdiv; | |
2257 | tmpdiv = r5or6_last_row_offset; | |
2258 | do_div(tmpdiv, strip_size); | |
2259 | r5or6_last_column = tmpdiv; | |
2260 | #else | |
2261 | first_row_offset = r5or6_first_row_offset = | |
2262 | (u32)((first_block % stripesize) % | |
2263 | r5or6_blocks_per_row); | |
2264 | ||
2265 | r5or6_last_row_offset = | |
2266 | (u32)((last_block % stripesize) % | |
2267 | r5or6_blocks_per_row); | |
2268 | ||
2269 | first_column = r5or6_first_row_offset / strip_size; | |
2270 | r5or6_first_column = first_column; | |
2271 | r5or6_last_column = r5or6_last_row_offset / strip_size; | |
2272 | #endif | |
2273 | if (r5or6_first_column != r5or6_last_column) | |
2274 | return PQI_RAID_BYPASS_INELIGIBLE; | |
2275 | ||
2276 | /* Request is eligible */ | |
2277 | map_row = | |
2278 | ((u32)(first_row >> raid_map->parity_rotation_shift)) % | |
2279 | get_unaligned_le16(&raid_map->row_cnt); | |
2280 | ||
2281 | map_index = (first_group * | |
2282 | (get_unaligned_le16(&raid_map->row_cnt) * | |
2283 | total_disks_per_row)) + | |
2284 | (map_row * total_disks_per_row) + first_column; | |
2285 | } | |
2286 | ||
2287 | if (unlikely(map_index >= RAID_MAP_MAX_ENTRIES)) | |
2288 | return PQI_RAID_BYPASS_INELIGIBLE; | |
2289 | ||
2290 | aio_handle = raid_map->disk_data[map_index].aio_handle; | |
2291 | disk_block = get_unaligned_le64(&raid_map->disk_starting_blk) + | |
2292 | first_row * strip_size + | |
2293 | (first_row_offset - first_column * strip_size); | |
2294 | disk_block_cnt = block_cnt; | |
2295 | ||
2296 | /* Handle differing logical/physical block sizes. */ | |
2297 | if (raid_map->phys_blk_shift) { | |
2298 | disk_block <<= raid_map->phys_blk_shift; | |
2299 | disk_block_cnt <<= raid_map->phys_blk_shift; | |
2300 | } | |
2301 | ||
2302 | if (unlikely(disk_block_cnt > 0xffff)) | |
2303 | return PQI_RAID_BYPASS_INELIGIBLE; | |
2304 | ||
2305 | /* Build the new CDB for the physical disk I/O. */ | |
2306 | if (disk_block > 0xffffffff) { | |
2307 | cdb[0] = is_write ? WRITE_16 : READ_16; | |
2308 | cdb[1] = 0; | |
2309 | put_unaligned_be64(disk_block, &cdb[2]); | |
2310 | put_unaligned_be32(disk_block_cnt, &cdb[10]); | |
2311 | cdb[14] = 0; | |
2312 | cdb[15] = 0; | |
2313 | cdb_length = 16; | |
2314 | } else { | |
2315 | cdb[0] = is_write ? WRITE_10 : READ_10; | |
2316 | cdb[1] = 0; | |
2317 | put_unaligned_be32((u32)disk_block, &cdb[2]); | |
2318 | cdb[6] = 0; | |
2319 | put_unaligned_be16((u16)disk_block_cnt, &cdb[7]); | |
2320 | cdb[9] = 0; | |
2321 | cdb_length = 10; | |
2322 | } | |
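/*
 * Example of the rebuilt CDB (illustrative values): disk_block = 0x12345
 * with 16 blocks fits the 32-bit case and becomes a READ_10 of
 * 28 00 00 01 23 45 00 00 10 00; only a disk_block above 0xffffffff
 * forces the 16-byte form.
 */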
2323 | ||
2324 | if (get_unaligned_le16(&raid_map->flags) & | |
2325 | RAID_MAP_ENCRYPTION_ENABLED) { | |
2326 | pqi_set_encryption_info(&encryption_info, raid_map, | |
2327 | first_block); | |
2328 | encryption_info_ptr = &encryption_info; | |
2329 | } else { | |
2330 | encryption_info_ptr = NULL; | |
2331 | } | |
2332 | ||
2333 | return pqi_aio_submit_io(ctrl_info, scmd, aio_handle, | |
376fb880 | 2334 | cdb, cdb_length, queue_group, encryption_info_ptr, true); |
6c223761 KB |
2335 | } |
2336 | ||
2337 | #define PQI_STATUS_IDLE 0x0 | |
2338 | ||
2339 | #define PQI_CREATE_ADMIN_QUEUE_PAIR 1 | |
2340 | #define PQI_DELETE_ADMIN_QUEUE_PAIR 2 | |
2341 | ||
2342 | #define PQI_DEVICE_STATE_POWER_ON_AND_RESET 0x0 | |
2343 | #define PQI_DEVICE_STATE_STATUS_AVAILABLE 0x1 | |
2344 | #define PQI_DEVICE_STATE_ALL_REGISTERS_READY 0x2 | |
2345 | #define PQI_DEVICE_STATE_ADMIN_QUEUE_PAIR_READY 0x3 | |
2346 | #define PQI_DEVICE_STATE_ERROR 0x4 | |
2347 | ||
2348 | #define PQI_MODE_READY_TIMEOUT_SECS 30 | |
2349 | #define PQI_MODE_READY_POLL_INTERVAL_MSECS 1 | |
2350 | ||
2351 | static int pqi_wait_for_pqi_mode_ready(struct pqi_ctrl_info *ctrl_info) | |
2352 | { | |
2353 | struct pqi_device_registers __iomem *pqi_registers; | |
2354 | unsigned long timeout; | |
2355 | u64 signature; | |
2356 | u8 status; | |
2357 | ||
2358 | pqi_registers = ctrl_info->pqi_registers; | |
2359 | timeout = (PQI_MODE_READY_TIMEOUT_SECS * HZ) + jiffies; | |
2360 | ||
2361 | while (1) { | |
2362 | signature = readq(&pqi_registers->signature); | |
2363 | if (memcmp(&signature, PQI_DEVICE_SIGNATURE, | |
2364 | sizeof(signature)) == 0) | |
2365 | break; | |
2366 | if (time_after(jiffies, timeout)) { | |
2367 | dev_err(&ctrl_info->pci_dev->dev, | |
2368 | "timed out waiting for PQI signature\n"); | |
2369 | return -ETIMEDOUT; | |
2370 | } | |
2371 | msleep(PQI_MODE_READY_POLL_INTERVAL_MSECS); | |
2372 | } | |
2373 | ||
2374 | while (1) { | |
2375 | status = readb(&pqi_registers->function_and_status_code); | |
2376 | if (status == PQI_STATUS_IDLE) | |
2377 | break; | |
2378 | if (time_after(jiffies, timeout)) { | |
2379 | dev_err(&ctrl_info->pci_dev->dev, | |
2380 | "timed out waiting for PQI IDLE\n"); | |
2381 | return -ETIMEDOUT; | |
2382 | } | |
2383 | msleep(PQI_MODE_READY_POLL_INTERVAL_MSECS); | |
2384 | } | |
2385 | ||
2386 | while (1) { | |
2387 | if (readl(&pqi_registers->device_status) == | |
2388 | PQI_DEVICE_STATE_ALL_REGISTERS_READY) | |
2389 | break; | |
2390 | if (time_after(jiffies, timeout)) { | |
2391 | dev_err(&ctrl_info->pci_dev->dev, | |
2392 | "timed out waiting for PQI all registers ready\n"); | |
2393 | return -ETIMEDOUT; | |
2394 | } | |
2395 | msleep(PQI_MODE_READY_POLL_INTERVAL_MSECS); | |
2396 | } | |
2397 | ||
2398 | return 0; | |
2399 | } | |
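/*
 * Note: the three polling loops above (signature, IDLE status,
 * all-registers-ready) share one deadline computed at entry, so the
 * 30-second budget covers the whole bring-up sequence, not 30 seconds
 * per stage.
 */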
2400 | ||
2401 | static inline void pqi_aio_path_disabled(struct pqi_io_request *io_request) | |
2402 | { | |
2403 | struct pqi_scsi_dev *device; | |
2404 | ||
2405 | device = io_request->scmd->device->hostdata; | |
2406 | device->offload_enabled = false; | |
376fb880 | 2407 | device->aio_enabled = false; |
6c223761 KB |
2408 | } |
2409 | ||
d87d5474 | 2410 | static inline void pqi_take_device_offline(struct scsi_device *sdev, char *path) |
6c223761 KB |
2411 | { |
2412 | struct pqi_ctrl_info *ctrl_info; | |
e58081a7 | 2413 | struct pqi_scsi_dev *device; |
6c223761 | 2414 | |
03b288cf KB |
2415 | device = sdev->hostdata; |
2416 | if (device->device_offline) | |
2417 | return; | |
2418 | ||
2419 | device->device_offline = true; | |
2420 | scsi_device_set_state(sdev, SDEV_OFFLINE); | |
2421 | ctrl_info = shost_to_hba(sdev->host); | |
2422 | pqi_schedule_rescan_worker(ctrl_info); | |
2423 | dev_err(&ctrl_info->pci_dev->dev, "offlined %s scsi %d:%d:%d:%d\n", | |
2424 | path, ctrl_info->scsi_host->host_no, device->bus, | |
2425 | device->target, device->lun); | |
6c223761 KB |
2426 | } |
2427 | ||
2428 | static void pqi_process_raid_io_error(struct pqi_io_request *io_request) | |
2429 | { | |
2430 | u8 scsi_status; | |
2431 | u8 host_byte; | |
2432 | struct scsi_cmnd *scmd; | |
2433 | struct pqi_raid_error_info *error_info; | |
2434 | size_t sense_data_length; | |
2435 | int residual_count; | |
2436 | int xfer_count; | |
2437 | struct scsi_sense_hdr sshdr; | |
2438 | ||
2439 | scmd = io_request->scmd; | |
2440 | if (!scmd) | |
2441 | return; | |
2442 | ||
2443 | error_info = io_request->error_info; | |
2444 | scsi_status = error_info->status; | |
2445 | host_byte = DID_OK; | |
2446 | ||
2447 | if (error_info->data_out_result == PQI_DATA_IN_OUT_UNDERFLOW) { | |
2448 | xfer_count = | |
2449 | get_unaligned_le32(&error_info->data_out_transferred); | |
2450 | residual_count = scsi_bufflen(scmd) - xfer_count; | |
2451 | scsi_set_resid(scmd, residual_count); | |
2452 | if (xfer_count < scmd->underflow) | |
2453 | host_byte = DID_SOFT_ERROR; | |
2454 | } | |
2455 | ||
2456 | sense_data_length = get_unaligned_le16(&error_info->sense_data_length); | |
2457 | if (sense_data_length == 0) | |
2458 | sense_data_length = | |
2459 | get_unaligned_le16(&error_info->response_data_length); | |
2460 | if (sense_data_length) { | |
2461 | if (sense_data_length > sizeof(error_info->data)) | |
2462 | sense_data_length = sizeof(error_info->data); | |
2463 | ||
2464 | if (scsi_status == SAM_STAT_CHECK_CONDITION && | |
2465 | scsi_normalize_sense(error_info->data, | |
2466 | sense_data_length, &sshdr) && | |
2467 | sshdr.sense_key == HARDWARE_ERROR && | |
2468 | sshdr.asc == 0x3e && | |
2469 | sshdr.ascq == 0x1) { | |
d87d5474 | 2470 | pqi_take_device_offline(scmd->device, "RAID"); |
6c223761 KB |
2471 | host_byte = DID_NO_CONNECT; |
2472 | } | |
2473 | ||
2474 | if (sense_data_length > SCSI_SENSE_BUFFERSIZE) | |
2475 | sense_data_length = SCSI_SENSE_BUFFERSIZE; | |
2476 | memcpy(scmd->sense_buffer, error_info->data, | |
2477 | sense_data_length); | |
2478 | } | |
2479 | ||
2480 | scmd->result = scsi_status; | |
2481 | set_host_byte(scmd, host_byte); | |
2482 | } | |
2483 | ||
2484 | static void pqi_process_aio_io_error(struct pqi_io_request *io_request) | |
2485 | { | |
2486 | u8 scsi_status; | |
2487 | u8 host_byte; | |
2488 | struct scsi_cmnd *scmd; | |
2489 | struct pqi_aio_error_info *error_info; | |
2490 | size_t sense_data_length; | |
2491 | int residual_count; | |
2492 | int xfer_count; | |
2493 | bool device_offline; | |
2494 | ||
2495 | scmd = io_request->scmd; | |
2496 | error_info = io_request->error_info; | |
2497 | host_byte = DID_OK; | |
2498 | sense_data_length = 0; | |
2499 | device_offline = false; | |
2500 | ||
2501 | switch (error_info->service_response) { | |
2502 | case PQI_AIO_SERV_RESPONSE_COMPLETE: | |
2503 | scsi_status = error_info->status; | |
2504 | break; | |
2505 | case PQI_AIO_SERV_RESPONSE_FAILURE: | |
2506 | switch (error_info->status) { | |
2507 | case PQI_AIO_STATUS_IO_ABORTED: | |
2508 | scsi_status = SAM_STAT_TASK_ABORTED; | |
2509 | break; | |
2510 | case PQI_AIO_STATUS_UNDERRUN: | |
2511 | scsi_status = SAM_STAT_GOOD; | |
2512 | residual_count = get_unaligned_le32( | |
2513 | &error_info->residual_count); | |
2514 | scsi_set_resid(scmd, residual_count); | |
2515 | xfer_count = scsi_bufflen(scmd) - residual_count; | |
2516 | if (xfer_count < scmd->underflow) | |
2517 | host_byte = DID_SOFT_ERROR; | |
2518 | break; | |
2519 | case PQI_AIO_STATUS_OVERRUN: | |
2520 | scsi_status = SAM_STAT_GOOD; | |
2521 | break; | |
2522 | case PQI_AIO_STATUS_AIO_PATH_DISABLED: | |
2523 | pqi_aio_path_disabled(io_request); | |
2524 | scsi_status = SAM_STAT_GOOD; | |
2525 | io_request->status = -EAGAIN; | |
2526 | break; | |
2527 | case PQI_AIO_STATUS_NO_PATH_TO_DEVICE: | |
2528 | case PQI_AIO_STATUS_INVALID_DEVICE: | |
376fb880 KB |
2529 | if (!io_request->raid_bypass) { |
2530 | device_offline = true; | |
2531 | pqi_take_device_offline(scmd->device, "AIO"); | |
2532 | host_byte = DID_NO_CONNECT; | |
2533 | } | |
6c223761 KB |
2534 | scsi_status = SAM_STAT_CHECK_CONDITION; |
2535 | break; | |
2536 | case PQI_AIO_STATUS_IO_ERROR: | |
2537 | default: | |
2538 | scsi_status = SAM_STAT_CHECK_CONDITION; | |
2539 | break; | |
2540 | } | |
2541 | break; | |
2542 | case PQI_AIO_SERV_RESPONSE_TMF_COMPLETE: | |
2543 | case PQI_AIO_SERV_RESPONSE_TMF_SUCCEEDED: | |
2544 | scsi_status = SAM_STAT_GOOD; | |
2545 | break; | |
2546 | case PQI_AIO_SERV_RESPONSE_TMF_REJECTED: | |
2547 | case PQI_AIO_SERV_RESPONSE_TMF_INCORRECT_LUN: | |
2548 | default: | |
2549 | scsi_status = SAM_STAT_CHECK_CONDITION; | |
2550 | break; | |
2551 | } | |
2552 | ||
2553 | if (error_info->data_present) { | |
2554 | sense_data_length = | |
2555 | get_unaligned_le16(&error_info->data_length); | |
2556 | if (sense_data_length) { | |
2557 | if (sense_data_length > sizeof(error_info->data)) | |
2558 | sense_data_length = sizeof(error_info->data); | |
2559 | if (sense_data_length > SCSI_SENSE_BUFFERSIZE) | |
2560 | sense_data_length = SCSI_SENSE_BUFFERSIZE; | |
2561 | memcpy(scmd->sense_buffer, error_info->data, | |
2562 | sense_data_length); | |
2563 | } | |
2564 | } | |
2565 | ||
2566 | if (device_offline && sense_data_length == 0) | |
2567 | scsi_build_sense_buffer(0, scmd->sense_buffer, HARDWARE_ERROR, | |
2568 | 0x3e, 0x1); | |
2569 | ||
2570 | scmd->result = scsi_status; | |
2571 | set_host_byte(scmd, host_byte); | |
2572 | } | |
2573 | ||
2574 | static void pqi_process_io_error(unsigned int iu_type, | |
2575 | struct pqi_io_request *io_request) | |
2576 | { | |
2577 | switch (iu_type) { | |
2578 | case PQI_RESPONSE_IU_RAID_PATH_IO_ERROR: | |
2579 | pqi_process_raid_io_error(io_request); | |
2580 | break; | |
2581 | case PQI_RESPONSE_IU_AIO_PATH_IO_ERROR: | |
2582 | pqi_process_aio_io_error(io_request); | |
2583 | break; | |
2584 | } | |
2585 | } | |
2586 | ||
2587 | static int pqi_interpret_task_management_response( | |
2588 | struct pqi_task_management_response *response) | |
2589 | { | |
2590 | int rc; | |
2591 | ||
2592 | switch (response->response_code) { | |
b17f0486 KB |
2593 | case SOP_TMF_COMPLETE: |
2594 | case SOP_TMF_FUNCTION_SUCCEEDED: | |
6c223761 KB |
2595 | rc = 0; |
2596 | break; | |
2597 | default: | |
2598 | rc = -EIO; | |
2599 | break; | |
2600 | } | |
2601 | ||
2602 | return rc; | |
2603 | } | |
2604 | ||
2605 | static unsigned int pqi_process_io_intr(struct pqi_ctrl_info *ctrl_info, | |
2606 | struct pqi_queue_group *queue_group) | |
2607 | { | |
2608 | unsigned int num_responses; | |
2609 | pqi_index_t oq_pi; | |
2610 | pqi_index_t oq_ci; | |
2611 | struct pqi_io_request *io_request; | |
2612 | struct pqi_io_response *response; | |
2613 | u16 request_id; | |
2614 | ||
2615 | num_responses = 0; | |
2616 | oq_ci = queue_group->oq_ci_copy; | |
2617 | ||
2618 | while (1) { | |
2619 | oq_pi = *queue_group->oq_pi; | |
2620 | if (oq_pi == oq_ci) | |
2621 | break; | |
2622 | ||
2623 | num_responses++; | |
2624 | response = queue_group->oq_element_array + | |
2625 | (oq_ci * PQI_OPERATIONAL_OQ_ELEMENT_LENGTH); | |
2626 | ||
2627 | request_id = get_unaligned_le16(&response->request_id); | |
2628 | WARN_ON(request_id >= ctrl_info->max_io_slots); | |
2629 | ||
2630 | io_request = &ctrl_info->io_request_pool[request_id]; | |
2631 | WARN_ON(atomic_read(&io_request->refcount) == 0); | |
2632 | ||
2633 | switch (response->header.iu_type) { | |
2634 | case PQI_RESPONSE_IU_RAID_PATH_IO_SUCCESS: | |
2635 | case PQI_RESPONSE_IU_AIO_PATH_IO_SUCCESS: | |
2636 | case PQI_RESPONSE_IU_GENERAL_MANAGEMENT: | |
2637 | break; | |
2638 | case PQI_RESPONSE_IU_TASK_MANAGEMENT: | |
2639 | io_request->status = | |
2640 | pqi_interpret_task_management_response( | |
2641 | (void *)response); | |
2642 | break; | |
2643 | case PQI_RESPONSE_IU_AIO_PATH_DISABLED: | |
2644 | pqi_aio_path_disabled(io_request); | |
2645 | io_request->status = -EAGAIN; | |
2646 | break; | |
2647 | case PQI_RESPONSE_IU_RAID_PATH_IO_ERROR: | |
2648 | case PQI_RESPONSE_IU_AIO_PATH_IO_ERROR: | |
2649 | io_request->error_info = ctrl_info->error_buffer + | |
2650 | (get_unaligned_le16(&response->error_index) * | |
2651 | PQI_ERROR_BUFFER_ELEMENT_LENGTH); | |
2652 | pqi_process_io_error(response->header.iu_type, | |
2653 | io_request); | |
2654 | break; | |
2655 | default: | |
2656 | dev_err(&ctrl_info->pci_dev->dev, | |
2657 | "unexpected IU type: 0x%x\n", | |
2658 | response->header.iu_type); | |
6c223761 KB |
2659 | break; |
2660 | } | |
2661 | ||
2662 | io_request->io_complete_callback(io_request, | |
2663 | io_request->context); | |
2664 | ||
2665 | /* | |
2666 | * Note that the I/O request structure CANNOT BE TOUCHED after | |
2667 | * returning from the I/O completion callback! | |
2668 | */ | |
2669 | ||
2670 | oq_ci = (oq_ci + 1) % ctrl_info->num_elements_per_oq; | |
2671 | } | |
2672 | ||
2673 | if (num_responses) { | |
2674 | queue_group->oq_ci_copy = oq_ci; | |
2675 | writel(oq_ci, queue_group->oq_ci); | |
2676 | } | |
2677 | ||
2678 | return num_responses; | |
2679 | } | |
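/*
 * Note: the hardware consumer index (oq_ci) is written back once per
 * batch rather than once per response, trading a little queue-full
 * latency for far fewer MMIO writes on the completion path.
 */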
2680 | ||
2681 | static inline unsigned int pqi_num_elements_free(unsigned int pi, | |
df7a1fcf | 2682 | unsigned int ci, unsigned int elements_in_queue) |
6c223761 KB |
2683 | { |
2684 | unsigned int num_elements_used; | |
2685 | ||
2686 | if (pi >= ci) | |
2687 | num_elements_used = pi - ci; | |
2688 | else | |
2689 | num_elements_used = elements_in_queue - ci + pi; | |
2690 | ||
2691 | return elements_in_queue - num_elements_used - 1; | |
2692 | } | |
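/*
 * Ring arithmetic example (illustrative values): pi = 5, ci = 2 in an
 * 8-element queue means 3 elements in use and 8 - 3 - 1 = 4 free. One
 * slot is always sacrificed so a full queue (pi one behind ci) can be
 * told apart from an empty one (pi == ci).
 */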
2693 | ||
98f87667 | 2694 | static void pqi_send_event_ack(struct pqi_ctrl_info *ctrl_info, |
6c223761 KB |
2695 | struct pqi_event_acknowledge_request *iu, size_t iu_length) |
2696 | { | |
2697 | pqi_index_t iq_pi; | |
2698 | pqi_index_t iq_ci; | |
2699 | unsigned long flags; | |
2700 | void *next_element; | |
6c223761 KB |
2701 | struct pqi_queue_group *queue_group; |
2702 | ||
2703 | queue_group = &ctrl_info->queue_groups[PQI_DEFAULT_QUEUE_GROUP]; | |
2704 | put_unaligned_le16(queue_group->oq_id, &iu->header.response_queue_id); | |
2705 | ||
6c223761 KB |
2706 | while (1) { |
2707 | spin_lock_irqsave(&queue_group->submit_lock[RAID_PATH], flags); | |
2708 | ||
2709 | iq_pi = queue_group->iq_pi_copy[RAID_PATH]; | |
2710 | iq_ci = *queue_group->iq_ci[RAID_PATH]; | |
2711 | ||
2712 | if (pqi_num_elements_free(iq_pi, iq_ci, | |
2713 | ctrl_info->num_elements_per_iq)) | |
2714 | break; | |
2715 | ||
2716 | spin_unlock_irqrestore( | |
2717 | &queue_group->submit_lock[RAID_PATH], flags); | |
2718 | ||
98f87667 | 2719 | if (pqi_ctrl_offline(ctrl_info)) |
6c223761 | 2720 | return; |
6c223761 KB |
2721 | } |
2722 | ||
2723 | next_element = queue_group->iq_element_array[RAID_PATH] + | |
2724 | (iq_pi * PQI_OPERATIONAL_IQ_ELEMENT_LENGTH); | |
2725 | ||
2726 | memcpy(next_element, iu, iu_length); | |
2727 | ||
2728 | iq_pi = (iq_pi + 1) % ctrl_info->num_elements_per_iq; | |
6c223761 KB |
2729 | queue_group->iq_pi_copy[RAID_PATH] = iq_pi; |
2730 | ||
2731 | /* | |
2732 | * This write notifies the controller that an IU is available to be | |
2733 | * processed. | |
2734 | */ | |
2735 | writel(iq_pi, queue_group->iq_pi[RAID_PATH]); | |
2736 | ||
2737 | spin_unlock_irqrestore(&queue_group->submit_lock[RAID_PATH], flags); | |
6c223761 KB |
2738 | } |
2739 | ||
2740 | static void pqi_acknowledge_event(struct pqi_ctrl_info *ctrl_info, | |
2741 | struct pqi_event *event) | |
2742 | { | |
2743 | struct pqi_event_acknowledge_request request; | |
2744 | ||
2745 | memset(&request, 0, sizeof(request)); | |
2746 | ||
2747 | request.header.iu_type = PQI_REQUEST_IU_ACKNOWLEDGE_VENDOR_EVENT; | |
2748 | put_unaligned_le16(sizeof(request) - PQI_REQUEST_HEADER_LENGTH, | |
2749 | &request.header.iu_length); | |
2750 | request.event_type = event->event_type; | |
2751 | request.event_id = event->event_id; | |
2752 | request.additional_event_id = event->additional_event_id; | |
2753 | ||
98f87667 | 2754 | pqi_send_event_ack(ctrl_info, &request, sizeof(request)); |
6c223761 KB |
2755 | } |
2756 | ||
2757 | static void pqi_event_worker(struct work_struct *work) | |
2758 | { | |
2759 | unsigned int i; | |
2760 | struct pqi_ctrl_info *ctrl_info; | |
6a50d6ad | 2761 | struct pqi_event *event; |
6c223761 KB |
2762 | |
2763 | ctrl_info = container_of(work, struct pqi_ctrl_info, event_work); | |
2764 | ||
7561a7e4 KB |
2765 | pqi_ctrl_busy(ctrl_info); |
2766 | pqi_wait_if_ctrl_blocked(ctrl_info, NO_TIMEOUT); | |
5f310425 KB |
2767 | if (pqi_ctrl_offline(ctrl_info)) |
2768 | goto out; | |
2769 | ||
2770 | pqi_schedule_rescan_worker_delayed(ctrl_info); | |
7561a7e4 | 2771 | |
6a50d6ad | 2772 | event = ctrl_info->events; |
6c223761 | 2773 | for (i = 0; i < PQI_NUM_SUPPORTED_EVENTS; i++) { |
6a50d6ad KB |
2774 | if (event->pending) { |
2775 | event->pending = false; | |
2776 | pqi_acknowledge_event(ctrl_info, event); | |
6c223761 | 2777 | } |
6a50d6ad | 2778 | event++; |
6c223761 KB |
2779 | } |
2780 | ||
5f310425 | 2781 | out: |
7561a7e4 | 2782 | pqi_ctrl_unbusy(ctrl_info); |
6c223761 KB |
2783 | } |
2784 | ||
98f87667 | 2785 | #define PQI_HEARTBEAT_TIMER_INTERVAL (10 * HZ) |
6c223761 KB |
2786 | |
2787 | static void pqi_heartbeat_timer_handler(unsigned long data) | |
2788 | { | |
2789 | int num_interrupts; | |
98f87667 | 2790 | u32 heartbeat_count; |
6c223761 KB |
2791 | struct pqi_ctrl_info *ctrl_info = (struct pqi_ctrl_info *)data; |
2792 | ||
98f87667 KB |
2793 | pqi_check_ctrl_health(ctrl_info); |
2794 | if (pqi_ctrl_offline(ctrl_info)) | |
061ef06a KB |
2795 | return; |
2796 | ||
6c223761 | 2797 | num_interrupts = atomic_read(&ctrl_info->num_interrupts); |
98f87667 | 2798 | heartbeat_count = pqi_read_heartbeat_counter(ctrl_info); |
6c223761 KB |
2799 | |
2800 | if (num_interrupts == ctrl_info->previous_num_interrupts) { | |
98f87667 KB |
2801 | if (heartbeat_count == ctrl_info->previous_heartbeat_count) { |
2802 | dev_err(&ctrl_info->pci_dev->dev, | |
2803 | "no heartbeat detected - last heartbeat count: %u\n", | |
2804 | heartbeat_count); | |
6c223761 KB |
2805 | pqi_take_ctrl_offline(ctrl_info); |
2806 | return; | |
2807 | } | |
6c223761 | 2808 | } else { |
98f87667 | 2809 | ctrl_info->previous_num_interrupts = num_interrupts; |
6c223761 KB |
2810 | } |
2811 | ||
98f87667 | 2812 | ctrl_info->previous_heartbeat_count = heartbeat_count; |
6c223761 KB |
2813 | mod_timer(&ctrl_info->heartbeat_timer, |
2814 | jiffies + PQI_HEARTBEAT_TIMER_INTERVAL); | |
2815 | } | |
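/*
 * Note: the controller is taken offline above only when both signs of
 * life - the driver's interrupt count and the firmware's heartbeat
 * counter - are static across a 10-second tick; an interrupt-quiet but
 * otherwise healthy controller stays online as long as its heartbeat
 * counter keeps advancing.
 */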
2816 | ||
2817 | static void pqi_start_heartbeat_timer(struct pqi_ctrl_info *ctrl_info) | |
2818 | { | |
98f87667 KB |
2819 | if (!ctrl_info->heartbeat_counter) |
2820 | return; | |
2821 | ||
6c223761 KB |
2822 | ctrl_info->previous_num_interrupts = |
2823 | atomic_read(&ctrl_info->num_interrupts); | |
98f87667 KB |
2824 | ctrl_info->previous_heartbeat_count = |
2825 | pqi_read_heartbeat_counter(ctrl_info); | |
6c223761 | 2826 | |
6c223761 KB |
2827 | ctrl_info->heartbeat_timer.expires = |
2828 | jiffies + PQI_HEARTBEAT_TIMER_INTERVAL; | |
2829 | ctrl_info->heartbeat_timer.data = (unsigned long)ctrl_info; | |
2830 | ctrl_info->heartbeat_timer.function = pqi_heartbeat_timer_handler; | |
061ef06a | 2831 | add_timer(&ctrl_info->heartbeat_timer); |
6c223761 KB |
2832 | } |
2833 | ||
2834 | static inline void pqi_stop_heartbeat_timer(struct pqi_ctrl_info *ctrl_info) | |
2835 | { | |
98f87667 | 2836 | del_timer_sync(&ctrl_info->heartbeat_timer); |
6c223761 KB |
2837 | } |
2838 | ||
6a50d6ad | 2839 | static inline int pqi_event_type_to_event_index(unsigned int event_type) |
6c223761 KB |
2840 | { |
2841 | int index; | |
2842 | ||
6a50d6ad KB |
2843 | for (index = 0; index < ARRAY_SIZE(pqi_supported_event_types); index++) |
2844 | if (event_type == pqi_supported_event_types[index]) | |
2845 | return index; | |
6c223761 | 2846 | |
6a50d6ad KB |
2847 | return -1; |
2848 | } | |
2849 | ||
2850 | static inline bool pqi_is_supported_event(unsigned int event_type) | |
2851 | { | |
2852 | return pqi_event_type_to_event_index(event_type) != -1; | |
6c223761 KB |
2853 | } |
2854 | ||
2855 | static unsigned int pqi_process_event_intr(struct pqi_ctrl_info *ctrl_info) | |
2856 | { | |
2857 | unsigned int num_events; | |
2858 | pqi_index_t oq_pi; | |
2859 | pqi_index_t oq_ci; | |
2860 | struct pqi_event_queue *event_queue; | |
2861 | struct pqi_event_response *response; | |
6a50d6ad | 2862 | struct pqi_event *event; |
6c223761 KB |
2863 | int event_index; |
2864 | ||
2865 | event_queue = &ctrl_info->event_queue; | |
2866 | num_events = 0; | |
6c223761 KB |
2867 | oq_ci = event_queue->oq_ci_copy; |
2868 | ||
2869 | while (1) { | |
2870 | oq_pi = *event_queue->oq_pi; | |
2871 | if (oq_pi == oq_ci) | |
2872 | break; | |
2873 | ||
2874 | num_events++; | |
2875 | response = event_queue->oq_element_array + | |
2876 | (oq_ci * PQI_EVENT_OQ_ELEMENT_LENGTH); | |
2877 | ||
2878 | event_index = | |
2879 | pqi_event_type_to_event_index(response->event_type); | |
2880 | ||
2881 | if (event_index >= 0) { | |
2882 | if (response->request_acknowlege) { | |
6a50d6ad KB |
2883 | event = &ctrl_info->events[event_index]; |
2884 | event->pending = true; | |
2885 | event->event_type = response->event_type; | |
2886 | event->event_id = response->event_id; | |
2887 | event->additional_event_id = | |
6c223761 | 2888 | response->additional_event_id; |
6c223761 KB |
2889 | } |
2890 | } | |
2891 | ||
2892 | oq_ci = (oq_ci + 1) % PQI_NUM_EVENT_QUEUE_ELEMENTS; | |
2893 | } | |
2894 | ||
2895 | if (num_events) { | |
2896 | event_queue->oq_ci_copy = oq_ci; | |
2897 | writel(oq_ci, event_queue->oq_ci); | |
98f87667 | 2898 | schedule_work(&ctrl_info->event_work); |
6c223761 KB |
2899 | } |
2900 | ||
2901 | return num_events; | |
2902 | } | |
2903 | ||
061ef06a KB |
2904 | #define PQI_LEGACY_INTX_MASK 0x1 |
2905 | ||
2906 | static inline void pqi_configure_legacy_intx(struct pqi_ctrl_info *ctrl_info, | |
2907 | bool enable_intx) | |
2908 | { | |
2909 | u32 intx_mask; | |
2910 | struct pqi_device_registers __iomem *pqi_registers; | |
2911 | volatile void __iomem *register_addr; | |
2912 | ||
2913 | pqi_registers = ctrl_info->pqi_registers; | |
2914 | ||
2915 | if (enable_intx) | |
2916 | register_addr = &pqi_registers->legacy_intx_mask_clear; | |
2917 | else | |
2918 | register_addr = &pqi_registers->legacy_intx_mask_set; | |
2919 | ||
2920 | intx_mask = readl(register_addr); | |
2921 | intx_mask |= PQI_LEGACY_INTX_MASK; | |
2922 | writel(intx_mask, register_addr); | |
2923 | } | |
2924 | ||
2925 | static void pqi_change_irq_mode(struct pqi_ctrl_info *ctrl_info, | |
2926 | enum pqi_irq_mode new_mode) | |
2927 | { | |
2928 | switch (ctrl_info->irq_mode) { | |
2929 | case IRQ_MODE_MSIX: | |
2930 | switch (new_mode) { | |
2931 | case IRQ_MODE_MSIX: | |
2932 | break; | |
2933 | case IRQ_MODE_INTX: | |
2934 | pqi_configure_legacy_intx(ctrl_info, true); | |
2935 | sis_disable_msix(ctrl_info); | |
2936 | sis_enable_intx(ctrl_info); | |
2937 | break; | |
2938 | case IRQ_MODE_NONE: | |
2939 | sis_disable_msix(ctrl_info); | |
2940 | break; | |
2941 | } | |
2942 | break; | |
2943 | case IRQ_MODE_INTX: | |
2944 | switch (new_mode) { | |
2945 | case IRQ_MODE_MSIX: | |
2946 | pqi_configure_legacy_intx(ctrl_info, false); | |
2947 | sis_disable_intx(ctrl_info); | |
2948 | sis_enable_msix(ctrl_info); | |
2949 | break; | |
2950 | case IRQ_MODE_INTX: | |
2951 | break; | |
2952 | case IRQ_MODE_NONE: | |
2953 | pqi_configure_legacy_intx(ctrl_info, false); | |
2954 | sis_disable_intx(ctrl_info); | |
2955 | break; | |
2956 | } | |
2957 | break; | |
2958 | case IRQ_MODE_NONE: | |
2959 | switch (new_mode) { | |
2960 | case IRQ_MODE_MSIX: | |
2961 | sis_enable_msix(ctrl_info); | |
2962 | break; | |
2963 | case IRQ_MODE_INTX: | |
2964 | pqi_configure_legacy_intx(ctrl_info, true); | |
2965 | sis_enable_intx(ctrl_info); | |
2966 | break; | |
2967 | case IRQ_MODE_NONE: | |
2968 | break; | |
2969 | } | |
2970 | break; | |
2971 | } | |
2972 | ||
2973 | ctrl_info->irq_mode = new_mode; | |
2974 | } | |
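/*
 * Note: the nested switch above enumerates every old/new mode pair so
 * that each transition gets its own explicit mask/unmask ordering (e.g.
 * MSI-X to INTx unmasks legacy interrupts before MSI-X is torn down)
 * rather than a generic teardown-then-setup sequence.
 */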
2975 | ||
2976 | #define PQI_LEGACY_INTX_PENDING 0x1 | |
2977 | ||
2978 | static inline bool pqi_is_valid_irq(struct pqi_ctrl_info *ctrl_info) | |
2979 | { | |
2980 | bool valid_irq; | |
2981 | u32 intx_status; | |
2982 | ||
2983 | switch (ctrl_info->irq_mode) { | |
2984 | case IRQ_MODE_MSIX: | |
2985 | valid_irq = true; | |
2986 | break; | |
2987 | case IRQ_MODE_INTX: | |
2988 | intx_status = | |
2989 | readl(&ctrl_info->pqi_registers->legacy_intx_status); | |
2990 | if (intx_status & PQI_LEGACY_INTX_PENDING) | |
2991 | valid_irq = true; | |
2992 | else | |
2993 | valid_irq = false; | |
2994 | break; | |
2995 | case IRQ_MODE_NONE: | |
2996 | default: | |
2997 | valid_irq = false; | |
2998 | break; | |
2999 | } | |
3000 | ||
3001 | return valid_irq; | |
3002 | } | |
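/*
 * Note: the INTx check above exists because legacy interrupt lines can
 * be shared; reading the controller's pending bit lets the handler
 * return IRQ_NONE for interrupts raised by another device, while MSI-X
 * vectors are exclusive and always treated as valid.
 */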
3003 | ||
6c223761 KB |
3004 | static irqreturn_t pqi_irq_handler(int irq, void *data) |
3005 | { | |
3006 | struct pqi_ctrl_info *ctrl_info; | |
3007 | struct pqi_queue_group *queue_group; | |
3008 | unsigned int num_responses_handled; | |
3009 | ||
3010 | queue_group = data; | |
3011 | ctrl_info = queue_group->ctrl_info; | |
3012 | ||
061ef06a | 3013 | if (!pqi_is_valid_irq(ctrl_info)) |
6c223761 KB |
3014 | return IRQ_NONE; |
3015 | ||
3016 | num_responses_handled = pqi_process_io_intr(ctrl_info, queue_group); | |
3017 | ||
3018 | if (irq == ctrl_info->event_irq) | |
3019 | num_responses_handled += pqi_process_event_intr(ctrl_info); | |
3020 | ||
3021 | if (num_responses_handled) | |
3022 | atomic_inc(&ctrl_info->num_interrupts); | |
3023 | ||
3024 | pqi_start_io(ctrl_info, queue_group, RAID_PATH, NULL); | |
3025 | pqi_start_io(ctrl_info, queue_group, AIO_PATH, NULL); | |
3026 | ||
3027 | return IRQ_HANDLED; | |
3028 | } | |
3029 | ||
3030 | static int pqi_request_irqs(struct pqi_ctrl_info *ctrl_info) | |
3031 | { | |
d91d7820 | 3032 | struct pci_dev *pci_dev = ctrl_info->pci_dev; |
6c223761 KB |
3033 | int i; |
3034 | int rc; | |
3035 | ||
d91d7820 | 3036 | ctrl_info->event_irq = pci_irq_vector(pci_dev, 0); |
6c223761 KB |
3037 | |
3038 | for (i = 0; i < ctrl_info->num_msix_vectors_enabled; i++) { | |
d91d7820 | 3039 | rc = request_irq(pci_irq_vector(pci_dev, i), pqi_irq_handler, 0, |
52198226 | 3040 | DRIVER_NAME_SHORT, &ctrl_info->queue_groups[i]); |
6c223761 | 3041 | if (rc) { |
d91d7820 | 3042 | dev_err(&pci_dev->dev, |
6c223761 | 3043 | "irq %u init failed with error %d\n", |
d91d7820 | 3044 | pci_irq_vector(pci_dev, i), rc); |
6c223761 KB |
3045 | return rc; |
3046 | } | |
3047 | ctrl_info->num_msix_vectors_initialized++; | |
3048 | } | |
3049 | ||
3050 | return 0; | |
3051 | } | |
3052 | ||
98bf061b KB |
3053 | static void pqi_free_irqs(struct pqi_ctrl_info *ctrl_info) |
3054 | { | |
3055 | int i; | |
3056 | ||
3057 | for (i = 0; i < ctrl_info->num_msix_vectors_initialized; i++) | |
3058 | free_irq(pci_irq_vector(ctrl_info->pci_dev, i), | |
3059 | &ctrl_info->queue_groups[i]); | |
3060 | ||
3061 | ctrl_info->num_msix_vectors_initialized = 0; | |
3062 | } | |
3063 | ||
6c223761 KB |
3064 | static int pqi_enable_msix_interrupts(struct pqi_ctrl_info *ctrl_info) |
3065 | { | |
98bf061b | 3066 | int num_vectors_enabled; |
6c223761 | 3067 | |
98bf061b | 3068 | num_vectors_enabled = pci_alloc_irq_vectors(ctrl_info->pci_dev, |
52198226 CH |
3069 | PQI_MIN_MSIX_VECTORS, ctrl_info->num_queue_groups, |
3070 | PCI_IRQ_MSIX | PCI_IRQ_AFFINITY); | |
98bf061b | 3071 | if (num_vectors_enabled < 0) { |
6c223761 | 3072 | dev_err(&ctrl_info->pci_dev->dev, |
98bf061b KB |
3073 | "MSI-X init failed with error %d\n", |
3074 | num_vectors_enabled); | |
3075 | return num_vectors_enabled; | |
6c223761 KB |
3076 | } |
3077 | ||
98bf061b | 3078 | ctrl_info->num_msix_vectors_enabled = num_vectors_enabled; |
061ef06a | 3079 | ctrl_info->irq_mode = IRQ_MODE_MSIX; |
6c223761 KB |
3080 | return 0; |
3081 | } | |
3082 | ||
98bf061b KB |
3083 | static void pqi_disable_msix_interrupts(struct pqi_ctrl_info *ctrl_info) |
3084 | { | |
3085 | if (ctrl_info->num_msix_vectors_enabled) { | |
3086 | pci_free_irq_vectors(ctrl_info->pci_dev); | |
3087 | ctrl_info->num_msix_vectors_enabled = 0; | |
3088 | } | |
3089 | } | |
3090 | ||
6c223761 KB |
3091 | static int pqi_alloc_operational_queues(struct pqi_ctrl_info *ctrl_info) |
3092 | { | |
3093 | unsigned int i; | |
3094 | size_t alloc_length; | |
3095 | size_t element_array_length_per_iq; | |
3096 | size_t element_array_length_per_oq; | |
3097 | void *element_array; | |
3098 | void *next_queue_index; | |
3099 | void *aligned_pointer; | |
3100 | unsigned int num_inbound_queues; | |
3101 | unsigned int num_outbound_queues; | |
3102 | unsigned int num_queue_indexes; | |
3103 | struct pqi_queue_group *queue_group; | |
3104 | ||
3105 | element_array_length_per_iq = | |
3106 | PQI_OPERATIONAL_IQ_ELEMENT_LENGTH * | |
3107 | ctrl_info->num_elements_per_iq; | |
3108 | element_array_length_per_oq = | |
3109 | PQI_OPERATIONAL_OQ_ELEMENT_LENGTH * | |
3110 | ctrl_info->num_elements_per_oq; | |
3111 | num_inbound_queues = ctrl_info->num_queue_groups * 2; | |
3112 | num_outbound_queues = ctrl_info->num_queue_groups; | |
3113 | num_queue_indexes = (ctrl_info->num_queue_groups * 3) + 1; | |
3114 | ||
3115 | aligned_pointer = NULL; | |
3116 | ||
3117 | for (i = 0; i < num_inbound_queues; i++) { | |
3118 | aligned_pointer = PTR_ALIGN(aligned_pointer, | |
3119 | PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT); | |
3120 | aligned_pointer += element_array_length_per_iq; | |
3121 | } | |
3122 | ||
3123 | for (i = 0; i < num_outbound_queues; i++) { | |
3124 | aligned_pointer = PTR_ALIGN(aligned_pointer, | |
3125 | PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT); | |
3126 | aligned_pointer += element_array_length_per_oq; | |
3127 | } | |
3128 | ||
3129 | aligned_pointer = PTR_ALIGN(aligned_pointer, | |
3130 | PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT); | |
3131 | aligned_pointer += PQI_NUM_EVENT_QUEUE_ELEMENTS * | |
3132 | PQI_EVENT_OQ_ELEMENT_LENGTH; | |
3133 | ||
3134 | for (i = 0; i < num_queue_indexes; i++) { | |
3135 | aligned_pointer = PTR_ALIGN(aligned_pointer, | |
3136 | PQI_OPERATIONAL_INDEX_ALIGNMENT); | |
3137 | aligned_pointer += sizeof(pqi_index_t); | |
3138 | } | |
3139 | ||
3140 | alloc_length = (size_t)aligned_pointer + | |
3141 | PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT; | |
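| /* | |
| * The final NULL-relative pointer value is the total size; the extra | |
| * alignment leaves room to align the real base address below. | |
| */ | |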
3142 | ||
e1d213bd KB |
3143 | alloc_length += PQI_EXTRA_SGL_MEMORY; |
3144 | ||
6c223761 KB |
3145 | ctrl_info->queue_memory_base = |
3146 | dma_zalloc_coherent(&ctrl_info->pci_dev->dev, | |
3147 | alloc_length, | |
3148 | &ctrl_info->queue_memory_base_dma_handle, GFP_KERNEL); | |
3149 | ||
d87d5474 | 3150 | if (!ctrl_info->queue_memory_base) |
6c223761 | 3151 | return -ENOMEM; |
6c223761 KB |
3152 | |
3153 | ctrl_info->queue_memory_length = alloc_length; | |
3154 | ||
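| /* | |
| * Second pass: carve up the real allocation with the identical walk, | |
| * recording each queue's kernel virtual address and bus address. | |
| */ | |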
3155 | element_array = PTR_ALIGN(ctrl_info->queue_memory_base, | |
3156 | PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT); | |
3157 | ||
3158 | for (i = 0; i < ctrl_info->num_queue_groups; i++) { | |
3159 | queue_group = &ctrl_info->queue_groups[i]; | |
3160 | queue_group->iq_element_array[RAID_PATH] = element_array; | |
3161 | queue_group->iq_element_array_bus_addr[RAID_PATH] = | |
3162 | ctrl_info->queue_memory_base_dma_handle + | |
3163 | (element_array - ctrl_info->queue_memory_base); | |
3164 | element_array += element_array_length_per_iq; | |
3165 | element_array = PTR_ALIGN(element_array, | |
3166 | PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT); | |
3167 | queue_group->iq_element_array[AIO_PATH] = element_array; | |
3168 | queue_group->iq_element_array_bus_addr[AIO_PATH] = | |
3169 | ctrl_info->queue_memory_base_dma_handle + | |
3170 | (element_array - ctrl_info->queue_memory_base); | |
3171 | element_array += element_array_length_per_iq; | |
3172 | element_array = PTR_ALIGN(element_array, | |
3173 | PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT); | |
3174 | } | |
3175 | ||
3176 | for (i = 0; i < ctrl_info->num_queue_groups; i++) { | |
3177 | queue_group = &ctrl_info->queue_groups[i]; | |
3178 | queue_group->oq_element_array = element_array; | |
3179 | queue_group->oq_element_array_bus_addr = | |
3180 | ctrl_info->queue_memory_base_dma_handle + | |
3181 | (element_array - ctrl_info->queue_memory_base); | |
3182 | element_array += element_array_length_per_oq; | |
3183 | element_array = PTR_ALIGN(element_array, | |
3184 | PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT); | |
3185 | } | |
3186 | ||
3187 | ctrl_info->event_queue.oq_element_array = element_array; | |
3188 | ctrl_info->event_queue.oq_element_array_bus_addr = | |
3189 | ctrl_info->queue_memory_base_dma_handle + | |
3190 | (element_array - ctrl_info->queue_memory_base); | |
3191 | element_array += PQI_NUM_EVENT_QUEUE_ELEMENTS * | |
3192 | PQI_EVENT_OQ_ELEMENT_LENGTH; | |
3193 | ||
3194 | next_queue_index = PTR_ALIGN(element_array, | |
3195 | PQI_OPERATIONAL_INDEX_ALIGNMENT); | |
3196 | ||
3197 | for (i = 0; i < ctrl_info->num_queue_groups; i++) { | |
3198 | queue_group = &ctrl_info->queue_groups[i]; | |
3199 | queue_group->iq_ci[RAID_PATH] = next_queue_index; | |
3200 | queue_group->iq_ci_bus_addr[RAID_PATH] = | |
3201 | ctrl_info->queue_memory_base_dma_handle + | |
3202 | (next_queue_index - ctrl_info->queue_memory_base); | |
3203 | next_queue_index += sizeof(pqi_index_t); | |
3204 | next_queue_index = PTR_ALIGN(next_queue_index, | |
3205 | PQI_OPERATIONAL_INDEX_ALIGNMENT); | |
3206 | queue_group->iq_ci[AIO_PATH] = next_queue_index; | |
3207 | queue_group->iq_ci_bus_addr[AIO_PATH] = | |
3208 | ctrl_info->queue_memory_base_dma_handle + | |
3209 | (next_queue_index - ctrl_info->queue_memory_base); | |
3210 | next_queue_index += sizeof(pqi_index_t); | |
3211 | next_queue_index = PTR_ALIGN(next_queue_index, | |
3212 | PQI_OPERATIONAL_INDEX_ALIGNMENT); | |
3213 | queue_group->oq_pi = next_queue_index; | |
3214 | queue_group->oq_pi_bus_addr = | |
3215 | ctrl_info->queue_memory_base_dma_handle + | |
3216 | (next_queue_index - ctrl_info->queue_memory_base); | |
3217 | next_queue_index += sizeof(pqi_index_t); | |
3218 | next_queue_index = PTR_ALIGN(next_queue_index, | |
3219 | PQI_OPERATIONAL_INDEX_ALIGNMENT); | |
3220 | } | |
3221 | ||
3222 | ctrl_info->event_queue.oq_pi = next_queue_index; | |
3223 | ctrl_info->event_queue.oq_pi_bus_addr = | |
3224 | ctrl_info->queue_memory_base_dma_handle + | |
3225 | (next_queue_index - ctrl_info->queue_memory_base); | |
3226 | ||
3227 | return 0; | |
3228 | } | |
3229 | ||
3230 | static void pqi_init_operational_queues(struct pqi_ctrl_info *ctrl_info) | |
3231 | { | |
3232 | unsigned int i; | |
3233 | u16 next_iq_id = PQI_MIN_OPERATIONAL_QUEUE_ID; | |
3234 | u16 next_oq_id = PQI_MIN_OPERATIONAL_QUEUE_ID; | |
3235 | ||
3236 | /* | |
3237 | * Initialize the backpointers to the controller structure in | |
3238 | * each operational queue group structure. | |
3239 | */ | |
3240 | for (i = 0; i < ctrl_info->num_queue_groups; i++) | |
3241 | ctrl_info->queue_groups[i].ctrl_info = ctrl_info; | |
3242 | ||
3243 | /* | |
3244 | * Assign IDs to all operational queues. Note that the IDs | |
3245 | * assigned to operational IQs are independent of the IDs | |
3246 | * assigned to operational OQs. | |
3247 | */ | |
3248 | ctrl_info->event_queue.oq_id = next_oq_id++; | |
3249 | for (i = 0; i < ctrl_info->num_queue_groups; i++) { | |
3250 | ctrl_info->queue_groups[i].iq_id[RAID_PATH] = next_iq_id++; | |
3251 | ctrl_info->queue_groups[i].iq_id[AIO_PATH] = next_iq_id++; | |
3252 | ctrl_info->queue_groups[i].oq_id = next_oq_id++; | |
3253 | } | |
3254 | ||
3255 | /* | |
3256 | * Assign MSI-X table entry indexes to all queues. Note that the | |
3257 | * interrupt for the event queue is shared with the first queue group. | |
3258 | */ | |
3259 | ctrl_info->event_queue.int_msg_num = 0; | |
3260 | for (i = 0; i < ctrl_info->num_queue_groups; i++) | |
3261 | ctrl_info->queue_groups[i].int_msg_num = i; | |
3262 | ||
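| /* One submit lock and one pending-request list per I/O path (RAID and AIO). */ | |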
3263 | for (i = 0; i < ctrl_info->num_queue_groups; i++) { | |
3264 | spin_lock_init(&ctrl_info->queue_groups[i].submit_lock[0]); | |
3265 | spin_lock_init(&ctrl_info->queue_groups[i].submit_lock[1]); | |
3266 | INIT_LIST_HEAD(&ctrl_info->queue_groups[i].request_list[0]); | |
3267 | INIT_LIST_HEAD(&ctrl_info->queue_groups[i].request_list[1]); | |
3268 | } | |
3269 | } | |
3270 | ||
3271 | static int pqi_alloc_admin_queues(struct pqi_ctrl_info *ctrl_info) | |
3272 | { | |
3273 | size_t alloc_length; | |
3274 | struct pqi_admin_queues_aligned *admin_queues_aligned; | |
3275 | struct pqi_admin_queues *admin_queues; | |
3276 | ||
3277 | alloc_length = sizeof(struct pqi_admin_queues_aligned) + | |
3278 | PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT; | |
3279 | ||
3280 | ctrl_info->admin_queue_memory_base = | |
3281 | dma_zalloc_coherent(&ctrl_info->pci_dev->dev, | |
3282 | alloc_length, | |
3283 | &ctrl_info->admin_queue_memory_base_dma_handle, | |
3284 | GFP_KERNEL); | |
3285 | ||
3286 | if (!ctrl_info->admin_queue_memory_base) | |
3287 | return -ENOMEM; | |
3288 | ||
3289 | ctrl_info->admin_queue_memory_length = alloc_length; | |
3290 | ||
3291 | admin_queues = &ctrl_info->admin_queues; | |
3292 | admin_queues_aligned = PTR_ALIGN(ctrl_info->admin_queue_memory_base, | |
3293 | PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT); | |
3294 | admin_queues->iq_element_array = | |
3295 | &admin_queues_aligned->iq_element_array; | |
3296 | admin_queues->oq_element_array = | |
3297 | &admin_queues_aligned->oq_element_array; | |
3298 | admin_queues->iq_ci = &admin_queues_aligned->iq_ci; | |
3299 | admin_queues->oq_pi = &admin_queues_aligned->oq_pi; | |
3300 | ||
3301 | admin_queues->iq_element_array_bus_addr = | |
3302 | ctrl_info->admin_queue_memory_base_dma_handle + | |
3303 | (admin_queues->iq_element_array - | |
3304 | ctrl_info->admin_queue_memory_base); | |
3305 | admin_queues->oq_element_array_bus_addr = | |
3306 | ctrl_info->admin_queue_memory_base_dma_handle + | |
3307 | (admin_queues->oq_element_array - | |
3308 | ctrl_info->admin_queue_memory_base); | |
3309 | admin_queues->iq_ci_bus_addr = | |
3310 | ctrl_info->admin_queue_memory_base_dma_handle + | |
3311 | ((void *)admin_queues->iq_ci - | |
3312 | ctrl_info->admin_queue_memory_base); | |
3313 | admin_queues->oq_pi_bus_addr = | |
3314 | ctrl_info->admin_queue_memory_base_dma_handle + | |
3315 | ((void *)admin_queues->oq_pi - | |
3316 | ctrl_info->admin_queue_memory_base); | |
3317 | ||
3318 | return 0; | |
3319 | } | |
3320 | ||
3321 | #define PQI_ADMIN_QUEUE_CREATE_TIMEOUT_JIFFIES HZ | |
3322 | #define PQI_ADMIN_QUEUE_CREATE_POLL_INTERVAL_MSECS 1 | |
3323 | ||
3324 | static int pqi_create_admin_queues(struct pqi_ctrl_info *ctrl_info) | |
3325 | { | |
3326 | struct pqi_device_registers __iomem *pqi_registers; | |
3327 | struct pqi_admin_queues *admin_queues; | |
3328 | unsigned long timeout; | |
3329 | u8 status; | |
3330 | u32 reg; | |
3331 | ||
3332 | pqi_registers = ctrl_info->pqi_registers; | |
3333 | admin_queues = &ctrl_info->admin_queues; | |
3334 | ||
3335 | writeq((u64)admin_queues->iq_element_array_bus_addr, | |
3336 | &pqi_registers->admin_iq_element_array_addr); | |
3337 | writeq((u64)admin_queues->oq_element_array_bus_addr, | |
3338 | &pqi_registers->admin_oq_element_array_addr); | |
3339 | writeq((u64)admin_queues->iq_ci_bus_addr, | |
3340 | &pqi_registers->admin_iq_ci_addr); | |
3341 | writeq((u64)admin_queues->oq_pi_bus_addr, | |
3342 | &pqi_registers->admin_oq_pi_addr); | |
3343 | ||
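| /* | |
| * Pack the admin queue parameters into one register: IQ element count | |
| * in bits 7:0, OQ element count in bits 15:8, and the MSI-X message | |
| * number in bits 23:16. | |
| */ | |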
3344 | reg = PQI_ADMIN_IQ_NUM_ELEMENTS | | |
3345 | (PQI_ADMIN_OQ_NUM_ELEMENTS << 8) | |
3346 | (admin_queues->int_msg_num << 16); | |
3347 | writel(reg, &pqi_registers->admin_iq_num_elements); | |
3348 | writel(PQI_CREATE_ADMIN_QUEUE_PAIR, | |
3349 | &pqi_registers->function_and_status_code); | |
3350 | ||
3351 | timeout = PQI_ADMIN_QUEUE_CREATE_TIMEOUT_JIFFIES + jiffies; | |
3352 | while (1) { | |
3353 | status = readb(&pqi_registers->function_and_status_code); | |
3354 | if (status == PQI_STATUS_IDLE) | |
3355 | break; | |
3356 | if (time_after(jiffies, timeout)) | |
3357 | return -ETIMEDOUT; | |
3358 | msleep(PQI_ADMIN_QUEUE_CREATE_POLL_INTERVAL_MSECS); | |
3359 | } | |
3360 | ||
3361 | /* | |
3362 | * The offset registers are not initialized to the correct | |
3363 | * offsets until *after* the create admin queue pair command | |
3364 | * completes successfully. | |
3365 | */ | |
3366 | admin_queues->iq_pi = ctrl_info->iomem_base + | |
3367 | PQI_DEVICE_REGISTERS_OFFSET + | |
3368 | readq(&pqi_registers->admin_iq_pi_offset); | |
3369 | admin_queues->oq_ci = ctrl_info->iomem_base + | |
3370 | PQI_DEVICE_REGISTERS_OFFSET + | |
3371 | readq(&pqi_registers->admin_oq_ci_offset); | |
3372 | ||
3373 | return 0; | |
3374 | } | |
3375 | ||
3376 | static void pqi_submit_admin_request(struct pqi_ctrl_info *ctrl_info, | |
3377 | struct pqi_general_admin_request *request) | |
3378 | { | |
3379 | struct pqi_admin_queues *admin_queues; | |
3380 | void *next_element; | |
3381 | pqi_index_t iq_pi; | |
3382 | ||
3383 | admin_queues = &ctrl_info->admin_queues; | |
3384 | iq_pi = admin_queues->iq_pi_copy; | |
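| /* | |
| * iq_pi_copy is the host-side cache of the producer index, so the | |
| * doorbell register never has to be read back over MMIO. | |
| */ | |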
3385 | ||
3386 | next_element = admin_queues->iq_element_array + | |
3387 | (iq_pi * PQI_ADMIN_IQ_ELEMENT_LENGTH); | |
3388 | ||
3389 | memcpy(next_element, request, sizeof(*request)); | |
3390 | ||
3391 | iq_pi = (iq_pi + 1) % PQI_ADMIN_IQ_NUM_ELEMENTS; | |
3392 | admin_queues->iq_pi_copy = iq_pi; | |
3393 | ||
3394 | /* | |
3395 | * This write notifies the controller that an IU is available to be | |
3396 | * processed. | |
3397 | */ | |
3398 | writel(iq_pi, admin_queues->iq_pi); | |
3399 | } | |
3400 | ||
3401 | static int pqi_poll_for_admin_response(struct pqi_ctrl_info *ctrl_info, | |
3402 | struct pqi_general_admin_response *response) | |
3403 | { | |
3404 | struct pqi_admin_queues *admin_queues; | |
3405 | pqi_index_t oq_pi; | |
3406 | pqi_index_t oq_ci; | |
3407 | unsigned long timeout; | |
3408 | ||
3409 | admin_queues = &ctrl_info->admin_queues; | |
3410 | oq_ci = admin_queues->oq_ci_copy; | |
3411 | ||
3412 | timeout = (3 * HZ) + jiffies; | |
3413 | ||
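| /* Admin completions are consumed by polling the OQ producer index; no interrupt is used on this path. */ | |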
3414 | while (1) { | |
3415 | oq_pi = *admin_queues->oq_pi; | |
3416 | if (oq_pi != oq_ci) | |
3417 | break; | |
3418 | if (time_after(jiffies, timeout)) { | |
3419 | dev_err(&ctrl_info->pci_dev->dev, | |
3420 | "timed out waiting for admin response\n"); | |
3421 | return -ETIMEDOUT; | |
3422 | } | |
3423 | usleep_range(1000, 2000); | |
3424 | } | |
3425 | ||
3426 | memcpy(response, admin_queues->oq_element_array + | |
3427 | (oq_ci * PQI_ADMIN_OQ_ELEMENT_LENGTH), sizeof(*response)); | |
3428 | ||
3429 | oq_ci = (oq_ci + 1) % PQI_ADMIN_OQ_NUM_ELEMENTS; | |
3430 | admin_queues->oq_ci_copy = oq_ci; | |
3431 | writel(oq_ci, admin_queues->oq_ci); | |
3432 | ||
3433 | return 0; | |
3434 | } | |
3435 | ||
3436 | static void pqi_start_io(struct pqi_ctrl_info *ctrl_info, | |
3437 | struct pqi_queue_group *queue_group, enum pqi_io_path path, | |
3438 | struct pqi_io_request *io_request) | |
3439 | { | |
3440 | struct pqi_io_request *next; | |
3441 | void *next_element; | |
3442 | pqi_index_t iq_pi; | |
3443 | pqi_index_t iq_ci; | |
3444 | size_t iu_length; | |
3445 | unsigned long flags; | |
3446 | unsigned int num_elements_needed; | |
3447 | unsigned int num_elements_to_end_of_queue; | |
3448 | size_t copy_count; | |
3449 | struct pqi_iu_header *request; | |
3450 | ||
3451 | spin_lock_irqsave(&queue_group->submit_lock[path], flags); | |
3452 | ||
376fb880 KB |
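| /* | |
| * A NULL io_request means we were called only to restart requests | |
| * already queued on this path, e.g. ones deferred below because the | |
| * inbound queue was full. | |
| */ | |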
3453 | if (io_request) { |
3454 | io_request->queue_group = queue_group; | |
6c223761 KB |
3455 | list_add_tail(&io_request->request_list_entry, |
3456 | &queue_group->request_list[path]); | |
376fb880 | 3457 | } |
6c223761 KB |
3458 | |
3459 | iq_pi = queue_group->iq_pi_copy[path]; | |
3460 | ||
3461 | list_for_each_entry_safe(io_request, next, | |
3462 | &queue_group->request_list[path], request_list_entry) { | |
3463 | ||
3464 | request = io_request->iu; | |
3465 | ||
3466 | iu_length = get_unaligned_le16(&request->iu_length) + | |
3467 | PQI_REQUEST_HEADER_LENGTH; | |
3468 | num_elements_needed = | |
3469 | DIV_ROUND_UP(iu_length, | |
3470 | PQI_OPERATIONAL_IQ_ELEMENT_LENGTH); | |
3471 | ||
3472 | iq_ci = *queue_group->iq_ci[path]; | |
3473 | ||
3474 | if (num_elements_needed > pqi_num_elements_free(iq_pi, iq_ci, | |
3475 | ctrl_info->num_elements_per_iq)) | |
3476 | break; | |
3477 | ||
3478 | put_unaligned_le16(queue_group->oq_id, | |
3479 | &request->response_queue_id); | |
3480 | ||
3481 | next_element = queue_group->iq_element_array[path] + | |
3482 | (iq_pi * PQI_OPERATIONAL_IQ_ELEMENT_LENGTH); | |
3483 | ||
3484 | num_elements_to_end_of_queue = | |
3485 | ctrl_info->num_elements_per_iq - iq_pi; | |
3486 | ||
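| /* | |
| * If the IU would run past the end of the element array, copy it in | |
| * two pieces: the first part to the tail of the ring and the | |
| * remainder wrapped to the head (inbound spanning). | |
| */ | |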
3487 | if (num_elements_needed <= num_elements_to_end_of_queue) { | |
3488 | memcpy(next_element, request, iu_length); | |
3489 | } else { | |
3490 | copy_count = num_elements_to_end_of_queue * | |
3491 | PQI_OPERATIONAL_IQ_ELEMENT_LENGTH; | |
3492 | memcpy(next_element, request, copy_count); | |
3493 | memcpy(queue_group->iq_element_array[path], | |
3494 | (u8 *)request + copy_count, | |
3495 | iu_length - copy_count); | |
3496 | } | |
3497 | ||
3498 | iq_pi = (iq_pi + num_elements_needed) % | |
3499 | ctrl_info->num_elements_per_iq; | |
3500 | ||
3501 | list_del(&io_request->request_list_entry); | |
3502 | } | |
3503 | ||
3504 | if (iq_pi != queue_group->iq_pi_copy[path]) { | |
3505 | queue_group->iq_pi_copy[path] = iq_pi; | |
3506 | /* | |
3507 | * This write notifies the controller that one or more IUs are | |
3508 | * available to be processed. | |
3509 | */ | |
3510 | writel(iq_pi, queue_group->iq_pi[path]); | |
3511 | } | |
3512 | ||
3513 | spin_unlock_irqrestore(&queue_group->submit_lock[path], flags); | |
3514 | } | |
3515 | ||
1f37e992 KB |
3516 | #define PQI_WAIT_FOR_COMPLETION_IO_TIMEOUT_SECS 10 |
3517 | ||
3518 | static int pqi_wait_for_completion_io(struct pqi_ctrl_info *ctrl_info, | |
3519 | struct completion *wait) | |
3520 | { | |
3521 | int rc; | |
1f37e992 KB |
3522 | |
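| /* | |
| * Wait in 10-second slices so controller health can be rechecked | |
| * between slices; a dead controller fails with -ENXIO instead of | |
| * blocking forever. | |
| */ | |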
3523 | while (1) { | |
3524 | if (wait_for_completion_io_timeout(wait, | |
3525 | PQI_WAIT_FOR_COMPLETION_IO_TIMEOUT_SECS * HZ)) { | |
3526 | rc = 0; | |
3527 | break; | |
3528 | } | |
3529 | ||
3530 | pqi_check_ctrl_health(ctrl_info); | |
3531 | if (pqi_ctrl_offline(ctrl_info)) { | |
3532 | rc = -ENXIO; | |
3533 | break; | |
3534 | } | |
1f37e992 KB |
3535 | } |
3536 | ||
3537 | return rc; | |
3538 | } | |
3539 | ||
6c223761 KB |
3540 | static void pqi_raid_synchronous_complete(struct pqi_io_request *io_request, |
3541 | void *context) | |
3542 | { | |
3543 | struct completion *waiting = context; | |
3544 | ||
3545 | complete(waiting); | |
3546 | } | |
3547 | ||
3548 | static int pqi_submit_raid_request_synchronous_with_io_request( | |
3549 | struct pqi_ctrl_info *ctrl_info, struct pqi_io_request *io_request, | |
3550 | unsigned long timeout_msecs) | |
3551 | { | |
3552 | int rc = 0; | |
3553 | DECLARE_COMPLETION_ONSTACK(wait); | |
3554 | ||
3555 | io_request->io_complete_callback = pqi_raid_synchronous_complete; | |
3556 | io_request->context = &wait; | |
3557 | ||
3558 | pqi_start_io(ctrl_info, | |
3559 | &ctrl_info->queue_groups[PQI_DEFAULT_QUEUE_GROUP], RAID_PATH, | |
3560 | io_request); | |
3561 | ||
3562 | if (timeout_msecs == NO_TIMEOUT) { | |
1f37e992 | 3563 | pqi_wait_for_completion_io(ctrl_info, &wait); |
6c223761 KB |
3564 | } else { |
3565 | if (!wait_for_completion_io_timeout(&wait, | |
3566 | msecs_to_jiffies(timeout_msecs))) { | |
3567 | dev_warn(&ctrl_info->pci_dev->dev, | |
3568 | "command timed out\n"); | |
3569 | rc = -ETIMEDOUT; | |
3570 | } | |
3571 | } | |
3572 | ||
3573 | return rc; | |
3574 | } | |
3575 | ||
3576 | static int pqi_submit_raid_request_synchronous(struct pqi_ctrl_info *ctrl_info, | |
3577 | struct pqi_iu_header *request, unsigned int flags, | |
3578 | struct pqi_raid_error_info *error_info, unsigned long timeout_msecs) | |
3579 | { | |
3580 | int rc; | |
3581 | struct pqi_io_request *io_request; | |
3582 | unsigned long start_jiffies; | |
3583 | unsigned long msecs_blocked; | |
3584 | size_t iu_length; | |
3585 | ||
3586 | /* | |
3587 | * Note that specifying PQI_SYNC_FLAGS_INTERRUPTABLE and a timeout value | |
3588 | * are mutually exclusive. | |
3589 | */ | |
3590 | ||
3591 | if (flags & PQI_SYNC_FLAGS_INTERRUPTABLE) { | |
3592 | if (down_interruptible(&ctrl_info->sync_request_sem)) | |
3593 | return -ERESTARTSYS; | |
3594 | } else { | |
3595 | if (timeout_msecs == NO_TIMEOUT) { | |
3596 | down(&ctrl_info->sync_request_sem); | |
3597 | } else { | |
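| /* Charge time spent blocked on the semaphore against the caller's timeout. */ | |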
3598 | start_jiffies = jiffies; | |
3599 | if (down_timeout(&ctrl_info->sync_request_sem, | |
3600 | msecs_to_jiffies(timeout_msecs))) | |
3601 | return -ETIMEDOUT; | |
3602 | msecs_blocked = | |
3603 | jiffies_to_msecs(jiffies - start_jiffies); | |
3604 | if (msecs_blocked >= timeout_msecs) | |
3605 | return -ETIMEDOUT; | |
3606 | timeout_msecs -= msecs_blocked; | |
3607 | } | |
3608 | } | |
3609 | ||
7561a7e4 KB |
3610 | pqi_ctrl_busy(ctrl_info); |
3611 | timeout_msecs = pqi_wait_if_ctrl_blocked(ctrl_info, timeout_msecs); | |
3612 | if (timeout_msecs == 0) { | |
3613 | rc = -ETIMEDOUT; | |
3614 | goto out; | |
3615 | } | |
3616 | ||
376fb880 KB |
3617 | if (pqi_ctrl_offline(ctrl_info)) { |
3618 | rc = -ENXIO; | |
3619 | goto out; | |
3620 | } | |
3621 | ||
6c223761 KB |
3622 | io_request = pqi_alloc_io_request(ctrl_info); |
3623 | ||
3624 | put_unaligned_le16(io_request->index, | |
3625 | &(((struct pqi_raid_path_request *)request)->request_id)); | |
3626 | ||
3627 | if (request->iu_type == PQI_REQUEST_IU_RAID_PATH_IO) | |
3628 | ((struct pqi_raid_path_request *)request)->error_index = | |
3629 | ((struct pqi_raid_path_request *)request)->request_id; | |
3630 | ||
3631 | iu_length = get_unaligned_le16(&request->iu_length) + | |
3632 | PQI_REQUEST_HEADER_LENGTH; | |
3633 | memcpy(io_request->iu, request, iu_length); | |
3634 | ||
3635 | rc = pqi_submit_raid_request_synchronous_with_io_request(ctrl_info, | |
3636 | io_request, timeout_msecs); | |
3637 | ||
3638 | if (error_info) { | |
3639 | if (io_request->error_info) | |
3640 | memcpy(error_info, io_request->error_info, | |
3641 | sizeof(*error_info)); | |
3642 | else | |
3643 | memset(error_info, 0, sizeof(*error_info)); | |
3644 | } else if (rc == 0 && io_request->error_info) { | |
3645 | u8 scsi_status; | |
3646 | struct pqi_raid_error_info *raid_error_info; | |
3647 | ||
3648 | raid_error_info = io_request->error_info; | |
3649 | scsi_status = raid_error_info->status; | |
3650 | ||
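| /* | |
| * A CHECK CONDITION caused only by a data underflow is treated as | |
| * success; typically the supplied buffer was simply larger than the | |
| * data returned. | |
| */ | |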
3651 | if (scsi_status == SAM_STAT_CHECK_CONDITION && | |
3652 | raid_error_info->data_out_result == | |
3653 | PQI_DATA_IN_OUT_UNDERFLOW) | |
3654 | scsi_status = SAM_STAT_GOOD; | |
3655 | ||
3656 | if (scsi_status != SAM_STAT_GOOD) | |
3657 | rc = -EIO; | |
3658 | } | |
3659 | ||
3660 | pqi_free_io_request(io_request); | |
3661 | ||
7561a7e4 KB |
3662 | out: |
3663 | pqi_ctrl_unbusy(ctrl_info); | |
6c223761 KB |
3664 | up(&ctrl_info->sync_request_sem); |
3665 | ||
3666 | return rc; | |
3667 | } | |
3668 | ||
3669 | static int pqi_validate_admin_response( | |
3670 | struct pqi_general_admin_response *response, u8 expected_function_code) | |
3671 | { | |
3672 | if (response->header.iu_type != PQI_RESPONSE_IU_GENERAL_ADMIN) | |
3673 | return -EINVAL; | |
3674 | ||
3675 | if (get_unaligned_le16(&response->header.iu_length) != | |
3676 | PQI_GENERAL_ADMIN_IU_LENGTH) | |
3677 | return -EINVAL; | |
3678 | ||
3679 | if (response->function_code != expected_function_code) | |
3680 | return -EINVAL; | |
3681 | ||
3682 | if (response->status != PQI_GENERAL_ADMIN_STATUS_SUCCESS) | |
3683 | return -EINVAL; | |
3684 | ||
3685 | return 0; | |
3686 | } | |
3687 | ||
3688 | static int pqi_submit_admin_request_synchronous( | |
3689 | struct pqi_ctrl_info *ctrl_info, | |
3690 | struct pqi_general_admin_request *request, | |
3691 | struct pqi_general_admin_response *response) | |
3692 | { | |
3693 | int rc; | |
3694 | ||
3695 | pqi_submit_admin_request(ctrl_info, request); | |
3696 | ||
3697 | rc = pqi_poll_for_admin_response(ctrl_info, response); | |
3698 | ||
3699 | if (rc == 0) | |
3700 | rc = pqi_validate_admin_response(response, | |
3701 | request->function_code); | |
3702 | ||
3703 | return rc; | |
3704 | } | |
3705 | ||
3706 | static int pqi_report_device_capability(struct pqi_ctrl_info *ctrl_info) | |
3707 | { | |
3708 | int rc; | |
3709 | struct pqi_general_admin_request request; | |
3710 | struct pqi_general_admin_response response; | |
3711 | struct pqi_device_capability *capability; | |
3712 | struct pqi_iu_layer_descriptor *sop_iu_layer_descriptor; | |
3713 | ||
3714 | capability = kmalloc(sizeof(*capability), GFP_KERNEL); | |
3715 | if (!capability) | |
3716 | return -ENOMEM; | |
3717 | ||
3718 | memset(&request, 0, sizeof(request)); | |
3719 | ||
3720 | request.header.iu_type = PQI_REQUEST_IU_GENERAL_ADMIN; | |
3721 | put_unaligned_le16(PQI_GENERAL_ADMIN_IU_LENGTH, | |
3722 | &request.header.iu_length); | |
3723 | request.function_code = | |
3724 | PQI_GENERAL_ADMIN_FUNCTION_REPORT_DEVICE_CAPABILITY; | |
3725 | put_unaligned_le32(sizeof(*capability), | |
3726 | &request.data.report_device_capability.buffer_length); | |
3727 | ||
3728 | rc = pqi_map_single(ctrl_info->pci_dev, | |
3729 | &request.data.report_device_capability.sg_descriptor, | |
3730 | capability, sizeof(*capability), | |
3731 | PCI_DMA_FROMDEVICE); | |
3732 | if (rc) | |
3733 | goto out; | |
3734 | ||
3735 | rc = pqi_submit_admin_request_synchronous(ctrl_info, &request, | |
3736 | &response); | |
3737 | ||
3738 | pqi_pci_unmap(ctrl_info->pci_dev, | |
3739 | &request.data.report_device_capability.sg_descriptor, 1, | |
3740 | PCI_DMA_FROMDEVICE); | |
3741 | ||
3742 | if (rc) | |
3743 | goto out; | |
3744 | ||
3745 | if (response.status != PQI_GENERAL_ADMIN_STATUS_SUCCESS) { | |
3746 | rc = -EIO; | |
3747 | goto out; | |
3748 | } | |
3749 | ||
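| /* Queue element lengths are reported by the firmware in units of 16 bytes. */ | |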
3750 | ctrl_info->max_inbound_queues = | |
3751 | get_unaligned_le16(&capability->max_inbound_queues); | |
3752 | ctrl_info->max_elements_per_iq = | |
3753 | get_unaligned_le16(&capability->max_elements_per_iq); | |
3754 | ctrl_info->max_iq_element_length = | |
3755 | get_unaligned_le16(&capability->max_iq_element_length) | |
3756 | * 16; | |
3757 | ctrl_info->max_outbound_queues = | |
3758 | get_unaligned_le16(&capability->max_outbound_queues); | |
3759 | ctrl_info->max_elements_per_oq = | |
3760 | get_unaligned_le16(&capability->max_elements_per_oq); | |
3761 | ctrl_info->max_oq_element_length = | |
3762 | get_unaligned_le16(&capability->max_oq_element_length) | |
3763 | * 16; | |
3764 | ||
3765 | sop_iu_layer_descriptor = | |
3766 | &capability->iu_layer_descriptors[PQI_PROTOCOL_SOP]; | |
3767 | ||
3768 | ctrl_info->max_inbound_iu_length_per_firmware = | |
3769 | get_unaligned_le16( | |
3770 | &sop_iu_layer_descriptor->max_inbound_iu_length); | |
3771 | ctrl_info->inbound_spanning_supported = | |
3772 | sop_iu_layer_descriptor->inbound_spanning_supported; | |
3773 | ctrl_info->outbound_spanning_supported = | |
3774 | sop_iu_layer_descriptor->outbound_spanning_supported; | |
3775 | ||
3776 | out: | |
3777 | kfree(capability); | |
3778 | ||
3779 | return rc; | |
3780 | } | |
3781 | ||
3782 | static int pqi_validate_device_capability(struct pqi_ctrl_info *ctrl_info) | |
3783 | { | |
3784 | if (ctrl_info->max_iq_element_length < | |
3785 | PQI_OPERATIONAL_IQ_ELEMENT_LENGTH) { | |
3786 | dev_err(&ctrl_info->pci_dev->dev, | |
3787 | "max. inbound queue element length of %d is less than the required length of %d\n", | |
3788 | ctrl_info->max_iq_element_length, | |
3789 | PQI_OPERATIONAL_IQ_ELEMENT_LENGTH); | |
3790 | return -EINVAL; | |
3791 | } | |
3792 | ||
3793 | if (ctrl_info->max_oq_element_length < | |
3794 | PQI_OPERATIONAL_OQ_ELEMENT_LENGTH) { | |
3795 | dev_err(&ctrl_info->pci_dev->dev, | |
3796 | "max. outbound queue element length of %d is less than the required length of %d\n", | |
3797 | ctrl_info->max_oq_element_length, | |
3798 | PQI_OPERATIONAL_OQ_ELEMENT_LENGTH); | |
3799 | return -EINVAL; | |
3800 | } | |
3801 | ||
3802 | if (ctrl_info->max_inbound_iu_length_per_firmware < | |
3803 | PQI_OPERATIONAL_IQ_ELEMENT_LENGTH) { | |
3804 | dev_err(&ctrl_info->pci_dev->dev, | |
3805 | "max. inbound IU length of %u is less than the min. required length of %d\n", | |
3806 | ctrl_info->max_inbound_iu_length_per_firmware, | |
3807 | PQI_OPERATIONAL_IQ_ELEMENT_LENGTH); | |
3808 | return -EINVAL; | |
3809 | } | |
3810 | ||
77668f41 KB |
3811 | if (!ctrl_info->inbound_spanning_supported) { |
3812 | dev_err(&ctrl_info->pci_dev->dev, | |
3813 | "the controller does not support inbound spanning\n"); | |
3814 | return -EINVAL; | |
3815 | } | |
3816 | ||
3817 | if (ctrl_info->outbound_spanning_supported) { | |
3818 | dev_err(&ctrl_info->pci_dev->dev, | |
3819 | "the controller supports outbound spanning but this driver does not\n"); | |
3820 | return -EINVAL; | |
3821 | } | |
3822 | ||
6c223761 KB |
3823 | return 0; |
3824 | } | |
3825 | ||
3826 | static int pqi_delete_operational_queue(struct pqi_ctrl_info *ctrl_info, | |
3827 | bool inbound_queue, u16 queue_id) | |
3828 | { | |
3829 | struct pqi_general_admin_request request; | |
3830 | struct pqi_general_admin_response response; | |
3831 | ||
3832 | memset(&request, 0, sizeof(request)); | |
3833 | request.header.iu_type = PQI_REQUEST_IU_GENERAL_ADMIN; | |
3834 | put_unaligned_le16(PQI_GENERAL_ADMIN_IU_LENGTH, | |
3835 | &request.header.iu_length); | |
3836 | if (inbound_queue) | |
3837 | request.function_code = | |
3838 | PQI_GENERAL_ADMIN_FUNCTION_DELETE_IQ; | |
3839 | else | |
3840 | request.function_code = | |
3841 | PQI_GENERAL_ADMIN_FUNCTION_DELETE_OQ; | |
3842 | put_unaligned_le16(queue_id, | |
3843 | &request.data.delete_operational_queue.queue_id); | |
3844 | ||
3845 | return pqi_submit_admin_request_synchronous(ctrl_info, &request, | |
3846 | &response); | |
3847 | } | |
3848 | ||
3849 | static int pqi_create_event_queue(struct pqi_ctrl_info *ctrl_info) | |
3850 | { | |
3851 | int rc; | |
3852 | struct pqi_event_queue *event_queue; | |
3853 | struct pqi_general_admin_request request; | |
3854 | struct pqi_general_admin_response response; | |
3855 | ||
3856 | event_queue = &ctrl_info->event_queue; | |
3857 | ||
3858 | /* | |
3859 | * Create the OQ (Outbound Queue - device to host queue) dedicated |
3860 | * to events. | |
3861 | */ | |
3862 | memset(&request, 0, sizeof(request)); | |
3863 | request.header.iu_type = PQI_REQUEST_IU_GENERAL_ADMIN; | |
3864 | put_unaligned_le16(PQI_GENERAL_ADMIN_IU_LENGTH, | |
3865 | &request.header.iu_length); | |
3866 | request.function_code = PQI_GENERAL_ADMIN_FUNCTION_CREATE_OQ; | |
3867 | put_unaligned_le16(event_queue->oq_id, | |
3868 | &request.data.create_operational_oq.queue_id); | |
3869 | put_unaligned_le64((u64)event_queue->oq_element_array_bus_addr, | |
3870 | &request.data.create_operational_oq.element_array_addr); | |
3871 | put_unaligned_le64((u64)event_queue->oq_pi_bus_addr, | |
3872 | &request.data.create_operational_oq.pi_addr); | |
3873 | put_unaligned_le16(PQI_NUM_EVENT_QUEUE_ELEMENTS, | |
3874 | &request.data.create_operational_oq.num_elements); | |
3875 | put_unaligned_le16(PQI_EVENT_OQ_ELEMENT_LENGTH / 16, | |
3876 | &request.data.create_operational_oq.element_length); | |
3877 | request.data.create_operational_oq.queue_protocol = PQI_PROTOCOL_SOP; | |
3878 | put_unaligned_le16(event_queue->int_msg_num, | |
3879 | &request.data.create_operational_oq.int_msg_num); | |
3880 | ||
3881 | rc = pqi_submit_admin_request_synchronous(ctrl_info, &request, | |
3882 | &response); | |
3883 | if (rc) | |
3884 | return rc; | |
3885 | ||
3886 | event_queue->oq_ci = ctrl_info->iomem_base + | |
3887 | PQI_DEVICE_REGISTERS_OFFSET + | |
3888 | get_unaligned_le64( | |
3889 | &response.data.create_operational_oq.oq_ci_offset); | |
3890 | ||
3891 | return 0; | |
3892 | } | |
3893 | ||
061ef06a KB |
3894 | static int pqi_create_queue_group(struct pqi_ctrl_info *ctrl_info, |
3895 | unsigned int group_number) | |
6c223761 | 3896 | { |
6c223761 KB |
3897 | int rc; |
3898 | struct pqi_queue_group *queue_group; | |
3899 | struct pqi_general_admin_request request; | |
3900 | struct pqi_general_admin_response response; | |
3901 | ||
061ef06a | 3902 | queue_group = &ctrl_info->queue_groups[group_number]; |
6c223761 KB |
3903 | |
3904 | /* | |
3905 | * Create IQ (Inbound Queue - host to device queue) for | |
3906 | * RAID path. | |
3907 | */ | |
3908 | memset(&request, 0, sizeof(request)); | |
3909 | request.header.iu_type = PQI_REQUEST_IU_GENERAL_ADMIN; | |
3910 | put_unaligned_le16(PQI_GENERAL_ADMIN_IU_LENGTH, | |
3911 | &request.header.iu_length); | |
3912 | request.function_code = PQI_GENERAL_ADMIN_FUNCTION_CREATE_IQ; | |
3913 | put_unaligned_le16(queue_group->iq_id[RAID_PATH], | |
3914 | &request.data.create_operational_iq.queue_id); | |
3915 | put_unaligned_le64( | |
3916 | (u64)queue_group->iq_element_array_bus_addr[RAID_PATH], | |
3917 | &request.data.create_operational_iq.element_array_addr); | |
3918 | put_unaligned_le64((u64)queue_group->iq_ci_bus_addr[RAID_PATH], | |
3919 | &request.data.create_operational_iq.ci_addr); | |
3920 | put_unaligned_le16(ctrl_info->num_elements_per_iq, | |
3921 | &request.data.create_operational_iq.num_elements); | |
3922 | put_unaligned_le16(PQI_OPERATIONAL_IQ_ELEMENT_LENGTH / 16, | |
3923 | &request.data.create_operational_iq.element_length); | |
3924 | request.data.create_operational_iq.queue_protocol = PQI_PROTOCOL_SOP; | |
3925 | ||
3926 | rc = pqi_submit_admin_request_synchronous(ctrl_info, &request, | |
3927 | &response); | |
3928 | if (rc) { | |
3929 | dev_err(&ctrl_info->pci_dev->dev, | |
3930 | "error creating inbound RAID queue\n"); | |
3931 | return rc; | |
3932 | } | |
3933 | ||
3934 | queue_group->iq_pi[RAID_PATH] = ctrl_info->iomem_base + | |
3935 | PQI_DEVICE_REGISTERS_OFFSET + | |
3936 | get_unaligned_le64( | |
3937 | &response.data.create_operational_iq.iq_pi_offset); | |
3938 | ||
3939 | /* | |
3940 | * Create IQ (Inbound Queue - host to device queue) for | |
3941 | * Advanced I/O (AIO) path. | |
3942 | */ | |
3943 | memset(&request, 0, sizeof(request)); | |
3944 | request.header.iu_type = PQI_REQUEST_IU_GENERAL_ADMIN; | |
3945 | put_unaligned_le16(PQI_GENERAL_ADMIN_IU_LENGTH, | |
3946 | &request.header.iu_length); | |
3947 | request.function_code = PQI_GENERAL_ADMIN_FUNCTION_CREATE_IQ; | |
3948 | put_unaligned_le16(queue_group->iq_id[AIO_PATH], | |
3949 | &request.data.create_operational_iq.queue_id); | |
3950 | put_unaligned_le64((u64)queue_group-> | |
3951 | iq_element_array_bus_addr[AIO_PATH], | |
3952 | &request.data.create_operational_iq.element_array_addr); | |
3953 | put_unaligned_le64((u64)queue_group->iq_ci_bus_addr[AIO_PATH], | |
3954 | &request.data.create_operational_iq.ci_addr); | |
3955 | put_unaligned_le16(ctrl_info->num_elements_per_iq, | |
3956 | &request.data.create_operational_iq.num_elements); | |
3957 | put_unaligned_le16(PQI_OPERATIONAL_IQ_ELEMENT_LENGTH / 16, | |
3958 | &request.data.create_operational_iq.element_length); | |
3959 | request.data.create_operational_iq.queue_protocol = PQI_PROTOCOL_SOP; | |
3960 | ||
3961 | rc = pqi_submit_admin_request_synchronous(ctrl_info, &request, | |
3962 | &response); | |
3963 | if (rc) { | |
3964 | dev_err(&ctrl_info->pci_dev->dev, | |
3965 | "error creating inbound AIO queue\n"); | |
3966 | goto delete_inbound_queue_raid; | |
3967 | } | |
3968 | ||
3969 | queue_group->iq_pi[AIO_PATH] = ctrl_info->iomem_base + | |
3970 | PQI_DEVICE_REGISTERS_OFFSET + | |
3971 | get_unaligned_le64( | |
3972 | &response.data.create_operational_iq.iq_pi_offset); | |
3973 | ||
3974 | /* | |
3975 | * Designate the 2nd IQ as the AIO path. By default, all IQs are | |
3976 | * assumed to be for RAID path I/O unless we change the queue's | |
3977 | * property. | |
3978 | */ | |
3979 | memset(&request, 0, sizeof(request)); | |
3980 | request.header.iu_type = PQI_REQUEST_IU_GENERAL_ADMIN; | |
3981 | put_unaligned_le16(PQI_GENERAL_ADMIN_IU_LENGTH, | |
3982 | &request.header.iu_length); | |
3983 | request.function_code = PQI_GENERAL_ADMIN_FUNCTION_CHANGE_IQ_PROPERTY; | |
3984 | put_unaligned_le16(queue_group->iq_id[AIO_PATH], | |
3985 | &request.data.change_operational_iq_properties.queue_id); | |
3986 | put_unaligned_le32(PQI_IQ_PROPERTY_IS_AIO_QUEUE, | |
3987 | &request.data.change_operational_iq_properties.vendor_specific); | |
3988 | ||
3989 | rc = pqi_submit_admin_request_synchronous(ctrl_info, &request, | |
3990 | &response); | |
3991 | if (rc) { | |
3992 | dev_err(&ctrl_info->pci_dev->dev, | |
3993 | "error changing queue property\n"); | |
3994 | goto delete_inbound_queue_aio; | |
3995 | } | |
3996 | ||
3997 | /* | |
3998 | * Create OQ (Outbound Queue - device to host queue). | |
3999 | */ | |
4000 | memset(&request, 0, sizeof(request)); | |
4001 | request.header.iu_type = PQI_REQUEST_IU_GENERAL_ADMIN; | |
4002 | put_unaligned_le16(PQI_GENERAL_ADMIN_IU_LENGTH, | |
4003 | &request.header.iu_length); | |
4004 | request.function_code = PQI_GENERAL_ADMIN_FUNCTION_CREATE_OQ; | |
4005 | put_unaligned_le16(queue_group->oq_id, | |
4006 | &request.data.create_operational_oq.queue_id); | |
4007 | put_unaligned_le64((u64)queue_group->oq_element_array_bus_addr, | |
4008 | &request.data.create_operational_oq.element_array_addr); | |
4009 | put_unaligned_le64((u64)queue_group->oq_pi_bus_addr, | |
4010 | &request.data.create_operational_oq.pi_addr); | |
4011 | put_unaligned_le16(ctrl_info->num_elements_per_oq, | |
4012 | &request.data.create_operational_oq.num_elements); | |
4013 | put_unaligned_le16(PQI_OPERATIONAL_OQ_ELEMENT_LENGTH / 16, | |
4014 | &request.data.create_operational_oq.element_length); | |
4015 | request.data.create_operational_oq.queue_protocol = PQI_PROTOCOL_SOP; | |
4016 | put_unaligned_le16(queue_group->int_msg_num, | |
4017 | &request.data.create_operational_oq.int_msg_num); | |
4018 | ||
4019 | rc = pqi_submit_admin_request_synchronous(ctrl_info, &request, | |
4020 | &response); | |
4021 | if (rc) { | |
4022 | dev_err(&ctrl_info->pci_dev->dev, | |
4023 | "error creating outbound queue\n"); | |
4024 | goto delete_inbound_queue_aio; | |
4025 | } | |
4026 | ||
4027 | queue_group->oq_ci = ctrl_info->iomem_base + | |
4028 | PQI_DEVICE_REGISTERS_OFFSET + | |
4029 | get_unaligned_le64( | |
4030 | &response.data.create_operational_oq.oq_ci_offset); | |
4031 | ||
6c223761 KB |
4032 | return 0; |
4033 | ||
4034 | delete_inbound_queue_aio: | |
4035 | pqi_delete_operational_queue(ctrl_info, true, | |
4036 | queue_group->iq_id[AIO_PATH]); | |
4037 | ||
4038 | delete_inbound_queue_raid: | |
4039 | pqi_delete_operational_queue(ctrl_info, true, | |
4040 | queue_group->iq_id[RAID_PATH]); | |
4041 | ||
4042 | return rc; | |
4043 | } | |
4044 | ||
4045 | static int pqi_create_queues(struct pqi_ctrl_info *ctrl_info) | |
4046 | { | |
4047 | int rc; | |
4048 | unsigned int i; | |
4049 | ||
4050 | rc = pqi_create_event_queue(ctrl_info); | |
4051 | if (rc) { | |
4052 | dev_err(&ctrl_info->pci_dev->dev, | |
4053 | "error creating event queue\n"); | |
4054 | return rc; | |
4055 | } | |
4056 | ||
4057 | for (i = 0; i < ctrl_info->num_queue_groups; i++) { | |
061ef06a | 4058 | rc = pqi_create_queue_group(ctrl_info, i); |
6c223761 KB |
4059 | if (rc) { |
4060 | dev_err(&ctrl_info->pci_dev->dev, | |
4061 | "error creating queue group number %u/%u\n", | |
4062 | i, ctrl_info->num_queue_groups); | |
4063 | return rc; | |
4064 | } | |
4065 | } | |
4066 | ||
4067 | return 0; | |
4068 | } | |
4069 | ||
4070 | #define PQI_REPORT_EVENT_CONFIG_BUFFER_LENGTH \ | |
4071 | (offsetof(struct pqi_event_config, descriptors) + \ | |
4072 | (PQI_MAX_EVENT_DESCRIPTORS * sizeof(struct pqi_event_descriptor))) | |
4073 | ||
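| /* | |
| * Read-modify-write of the controller's event configuration: fetch | |
| * the current descriptor table, point each supported event at the | |
| * event queue (or at OQ ID 0 to disable it), then write it back. | |
| */ | |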
6a50d6ad KB |
4074 | static int pqi_configure_events(struct pqi_ctrl_info *ctrl_info, |
4075 | bool enable_events) | |
6c223761 KB |
4076 | { |
4077 | int rc; | |
4078 | unsigned int i; | |
4079 | struct pqi_event_config *event_config; | |
6a50d6ad | 4080 | struct pqi_event_descriptor *event_descriptor; |
6c223761 KB |
4081 | struct pqi_general_management_request request; |
4082 | ||
4083 | event_config = kmalloc(PQI_REPORT_EVENT_CONFIG_BUFFER_LENGTH, | |
4084 | GFP_KERNEL); | |
4085 | if (!event_config) | |
4086 | return -ENOMEM; | |
4087 | ||
4088 | memset(&request, 0, sizeof(request)); | |
4089 | ||
4090 | request.header.iu_type = PQI_REQUEST_IU_REPORT_VENDOR_EVENT_CONFIG; | |
4091 | put_unaligned_le16(offsetof(struct pqi_general_management_request, | |
4092 | data.report_event_configuration.sg_descriptors[1]) - | |
4093 | PQI_REQUEST_HEADER_LENGTH, &request.header.iu_length); | |
4094 | put_unaligned_le32(PQI_REPORT_EVENT_CONFIG_BUFFER_LENGTH, | |
4095 | &request.data.report_event_configuration.buffer_length); | |
4096 | ||
4097 | rc = pqi_map_single(ctrl_info->pci_dev, | |
4098 | request.data.report_event_configuration.sg_descriptors, | |
4099 | event_config, PQI_REPORT_EVENT_CONFIG_BUFFER_LENGTH, | |
4100 | PCI_DMA_FROMDEVICE); | |
4101 | if (rc) | |
4102 | goto out; | |
4103 | ||
4104 | rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header, | |
4105 | 0, NULL, NO_TIMEOUT); | |
4106 | ||
4107 | pqi_pci_unmap(ctrl_info->pci_dev, | |
4108 | request.data.report_event_configuration.sg_descriptors, 1, | |
4109 | PCI_DMA_FROMDEVICE); | |
4110 | ||
4111 | if (rc) | |
4112 | goto out; | |
4113 | ||
6a50d6ad KB |
4114 | for (i = 0; i < event_config->num_event_descriptors; i++) { |
4115 | event_descriptor = &event_config->descriptors[i]; | |
4116 | if (enable_events && | |
4117 | pqi_is_supported_event(event_descriptor->event_type)) | |
4118 | put_unaligned_le16(ctrl_info->event_queue.oq_id, | |
4119 | &event_descriptor->oq_id); | |
4120 | else | |
4121 | put_unaligned_le16(0, &event_descriptor->oq_id); | |
4122 | } | |
6c223761 KB |
4123 | |
4124 | memset(&request, 0, sizeof(request)); | |
4125 | ||
4126 | request.header.iu_type = PQI_REQUEST_IU_SET_VENDOR_EVENT_CONFIG; | |
4127 | put_unaligned_le16(offsetof(struct pqi_general_management_request, | |
4128 | data.report_event_configuration.sg_descriptors[1]) - | |
4129 | PQI_REQUEST_HEADER_LENGTH, &request.header.iu_length); | |
4130 | put_unaligned_le32(PQI_REPORT_EVENT_CONFIG_BUFFER_LENGTH, | |
4131 | &request.data.report_event_configuration.buffer_length); | |
4132 | ||
4133 | rc = pqi_map_single(ctrl_info->pci_dev, | |
4134 | request.data.report_event_configuration.sg_descriptors, | |
4135 | event_config, PQI_REPORT_EVENT_CONFIG_BUFFER_LENGTH, | |
4136 | PCI_DMA_TODEVICE); | |
4137 | if (rc) | |
4138 | goto out; | |
4139 | ||
4140 | rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header, 0, | |
4141 | NULL, NO_TIMEOUT); | |
4142 | ||
4143 | pqi_pci_unmap(ctrl_info->pci_dev, | |
4144 | request.data.report_event_configuration.sg_descriptors, 1, | |
4145 | PCI_DMA_TODEVICE); | |
4146 | ||
4147 | out: | |
4148 | kfree(event_config); | |
4149 | ||
4150 | return rc; | |
4151 | } | |
4152 | ||
6a50d6ad KB |
4153 | static inline int pqi_enable_events(struct pqi_ctrl_info *ctrl_info) |
4154 | { | |
4155 | return pqi_configure_events(ctrl_info, true); | |
4156 | } | |
4157 | ||
4158 | static inline int pqi_disable_events(struct pqi_ctrl_info *ctrl_info) | |
4159 | { | |
4160 | return pqi_configure_events(ctrl_info, false); | |
4161 | } | |
4162 | ||
6c223761 KB |
4163 | static void pqi_free_all_io_requests(struct pqi_ctrl_info *ctrl_info) |
4164 | { | |
4165 | unsigned int i; | |
4166 | struct device *dev; | |
4167 | size_t sg_chain_buffer_length; | |
4168 | struct pqi_io_request *io_request; | |
4169 | ||
4170 | if (!ctrl_info->io_request_pool) | |
4171 | return; | |
4172 | ||
4173 | dev = &ctrl_info->pci_dev->dev; | |
4174 | sg_chain_buffer_length = ctrl_info->sg_chain_buffer_length; | |
4175 | io_request = ctrl_info->io_request_pool; | |
4176 | ||
4177 | for (i = 0; i < ctrl_info->max_io_slots; i++) { | |
4178 | kfree(io_request->iu); | |
4179 | if (!io_request->sg_chain_buffer) | |
4180 | break; | |
4181 | dma_free_coherent(dev, sg_chain_buffer_length, | |
4182 | io_request->sg_chain_buffer, | |
4183 | io_request->sg_chain_buffer_dma_handle); | |
4184 | io_request++; | |
4185 | } | |
4186 | ||
4187 | kfree(ctrl_info->io_request_pool); | |
4188 | ctrl_info->io_request_pool = NULL; | |
4189 | } | |
4190 | ||
4191 | static inline int pqi_alloc_error_buffer(struct pqi_ctrl_info *ctrl_info) | |
4192 | { | |
4193 | ctrl_info->error_buffer = dma_zalloc_coherent(&ctrl_info->pci_dev->dev, | |
4194 | ctrl_info->error_buffer_length, | |
4195 | &ctrl_info->error_buffer_dma_handle, GFP_KERNEL); | |
4196 | ||
4197 | if (!ctrl_info->error_buffer) | |
4198 | return -ENOMEM; | |
4199 | ||
4200 | return 0; | |
4201 | } | |
4202 | ||
4203 | static int pqi_alloc_io_resources(struct pqi_ctrl_info *ctrl_info) | |
4204 | { | |
4205 | unsigned int i; | |
4206 | void *sg_chain_buffer; | |
4207 | size_t sg_chain_buffer_length; | |
4208 | dma_addr_t sg_chain_buffer_dma_handle; | |
4209 | struct device *dev; | |
4210 | struct pqi_io_request *io_request; | |
4211 | ||
4212 | ctrl_info->io_request_pool = kzalloc(ctrl_info->max_io_slots * | |
4213 | sizeof(ctrl_info->io_request_pool[0]), GFP_KERNEL); | |
4214 | ||
4215 | if (!ctrl_info->io_request_pool) { | |
4216 | dev_err(&ctrl_info->pci_dev->dev, | |
4217 | "failed to allocate I/O request pool\n"); | |
4218 | goto error; | |
4219 | } | |
4220 | ||
4221 | dev = &ctrl_info->pci_dev->dev; | |
4222 | sg_chain_buffer_length = ctrl_info->sg_chain_buffer_length; | |
4223 | io_request = ctrl_info->io_request_pool; | |
4224 | ||
4225 | for (i = 0; i < ctrl_info->max_io_slots; i++) { | |
4226 | io_request->iu = | |
4227 | kmalloc(ctrl_info->max_inbound_iu_length, GFP_KERNEL); | |
4228 | ||
4229 | if (!io_request->iu) { | |
4230 | dev_err(&ctrl_info->pci_dev->dev, | |
4231 | "failed to allocate IU buffers\n"); | |
4232 | goto error; | |
4233 | } | |
4234 | ||
4235 | sg_chain_buffer = dma_alloc_coherent(dev, | |
4236 | sg_chain_buffer_length, &sg_chain_buffer_dma_handle, | |
4237 | GFP_KERNEL); | |
4238 | ||
4239 | if (!sg_chain_buffer) { | |
4240 | dev_err(&ctrl_info->pci_dev->dev, | |
4241 | "failed to allocate PQI scatter-gather chain buffers\n"); | |
4242 | goto error; | |
4243 | } | |
4244 | ||
4245 | io_request->index = i; | |
4246 | io_request->sg_chain_buffer = sg_chain_buffer; | |
4247 | io_request->sg_chain_buffer_dma_handle = | |
4248 | sg_chain_buffer_dma_handle; | |
4249 | io_request++; | |
4250 | } | |
4251 | ||
4252 | return 0; | |
4253 | ||
4254 | error: | |
4255 | pqi_free_all_io_requests(ctrl_info); | |
4256 | ||
4257 | return -ENOMEM; | |
4258 | } | |
4259 | ||
4260 | /* | |
4261 | * Calculate required resources that are sized based on max. outstanding | |
4262 | * requests and max. transfer size. | |
4263 | */ | |
4264 | ||
4265 | static void pqi_calculate_io_resources(struct pqi_ctrl_info *ctrl_info) | |
4266 | { | |
4267 | u32 max_transfer_size; | |
4268 | u32 max_sg_entries; | |
4269 | ||
4270 | ctrl_info->scsi_ml_can_queue = | |
4271 | ctrl_info->max_outstanding_requests - PQI_RESERVED_IO_SLOTS; | |
4272 | ctrl_info->max_io_slots = ctrl_info->max_outstanding_requests; | |
4273 | ||
4274 | ctrl_info->error_buffer_length = | |
4275 | ctrl_info->max_io_slots * PQI_ERROR_BUFFER_ELEMENT_LENGTH; | |
4276 | ||
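| /* | |
| * Use a smaller max. transfer size under kdump (reset_devices), | |
| * presumably to limit memory consumption in the crash kernel. | |
| */ | |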
d727a776 KB |
4277 | if (reset_devices) |
4278 | max_transfer_size = min(ctrl_info->max_transfer_size, | |
4279 | PQI_MAX_TRANSFER_SIZE_KDUMP); | |
4280 | else | |
4281 | max_transfer_size = min(ctrl_info->max_transfer_size, | |
4282 | PQI_MAX_TRANSFER_SIZE); | |
6c223761 KB |
4283 | |
4284 | max_sg_entries = max_transfer_size / PAGE_SIZE; | |
4285 | ||
4286 | /* +1 to cover when the buffer is not page-aligned. */ | |
4287 | max_sg_entries++; | |
4288 | ||
4289 | max_sg_entries = min(ctrl_info->max_sg_entries, max_sg_entries); | |
4290 | ||
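| /* Recompute the transfer size the clamped SG count can cover; the -1 undoes the +1 added above for misalignment. */ | |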
4291 | max_transfer_size = (max_sg_entries - 1) * PAGE_SIZE; | |
4292 | ||
4293 | ctrl_info->sg_chain_buffer_length = | |
e1d213bd KB |
4294 | (max_sg_entries * sizeof(struct pqi_sg_descriptor)) + |
4295 | PQI_EXTRA_SGL_MEMORY; | |
6c223761 KB |
4296 | ctrl_info->sg_tablesize = max_sg_entries; |
4297 | ctrl_info->max_sectors = max_transfer_size / 512; | |
4298 | } | |
4299 | ||
4300 | static void pqi_calculate_queue_resources(struct pqi_ctrl_info *ctrl_info) | |
4301 | { | |
6c223761 KB |
4302 | int num_queue_groups; |
4303 | u16 num_elements_per_iq; | |
4304 | u16 num_elements_per_oq; | |
4305 | ||
d727a776 KB |
4306 | if (reset_devices) { |
4307 | num_queue_groups = 1; | |
4308 | } else { | |
4309 | int num_cpus; | |
4310 | int max_queue_groups; | |
4311 | ||
4312 | max_queue_groups = min(ctrl_info->max_inbound_queues / 2, | |
4313 | ctrl_info->max_outbound_queues - 1); | |
4314 | max_queue_groups = min(max_queue_groups, PQI_MAX_QUEUE_GROUPS); | |
6c223761 | 4315 | |
d727a776 KB |
4316 | num_cpus = num_online_cpus(); |
4317 | num_queue_groups = min(num_cpus, ctrl_info->max_msix_vectors); | |
4318 | num_queue_groups = min(num_queue_groups, max_queue_groups); | |
4319 | } | |
6c223761 KB |
4320 | |
4321 | ctrl_info->num_queue_groups = num_queue_groups; | |
061ef06a | 4322 | ctrl_info->max_hw_queue_index = num_queue_groups - 1; |
6c223761 | 4323 | |
77668f41 KB |
4324 | /* |
4325 | * Make sure that the max. inbound IU length is an even multiple | |
4326 | * of our inbound element length. | |
4327 | */ | |
4328 | ctrl_info->max_inbound_iu_length = | |
4329 | (ctrl_info->max_inbound_iu_length_per_firmware / | |
4330 | PQI_OPERATIONAL_IQ_ELEMENT_LENGTH) * | |
4331 | PQI_OPERATIONAL_IQ_ELEMENT_LENGTH; | |
6c223761 KB |
4332 | |
4333 | num_elements_per_iq = | |
4334 | (ctrl_info->max_inbound_iu_length / | |
4335 | PQI_OPERATIONAL_IQ_ELEMENT_LENGTH); | |
4336 | ||
4337 | /* Add one because one element in each queue is unusable. */ | |
4338 | num_elements_per_iq++; | |
4339 | ||
4340 | num_elements_per_iq = min(num_elements_per_iq, | |
4341 | ctrl_info->max_elements_per_iq); | |
4342 | ||
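| /* | |
| * Each queue group has two inbound queues (RAID and AIO) feeding one | |
| * outbound queue, so size the OQ to absorb a completion for every | |
| * usable element of both IQs (plus its own one unusable element). | |
| */ | |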
4343 | num_elements_per_oq = ((num_elements_per_iq - 1) * 2) + 1; | |
4344 | num_elements_per_oq = min(num_elements_per_oq, | |
4345 | ctrl_info->max_elements_per_oq); | |
4346 | ||
4347 | ctrl_info->num_elements_per_iq = num_elements_per_iq; | |
4348 | ctrl_info->num_elements_per_oq = num_elements_per_oq; | |
4349 | ||
4350 | ctrl_info->max_sg_per_iu = | |
4351 | ((ctrl_info->max_inbound_iu_length - | |
4352 | PQI_OPERATIONAL_IQ_ELEMENT_LENGTH) / | |
4353 | sizeof(struct pqi_sg_descriptor)) + | |
4354 | PQI_MAX_EMBEDDED_SG_DESCRIPTORS; | |
4355 | } | |
4356 | ||
4357 | static inline void pqi_set_sg_descriptor( | |
4358 | struct pqi_sg_descriptor *sg_descriptor, struct scatterlist *sg) | |
4359 | { | |
4360 | u64 address = (u64)sg_dma_address(sg); | |
4361 | unsigned int length = sg_dma_len(sg); | |
4362 | ||
4363 | put_unaligned_le64(address, &sg_descriptor->address); | |
4364 | put_unaligned_le32(length, &sg_descriptor->length); | |
4365 | put_unaligned_le32(0, &sg_descriptor->flags); | |
4366 | } | |
4367 | ||
4368 | static int pqi_build_raid_sg_list(struct pqi_ctrl_info *ctrl_info, | |
4369 | struct pqi_raid_path_request *request, struct scsi_cmnd *scmd, | |
4370 | struct pqi_io_request *io_request) | |
4371 | { | |
4372 | int i; | |
4373 | u16 iu_length; | |
4374 | int sg_count; | |
4375 | bool chained; | |
4376 | unsigned int num_sg_in_iu; | |
4377 | unsigned int max_sg_per_iu; | |
4378 | struct scatterlist *sg; | |
4379 | struct pqi_sg_descriptor *sg_descriptor; | |
4380 | ||
4381 | sg_count = scsi_dma_map(scmd); | |
4382 | if (sg_count < 0) | |
4383 | return sg_count; | |
4384 | ||
4385 | iu_length = offsetof(struct pqi_raid_path_request, sg_descriptors) - | |
4386 | PQI_REQUEST_HEADER_LENGTH; | |
4387 | ||
4388 | if (sg_count == 0) | |
4389 | goto out; | |
4390 | ||
4391 | sg = scsi_sglist(scmd); | |
4392 | sg_descriptor = request->sg_descriptors; | |
4393 | max_sg_per_iu = ctrl_info->max_sg_per_iu - 1; | |
4394 | chained = false; | |
4395 | num_sg_in_iu = 0; | |
4396 | i = 0; | |
4397 | ||
4398 | while (1) { | |
4399 | pqi_set_sg_descriptor(sg_descriptor, sg); | |
4400 | if (!chained) | |
4401 | num_sg_in_iu++; | |
4402 | i++; | |
4403 | if (i == sg_count) | |
4404 | break; | |
4405 | sg_descriptor++; | |
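| /* | |
| * Out of embedded descriptors: turn the last slot into a | |
| * CISS_SG_CHAIN pointer to the preallocated chain buffer and keep | |
| * filling descriptors there. | |
| */ | |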
4406 | if (i == max_sg_per_iu) { | |
4407 | put_unaligned_le64( | |
4408 | (u64)io_request->sg_chain_buffer_dma_handle, | |
4409 | &sg_descriptor->address); | |
4410 | put_unaligned_le32((sg_count - num_sg_in_iu) | |
4411 | * sizeof(*sg_descriptor), | |
4412 | &sg_descriptor->length); | |
4413 | put_unaligned_le32(CISS_SG_CHAIN, | |
4414 | &sg_descriptor->flags); | |
4415 | chained = true; | |
4416 | num_sg_in_iu++; | |
4417 | sg_descriptor = io_request->sg_chain_buffer; | |
4418 | } | |
4419 | sg = sg_next(sg); | |
4420 | } | |
4421 | ||
4422 | put_unaligned_le32(CISS_SG_LAST, &sg_descriptor->flags); | |
4423 | request->partial = chained; | |
4424 | iu_length += num_sg_in_iu * sizeof(*sg_descriptor); | |
4425 | ||
4426 | out: | |
4427 | put_unaligned_le16(iu_length, &request->header.iu_length); | |
4428 | ||
4429 | return 0; | |
4430 | } | |
4431 | ||
4432 | static int pqi_build_aio_sg_list(struct pqi_ctrl_info *ctrl_info, | |
4433 | struct pqi_aio_path_request *request, struct scsi_cmnd *scmd, | |
4434 | struct pqi_io_request *io_request) | |
4435 | { | |
4436 | int i; | |
4437 | u16 iu_length; | |
4438 | int sg_count; | |
a60eec02 KB |
4439 | bool chained; |
4440 | unsigned int num_sg_in_iu; | |
4441 | unsigned int max_sg_per_iu; | |
6c223761 KB |
4442 | struct scatterlist *sg; |
4443 | struct pqi_sg_descriptor *sg_descriptor; | |
4444 | ||
4445 | sg_count = scsi_dma_map(scmd); | |
4446 | if (sg_count < 0) | |
4447 | return sg_count; | |
a60eec02 KB |
4448 | |
4449 | iu_length = offsetof(struct pqi_aio_path_request, sg_descriptors) - | |
4450 | PQI_REQUEST_HEADER_LENGTH; | |
4451 | num_sg_in_iu = 0; | |
4452 | ||
6c223761 KB |
4453 | if (sg_count == 0) |
4454 | goto out; | |
4455 | ||
a60eec02 KB |
4456 | sg = scsi_sglist(scmd); |
4457 | sg_descriptor = request->sg_descriptors; | |
4458 | max_sg_per_iu = ctrl_info->max_sg_per_iu - 1; | |
4459 | chained = false; | |
4460 | i = 0; | |
4461 | ||
4462 | while (1) { | |
4463 | pqi_set_sg_descriptor(sg_descriptor, sg); | |
4464 | if (!chained) | |
4465 | num_sg_in_iu++; | |
4466 | i++; | |
4467 | if (i == sg_count) | |
4468 | break; | |
4469 | sg_descriptor++; | |
4470 | if (i == max_sg_per_iu) { | |
4471 | put_unaligned_le64( | |
4472 | (u64)io_request->sg_chain_buffer_dma_handle, | |
4473 | &sg_descriptor->address); | |
4474 | put_unaligned_le32((sg_count - num_sg_in_iu) | |
4475 | * sizeof(*sg_descriptor), | |
4476 | &sg_descriptor->length); | |
4477 | put_unaligned_le32(CISS_SG_CHAIN, | |
4478 | &sg_descriptor->flags); | |
4479 | chained = true; | |
4480 | num_sg_in_iu++; | |
4481 | sg_descriptor = io_request->sg_chain_buffer; | |
6c223761 | 4482 | } |
a60eec02 | 4483 | sg = sg_next(sg); |
6c223761 KB |
4484 | } |
4485 | ||
a60eec02 KB |
4486 | put_unaligned_le32(CISS_SG_LAST, &sg_descriptor->flags); |
4487 | request->partial = chained; | |
6c223761 | 4488 | iu_length += num_sg_in_iu * sizeof(*sg_descriptor); |
a60eec02 KB |
4489 | |
4490 | out: | |
6c223761 KB |
4491 | put_unaligned_le16(iu_length, &request->header.iu_length); |
4492 | request->num_sg_descriptors = num_sg_in_iu; | |
4493 | ||
4494 | return 0; | |
4495 | } | |
4496 | ||
4497 | static void pqi_raid_io_complete(struct pqi_io_request *io_request, | |
4498 | void *context) | |
4499 | { | |
4500 | struct scsi_cmnd *scmd; | |
4501 | ||
4502 | scmd = io_request->scmd; | |
4503 | pqi_free_io_request(io_request); | |
4504 | scsi_dma_unmap(scmd); | |
4505 | pqi_scsi_done(scmd); | |
4506 | } | |
4507 | ||
376fb880 KB |
4508 | static int pqi_raid_submit_scsi_cmd_with_io_request( |
4509 | struct pqi_ctrl_info *ctrl_info, struct pqi_io_request *io_request, | |
6c223761 KB |
4510 | struct pqi_scsi_dev *device, struct scsi_cmnd *scmd, |
4511 | struct pqi_queue_group *queue_group) | |
4512 | { | |
4513 | int rc; | |
4514 | size_t cdb_length; | |
6c223761 KB |
4515 | struct pqi_raid_path_request *request; |
4516 | ||
6c223761 KB |
4517 | io_request->io_complete_callback = pqi_raid_io_complete; |
4518 | io_request->scmd = scmd; | |
4519 | ||
6c223761 KB |
4520 | request = io_request->iu; |
4521 | memset(request, 0, | |
4522 | offsetof(struct pqi_raid_path_request, sg_descriptors)); | |
4523 | ||
4524 | request->header.iu_type = PQI_REQUEST_IU_RAID_PATH_IO; | |
4525 | put_unaligned_le32(scsi_bufflen(scmd), &request->buffer_length); | |
4526 | request->task_attribute = SOP_TASK_ATTRIBUTE_SIMPLE; | |
4527 | put_unaligned_le16(io_request->index, &request->request_id); | |
4528 | request->error_index = request->request_id; | |
4529 | memcpy(request->lun_number, device->scsi3addr, | |
4530 | sizeof(request->lun_number)); | |
4531 | ||
4532 | cdb_length = min_t(size_t, scmd->cmd_len, sizeof(request->cdb)); | |
4533 | memcpy(request->cdb, scmd->cmnd, cdb_length); | |
4534 | ||
4535 | switch (cdb_length) { | |
4536 | case 6: | |
4537 | case 10: | |
4538 | case 12: | |
4539 | case 16: | |
4540 | /* No bytes in the Additional CDB bytes field */ | |
4541 | request->additional_cdb_bytes_usage = | |
4542 | SOP_ADDITIONAL_CDB_BYTES_0; | |
4543 | break; | |
4544 | case 20: | |
4545 | /* 4 bytes in the Additional CDB bytes field */ | |
4546 | request->additional_cdb_bytes_usage = | |
4547 | SOP_ADDITIONAL_CDB_BYTES_4; | |
4548 | break; | |
4549 | case 24: | |
4550 | /* 8 bytes in the Additional CDB bytes field */ | |
4551 | request->additional_cdb_bytes_usage = | |
4552 | SOP_ADDITIONAL_CDB_BYTES_8; | |
4553 | break; | |
4554 | case 28: | |
4555 | /* 12 bytes in the Additional CDB bytes field */ | |
4556 | request->additional_cdb_bytes_usage = | |
4557 | SOP_ADDITIONAL_CDB_BYTES_12; | |
4558 | break; | |
4559 | case 32: | |
4560 | default: | |
4561 | /* 16 bytes in the Additional CDB bytes field */ | |
4562 | request->additional_cdb_bytes_usage = | |
4563 | SOP_ADDITIONAL_CDB_BYTES_16; | |
4564 | break; | |
4565 | } | |
4566 | ||
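| /* | |
| * SOP data-direction flags are from the controller's point of view: | |
| * on a host write (DMA_TO_DEVICE) the controller reads host memory, | |
| * hence SOP_READ_FLAG, and vice versa. | |
| */ | |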
4567 | switch (scmd->sc_data_direction) { | |
4568 | case DMA_TO_DEVICE: | |
4569 | request->data_direction = SOP_READ_FLAG; | |
4570 | break; | |
4571 | case DMA_FROM_DEVICE: | |
4572 | request->data_direction = SOP_WRITE_FLAG; | |
4573 | break; | |
4574 | case DMA_NONE: | |
4575 | request->data_direction = SOP_NO_DIRECTION_FLAG; | |
4576 | break; | |
4577 | case DMA_BIDIRECTIONAL: | |
4578 | request->data_direction = SOP_BIDIRECTIONAL; | |
4579 | break; | |
4580 | default: | |
4581 | dev_err(&ctrl_info->pci_dev->dev, | |
4582 | "unknown data direction: %d\n", | |
4583 | scmd->sc_data_direction); | |
6c223761 KB |
4584 | break; |
4585 | } | |
4586 | ||
4587 | rc = pqi_build_raid_sg_list(ctrl_info, request, scmd, io_request); | |
4588 | if (rc) { | |
4589 | pqi_free_io_request(io_request); | |
4590 | return SCSI_MLQUEUE_HOST_BUSY; | |
4591 | } | |
4592 | ||
4593 | pqi_start_io(ctrl_info, queue_group, RAID_PATH, io_request); | |
4594 | ||
4595 | return 0; | |
4596 | } | |
4597 | ||
376fb880 KB |
4598 | static inline int pqi_raid_submit_scsi_cmd(struct pqi_ctrl_info *ctrl_info, |
4599 | struct pqi_scsi_dev *device, struct scsi_cmnd *scmd, | |
4600 | struct pqi_queue_group *queue_group) | |
4601 | { | |
4602 | struct pqi_io_request *io_request; | |
4603 | ||
4604 | io_request = pqi_alloc_io_request(ctrl_info); | |
4605 | ||
4606 | return pqi_raid_submit_scsi_cmd_with_io_request(ctrl_info, io_request, | |
4607 | device, scmd, queue_group); | |
4608 | } | |
4609 | ||
4610 | static inline void pqi_schedule_bypass_retry(struct pqi_ctrl_info *ctrl_info) | |
4611 | { | |
4612 | if (!pqi_ctrl_blocked(ctrl_info)) | |
4613 | schedule_work(&ctrl_info->raid_bypass_retry_work); | |
4614 | } | |
4615 | ||
4616 | static bool pqi_raid_bypass_retry_needed(struct pqi_io_request *io_request) | |
4617 | { | |
4618 | struct scsi_cmnd *scmd; | |
03b288cf | 4619 | struct pqi_scsi_dev *device; |
376fb880 KB |
4620 | struct pqi_ctrl_info *ctrl_info; |
4621 | ||
4622 | if (!io_request->raid_bypass) | |
4623 | return false; | |
4624 | ||
4625 | scmd = io_request->scmd; | |
4626 | if ((scmd->result & 0xff) == SAM_STAT_GOOD) | |
4627 | return false; | |
4628 | if (host_byte(scmd->result) == DID_NO_CONNECT) | |
4629 | return false; | |
4630 | ||
03b288cf KB |
4631 | device = scmd->device->hostdata; |
4632 | if (pqi_device_offline(device)) | |
4633 | return false; | |
4634 | ||
376fb880 KB |
4635 | ctrl_info = shost_to_hba(scmd->device->host); |
4636 | if (pqi_ctrl_offline(ctrl_info)) | |
4637 | return false; | |
4638 | ||
4639 | return true; | |
4640 | } | |
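| /*
|  * In short: only a request originally issued as a RAID bypass is
|  * retried, and only when it neither completed with SAM_STAT_GOOD nor
|  * failed with DID_NO_CONNECT, and both the device and the controller
|  * are still online.
|  */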
4641 | ||
4642 | static inline void pqi_add_to_raid_bypass_retry_list( | |
4643 | struct pqi_ctrl_info *ctrl_info, | |
4644 | struct pqi_io_request *io_request, bool at_head) | |
4645 | { | |
4646 | unsigned long flags; | |
4647 | ||
4648 | spin_lock_irqsave(&ctrl_info->raid_bypass_retry_list_lock, flags); | |
4649 | if (at_head) | |
4650 | list_add(&io_request->request_list_entry, | |
4651 | &ctrl_info->raid_bypass_retry_list); | |
4652 | else | |
4653 | list_add_tail(&io_request->request_list_entry, | |
4654 | &ctrl_info->raid_bypass_retry_list); | |
4655 | spin_unlock_irqrestore(&ctrl_info->raid_bypass_retry_list_lock, flags); | |
4656 | } | |
4657 | ||
4658 | static void pqi_queued_raid_bypass_complete(struct pqi_io_request *io_request, | |
4659 | void *context) | |
4660 | { | |
4661 | struct scsi_cmnd *scmd; | |
4662 | ||
4663 | scmd = io_request->scmd; | |
4664 | pqi_free_io_request(io_request); | |
4665 | pqi_scsi_done(scmd); | |
4666 | } | |
4667 | ||
4668 | static void pqi_queue_raid_bypass_retry(struct pqi_io_request *io_request) | |
4669 | { | |
4670 | struct scsi_cmnd *scmd; | |
4671 | struct pqi_ctrl_info *ctrl_info; | |
4672 | ||
4673 | io_request->io_complete_callback = pqi_queued_raid_bypass_complete; | |
4674 | scmd = io_request->scmd; | |
4675 | scmd->result = 0; | |
4676 | ctrl_info = shost_to_hba(scmd->device->host); | |
4677 | ||
4678 | pqi_add_to_raid_bypass_retry_list(ctrl_info, io_request, false); | |
4679 | pqi_schedule_bypass_retry(ctrl_info); | |
4680 | } | |
4681 | ||
4682 | static int pqi_retry_raid_bypass(struct pqi_io_request *io_request) | |
4683 | { | |
4684 | struct scsi_cmnd *scmd; | |
4685 | struct pqi_scsi_dev *device; | |
4686 | struct pqi_ctrl_info *ctrl_info; | |
4687 | struct pqi_queue_group *queue_group; | |
4688 | ||
4689 | scmd = io_request->scmd; | |
4690 | device = scmd->device->hostdata; | |
4691 | if (pqi_device_in_reset(device)) { | |
4692 | pqi_free_io_request(io_request); | |
4693 | set_host_byte(scmd, DID_RESET); | |
4694 | pqi_scsi_done(scmd); | |
4695 | return 0; | |
4696 | } | |
4697 | ||
4698 | ctrl_info = shost_to_hba(scmd->device->host); | |
4699 | queue_group = io_request->queue_group; | |
4700 | ||
4701 | pqi_reinit_io_request(io_request); | |
4702 | ||
4703 | return pqi_raid_submit_scsi_cmd_with_io_request(ctrl_info, io_request, | |
4704 | device, scmd, queue_group); | |
4705 | } | |
4706 | ||
4707 | static inline struct pqi_io_request *pqi_next_queued_raid_bypass_request( | |
4708 | struct pqi_ctrl_info *ctrl_info) | |
4709 | { | |
4710 | unsigned long flags; | |
4711 | struct pqi_io_request *io_request; | |
4712 | ||
4713 | spin_lock_irqsave(&ctrl_info->raid_bypass_retry_list_lock, flags); | |
4714 | io_request = list_first_entry_or_null( | |
4715 | &ctrl_info->raid_bypass_retry_list, | |
4716 | struct pqi_io_request, request_list_entry); | |
4717 | if (io_request) | |
4718 | list_del(&io_request->request_list_entry); | |
4719 | spin_unlock_irqrestore(&ctrl_info->raid_bypass_retry_list_lock, flags); | |
4720 | ||
4721 | return io_request; | |
4722 | } | |
4723 | ||
4724 | static void pqi_retry_raid_bypass_requests(struct pqi_ctrl_info *ctrl_info) | |
4725 | { | |
4726 | int rc; | |
4727 | struct pqi_io_request *io_request; | |
4728 | ||
4729 | pqi_ctrl_busy(ctrl_info); | |
4730 | ||
4731 | while (1) { | |
4732 | if (pqi_ctrl_blocked(ctrl_info)) | |
4733 | break; | |
4734 | io_request = pqi_next_queued_raid_bypass_request(ctrl_info); | |
4735 | if (!io_request) | |
4736 | break; | |
4737 | rc = pqi_retry_raid_bypass(io_request); | |
4738 | if (rc) { | |
4739 | pqi_add_to_raid_bypass_retry_list(ctrl_info, io_request, | |
4740 | true); | |
4741 | pqi_schedule_bypass_retry(ctrl_info); | |
4742 | break; | |
4743 | } | |
4744 | } | |
4745 | ||
4746 | pqi_ctrl_unbusy(ctrl_info); | |
4747 | } | |
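| /*
|  * The loop above pops queued bypass requests one at a time and
|  * resubmits each down the RAID path.  On a resubmission failure the
|  * request goes back at the head of the list and the worker is
|  * rescheduled, so the original ordering is preserved.
|  */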
4748 | ||
4749 | static void pqi_raid_bypass_retry_worker(struct work_struct *work) | |
4750 | { | |
4751 | struct pqi_ctrl_info *ctrl_info; | |
4752 | ||
4753 | ctrl_info = container_of(work, struct pqi_ctrl_info, | |
4754 | raid_bypass_retry_work); | |
4755 | pqi_retry_raid_bypass_requests(ctrl_info); | |
4756 | } | |
4757 | ||
5f310425 KB |
4758 | static void pqi_clear_all_queued_raid_bypass_retries( |
4759 | struct pqi_ctrl_info *ctrl_info) | |
376fb880 KB |
4760 | { |
4761 | unsigned long flags; | |
376fb880 KB |
4762 | |
4763 | spin_lock_irqsave(&ctrl_info->raid_bypass_retry_list_lock, flags); | |
5f310425 | 4764 | INIT_LIST_HEAD(&ctrl_info->raid_bypass_retry_list); |
376fb880 KB |
4765 | spin_unlock_irqrestore(&ctrl_info->raid_bypass_retry_list_lock, flags); |
4766 | } | |
4767 | ||
6c223761 KB |
4768 | static void pqi_aio_io_complete(struct pqi_io_request *io_request, |
4769 | void *context) | |
4770 | { | |
4771 | struct scsi_cmnd *scmd; | |
4772 | ||
4773 | scmd = io_request->scmd; | |
4774 | scsi_dma_unmap(scmd); | |
4775 | if (io_request->status == -EAGAIN) | |
4776 | set_host_byte(scmd, DID_IMM_RETRY); | |
376fb880 KB |
4777 | else if (pqi_raid_bypass_retry_needed(io_request)) { |
4778 | pqi_queue_raid_bypass_retry(io_request); | |
4779 | return; | |
4780 | } | |
6c223761 KB |
4781 | pqi_free_io_request(io_request); |
4782 | pqi_scsi_done(scmd); | |
4783 | } | |
4784 | ||
4785 | static inline int pqi_aio_submit_scsi_cmd(struct pqi_ctrl_info *ctrl_info, | |
4786 | struct pqi_scsi_dev *device, struct scsi_cmnd *scmd, | |
4787 | struct pqi_queue_group *queue_group) | |
4788 | { | |
4789 | return pqi_aio_submit_io(ctrl_info, scmd, device->aio_handle, | |
376fb880 | 4790 | scmd->cmnd, scmd->cmd_len, queue_group, NULL, false); |
6c223761 KB |
4791 | } |
4792 | ||
4793 | static int pqi_aio_submit_io(struct pqi_ctrl_info *ctrl_info, | |
4794 | struct scsi_cmnd *scmd, u32 aio_handle, u8 *cdb, | |
4795 | unsigned int cdb_length, struct pqi_queue_group *queue_group, | |
376fb880 | 4796 | struct pqi_encryption_info *encryption_info, bool raid_bypass) |
6c223761 KB |
4797 | { |
4798 | int rc; | |
4799 | struct pqi_io_request *io_request; | |
4800 | struct pqi_aio_path_request *request; | |
4801 | ||
4802 | io_request = pqi_alloc_io_request(ctrl_info); | |
4803 | io_request->io_complete_callback = pqi_aio_io_complete; | |
4804 | io_request->scmd = scmd; | |
376fb880 | 4805 | io_request->raid_bypass = raid_bypass; |
6c223761 KB |
4806 | |
4807 | request = io_request->iu; | |
4808 | memset(request, 0, | |
4809 | offsetof(struct pqi_raid_path_request, sg_descriptors)); | |
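| /*
|  * Although the IU built here is an AIO path request, the memset above
|  * clears it up to the sg_descriptors offset of the RAID path request
|  * layout; this presumably relies on both IU formats placing
|  * sg_descriptors at the same offset.
|  */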
4810 | ||
4811 | request->header.iu_type = PQI_REQUEST_IU_AIO_PATH_IO; | |
4812 | put_unaligned_le32(aio_handle, &request->nexus_id); | |
4813 | put_unaligned_le32(scsi_bufflen(scmd), &request->buffer_length); | |
4814 | request->task_attribute = SOP_TASK_ATTRIBUTE_SIMPLE; | |
4815 | put_unaligned_le16(io_request->index, &request->request_id); | |
4816 | request->error_index = request->request_id; | |
4817 | if (cdb_length > sizeof(request->cdb)) | |
4818 | cdb_length = sizeof(request->cdb); | |
4819 | request->cdb_length = cdb_length; | |
4820 | memcpy(request->cdb, cdb, cdb_length); | |
4821 | ||
4822 | switch (scmd->sc_data_direction) { | |
4823 | case DMA_TO_DEVICE: | |
4824 | request->data_direction = SOP_READ_FLAG; | |
4825 | break; | |
4826 | case DMA_FROM_DEVICE: | |
4827 | request->data_direction = SOP_WRITE_FLAG; | |
4828 | break; | |
4829 | case DMA_NONE: | |
4830 | request->data_direction = SOP_NO_DIRECTION_FLAG; | |
4831 | break; | |
4832 | case DMA_BIDIRECTIONAL: | |
4833 | request->data_direction = SOP_BIDIRECTIONAL; | |
4834 | break; | |
4835 | default: | |
4836 | dev_err(&ctrl_info->pci_dev->dev, | |
4837 | "unknown data direction: %d\n", | |
4838 | scmd->sc_data_direction); | |
6c223761 KB |
4839 | break; |
4840 | } | |
4841 | ||
4842 | if (encryption_info) { | |
4843 | request->encryption_enable = true; | |
4844 | put_unaligned_le16(encryption_info->data_encryption_key_index, | |
4845 | &request->data_encryption_key_index); | |
4846 | put_unaligned_le32(encryption_info->encrypt_tweak_lower, | |
4847 | &request->encrypt_tweak_lower); | |
4848 | put_unaligned_le32(encryption_info->encrypt_tweak_upper, | |
4849 | &request->encrypt_tweak_upper); | |
4850 | } | |
4851 | ||
4852 | rc = pqi_build_aio_sg_list(ctrl_info, request, scmd, io_request); | |
4853 | if (rc) { | |
4854 | pqi_free_io_request(io_request); | |
4855 | return SCSI_MLQUEUE_HOST_BUSY; | |
4856 | } | |
4857 | ||
4858 | pqi_start_io(ctrl_info, queue_group, AIO_PATH, io_request); | |
4859 | ||
4860 | return 0; | |
4861 | } | |
4862 | ||
061ef06a KB |
4863 | static inline u16 pqi_get_hw_queue(struct pqi_ctrl_info *ctrl_info, |
4864 | struct scsi_cmnd *scmd) | |
4865 | { | |
4866 | u16 hw_queue; | |
4867 | ||
4868 | hw_queue = blk_mq_unique_tag_to_hwq(blk_mq_unique_tag(scmd->request)); | |
4869 | if (hw_queue > ctrl_info->max_hw_queue_index) | |
4870 | hw_queue = 0; | |
4871 | ||
4872 | return hw_queue; | |
4873 | } | |
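| /*
|  * blk_mq_unique_tag() encodes the hardware queue index in the upper
|  * 16 bits of the tag and blk_mq_unique_tag_to_hwq() extracts it; the
|  * bounds check above falls back to queue group 0 rather than trusting
|  * an out-of-range index.
|  */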
4874 | ||
7561a7e4 KB |
4875 | /* |
4876 | * This function gets called just before we hand the completed SCSI request | |
4877 | * back to the SML (the SCSI midlayer). | |
4878 | */ | |
4879 | ||
4880 | void pqi_prep_for_scsi_done(struct scsi_cmnd *scmd) | |
4881 | { | |
4882 | struct pqi_scsi_dev *device; | |
4883 | ||
4884 | device = scmd->device->hostdata; | |
4885 | atomic_dec(&device->scsi_cmds_outstanding); | |
4886 | } | |
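| /*
|  * This decrement pairs with the atomic_inc() in
|  * pqi_scsi_queue_command() below (failed submissions are decremented
|  * there directly); pqi_device_wait_for_pending_io() polls the same
|  * counter during device resets.
|  */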
4887 | ||
6c223761 | 4888 | static int pqi_scsi_queue_command(struct Scsi_Host *shost, |
7d81d2b8 | 4889 | struct scsi_cmnd *scmd) |
6c223761 KB |
4890 | { |
4891 | int rc; | |
4892 | struct pqi_ctrl_info *ctrl_info; | |
4893 | struct pqi_scsi_dev *device; | |
061ef06a | 4894 | u16 hw_queue; |
6c223761 KB |
4895 | struct pqi_queue_group *queue_group; |
4896 | bool raid_bypassed; | |
4897 | ||
4898 | device = scmd->device->hostdata; | |
6c223761 KB |
4899 | ctrl_info = shost_to_hba(shost); |
4900 | ||
7561a7e4 KB |
4901 | atomic_inc(&device->scsi_cmds_outstanding); |
4902 | ||
6c223761 KB |
4903 | if (pqi_ctrl_offline(ctrl_info)) { |
4904 | set_host_byte(scmd, DID_NO_CONNECT); | |
4905 | pqi_scsi_done(scmd); | |
4906 | return 0; | |
4907 | } | |
4908 | ||
7561a7e4 KB |
4909 | pqi_ctrl_busy(ctrl_info); |
4910 | if (pqi_ctrl_blocked(ctrl_info) || pqi_device_in_reset(device)) { | |
4911 | rc = SCSI_MLQUEUE_HOST_BUSY; | |
4912 | goto out; | |
4913 | } | |
4914 | ||
7d81d2b8 KB |
4915 | /* |
4916 | * This is necessary because the SML doesn't zero out this field during | |
4917 | * error recovery. | |
4918 | */ | |
4919 | scmd->result = 0; | |
4920 | ||
061ef06a KB |
4921 | hw_queue = pqi_get_hw_queue(ctrl_info, scmd); |
4922 | queue_group = &ctrl_info->queue_groups[hw_queue]; | |
6c223761 KB |
4923 | |
4924 | if (pqi_is_logical_device(device)) { | |
4925 | raid_bypassed = false; | |
4926 | if (device->offload_enabled && | |
57292b58 | 4927 | !blk_rq_is_passthrough(scmd->request)) { |
6c223761 KB |
4928 | rc = pqi_raid_bypass_submit_scsi_cmd(ctrl_info, device, |
4929 | scmd, queue_group); | |
376fb880 KB |
4930 | if (rc == 0 || rc == SCSI_MLQUEUE_HOST_BUSY) |
4931 | raid_bypassed = true; | |
6c223761 KB |
4932 | } |
4933 | if (!raid_bypassed) | |
4934 | rc = pqi_raid_submit_scsi_cmd(ctrl_info, device, scmd, | |
4935 | queue_group); | |
4936 | } else { | |
4937 | if (device->aio_enabled) | |
4938 | rc = pqi_aio_submit_scsi_cmd(ctrl_info, device, scmd, | |
4939 | queue_group); | |
4940 | else | |
4941 | rc = pqi_raid_submit_scsi_cmd(ctrl_info, device, scmd, | |
4942 | queue_group); | |
4943 | } | |
4944 | ||
7561a7e4 KB |
4945 | out: |
4946 | pqi_ctrl_unbusy(ctrl_info); | |
4947 | if (rc) | |
4948 | atomic_dec(&device->scsi_cmds_outstanding); | |
4949 | ||
6c223761 KB |
4950 | return rc; |
4951 | } | |
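| /*
|  * Dispatch summary: for logical devices with offload enabled, normal
|  * read/write requests try the RAID bypass path first; a bypass result
|  * of 0 or SCSI_MLQUEUE_HOST_BUSY is treated as handled, anything else
|  * falls back to the RAID path.  Physical devices use the AIO path
|  * whenever aio_enabled is set, otherwise the RAID path.
|  */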
4952 | ||
7561a7e4 KB |
4953 | static int pqi_wait_until_queued_io_drained(struct pqi_ctrl_info *ctrl_info, |
4954 | struct pqi_queue_group *queue_group) | |
4955 | { | |
4956 | unsigned int path; | |
4957 | unsigned long flags; | |
4958 | bool list_is_empty; | |
4959 | ||
4960 | for (path = 0; path < 2; path++) { | |
4961 | while (1) { | |
4962 | spin_lock_irqsave( | |
4963 | &queue_group->submit_lock[path], flags); | |
4964 | list_is_empty = | |
4965 | list_empty(&queue_group->request_list[path]); | |
4966 | spin_unlock_irqrestore( | |
4967 | &queue_group->submit_lock[path], flags); | |
4968 | if (list_is_empty) | |
4969 | break; | |
4970 | pqi_check_ctrl_health(ctrl_info); | |
4971 | if (pqi_ctrl_offline(ctrl_info)) | |
4972 | return -ENXIO; | |
4973 | usleep_range(1000, 2000); | |
4974 | } | |
4975 | } | |
4976 | ||
4977 | return 0; | |
4978 | } | |
4979 | ||
4980 | static int pqi_wait_until_inbound_queues_empty(struct pqi_ctrl_info *ctrl_info) | |
4981 | { | |
4982 | int rc; | |
4983 | unsigned int i; | |
4984 | unsigned int path; | |
4985 | struct pqi_queue_group *queue_group; | |
4986 | pqi_index_t iq_pi; | |
4987 | pqi_index_t iq_ci; | |
4988 | ||
4989 | for (i = 0; i < ctrl_info->num_queue_groups; i++) { | |
4990 | queue_group = &ctrl_info->queue_groups[i]; | |
4991 | ||
4992 | rc = pqi_wait_until_queued_io_drained(ctrl_info, queue_group); | |
4993 | if (rc) | |
4994 | return rc; | |
4995 | ||
4996 | for (path = 0; path < 2; path++) { | |
4997 | iq_pi = queue_group->iq_pi_copy[path]; | |
4998 | ||
4999 | while (1) { | |
5000 | iq_ci = *queue_group->iq_ci[path]; | |
5001 | if (iq_ci == iq_pi) | |
5002 | break; | |
5003 | pqi_check_ctrl_health(ctrl_info); | |
5004 | if (pqi_ctrl_offline(ctrl_info)) | |
5005 | return -ENXIO; | |
5006 | usleep_range(1000, 2000); | |
5007 | } | |
5008 | } | |
5009 | } | |
5010 | ||
5011 | return 0; | |
5012 | } | |
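| /*
|  * iq_pi_copy is the driver's inbound-queue producer index, while
|  * *iq_ci is the consumer index the controller posts back to host
|  * memory; when the two match, the firmware has consumed every element
|  * the driver submitted to that queue.
|  */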
5013 | ||
5014 | static void pqi_fail_io_queued_for_device(struct pqi_ctrl_info *ctrl_info, | |
5015 | struct pqi_scsi_dev *device) | |
5016 | { | |
5017 | unsigned int i; | |
5018 | unsigned int path; | |
5019 | struct pqi_queue_group *queue_group; | |
5020 | unsigned long flags; | |
5021 | struct pqi_io_request *io_request; | |
5022 | struct pqi_io_request *next; | |
5023 | struct scsi_cmnd *scmd; | |
5024 | struct pqi_scsi_dev *scsi_device; | |
5025 | ||
5026 | for (i = 0; i < ctrl_info->num_queue_groups; i++) { | |
5027 | queue_group = &ctrl_info->queue_groups[i]; | |
5028 | ||
5029 | for (path = 0; path < 2; path++) { | |
5030 | spin_lock_irqsave( | |
5031 | &queue_group->submit_lock[path], flags); | |
5032 | ||
5033 | list_for_each_entry_safe(io_request, next, | |
5034 | &queue_group->request_list[path], | |
5035 | request_list_entry) { | |
5036 | scmd = io_request->scmd; | |
5037 | if (!scmd) | |
5038 | continue; | |
5039 | ||
5040 | scsi_device = scmd->device->hostdata; | |
5041 | if (scsi_device != device) | |
5042 | continue; | |
5043 | ||
5044 | list_del(&io_request->request_list_entry); | |
5045 | set_host_byte(scmd, DID_RESET); | |
5046 | pqi_scsi_done(scmd); | |
5047 | } | |
5048 | ||
5049 | spin_unlock_irqrestore( | |
5050 | &queue_group->submit_lock[path], flags); | |
5051 | } | |
5052 | } | |
5053 | } | |
5054 | ||
061ef06a KB |
5055 | static int pqi_device_wait_for_pending_io(struct pqi_ctrl_info *ctrl_info, |
5056 | struct pqi_scsi_dev *device) | |
5057 | { | |
5058 | while (atomic_read(&device->scsi_cmds_outstanding)) { | |
5059 | pqi_check_ctrl_health(ctrl_info); | |
5060 | if (pqi_ctrl_offline(ctrl_info)) | |
5061 | return -ENXIO; | |
5062 | usleep_range(1000, 2000); | |
5063 | } | |
5064 | ||
5065 | return 0; | |
5066 | } | |
5067 | ||
5068 | static int pqi_ctrl_wait_for_pending_io(struct pqi_ctrl_info *ctrl_info) | |
5069 | { | |
5070 | bool io_pending; | |
5071 | unsigned long flags; | |
5072 | struct pqi_scsi_dev *device; | |
5073 | ||
5074 | while (1) { | |
5075 | io_pending = false; | |
5076 | ||
5077 | spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags); | |
5078 | list_for_each_entry(device, &ctrl_info->scsi_device_list, | |
5079 | scsi_device_list_entry) { | |
5080 | if (atomic_read(&device->scsi_cmds_outstanding)) { | |
5081 | io_pending = true; | |
5082 | break; | |
5083 | } | |
5084 | } | |
5085 | spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, | |
5086 | flags); | |
5087 | ||
5088 | if (!io_pending) | |
5089 | break; | |
5090 | ||
5091 | pqi_check_ctrl_health(ctrl_info); | |
5092 | if (pqi_ctrl_offline(ctrl_info)) | |
5093 | return -ENXIO; | |
5094 | ||
5095 | usleep_range(1000, 2000); | |
5096 | } | |
5097 | ||
5098 | return 0; | |
5099 | } | |
5100 | ||
14bb215d KB |
5101 | static void pqi_lun_reset_complete(struct pqi_io_request *io_request, |
5102 | void *context) | |
6c223761 | 5103 | { |
14bb215d | 5104 | struct completion *waiting = context; |
6c223761 | 5105 | |
14bb215d KB |
5106 | complete(waiting); |
5107 | } | |
6c223761 | 5108 | |
14bb215d KB |
5109 | #define PQI_LUN_RESET_TIMEOUT_SECS 10 |
5110 | ||
5111 | static int pqi_wait_for_lun_reset_completion(struct pqi_ctrl_info *ctrl_info, | |
5112 | struct pqi_scsi_dev *device, struct completion *wait) | |
5113 | { | |
5114 | int rc; | |
14bb215d KB |
5115 | |
5116 | while (1) { | |
5117 | if (wait_for_completion_io_timeout(wait, | |
5118 | PQI_LUN_RESET_TIMEOUT_SECS * HZ)) { | |
5119 | rc = 0; | |
5120 | break; | |
6c223761 KB |
5121 | } |
5122 | ||
14bb215d KB |
5123 | pqi_check_ctrl_health(ctrl_info); |
5124 | if (pqi_ctrl_offline(ctrl_info)) { | |
4e8415e3 | 5125 | rc = -ENXIO; |
14bb215d KB |
5126 | break; |
5127 | } | |
6c223761 | 5128 | } |
6c223761 | 5129 | |
14bb215d | 5130 | return rc; |
6c223761 KB |
5131 | } |
5132 | ||
14bb215d | 5133 | static int pqi_lun_reset(struct pqi_ctrl_info *ctrl_info, |
6c223761 KB |
5134 | struct pqi_scsi_dev *device) |
5135 | { | |
5136 | int rc; | |
5137 | struct pqi_io_request *io_request; | |
5138 | DECLARE_COMPLETION_ONSTACK(wait); | |
5139 | struct pqi_task_management_request *request; | |
5140 | ||
6c223761 | 5141 | io_request = pqi_alloc_io_request(ctrl_info); |
14bb215d | 5142 | io_request->io_complete_callback = pqi_lun_reset_complete; |
6c223761 KB |
5143 | io_request->context = &wait; |
5144 | ||
5145 | request = io_request->iu; | |
5146 | memset(request, 0, sizeof(*request)); | |
5147 | ||
5148 | request->header.iu_type = PQI_REQUEST_IU_TASK_MANAGEMENT; | |
5149 | put_unaligned_le16(sizeof(*request) - PQI_REQUEST_HEADER_LENGTH, | |
5150 | &request->header.iu_length); | |
5151 | put_unaligned_le16(io_request->index, &request->request_id); | |
5152 | memcpy(request->lun_number, device->scsi3addr, | |
5153 | sizeof(request->lun_number)); | |
5154 | request->task_management_function = SOP_TASK_MANAGEMENT_LUN_RESET; | |
5155 | ||
5156 | pqi_start_io(ctrl_info, | |
5157 | &ctrl_info->queue_groups[PQI_DEFAULT_QUEUE_GROUP], RAID_PATH, | |
5158 | io_request); | |
5159 | ||
14bb215d KB |
5160 | rc = pqi_wait_for_lun_reset_completion(ctrl_info, device, &wait); |
5161 | if (rc == 0) | |
6c223761 | 5162 | rc = io_request->status; |
6c223761 KB |
5163 | |
5164 | pqi_free_io_request(io_request); | |
6c223761 KB |
5165 | |
5166 | return rc; | |
5167 | } | |
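| /*
|  * The LUN reset is issued as a SOP task management IU on the RAID
|  * path of the default queue group; completion is signalled via
|  * pqi_lun_reset_complete() and waited for, with periodic controller
|  * health checks, in pqi_wait_for_lun_reset_completion() above.
|  */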
5168 | ||
5169 | /* Performs a reset at the LUN level. */ | |
5170 | ||
5171 | static int pqi_device_reset(struct pqi_ctrl_info *ctrl_info, | |
5172 | struct pqi_scsi_dev *device) | |
5173 | { | |
5174 | int rc; | |
5175 | ||
14bb215d | 5176 | rc = pqi_lun_reset(ctrl_info, device); |
061ef06a KB |
5177 | if (rc == 0) |
5178 | rc = pqi_device_wait_for_pending_io(ctrl_info, device); | |
6c223761 | 5179 | |
14bb215d | 5180 | return rc == 0 ? SUCCESS : FAILED; |
6c223761 KB |
5181 | } |
5182 | ||
5183 | static int pqi_eh_device_reset_handler(struct scsi_cmnd *scmd) | |
5184 | { | |
5185 | int rc; | |
7561a7e4 | 5186 | struct Scsi_Host *shost; |
6c223761 KB |
5187 | struct pqi_ctrl_info *ctrl_info; |
5188 | struct pqi_scsi_dev *device; | |
5189 | ||
7561a7e4 KB |
5190 | shost = scmd->device->host; |
5191 | ctrl_info = shost_to_hba(shost); | |
6c223761 KB |
5192 | device = scmd->device->hostdata; |
5193 | ||
5194 | dev_err(&ctrl_info->pci_dev->dev, | |
5195 | "resetting scsi %d:%d:%d:%d\n", | |
7561a7e4 | 5196 | shost->host_no, device->bus, device->target, device->lun); |
6c223761 | 5197 | |
7561a7e4 KB |
5198 | pqi_check_ctrl_health(ctrl_info); |
5199 | if (pqi_ctrl_offline(ctrl_info)) { | |
5200 | rc = FAILED; | |
5201 | goto out; | |
5202 | } | |
6c223761 | 5203 | |
7561a7e4 KB |
5204 | mutex_lock(&ctrl_info->lun_reset_mutex); |
5205 | ||
5206 | pqi_ctrl_block_requests(ctrl_info); | |
5207 | pqi_ctrl_wait_until_quiesced(ctrl_info); | |
5208 | pqi_fail_io_queued_for_device(ctrl_info, device); | |
5209 | rc = pqi_wait_until_inbound_queues_empty(ctrl_info); | |
5210 | pqi_device_reset_start(device); | |
5211 | pqi_ctrl_unblock_requests(ctrl_info); | |
5212 | ||
5213 | if (rc) | |
5214 | rc = FAILED; | |
5215 | else | |
5216 | rc = pqi_device_reset(ctrl_info, device); | |
5217 | ||
5218 | pqi_device_reset_done(device); | |
5219 | ||
5220 | mutex_unlock(&ctrl_info->lun_reset_mutex); | |
5221 | ||
5222 | out: | |
6c223761 KB |
5223 | dev_err(&ctrl_info->pci_dev->dev, |
5224 | "reset of scsi %d:%d:%d:%d: %s\n", | |
7561a7e4 | 5225 | shost->host_no, device->bus, device->target, device->lun, |
6c223761 KB |
5226 | rc == SUCCESS ? "SUCCESS" : "FAILED"); |
5227 | ||
5228 | return rc; | |
5229 | } | |
5230 | ||
5231 | static int pqi_slave_alloc(struct scsi_device *sdev) | |
5232 | { | |
5233 | struct pqi_scsi_dev *device; | |
5234 | unsigned long flags; | |
5235 | struct pqi_ctrl_info *ctrl_info; | |
5236 | struct scsi_target *starget; | |
5237 | struct sas_rphy *rphy; | |
5238 | ||
5239 | ctrl_info = shost_to_hba(sdev->host); | |
5240 | ||
5241 | spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags); | |
5242 | ||
5243 | if (sdev_channel(sdev) == PQI_PHYSICAL_DEVICE_BUS) { | |
5244 | starget = scsi_target(sdev); | |
5245 | rphy = target_to_rphy(starget); | |
5246 | device = pqi_find_device_by_sas_rphy(ctrl_info, rphy); | |
5247 | if (device) { | |
5248 | device->target = sdev_id(sdev); | |
5249 | device->lun = sdev->lun; | |
5250 | device->target_lun_valid = true; | |
5251 | } | |
5252 | } else { | |
5253 | device = pqi_find_scsi_dev(ctrl_info, sdev_channel(sdev), | |
5254 | sdev_id(sdev), sdev->lun); | |
5255 | } | |
5256 | ||
94086f5b | 5257 | if (device) { |
6c223761 KB |
5258 | sdev->hostdata = device; |
5259 | device->sdev = sdev; | |
5260 | if (device->queue_depth) { | |
5261 | device->advertised_queue_depth = device->queue_depth; | |
5262 | scsi_change_queue_depth(sdev, | |
5263 | device->advertised_queue_depth); | |
5264 | } | |
5265 | } | |
5266 | ||
5267 | spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags); | |
5268 | ||
5269 | return 0; | |
5270 | } | |
5271 | ||
52198226 CH |
5272 | static int pqi_map_queues(struct Scsi_Host *shost) |
5273 | { | |
5274 | struct pqi_ctrl_info *ctrl_info = shost_to_hba(shost); | |
5275 | ||
5276 | return blk_mq_pci_map_queues(&shost->tag_set, ctrl_info->pci_dev); | |
5277 | } | |
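| /*
|  * blk_mq_pci_map_queues() spreads the hardware contexts across CPUs
|  * according to the PCI device's MSI-X vector affinity, so completions
|  * are delivered on (or near) the submitting CPU.
|  */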
5278 | ||
6c223761 KB |
5279 | static int pqi_getpciinfo_ioctl(struct pqi_ctrl_info *ctrl_info, |
5280 | void __user *arg) | |
5281 | { | |
5282 | struct pci_dev *pci_dev; | |
5283 | u32 subsystem_vendor; | |
5284 | u32 subsystem_device; | |
5285 | cciss_pci_info_struct pciinfo; | |
5286 | ||
5287 | if (!arg) | |
5288 | return -EINVAL; | |
5289 | ||
5290 | pci_dev = ctrl_info->pci_dev; | |
5291 | ||
5292 | pciinfo.domain = pci_domain_nr(pci_dev->bus); | |
5293 | pciinfo.bus = pci_dev->bus->number; | |
5294 | pciinfo.dev_fn = pci_dev->devfn; | |
5295 | subsystem_vendor = pci_dev->subsystem_vendor; | |
5296 | subsystem_device = pci_dev->subsystem_device; | |
5297 | pciinfo.board_id = ((subsystem_device << 16) & 0xffff0000) | | |
5298 | subsystem_vendor; | |
5299 | ||
5300 | if (copy_to_user(arg, &pciinfo, sizeof(pciinfo))) | |
5301 | return -EFAULT; | |
5302 | ||
5303 | return 0; | |
5304 | } | |
5305 | ||
5306 | static int pqi_getdrivver_ioctl(void __user *arg) | |
5307 | { | |
5308 | u32 version; | |
5309 | ||
5310 | if (!arg) | |
5311 | return -EINVAL; | |
5312 | ||
5313 | version = (DRIVER_MAJOR << 28) | (DRIVER_MINOR << 24) | | |
5314 | (DRIVER_RELEASE << 16) | DRIVER_REVISION; | |
5315 | ||
5316 | if (copy_to_user(arg, &version, sizeof(version))) | |
5317 | return -EFAULT; | |
5318 | ||
5319 | return 0; | |
5320 | } | |
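| /*
|  * The version word packs the major number into bits 31:28, minor into
|  * bits 27:24, release into bits 23:16 and revision into bits 15:0.
|  * For example, a hypothetical version 1.2.3-4 would encode as
|  * (1 << 28) | (2 << 24) | (3 << 16) | 4 = 0x12030004.
|  */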
5321 | ||
5322 | struct ciss_error_info { | |
5323 | u8 scsi_status; | |
5324 | int command_status; | |
5325 | size_t sense_data_length; | |
5326 | }; | |
5327 | ||
5328 | static void pqi_error_info_to_ciss(struct pqi_raid_error_info *pqi_error_info, | |
5329 | struct ciss_error_info *ciss_error_info) | |
5330 | { | |
5331 | int ciss_cmd_status; | |
5332 | size_t sense_data_length; | |
5333 | ||
5334 | switch (pqi_error_info->data_out_result) { | |
5335 | case PQI_DATA_IN_OUT_GOOD: | |
5336 | ciss_cmd_status = CISS_CMD_STATUS_SUCCESS; | |
5337 | break; | |
5338 | case PQI_DATA_IN_OUT_UNDERFLOW: | |
5339 | ciss_cmd_status = CISS_CMD_STATUS_DATA_UNDERRUN; | |
5340 | break; | |
5341 | case PQI_DATA_IN_OUT_BUFFER_OVERFLOW: | |
5342 | ciss_cmd_status = CISS_CMD_STATUS_DATA_OVERRUN; | |
5343 | break; | |
5344 | case PQI_DATA_IN_OUT_PROTOCOL_ERROR: | |
5345 | case PQI_DATA_IN_OUT_BUFFER_ERROR: | |
5346 | case PQI_DATA_IN_OUT_BUFFER_OVERFLOW_DESCRIPTOR_AREA: | |
5347 | case PQI_DATA_IN_OUT_BUFFER_OVERFLOW_BRIDGE: | |
5348 | case PQI_DATA_IN_OUT_ERROR: | |
5349 | ciss_cmd_status = CISS_CMD_STATUS_PROTOCOL_ERROR; | |
5350 | break; | |
5351 | case PQI_DATA_IN_OUT_HARDWARE_ERROR: | |
5352 | case PQI_DATA_IN_OUT_PCIE_FABRIC_ERROR: | |
5353 | case PQI_DATA_IN_OUT_PCIE_COMPLETION_TIMEOUT: | |
5354 | case PQI_DATA_IN_OUT_PCIE_COMPLETER_ABORT_RECEIVED: | |
5355 | case PQI_DATA_IN_OUT_PCIE_UNSUPPORTED_REQUEST_RECEIVED: | |
5356 | case PQI_DATA_IN_OUT_PCIE_ECRC_CHECK_FAILED: | |
5357 | case PQI_DATA_IN_OUT_PCIE_UNSUPPORTED_REQUEST: | |
5358 | case PQI_DATA_IN_OUT_PCIE_ACS_VIOLATION: | |
5359 | case PQI_DATA_IN_OUT_PCIE_TLP_PREFIX_BLOCKED: | |
5360 | case PQI_DATA_IN_OUT_PCIE_POISONED_MEMORY_READ: | |
5361 | ciss_cmd_status = CISS_CMD_STATUS_HARDWARE_ERROR; | |
5362 | break; | |
5363 | case PQI_DATA_IN_OUT_UNSOLICITED_ABORT: | |
5364 | ciss_cmd_status = CISS_CMD_STATUS_UNSOLICITED_ABORT; | |
5365 | break; | |
5366 | case PQI_DATA_IN_OUT_ABORTED: | |
5367 | ciss_cmd_status = CISS_CMD_STATUS_ABORTED; | |
5368 | break; | |
5369 | case PQI_DATA_IN_OUT_TIMEOUT: | |
5370 | ciss_cmd_status = CISS_CMD_STATUS_TIMEOUT; | |
5371 | break; | |
5372 | default: | |
5373 | ciss_cmd_status = CISS_CMD_STATUS_TARGET_STATUS; | |
5374 | break; | |
5375 | } | |
5376 | ||
5377 | sense_data_length = | |
5378 | get_unaligned_le16(&pqi_error_info->sense_data_length); | |
5379 | if (sense_data_length == 0) | |
5380 | sense_data_length = | |
5381 | get_unaligned_le16(&pqi_error_info->response_data_length); | |
5382 | if (sense_data_length) | |
5383 | if (sense_data_length > sizeof(pqi_error_info->data)) | |
5384 | sense_data_length = sizeof(pqi_error_info->data); | |
5385 | ||
5386 | ciss_error_info->scsi_status = pqi_error_info->status; | |
5387 | ciss_error_info->command_status = ciss_cmd_status; | |
5388 | ciss_error_info->sense_data_length = sense_data_length; | |
5389 | } | |
5390 | ||
5391 | static int pqi_passthru_ioctl(struct pqi_ctrl_info *ctrl_info, void __user *arg) | |
5392 | { | |
5393 | int rc; | |
5394 | char *kernel_buffer = NULL; | |
5395 | u16 iu_length; | |
5396 | size_t sense_data_length; | |
5397 | IOCTL_Command_struct iocommand; | |
5398 | struct pqi_raid_path_request request; | |
5399 | struct pqi_raid_error_info pqi_error_info; | |
5400 | struct ciss_error_info ciss_error_info; | |
5401 | ||
5402 | if (pqi_ctrl_offline(ctrl_info)) | |
5403 | return -ENXIO; | |
5404 | if (!arg) | |
5405 | return -EINVAL; | |
5406 | if (!capable(CAP_SYS_RAWIO)) | |
5407 | return -EPERM; | |
5408 | if (copy_from_user(&iocommand, arg, sizeof(iocommand))) | |
5409 | return -EFAULT; | |
5410 | if (iocommand.buf_size < 1 && | |
5411 | iocommand.Request.Type.Direction != XFER_NONE) | |
5412 | return -EINVAL; | |
5413 | if (iocommand.Request.CDBLen > sizeof(request.cdb)) | |
5414 | return -EINVAL; | |
5415 | if (iocommand.Request.Type.Type != TYPE_CMD) | |
5416 | return -EINVAL; | |
5417 | ||
5418 | switch (iocommand.Request.Type.Direction) { | |
5419 | case XFER_NONE: | |
5420 | case XFER_WRITE: | |
5421 | case XFER_READ: | |
5422 | break; | |
5423 | default: | |
5424 | return -EINVAL; | |
5425 | } | |
5426 | ||
5427 | if (iocommand.buf_size > 0) { | |
5428 | kernel_buffer = kmalloc(iocommand.buf_size, GFP_KERNEL); | |
5429 | if (!kernel_buffer) | |
5430 | return -ENOMEM; | |
5431 | if (iocommand.Request.Type.Direction & XFER_WRITE) { | |
5432 | if (copy_from_user(kernel_buffer, iocommand.buf, | |
5433 | iocommand.buf_size)) { | |
5434 | rc = -EFAULT; | |
5435 | goto out; | |
5436 | } | |
5437 | } else { | |
5438 | memset(kernel_buffer, 0, iocommand.buf_size); | |
5439 | } | |
5440 | } | |
5441 | ||
5442 | memset(&request, 0, sizeof(request)); | |
5443 | ||
5444 | request.header.iu_type = PQI_REQUEST_IU_RAID_PATH_IO; | |
5445 | iu_length = offsetof(struct pqi_raid_path_request, sg_descriptors) - | |
5446 | PQI_REQUEST_HEADER_LENGTH; | |
5447 | memcpy(request.lun_number, iocommand.LUN_info.LunAddrBytes, | |
5448 | sizeof(request.lun_number)); | |
5449 | memcpy(request.cdb, iocommand.Request.CDB, iocommand.Request.CDBLen); | |
5450 | request.additional_cdb_bytes_usage = SOP_ADDITIONAL_CDB_BYTES_0; | |
5451 | ||
5452 | switch (iocommand.Request.Type.Direction) { | |
5453 | case XFER_NONE: | |
5454 | request.data_direction = SOP_NO_DIRECTION_FLAG; | |
5455 | break; | |
5456 | case XFER_WRITE: | |
5457 | request.data_direction = SOP_WRITE_FLAG; | |
5458 | break; | |
5459 | case XFER_READ: | |
5460 | request.data_direction = SOP_READ_FLAG; | |
5461 | break; | |
5462 | } | |
5463 | ||
5464 | request.task_attribute = SOP_TASK_ATTRIBUTE_SIMPLE; | |
5465 | ||
5466 | if (iocommand.buf_size > 0) { | |
5467 | put_unaligned_le32(iocommand.buf_size, &request.buffer_length); | |
5468 | ||
5469 | rc = pqi_map_single(ctrl_info->pci_dev, | |
5470 | &request.sg_descriptors[0], kernel_buffer, | |
5471 | iocommand.buf_size, PCI_DMA_BIDIRECTIONAL); | |
5472 | if (rc) | |
5473 | goto out; | |
5474 | ||
5475 | iu_length += sizeof(request.sg_descriptors[0]); | |
5476 | } | |
5477 | ||
5478 | put_unaligned_le16(iu_length, &request.header.iu_length); | |
5479 | ||
5480 | rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header, | |
5481 | PQI_SYNC_FLAGS_INTERRUPTABLE, &pqi_error_info, NO_TIMEOUT); | |
5482 | ||
5483 | if (iocommand.buf_size > 0) | |
5484 | pqi_pci_unmap(ctrl_info->pci_dev, request.sg_descriptors, 1, | |
5485 | PCI_DMA_BIDIRECTIONAL); | |
5486 | ||
5487 | memset(&iocommand.error_info, 0, sizeof(iocommand.error_info)); | |
5488 | ||
5489 | if (rc == 0) { | |
5490 | pqi_error_info_to_ciss(&pqi_error_info, &ciss_error_info); | |
5491 | iocommand.error_info.ScsiStatus = ciss_error_info.scsi_status; | |
5492 | iocommand.error_info.CommandStatus = | |
5493 | ciss_error_info.command_status; | |
5494 | sense_data_length = ciss_error_info.sense_data_length; | |
5495 | if (sense_data_length) { | |
5496 | if (sense_data_length > | |
5497 | sizeof(iocommand.error_info.SenseInfo)) | |
5498 | sense_data_length = | |
5499 | sizeof(iocommand.error_info.SenseInfo); | |
5500 | memcpy(iocommand.error_info.SenseInfo, | |
5501 | pqi_error_info.data, sense_data_length); | |
5502 | iocommand.error_info.SenseLen = sense_data_length; | |
5503 | } | |
5504 | } | |
5505 | ||
5506 | if (copy_to_user(arg, &iocommand, sizeof(iocommand))) { | |
5507 | rc = -EFAULT; | |
5508 | goto out; | |
5509 | } | |
5510 | ||
5511 | if (rc == 0 && iocommand.buf_size > 0 && | |
5512 | (iocommand.Request.Type.Direction & XFER_READ)) { | |
5513 | if (copy_to_user(iocommand.buf, kernel_buffer, | |
5514 | iocommand.buf_size)) { | |
5515 | rc = -EFAULT; | |
5516 | } | |
5517 | } | |
5518 | ||
5519 | out: | |
5520 | kfree(kernel_buffer); | |
5521 | ||
5522 | return rc; | |
5523 | } | |
5524 | ||
5525 | static int pqi_ioctl(struct scsi_device *sdev, int cmd, void __user *arg) | |
5526 | { | |
5527 | int rc; | |
5528 | struct pqi_ctrl_info *ctrl_info; | |
5529 | ||
5530 | ctrl_info = shost_to_hba(sdev->host); | |
5531 | ||
5532 | switch (cmd) { | |
5533 | case CCISS_DEREGDISK: | |
5534 | case CCISS_REGNEWDISK: | |
5535 | case CCISS_REGNEWD: | |
5536 | rc = pqi_scan_scsi_devices(ctrl_info); | |
5537 | break; | |
5538 | case CCISS_GETPCIINFO: | |
5539 | rc = pqi_getpciinfo_ioctl(ctrl_info, arg); | |
5540 | break; | |
5541 | case CCISS_GETDRIVVER: | |
5542 | rc = pqi_getdrivver_ioctl(arg); | |
5543 | break; | |
5544 | case CCISS_PASSTHRU: | |
5545 | rc = pqi_passthru_ioctl(ctrl_info, arg); | |
5546 | break; | |
5547 | default: | |
5548 | rc = -EINVAL; | |
5549 | break; | |
5550 | } | |
5551 | ||
5552 | return rc; | |
5553 | } | |
5554 | ||
5555 | static ssize_t pqi_version_show(struct device *dev, | |
5556 | struct device_attribute *attr, char *buffer) | |
5557 | { | |
5558 | ssize_t count = 0; | |
5559 | struct Scsi_Host *shost; | |
5560 | struct pqi_ctrl_info *ctrl_info; | |
5561 | ||
5562 | shost = class_to_shost(dev); | |
5563 | ctrl_info = shost_to_hba(shost); | |
5564 | ||
5565 | count += snprintf(buffer + count, PAGE_SIZE - count, | |
5566 | " driver: %s\n", DRIVER_VERSION BUILD_TIMESTAMP); | |
5567 | ||
5568 | count += snprintf(buffer + count, PAGE_SIZE - count, | |
5569 | "firmware: %s\n", ctrl_info->firmware_version); | |
5570 | ||
5571 | return count; | |
5572 | } | |
5573 | ||
5574 | static ssize_t pqi_host_rescan_store(struct device *dev, | |
5575 | struct device_attribute *attr, const char *buffer, size_t count) | |
5576 | { | |
5577 | struct Scsi_Host *shost = class_to_shost(dev); | |
5578 | ||
5579 | pqi_scan_start(shost); | |
5580 | ||
5581 | return count; | |
5582 | } | |
5583 | ||
3c50976f KB |
5584 | static ssize_t pqi_lockup_action_show(struct device *dev, |
5585 | struct device_attribute *attr, char *buffer) | |
5586 | { | |
5587 | int count = 0; | |
5588 | unsigned int i; | |
5589 | ||
5590 | for (i = 0; i < ARRAY_SIZE(pqi_lockup_actions); i++) { | |
5591 | if (pqi_lockup_actions[i].action == pqi_lockup_action) | |
5592 | count += snprintf(buffer + count, PAGE_SIZE - count, | |
5593 | "[%s] ", pqi_lockup_actions[i].name); | |
5594 | else | |
5595 | count += snprintf(buffer + count, PAGE_SIZE - count, | |
5596 | "%s ", pqi_lockup_actions[i].name); | |
5597 | } | |
5598 | ||
5599 | count += snprintf(buffer + count, PAGE_SIZE - count, "\n"); | |
5600 | ||
5601 | return count; | |
5602 | } | |
5603 | ||
5604 | static ssize_t pqi_lockup_action_store(struct device *dev, | |
5605 | struct device_attribute *attr, const char *buffer, size_t count) | |
5606 | { | |
5607 | unsigned int i; | |
5608 | char *action_name; | |
5609 | char action_name_buffer[32]; | |
5610 | ||
5611 | strlcpy(action_name_buffer, buffer, sizeof(action_name_buffer)); | |
5612 | action_name = strstrip(action_name_buffer); | |
5613 | ||
5614 | for (i = 0; i < ARRAY_SIZE(pqi_lockup_actions); i++) { | |
5615 | if (strcmp(action_name, pqi_lockup_actions[i].name) == 0) { | |
5616 | pqi_lockup_action = pqi_lockup_actions[i].action; | |
5617 | return count; | |
5618 | } | |
5619 | } | |
5620 | ||
5621 | return -EINVAL; | |
5622 | } | |
5623 | ||
cbe0c7b1 KB |
5624 | static DEVICE_ATTR(version, 0444, pqi_version_show, NULL); |
5625 | static DEVICE_ATTR(rescan, 0200, NULL, pqi_host_rescan_store); | |
3c50976f KB |
5626 | static DEVICE_ATTR(lockup_action, 0644, |
5627 | pqi_lockup_action_show, pqi_lockup_action_store); | |
6c223761 KB |
5628 | |
5629 | static struct device_attribute *pqi_shost_attrs[] = { | |
5630 | &dev_attr_version, | |
5631 | &dev_attr_rescan, | |
3c50976f | 5632 | &dev_attr_lockup_action, |
6c223761 KB |
5633 | NULL |
5634 | }; | |
5635 | ||
5636 | static ssize_t pqi_sas_address_show(struct device *dev, | |
5637 | struct device_attribute *attr, char *buffer) | |
5638 | { | |
5639 | struct pqi_ctrl_info *ctrl_info; | |
5640 | struct scsi_device *sdev; | |
5641 | struct pqi_scsi_dev *device; | |
5642 | unsigned long flags; | |
5643 | u64 sas_address; | |
5644 | ||
5645 | sdev = to_scsi_device(dev); | |
5646 | ctrl_info = shost_to_hba(sdev->host); | |
5647 | ||
5648 | spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags); | |
5649 | ||
5650 | device = sdev->hostdata; | |
5651 | if (pqi_is_logical_device(device)) { | |
5652 | spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, | |
5653 | flags); | |
5654 | return -ENODEV; | |
5655 | } | |
5656 | sas_address = device->sas_address; | |
5657 | ||
5658 | spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags); | |
5659 | ||
5660 | return snprintf(buffer, PAGE_SIZE, "0x%016llx\n", sas_address); | |
5661 | } | |
5662 | ||
5663 | static ssize_t pqi_ssd_smart_path_enabled_show(struct device *dev, | |
5664 | struct device_attribute *attr, char *buffer) | |
5665 | { | |
5666 | struct pqi_ctrl_info *ctrl_info; | |
5667 | struct scsi_device *sdev; | |
5668 | struct pqi_scsi_dev *device; | |
5669 | unsigned long flags; | |
5670 | ||
5671 | sdev = to_scsi_device(dev); | |
5672 | ctrl_info = shost_to_hba(sdev->host); | |
5673 | ||
5674 | spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags); | |
5675 | ||
5676 | device = sdev->hostdata; | |
5677 | buffer[0] = device->offload_enabled ? '1' : '0'; | |
5678 | buffer[1] = '\n'; | |
5679 | buffer[2] = '\0'; | |
5680 | ||
5681 | spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags); | |
5682 | ||
5683 | return 2; | |
5684 | } | |
5685 | ||
cbe0c7b1 KB |
5686 | static DEVICE_ATTR(sas_address, 0444, pqi_sas_address_show, NULL); |
5687 | static DEVICE_ATTR(ssd_smart_path_enabled, 0444, | |
6c223761 KB |
5688 | pqi_ssd_smart_path_enabled_show, NULL); |
5689 | ||
5690 | static struct device_attribute *pqi_sdev_attrs[] = { | |
5691 | &dev_attr_sas_address, | |
5692 | &dev_attr_ssd_smart_path_enabled, | |
5693 | NULL | |
5694 | }; | |
5695 | ||
5696 | static struct scsi_host_template pqi_driver_template = { | |
5697 | .module = THIS_MODULE, | |
5698 | .name = DRIVER_NAME_SHORT, | |
5699 | .proc_name = DRIVER_NAME_SHORT, | |
5700 | .queuecommand = pqi_scsi_queue_command, | |
5701 | .scan_start = pqi_scan_start, | |
5702 | .scan_finished = pqi_scan_finished, | |
5703 | .this_id = -1, | |
5704 | .use_clustering = ENABLE_CLUSTERING, | |
5705 | .eh_device_reset_handler = pqi_eh_device_reset_handler, | |
5706 | .ioctl = pqi_ioctl, | |
5707 | .slave_alloc = pqi_slave_alloc, | |
52198226 | 5708 | .map_queues = pqi_map_queues, |
6c223761 KB |
5709 | .sdev_attrs = pqi_sdev_attrs, |
5710 | .shost_attrs = pqi_shost_attrs, | |
5711 | }; | |
5712 | ||
5713 | static int pqi_register_scsi(struct pqi_ctrl_info *ctrl_info) | |
5714 | { | |
5715 | int rc; | |
5716 | struct Scsi_Host *shost; | |
5717 | ||
5718 | shost = scsi_host_alloc(&pqi_driver_template, sizeof(ctrl_info)); | |
5719 | if (!shost) { | |
5720 | dev_err(&ctrl_info->pci_dev->dev, | |
5721 | "scsi_host_alloc failed for controller %u\n", | |
5722 | ctrl_info->ctrl_id); | |
5723 | return -ENOMEM; | |
5724 | } | |
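| /*
|  * Note: the private size passed to scsi_host_alloc() is
|  * sizeof(ctrl_info), i.e. the size of the pointer rather than of the
|  * structure.  This appears intentional: only the ctrl_info pointer is
|  * stashed in shost->hostdata[0] below and read back by shost_to_hba().
|  */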
5725 | ||
5726 | shost->io_port = 0; | |
5727 | shost->n_io_port = 0; | |
5728 | shost->this_id = -1; | |
5729 | shost->max_channel = PQI_MAX_BUS; | |
5730 | shost->max_cmd_len = MAX_COMMAND_SIZE; | |
5731 | shost->max_lun = ~0; | |
5732 | shost->max_id = ~0; | |
5733 | shost->max_sectors = ctrl_info->max_sectors; | |
5734 | shost->can_queue = ctrl_info->scsi_ml_can_queue; | |
5735 | shost->cmd_per_lun = shost->can_queue; | |
5736 | shost->sg_tablesize = ctrl_info->sg_tablesize; | |
5737 | shost->transportt = pqi_sas_transport_template; | |
52198226 | 5738 | shost->irq = pci_irq_vector(ctrl_info->pci_dev, 0); |
6c223761 KB |
5739 | shost->unique_id = shost->irq; |
5740 | shost->nr_hw_queues = ctrl_info->num_queue_groups; | |
5741 | shost->hostdata[0] = (unsigned long)ctrl_info; | |
5742 | ||
5743 | rc = scsi_add_host(shost, &ctrl_info->pci_dev->dev); | |
5744 | if (rc) { | |
5745 | dev_err(&ctrl_info->pci_dev->dev, | |
5746 | "scsi_add_host failed for controller %u\n", | |
5747 | ctrl_info->ctrl_id); | |
5748 | goto free_host; | |
5749 | } | |
5750 | ||
5751 | rc = pqi_add_sas_host(shost, ctrl_info); | |
5752 | if (rc) { | |
5753 | dev_err(&ctrl_info->pci_dev->dev, | |
5754 | "add SAS host failed for controller %u\n", | |
5755 | ctrl_info->ctrl_id); | |
5756 | goto remove_host; | |
5757 | } | |
5758 | ||
5759 | ctrl_info->scsi_host = shost; | |
5760 | ||
5761 | return 0; | |
5762 | ||
5763 | remove_host: | |
5764 | scsi_remove_host(shost); | |
5765 | free_host: | |
5766 | scsi_host_put(shost); | |
5767 | ||
5768 | return rc; | |
5769 | } | |
5770 | ||
5771 | static void pqi_unregister_scsi(struct pqi_ctrl_info *ctrl_info) | |
5772 | { | |
5773 | struct Scsi_Host *shost; | |
5774 | ||
5775 | pqi_delete_sas_host(ctrl_info); | |
5776 | ||
5777 | shost = ctrl_info->scsi_host; | |
5778 | if (!shost) | |
5779 | return; | |
5780 | ||
5781 | scsi_remove_host(shost); | |
5782 | scsi_host_put(shost); | |
5783 | } | |
5784 | ||
5785 | #define PQI_RESET_ACTION_RESET 0x1 | |
5786 | ||
5787 | #define PQI_RESET_TYPE_NO_RESET 0x0 | |
5788 | #define PQI_RESET_TYPE_SOFT_RESET 0x1 | |
5789 | #define PQI_RESET_TYPE_FIRM_RESET 0x2 | |
5790 | #define PQI_RESET_TYPE_HARD_RESET 0x3 | |
5791 | ||
5792 | static int pqi_reset(struct pqi_ctrl_info *ctrl_info) | |
5793 | { | |
5794 | int rc; | |
5795 | u32 reset_params; | |
5796 | ||
5797 | reset_params = (PQI_RESET_ACTION_RESET << 5) | | |
5798 | PQI_RESET_TYPE_HARD_RESET; | |
5799 | ||
5800 | writel(reset_params, | |
5801 | &ctrl_info->pqi_registers->device_reset); | |
5802 | ||
5803 | rc = pqi_wait_for_pqi_mode_ready(ctrl_info); | |
5804 | if (rc) | |
5805 | dev_err(&ctrl_info->pci_dev->dev, | |
5806 | "PQI reset failed\n"); | |
5807 | ||
5808 | return rc; | |
5809 | } | |
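| /*
|  * With the values above, reset_params =
|  * (PQI_RESET_ACTION_RESET << 5) | PQI_RESET_TYPE_HARD_RESET
|  *     = (0x1 << 5) | 0x3 = 0x23;
|  * the reset action sits above bit 5 of the device_reset register and
|  * the reset type in the low-order bits.
|  */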
5810 | ||
5811 | static int pqi_get_ctrl_firmware_version(struct pqi_ctrl_info *ctrl_info) | |
5812 | { | |
5813 | int rc; | |
5814 | struct bmic_identify_controller *identify; | |
5815 | ||
5816 | identify = kmalloc(sizeof(*identify), GFP_KERNEL); | |
5817 | if (!identify) | |
5818 | return -ENOMEM; | |
5819 | ||
5820 | rc = pqi_identify_controller(ctrl_info, identify); | |
5821 | if (rc) | |
5822 | goto out; | |
5823 | ||
5824 | memcpy(ctrl_info->firmware_version, identify->firmware_version, | |
5825 | sizeof(identify->firmware_version)); | |
5826 | ctrl_info->firmware_version[sizeof(identify->firmware_version)] = '\0'; | |
| /* size is the remaining buffer space, not the full buffer size */
5827 | snprintf(ctrl_info->firmware_version + |
5828 | strlen(ctrl_info->firmware_version), |
5829 | sizeof(ctrl_info->firmware_version) - strlen(ctrl_info->firmware_version), |
5830 | "-%u", get_unaligned_le16(&identify->firmware_build_number)); |
5831 | ||
5832 | out: | |
5833 | kfree(identify); | |
5834 | ||
5835 | return rc; | |
5836 | } | |
5837 | ||
98f87667 KB |
5838 | static int pqi_process_config_table(struct pqi_ctrl_info *ctrl_info) |
5839 | { | |
5840 | u32 table_length; | |
5841 | u32 section_offset; | |
5842 | void __iomem *table_iomem_addr; | |
5843 | struct pqi_config_table *config_table; | |
5844 | struct pqi_config_table_section_header *section; | |
5845 | ||
5846 | table_length = ctrl_info->config_table_length; | |
5847 | ||
5848 | config_table = kmalloc(table_length, GFP_KERNEL); | |
5849 | if (!config_table) { | |
5850 | dev_err(&ctrl_info->pci_dev->dev, | |
d87d5474 | 5851 | "failed to allocate memory for PQI configuration table\n"); |
98f87667 KB |
5852 | return -ENOMEM; |
5853 | } | |
5854 | ||
5855 | /* | |
5856 | * Copy the config table contents from I/O memory space into the | |
5857 | * temporary buffer. | |
5858 | */ | |
5859 | table_iomem_addr = ctrl_info->iomem_base + | |
5860 | ctrl_info->config_table_offset; | |
5861 | memcpy_fromio(config_table, table_iomem_addr, table_length); | |
5862 | ||
5863 | section_offset = | |
5864 | get_unaligned_le32(&config_table->first_section_offset); | |
5865 | ||
5866 | while (section_offset) { | |
5867 | section = (void *)config_table + section_offset; | |
5868 | ||
5869 | switch (get_unaligned_le16(§ion->section_id)) { | |
5870 | case PQI_CONFIG_TABLE_SECTION_HEARTBEAT: | |
5871 | ctrl_info->heartbeat_counter = table_iomem_addr + | |
5872 | section_offset + | |
5873 | offsetof(struct pqi_config_table_heartbeat, | |
5874 | heartbeat_counter); | |
5875 | break; | |
5876 | } | |
5877 | ||
5878 | section_offset = | |
5879 | get_unaligned_le16(§ion->next_section_offset); | |
5880 | } | |
5881 | ||
5882 | kfree(config_table); | |
5883 | ||
5884 | return 0; | |
5885 | } | |
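| /*
|  * The table is walked via the next_section_offset links.  Note that
|  * heartbeat_counter is left pointing at I/O memory (table_iomem_addr),
|  * not at the temporary kmalloc'd copy, which is freed before
|  * returning.
|  */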
5886 | ||
162d7753 KB |
5887 | /* Switches the controller from PQI mode back into SIS mode. */ |
5888 | ||
5889 | static int pqi_revert_to_sis_mode(struct pqi_ctrl_info *ctrl_info) | |
5890 | { | |
5891 | int rc; | |
5892 | ||
061ef06a | 5893 | pqi_change_irq_mode(ctrl_info, IRQ_MODE_NONE); |
162d7753 KB |
5894 | rc = pqi_reset(ctrl_info); |
5895 | if (rc) | |
5896 | return rc; | |
5897 | sis_reenable_sis_mode(ctrl_info); | |
5898 | pqi_save_ctrl_mode(ctrl_info, SIS_MODE); | |
5899 | ||
5900 | return 0; | |
5901 | } | |
5902 | ||
5903 | /* | |
5904 | * If the controller isn't already in SIS mode, this function forces it into | |
5905 | * SIS mode. | |
5906 | */ | |
5907 | ||
5908 | static int pqi_force_sis_mode(struct pqi_ctrl_info *ctrl_info) | |
ff6abb73 KB |
5909 | { |
5910 | if (!sis_is_firmware_running(ctrl_info)) | |
5911 | return -ENXIO; | |
5912 | ||
162d7753 KB |
5913 | if (pqi_get_ctrl_mode(ctrl_info) == SIS_MODE) |
5914 | return 0; | |
5915 | ||
5916 | if (sis_is_kernel_up(ctrl_info)) { | |
5917 | pqi_save_ctrl_mode(ctrl_info, SIS_MODE); | |
5918 | return 0; | |
ff6abb73 KB |
5919 | } |
5920 | ||
162d7753 | 5921 | return pqi_revert_to_sis_mode(ctrl_info); |
ff6abb73 KB |
5922 | } |
5923 | ||
6c223761 KB |
5924 | static int pqi_ctrl_init(struct pqi_ctrl_info *ctrl_info) |
5925 | { | |
5926 | int rc; | |
5927 | ||
162d7753 KB |
5928 | rc = pqi_force_sis_mode(ctrl_info); |
5929 | if (rc) | |
5930 | return rc; | |
6c223761 KB |
5931 | |
5932 | /* | |
5933 | * Wait until the controller is ready to start accepting SIS | |
5934 | * commands. | |
5935 | */ | |
5936 | rc = sis_wait_for_ctrl_ready(ctrl_info); | |
8845fdfa | 5937 | if (rc) |
6c223761 | 5938 | return rc; |
6c223761 KB |
5939 | |
5940 | /* | |
5941 | * Get the controller properties. This allows us to determine | |
5942 | * whether or not it supports PQI mode. | |
5943 | */ | |
5944 | rc = sis_get_ctrl_properties(ctrl_info); | |
5945 | if (rc) { | |
5946 | dev_err(&ctrl_info->pci_dev->dev, | |
5947 | "error obtaining controller properties\n"); | |
5948 | return rc; | |
5949 | } | |
5950 | ||
5951 | rc = sis_get_pqi_capabilities(ctrl_info); | |
5952 | if (rc) { | |
5953 | dev_err(&ctrl_info->pci_dev->dev, | |
5954 | "error obtaining controller capabilities\n"); | |
5955 | return rc; | |
5956 | } | |
5957 | ||
d727a776 KB |
5958 | if (reset_devices) { |
5959 | if (ctrl_info->max_outstanding_requests > | |
5960 | PQI_MAX_OUTSTANDING_REQUESTS_KDUMP) | |
5961 | ctrl_info->max_outstanding_requests = | |
5962 | PQI_MAX_OUTSTANDING_REQUESTS_KDUMP; | |
5963 | } else { | |
5964 | if (ctrl_info->max_outstanding_requests > | |
5965 | PQI_MAX_OUTSTANDING_REQUESTS) | |
5966 | ctrl_info->max_outstanding_requests = | |
5967 | PQI_MAX_OUTSTANDING_REQUESTS; | |
5968 | } | |
6c223761 KB |
5969 | |
5970 | pqi_calculate_io_resources(ctrl_info); | |
5971 | ||
5972 | rc = pqi_alloc_error_buffer(ctrl_info); | |
5973 | if (rc) { | |
5974 | dev_err(&ctrl_info->pci_dev->dev, | |
5975 | "failed to allocate PQI error buffer\n"); | |
5976 | return rc; | |
5977 | } | |
5978 | ||
5979 | /* | |
5980 | * If the function we are about to call succeeds, the | |
5981 | * controller will transition from legacy SIS mode | |
5982 | * into PQI mode. | |
5983 | */ | |
5984 | rc = sis_init_base_struct_addr(ctrl_info); | |
5985 | if (rc) { | |
5986 | dev_err(&ctrl_info->pci_dev->dev, | |
5987 | "error initializing PQI mode\n"); | |
5988 | return rc; | |
5989 | } | |
5990 | ||
5991 | /* Wait for the controller to complete the SIS -> PQI transition. */ | |
5992 | rc = pqi_wait_for_pqi_mode_ready(ctrl_info); | |
5993 | if (rc) { | |
5994 | dev_err(&ctrl_info->pci_dev->dev, | |
5995 | "transition to PQI mode failed\n"); | |
5996 | return rc; | |
5997 | } | |
5998 | ||
5999 | /* From here on, we are running in PQI mode. */ | |
6000 | ctrl_info->pqi_mode_enabled = true; | |
ff6abb73 | 6001 | pqi_save_ctrl_mode(ctrl_info, PQI_MODE); |
6c223761 | 6002 | |
98f87667 KB |
6003 | rc = pqi_process_config_table(ctrl_info); |
6004 | if (rc) | |
6005 | return rc; | |
6006 | ||
6c223761 KB |
6007 | rc = pqi_alloc_admin_queues(ctrl_info); |
6008 | if (rc) { | |
6009 | dev_err(&ctrl_info->pci_dev->dev, | |
d87d5474 | 6010 | "failed to allocate admin queues\n"); |
6c223761 KB |
6011 | return rc; |
6012 | } | |
6013 | ||
6014 | rc = pqi_create_admin_queues(ctrl_info); | |
6015 | if (rc) { | |
6016 | dev_err(&ctrl_info->pci_dev->dev, | |
6017 | "error creating admin queues\n"); | |
6018 | return rc; | |
6019 | } | |
6020 | ||
6021 | rc = pqi_report_device_capability(ctrl_info); | |
6022 | if (rc) { | |
6023 | dev_err(&ctrl_info->pci_dev->dev, | |
6024 | "obtaining device capability failed\n"); | |
6025 | return rc; | |
6026 | } | |
6027 | ||
6028 | rc = pqi_validate_device_capability(ctrl_info); | |
6029 | if (rc) | |
6030 | return rc; | |
6031 | ||
6032 | pqi_calculate_queue_resources(ctrl_info); | |
6033 | ||
6034 | rc = pqi_enable_msix_interrupts(ctrl_info); | |
6035 | if (rc) | |
6036 | return rc; | |
6037 | ||
6038 | if (ctrl_info->num_msix_vectors_enabled < ctrl_info->num_queue_groups) { | |
6039 | ctrl_info->max_msix_vectors = | |
6040 | ctrl_info->num_msix_vectors_enabled; | |
6041 | pqi_calculate_queue_resources(ctrl_info); | |
6042 | } | |
6043 | ||
6044 | rc = pqi_alloc_io_resources(ctrl_info); | |
6045 | if (rc) | |
6046 | return rc; | |
6047 | ||
6048 | rc = pqi_alloc_operational_queues(ctrl_info); | |
d87d5474 KB |
6049 | if (rc) { |
6050 | dev_err(&ctrl_info->pci_dev->dev, | |
6051 | "failed to allocate operational queues\n"); | |
6c223761 | 6052 | return rc; |
d87d5474 | 6053 | } |
6c223761 KB |
6054 | |
6055 | pqi_init_operational_queues(ctrl_info); | |
6056 | ||
6057 | rc = pqi_request_irqs(ctrl_info); | |
6058 | if (rc) | |
6059 | return rc; | |
6060 | ||
6c223761 KB |
6061 | rc = pqi_create_queues(ctrl_info); |
6062 | if (rc) | |
6063 | return rc; | |
6064 | ||
061ef06a KB |
6065 | pqi_change_irq_mode(ctrl_info, IRQ_MODE_MSIX); |
6066 | ||
6067 | ctrl_info->controller_online = true; | |
6068 | pqi_start_heartbeat_timer(ctrl_info); | |
6c223761 | 6069 | |
6a50d6ad | 6070 | rc = pqi_enable_events(ctrl_info); |
6c223761 KB |
6071 | if (rc) { |
6072 | dev_err(&ctrl_info->pci_dev->dev, | |
6a50d6ad | 6073 | "error enabling events\n"); |
6c223761 KB |
6074 | return rc; |
6075 | } | |
6076 | ||
6c223761 KB |
6077 | /* Register with the SCSI subsystem. */ |
6078 | rc = pqi_register_scsi(ctrl_info); | |
6079 | if (rc) | |
6080 | return rc; | |
6081 | ||
6082 | rc = pqi_get_ctrl_firmware_version(ctrl_info); | |
6083 | if (rc) { | |
6084 | dev_err(&ctrl_info->pci_dev->dev, | |
6085 | "error obtaining firmware version\n"); | |
6086 | return rc; | |
6087 | } | |
6088 | ||
6089 | rc = pqi_write_driver_version_to_host_wellness(ctrl_info); | |
6090 | if (rc) { | |
6091 | dev_err(&ctrl_info->pci_dev->dev, | |
6092 | "error updating host wellness\n"); | |
6093 | return rc; | |
6094 | } | |
6095 | ||
6096 | pqi_schedule_update_time_worker(ctrl_info); | |
6097 | ||
6098 | pqi_scan_scsi_devices(ctrl_info); | |
6099 | ||
6100 | return 0; | |
6101 | } | |
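| /*
|  * Bring-up order matters here: SIS handshake, SIS -> PQI transition,
|  * admin queues, device capability exchange, MSI-X and operational
|  * queue setup, event registration, SCSI host registration, and
|  * finally the initial device scan.
|  */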
6102 | ||
061ef06a KB |
6103 | #if defined(CONFIG_PM) |
6104 | ||
6105 | static void pqi_reinit_queues(struct pqi_ctrl_info *ctrl_info) | |
6106 | { | |
6107 | unsigned int i; | |
6108 | struct pqi_admin_queues *admin_queues; | |
6109 | struct pqi_event_queue *event_queue; | |
6110 | ||
6111 | admin_queues = &ctrl_info->admin_queues; | |
6112 | admin_queues->iq_pi_copy = 0; | |
6113 | admin_queues->oq_ci_copy = 0; | |
6114 | *admin_queues->oq_pi = 0; | |
6115 | ||
6116 | for (i = 0; i < ctrl_info->num_queue_groups; i++) { | |
6117 | ctrl_info->queue_groups[i].iq_pi_copy[RAID_PATH] = 0; | |
6118 | ctrl_info->queue_groups[i].iq_pi_copy[AIO_PATH] = 0; | |
6119 | ctrl_info->queue_groups[i].oq_ci_copy = 0; | |
6120 | ||
6121 | *ctrl_info->queue_groups[i].iq_ci[RAID_PATH] = 0; | |
6122 | *ctrl_info->queue_groups[i].iq_ci[AIO_PATH] = 0; | |
6123 | *ctrl_info->queue_groups[i].oq_pi = 0; | |
6124 | } | |
6125 | ||
6126 | event_queue = &ctrl_info->event_queue; | |
6127 | *event_queue->oq_pi = 0; | |
6128 | event_queue->oq_ci_copy = 0; | |
6129 | } | |
6130 | ||
6131 | static int pqi_ctrl_init_resume(struct pqi_ctrl_info *ctrl_info) | |
6132 | { | |
6133 | int rc; | |
6134 | ||
6135 | rc = pqi_force_sis_mode(ctrl_info); | |
6136 | if (rc) | |
6137 | return rc; | |
6138 | ||
6139 | /* | |
6140 | * Wait until the controller is ready to start accepting SIS | |
6141 | * commands. | |
6142 | */ | |
6143 | rc = sis_wait_for_ctrl_ready_resume(ctrl_info); | |
6144 | if (rc) | |
6145 | return rc; | |
6146 | ||
6147 | /* | |
6148 | * If the function we are about to call succeeds, the | |
6149 | * controller will transition from legacy SIS mode | |
6150 | * into PQI mode. | |
6151 | */ | |
6152 | rc = sis_init_base_struct_addr(ctrl_info); | |
6153 | if (rc) { | |
6154 | dev_err(&ctrl_info->pci_dev->dev, | |
6155 | "error initializing PQI mode\n"); | |
6156 | return rc; | |
6157 | } | |
6158 | ||
6159 | /* Wait for the controller to complete the SIS -> PQI transition. */ | |
6160 | rc = pqi_wait_for_pqi_mode_ready(ctrl_info); | |
6161 | if (rc) { | |
6162 | dev_err(&ctrl_info->pci_dev->dev, | |
6163 | "transition to PQI mode failed\n"); | |
6164 | return rc; | |
6165 | } | |
6166 | ||
6167 | /* From here on, we are running in PQI mode. */ | |
6168 | ctrl_info->pqi_mode_enabled = true; | |
6169 | pqi_save_ctrl_mode(ctrl_info, PQI_MODE); | |
6170 | ||
6171 | pqi_reinit_queues(ctrl_info); | |
6172 | ||
6173 | rc = pqi_create_admin_queues(ctrl_info); | |
6174 | if (rc) { | |
6175 | dev_err(&ctrl_info->pci_dev->dev, | |
6176 | "error creating admin queues\n"); | |
6177 | return rc; | |
6178 | } | |
6179 | ||
6180 | rc = pqi_create_queues(ctrl_info); | |
6181 | if (rc) | |
6182 | return rc; | |
6183 | ||
6184 | pqi_change_irq_mode(ctrl_info, IRQ_MODE_MSIX); | |
6185 | ||
6186 | ctrl_info->controller_online = true; | |
6187 | pqi_start_heartbeat_timer(ctrl_info); | |
6188 | pqi_ctrl_unblock_requests(ctrl_info); | |
6189 | ||
6190 | rc = pqi_enable_events(ctrl_info); | |
6191 | if (rc) { | |
6192 | dev_err(&ctrl_info->pci_dev->dev, | |
d87d5474 | 6193 | "error enabling events\n"); |
061ef06a KB |
6194 | return rc; |
6195 | } | |
6196 | ||
6197 | rc = pqi_write_driver_version_to_host_wellness(ctrl_info); | |
6198 | if (rc) { | |
6199 | dev_err(&ctrl_info->pci_dev->dev, | |
6200 | "error updating host wellness\n"); | |
6201 | return rc; | |
6202 | } | |
6203 | ||
6204 | pqi_schedule_update_time_worker(ctrl_info); | |
6205 | ||
6206 | pqi_scan_scsi_devices(ctrl_info); | |
6207 | ||
6208 | return 0; | |
6209 | } | |
6210 | ||
6211 | #endif /* CONFIG_PM */ | |
6212 | ||
a81ed5f3 KB |
6213 | static inline int pqi_set_pcie_completion_timeout(struct pci_dev *pci_dev, |
6214 | u16 timeout) | |
6215 | { | |
6216 | return pcie_capability_clear_and_set_word(pci_dev, PCI_EXP_DEVCTL2, | |
6217 | PCI_EXP_DEVCTL2_COMP_TIMEOUT, timeout); | |
6218 | } | |
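| /*
|  * PCI_EXP_DEVCTL2_COMP_TIMEOUT is the completion-timeout value field
|  * of the PCIe Device Control 2 register; the encoding 0x6 used below
|  * selects the 65 ms - 210 ms range, as the macro name suggests.
|  */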
6219 | ||
6c223761 KB |
6220 | static int pqi_pci_init(struct pqi_ctrl_info *ctrl_info) |
6221 | { | |
6222 | int rc; | |
6223 | u64 mask; | |
6224 | ||
6225 | rc = pci_enable_device(ctrl_info->pci_dev); | |
6226 | if (rc) { | |
6227 | dev_err(&ctrl_info->pci_dev->dev, | |
6228 | "failed to enable PCI device\n"); | |
6229 | return rc; | |
6230 | } | |
6231 | ||
6232 | if (sizeof(dma_addr_t) > 4) | |
6233 | mask = DMA_BIT_MASK(64); | |
6234 | else | |
6235 | mask = DMA_BIT_MASK(32); | |
6236 | ||
6237 | rc = dma_set_mask(&ctrl_info->pci_dev->dev, mask); | |
6238 | if (rc) { | |
6239 | dev_err(&ctrl_info->pci_dev->dev, "failed to set DMA mask\n"); | |
6240 | goto disable_device; | |
6241 | } | |
6242 | ||
6243 | rc = pci_request_regions(ctrl_info->pci_dev, DRIVER_NAME_SHORT); | |
6244 | if (rc) { | |
6245 | dev_err(&ctrl_info->pci_dev->dev, | |
6246 | "failed to obtain PCI resources\n"); | |
6247 | goto disable_device; | |
6248 | } | |
6249 | ||
6250 | ctrl_info->iomem_base = ioremap_nocache(pci_resource_start( | |
6251 | ctrl_info->pci_dev, 0), | |
6252 | sizeof(struct pqi_ctrl_registers)); | |
6253 | if (!ctrl_info->iomem_base) { | |
6254 | dev_err(&ctrl_info->pci_dev->dev, | |
6255 | "failed to map memory for controller registers\n"); | |
6256 | rc = -ENOMEM; | |
6257 | goto release_regions; | |
6258 | } | |
6259 | ||
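/*
 * 0x6 is the PCIe spec encoding (0110b) for a completion timeout in
 * Range B: 65 ms to 210 ms.
 */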
a81ed5f3 KB |
6260 | #define PCI_EXP_COMP_TIMEOUT_65_TO_210_MS 0x6 |
6261 | ||
6262 | /* Increase the PCIe completion timeout. */ | |
6263 | rc = pqi_set_pcie_completion_timeout(ctrl_info->pci_dev, | |
6264 | PCI_EXP_COMP_TIMEOUT_65_TO_210_MS); | |
6265 | if (rc) { | |
6266 | dev_err(&ctrl_info->pci_dev->dev, | |
6267 | "failed to set PCIe completion timeout\n"); | |
6268 | goto release_regions; | |
6269 | } | |
6270 | ||
6c223761 KB |
6271 | /* Enable bus mastering. */ |
6272 | pci_set_master(ctrl_info->pci_dev); | |
6273 | ||
cbe0c7b1 KB |
6274 | ctrl_info->registers = ctrl_info->iomem_base; |
6275 | ctrl_info->pqi_registers = &ctrl_info->registers->pqi_registers; | |
6276 | ||
6c223761 KB |
6277 | pci_set_drvdata(ctrl_info->pci_dev, ctrl_info); |
6278 | ||
6279 | return 0; | |
6280 | ||
6281 | release_regions: | |
6282 | pci_release_regions(ctrl_info->pci_dev); | |
6283 | disable_device: | |
6284 | pci_disable_device(ctrl_info->pci_dev); | |
6285 | ||
6286 | return rc; | |
6287 | } | |
6288 | ||
6289 | static void pqi_cleanup_pci_init(struct pqi_ctrl_info *ctrl_info) | |
6290 | { | |
6291 | iounmap(ctrl_info->iomem_base); | |
6292 | pci_release_regions(ctrl_info->pci_dev); | |
cbe0c7b1 KB |
6293 | if (pci_is_enabled(ctrl_info->pci_dev)) |
6294 | pci_disable_device(ctrl_info->pci_dev); | |
6c223761 KB |
6295 | pci_set_drvdata(ctrl_info->pci_dev, NULL); |
6296 | } | |
6297 | ||
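/*
 * Allocate and initialize a controller info block on the controller's
 * NUMA node. All locks, work items, and the synchronous-request
 * semaphore are set up here; the IRQ mode starts out as IRQ_MODE_NONE
 * until interrupts are configured.
 */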
6298 | static struct pqi_ctrl_info *pqi_alloc_ctrl_info(int numa_node) | |
6299 | { | |
6300 | struct pqi_ctrl_info *ctrl_info; | |
6301 | ||
6302 | ctrl_info = kzalloc_node(sizeof(struct pqi_ctrl_info), | |
6303 | GFP_KERNEL, numa_node); | |
6304 | if (!ctrl_info) | |
6305 | return NULL; | |
6306 | ||
6307 | mutex_init(&ctrl_info->scan_mutex); | |
7561a7e4 | 6308 | mutex_init(&ctrl_info->lun_reset_mutex); |
6c223761 KB |
6309 | |
6310 | INIT_LIST_HEAD(&ctrl_info->scsi_device_list); | |
6311 | spin_lock_init(&ctrl_info->scsi_device_list_lock); | |
6312 | ||
6313 | INIT_WORK(&ctrl_info->event_work, pqi_event_worker); | |
6314 | atomic_set(&ctrl_info->num_interrupts, 0); | |
6315 | ||
6316 | INIT_DELAYED_WORK(&ctrl_info->rescan_work, pqi_rescan_worker); | |
6317 | INIT_DELAYED_WORK(&ctrl_info->update_time_work, pqi_update_time_worker); | |
6318 | ||
98f87667 | 6319 | init_timer(&ctrl_info->heartbeat_timer); |
5f310425 | 6320 | INIT_WORK(&ctrl_info->ctrl_offline_work, pqi_ctrl_offline_worker); |
98f87667 | 6321 | |
6c223761 KB |
6322 | sema_init(&ctrl_info->sync_request_sem, |
6323 | PQI_RESERVED_IO_SLOTS_SYNCHRONOUS_REQUESTS); | |
7561a7e4 | 6324 | init_waitqueue_head(&ctrl_info->block_requests_wait); |
6c223761 | 6325 | |
376fb880 KB |
6326 | INIT_LIST_HEAD(&ctrl_info->raid_bypass_retry_list); |
6327 | spin_lock_init(&ctrl_info->raid_bypass_retry_list_lock); | |
6328 | INIT_WORK(&ctrl_info->raid_bypass_retry_work, | |
6329 | pqi_raid_bypass_retry_worker); | |
6330 | ||
6c223761 | 6331 | ctrl_info->ctrl_id = atomic_inc_return(&pqi_controller_count) - 1; |
061ef06a | 6332 | ctrl_info->irq_mode = IRQ_MODE_NONE; |
6c223761 KB |
6333 | ctrl_info->max_msix_vectors = PQI_MAX_MSIX_VECTORS; |
6334 | ||
6335 | return ctrl_info; | |
6336 | } | |
6337 | ||
6338 | static inline void pqi_free_ctrl_info(struct pqi_ctrl_info *ctrl_info) | |
6339 | { | |
6340 | kfree(ctrl_info); | |
6341 | } | |
6342 | ||
6343 | static void pqi_free_interrupts(struct pqi_ctrl_info *ctrl_info) | |
6344 | { | |
98bf061b KB |
6345 | pqi_free_irqs(ctrl_info); |
6346 | pqi_disable_msix_interrupts(ctrl_info); | |
6c223761 KB |
6347 | } |
6348 | ||
6349 | static void pqi_free_ctrl_resources(struct pqi_ctrl_info *ctrl_info) | |
6350 | { | |
6351 | pqi_stop_heartbeat_timer(ctrl_info); | |
6352 | pqi_free_interrupts(ctrl_info); | |
6353 | if (ctrl_info->queue_memory_base) | |
6354 | dma_free_coherent(&ctrl_info->pci_dev->dev, | |
6355 | ctrl_info->queue_memory_length, | |
6356 | ctrl_info->queue_memory_base, | |
6357 | ctrl_info->queue_memory_base_dma_handle); | |
6358 | if (ctrl_info->admin_queue_memory_base) | |
6359 | dma_free_coherent(&ctrl_info->pci_dev->dev, | |
6360 | ctrl_info->admin_queue_memory_length, | |
6361 | ctrl_info->admin_queue_memory_base, | |
6362 | ctrl_info->admin_queue_memory_base_dma_handle); | |
6363 | pqi_free_all_io_requests(ctrl_info); | |
6364 | if (ctrl_info->error_buffer) | |
6365 | dma_free_coherent(&ctrl_info->pci_dev->dev, | |
6366 | ctrl_info->error_buffer_length, | |
6367 | ctrl_info->error_buffer, | |
6368 | ctrl_info->error_buffer_dma_handle); | |
6369 | if (ctrl_info->iomem_base) | |
6370 | pqi_cleanup_pci_init(ctrl_info); | |
6371 | pqi_free_ctrl_info(ctrl_info); | |
6372 | } | |
6373 | ||
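/*
 * Tear down a controller roughly in the reverse order of bring-up:
 * cancel the deferred workers first so they cannot run against a dying
 * controller, remove the SCSI devices and unregister from the SCSI
 * midlayer, revert the firmware to SIS mode if PQI mode was enabled,
 * and free all resources last.
 */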
6374 | static void pqi_remove_ctrl(struct pqi_ctrl_info *ctrl_info) | |
6375 | { | |
061ef06a KB |
6376 | pqi_cancel_rescan_worker(ctrl_info); |
6377 | pqi_cancel_update_time_worker(ctrl_info); | |
e57a1f9b KB |
6378 | pqi_remove_all_scsi_devices(ctrl_info); |
6379 | pqi_unregister_scsi(ctrl_info); | |
162d7753 KB |
6380 | if (ctrl_info->pqi_mode_enabled) |
6381 | pqi_revert_to_sis_mode(ctrl_info); | |
6c223761 KB |
6382 | pqi_free_ctrl_resources(ctrl_info); |
6383 | } | |
6384 | ||
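/*
 * Invoked when a controller lockup has been detected. The action is
 * selected at module load time (see pqi_process_lockup_action_param()
 * below); when no action is configured, the default case does nothing
 * and the system keeps running with the controller offline.
 */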
3c50976f KB |
6385 | static void pqi_perform_lockup_action(void) |
6386 | { | |
6387 | switch (pqi_lockup_action) { | |
6388 | case PANIC: | |
6389 | panic("FATAL: Smart Family Controller lockup detected"); | |
6390 | break; | |
6391 | case REBOOT: | |
6392 | emergency_restart(); | |
6393 | break; | |
6394 | case NONE: | |
6395 | default: | |
6396 | break; | |
6397 | } | |
6398 | } | |
6399 | ||
5f310425 KB |
6400 | static struct pqi_raid_error_info pqi_ctrl_offline_raid_error_info = { |
6401 | .data_out_result = PQI_DATA_IN_OUT_HARDWARE_ERROR, | |
6402 | .status = SAM_STAT_CHECK_CONDITION, | |
6403 | }; | |
6404 | ||
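/*
 * Walk the entire I/O request pool and complete every request that is
 * still in flight (nonzero refcount). Requests that originated from the
 * SCSI midlayer are failed with DID_NO_CONNECT; driver-internal requests
 * get -ENXIO plus the canned check-condition error info defined above.
 */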
6405 | static void pqi_fail_all_outstanding_requests(struct pqi_ctrl_info *ctrl_info) | |
376fb880 KB |
6406 | { |
6407 | unsigned int i; | |
376fb880 | 6408 | struct pqi_io_request *io_request; |
376fb880 KB |
6409 | struct scsi_cmnd *scmd; |
6410 | ||
5f310425 KB |
6411 | for (i = 0; i < ctrl_info->max_io_slots; i++) { |
6412 | io_request = &ctrl_info->io_request_pool[i]; | |
6413 | if (atomic_read(&io_request->refcount) == 0) | |
6414 | continue; | |
376fb880 | 6415 | |
5f310425 KB |
6416 | scmd = io_request->scmd; |
6417 | if (scmd) { | |
6418 | set_host_byte(scmd, DID_NO_CONNECT); | |
6419 | } else { | |
6420 | io_request->status = -ENXIO; | |
6421 | io_request->error_info = | |
6422 | &pqi_ctrl_offline_raid_error_info; | |
376fb880 | 6423 | } |
5f310425 KB |
6424 | |
6425 | io_request->io_complete_callback(io_request, | |
6426 | io_request->context); | |
376fb880 KB |
6427 | } |
6428 | } | |
6429 | ||
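/*
 * The deferred half of taking a controller offline, run from the
 * ctrl_offline_work item: perform the configured lockup action, stop the
 * heartbeat timer, free interrupts, cancel the deferred workers, wait for
 * the controller to quiesce, fail everything still outstanding (including
 * queued RAID bypass retries), and then unblock requests so new I/O fails
 * fast instead of hanging.
 */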
5f310425 | 6430 | static void pqi_take_ctrl_offline_deferred(struct pqi_ctrl_info *ctrl_info) |
376fb880 | 6431 | { |
5f310425 KB |
6432 | pqi_perform_lockup_action(); |
6433 | pqi_stop_heartbeat_timer(ctrl_info); | |
6434 | pqi_free_interrupts(ctrl_info); | |
6435 | pqi_cancel_rescan_worker(ctrl_info); | |
6436 | pqi_cancel_update_time_worker(ctrl_info); | |
6437 | pqi_ctrl_wait_until_quiesced(ctrl_info); | |
6438 | pqi_fail_all_outstanding_requests(ctrl_info); | |
6439 | pqi_clear_all_queued_raid_bypass_retries(ctrl_info); | |
6440 | pqi_ctrl_unblock_requests(ctrl_info); | |
6441 | } | |
6442 | ||
6443 | static void pqi_ctrl_offline_worker(struct work_struct *work) | |
6444 | { | |
6445 | struct pqi_ctrl_info *ctrl_info; | |
6446 | ||
6447 | ctrl_info = container_of(work, struct pqi_ctrl_info, ctrl_offline_work); | |
6448 | pqi_take_ctrl_offline_deferred(ctrl_info); | |
376fb880 KB |
6449 | } |
6450 | ||
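/*
 * Mark the controller offline: block new requests, shut the firmware
 * down and disable the PCI device, then defer the remaining teardown
 * (which may sleep or take time) to ctrl_offline_work.
 */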
6451 | static void pqi_take_ctrl_offline(struct pqi_ctrl_info *ctrl_info) | |
6452 | { | |
5f310425 KB |
6453 | if (!ctrl_info->controller_online) |
6454 | return; | |
6455 | ||
376fb880 | 6456 | ctrl_info->controller_online = false; |
5f310425 KB |
6457 | ctrl_info->pqi_mode_enabled = false; |
6458 | pqi_ctrl_block_requests(ctrl_info); | |
376fb880 KB |
6459 | sis_shutdown_ctrl(ctrl_info); |
6460 | pci_disable_device(ctrl_info->pci_dev); | |
6461 | dev_err(&ctrl_info->pci_dev->dev, "controller offline\n"); | |
5f310425 | 6462 | schedule_work(&ctrl_info->ctrl_offline_work); |
376fb880 KB |
6463 | } |
6464 | ||
d91d7820 | 6465 | static void pqi_print_ctrl_info(struct pci_dev *pci_dev, |
6c223761 KB |
6466 | const struct pci_device_id *id) |
6467 | { | |
6468 | char *ctrl_description; | |
6469 | ||
6470 | if (id->driver_data) { | |
6471 | ctrl_description = (char *)id->driver_data; | |
6472 | } else { | |
6473 | switch (id->subvendor) { | |
6474 | case PCI_VENDOR_ID_HP: | |
6475 | ctrl_description = hpe_branded_controller; | |
6476 | break; | |
6477 | case PCI_VENDOR_ID_ADAPTEC2: | |
6478 | default: | |
6479 | ctrl_description = microsemi_branded_controller; | |
6480 | break; | |
6481 | } | |
6482 | } | |
6483 | ||
d91d7820 | 6484 | dev_info(&pci_dev->dev, "%s found\n", ctrl_description); |
6c223761 KB |
6485 | } |
6486 | ||
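/*
 * Probe flow: log the controller, honor the
 * pqi_disable_device_id_wildcards flag (set via a module parameter
 * defined earlier in this file) for PCI_ANY_ID matches, pin devices with
 * no NUMA affinity to node 0, then allocate the controller info block
 * and run PCI and controller initialization. On any failure,
 * pqi_remove_ctrl() unwinds whatever was set up.
 */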
d91d7820 KB |
6487 | static int pqi_pci_probe(struct pci_dev *pci_dev, |
6488 | const struct pci_device_id *id) | |
6c223761 KB |
6489 | { |
6490 | int rc; | |
6491 | int node; | |
6492 | struct pqi_ctrl_info *ctrl_info; | |
6493 | ||
d91d7820 | 6494 | pqi_print_ctrl_info(pci_dev, id); |
6c223761 KB |
6495 | |
6496 | if (pqi_disable_device_id_wildcards && | |
6497 | id->subvendor == PCI_ANY_ID && | |
6498 | id->subdevice == PCI_ANY_ID) { | |
d91d7820 | 6499 | dev_warn(&pci_dev->dev, |
6c223761 KB |
6500 | "controller not probed because device ID wildcards are disabled\n"); |
6501 | return -ENODEV; | |
6502 | } | |
6503 | ||
6504 | if (id->subvendor == PCI_ANY_ID || id->subdevice == PCI_ANY_ID) | |
d91d7820 | 6505 | dev_warn(&pci_dev->dev, |
6c223761 KB |
6506 | "controller device ID matched using wildcards\n"); |
6507 | ||
d91d7820 | 6508 | node = dev_to_node(&pci_dev->dev); |
6c223761 | 6509 | if (node == NUMA_NO_NODE) |
d91d7820 | 6510 | set_dev_node(&pci_dev->dev, 0); |
6c223761 KB |
6511 | |
6512 | ctrl_info = pqi_alloc_ctrl_info(node); | |
6513 | if (!ctrl_info) { | |
d91d7820 | 6514 | dev_err(&pci_dev->dev, |
6c223761 KB |
6515 | "failed to allocate controller info block\n"); |
6516 | return -ENOMEM; | |
6517 | } | |
6518 | ||
d91d7820 | 6519 | ctrl_info->pci_dev = pci_dev; |
6c223761 KB |
6520 | |
6521 | rc = pqi_pci_init(ctrl_info); | |
6522 | if (rc) | |
6523 | goto error; | |
6524 | ||
6525 | rc = pqi_ctrl_init(ctrl_info); | |
6526 | if (rc) | |
6527 | goto error; | |
6528 | ||
6529 | return 0; | |
6530 | ||
6531 | error: | |
6532 | pqi_remove_ctrl(ctrl_info); | |
6533 | ||
6534 | return rc; | |
6535 | } | |
6536 | ||
d91d7820 | 6537 | static void pqi_pci_remove(struct pci_dev *pci_dev) |
6c223761 KB |
6538 | { |
6539 | struct pqi_ctrl_info *ctrl_info; | |
6540 | ||
d91d7820 | 6541 | ctrl_info = pci_get_drvdata(pci_dev); |
6c223761 KB |
6542 | if (!ctrl_info) |
6543 | return; | |
6544 | ||
6545 | pqi_remove_ctrl(ctrl_info); | |
6546 | } | |
6547 | ||
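/*
 * On shutdown or reboot, ask the controller to commit its battery-backed
 * write cache to disk. Note that the single warning below covers both
 * failure paths: missing driver data and a failed cache flush.
 */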
d91d7820 | 6548 | static void pqi_shutdown(struct pci_dev *pci_dev) |
6c223761 KB |
6549 | { |
6550 | int rc; | |
6551 | struct pqi_ctrl_info *ctrl_info; | |
6552 | ||
d91d7820 | 6553 | ctrl_info = pci_get_drvdata(pci_dev); |
6c223761 KB |
6554 | if (!ctrl_info) |
6555 | goto error; | |
6556 | ||
6557 | /* | |
6558 | * Write all data in the controller's battery-backed cache to | |
6559 | * storage. | |
6560 | */ | |
6561 | rc = pqi_flush_cache(ctrl_info); | |
6562 | if (rc == 0) | |
6563 | return; | |
6564 | ||
6565 | error: | |
d91d7820 | 6566 | dev_warn(&pci_dev->dev, |
6c223761 KB |
6567 | "unable to flush controller cache\n"); |
6568 | } | |
6569 | ||
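/*
 * Match the user-supplied lockup-action string against the
 * pqi_lockup_actions[] table; an unrecognized value logs a warning and
 * leaves the default action in place. Assuming the charp parameter is
 * exposed as "lockup_action" (defined earlier in this file), it would be
 * set at load time with, e.g.:
 *
 *     modprobe smartpqi lockup_action=panic
 */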
3c50976f KB |
6570 | static void pqi_process_lockup_action_param(void) |
6571 | { | |
6572 | unsigned int i; | |
6573 | ||
6574 | if (!pqi_lockup_action_param) | |
6575 | return; | |
6576 | ||
6577 | for (i = 0; i < ARRAY_SIZE(pqi_lockup_actions); i++) { | |
6578 | if (strcmp(pqi_lockup_action_param, | |
6579 | pqi_lockup_actions[i].name) == 0) { | |
6580 | pqi_lockup_action = pqi_lockup_actions[i].action; | |
6581 | return; | |
6582 | } | |
6583 | } | |
6584 | ||
6585 | pr_warn("%s: invalid lockup action setting \"%s\" - supported settings: none, reboot, panic\n", | |
6586 | DRIVER_NAME_SHORT, pqi_lockup_action_param); | |
6587 | } | |
6588 | ||
6589 | static void pqi_process_module_params(void) | |
6590 | { | |
6591 | pqi_process_lockup_action_param(); | |
6592 | } | |
6593 | ||
061ef06a KB |
6594 | #if defined(CONFIG_PM) |
6595 | ||
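/*
 * Suspend quiesces the controller in stages: stop generating new work
 * (events plus the rescan and time-update workers), wait for any scan or
 * LUN reset to finish, flush the controller cache, block and drain all
 * outstanding I/O, and stop the heartbeat timer. For PM_EVENT_FREEZE the
 * controller stays powered and fully initialized; for other targets the
 * PCI state is saved and the device is put into a low-power state.
 */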
6596 | static int pqi_suspend(struct pci_dev *pci_dev, pm_message_t state) | |
6597 | { | |
6598 | struct pqi_ctrl_info *ctrl_info; | |
6599 | ||
6600 | ctrl_info = pci_get_drvdata(pci_dev); | |
6601 | ||
6602 | pqi_disable_events(ctrl_info); | |
6603 | pqi_cancel_update_time_worker(ctrl_info); | |
6604 | pqi_cancel_rescan_worker(ctrl_info); | |
6605 | pqi_wait_until_scan_finished(ctrl_info); | |
6606 | pqi_wait_until_lun_reset_finished(ctrl_info); | |
6607 | pqi_flush_cache(ctrl_info); | |
6608 | pqi_ctrl_block_requests(ctrl_info); | |
6609 | pqi_ctrl_wait_until_quiesced(ctrl_info); | |
6610 | pqi_wait_until_inbound_queues_empty(ctrl_info); | |
6611 | pqi_ctrl_wait_for_pending_io(ctrl_info); | |
6612 | pqi_stop_heartbeat_timer(ctrl_info); | |
6613 | ||
6614 | if (state.event == PM_EVENT_FREEZE) | |
6615 | return 0; | |
6616 | ||
6617 | pci_save_state(pci_dev); | |
6618 | pci_set_power_state(pci_dev, pci_choose_state(pci_dev, state)); | |
6619 | ||
6620 | ctrl_info->controller_online = false; | |
6621 | ctrl_info->pqi_mode_enabled = false; | |
6622 | ||
6623 | return 0; | |
6624 | } | |
6625 | ||
6626 | static int pqi_resume(struct pci_dev *pci_dev) | |
6627 | { | |
6628 | int rc; | |
6629 | struct pqi_ctrl_info *ctrl_info; | |
6630 | ||
6631 | ctrl_info = pci_get_drvdata(pci_dev); | |
6632 | ||
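/*
 * A device that is not back in D0 takes the lightweight path: collapse
 * to a single queue group, swap MSI-X for one shared legacy INTx vector,
 * restart the heartbeat timer, and unblock requests. Otherwise the
 * power state and config space are restored and the controller goes
 * through full PQI re-initialization via pqi_ctrl_init_resume().
 */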
6633 | if (pci_dev->current_state != PCI_D0) { | |
6634 | ctrl_info->max_hw_queue_index = 0; | |
6635 | pqi_free_interrupts(ctrl_info); | |
6636 | pqi_change_irq_mode(ctrl_info, IRQ_MODE_INTX); | |
6637 | rc = request_irq(pci_irq_vector(pci_dev, 0), pqi_irq_handler, | |
6638 | IRQF_SHARED, DRIVER_NAME_SHORT, | |
6639 | &ctrl_info->queue_groups[0]); | |
6640 | if (rc) { | |
6641 | dev_err(&ctrl_info->pci_dev->dev, | |
6642 | "irq %u init failed with error %d\n", | |
6643 | pci_dev->irq, rc); | |
6644 | return rc; | |
6645 | } | |
6646 | pqi_start_heartbeat_timer(ctrl_info); | |
6647 | pqi_ctrl_unblock_requests(ctrl_info); | |
6648 | return 0; | |
6649 | } | |
6650 | ||
6651 | pci_set_power_state(pci_dev, PCI_D0); | |
6652 | pci_restore_state(pci_dev); | |
6653 | ||
6654 | return pqi_ctrl_init_resume(ctrl_info); | |
6655 | } | |
6656 | ||
6657 | #endif /* CONFIG_PM */ | |
6658 | ||
6c223761 KB |
6659 | /* Define the PCI IDs for the controllers that we support. */ |
6660 | static const struct pci_device_id pqi_pci_id_table[] = { | |
7eddabff KB |
6661 | { |
6662 | PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, | |
6663 | 0x152d, 0x8a22) | |
6664 | }, | |
6665 | { | |
6666 | PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, | |
6667 | 0x152d, 0x8a23) | |
6668 | }, | |
6669 | { | |
6670 | PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, | |
6671 | 0x152d, 0x8a24) | |
6672 | }, | |
6673 | { | |
6674 | PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, | |
6675 | 0x152d, 0x8a36) | |
6676 | }, | |
6677 | { | |
6678 | PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, | |
6679 | 0x152d, 0x8a37) | |
6680 | }, | |
6c223761 KB |
6681 | { |
6682 | PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, | |
6683 | PCI_VENDOR_ID_ADAPTEC2, 0x0110) | |
6684 | }, | |
6685 | { | |
6686 | PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, | |
7eddabff | 6687 | PCI_VENDOR_ID_ADAPTEC2, 0x0605) |
6c223761 KB |
6688 | }, |
6689 | { | |
6690 | PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, | |
7eddabff | 6691 | PCI_VENDOR_ID_ADAPTEC2, 0x0800) |
6c223761 KB |
6692 | }, |
6693 | { | |
6694 | PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, | |
7eddabff | 6695 | PCI_VENDOR_ID_ADAPTEC2, 0x0801) |
6c223761 KB |
6696 | }, |
6697 | { | |
6698 | PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, | |
7eddabff | 6699 | PCI_VENDOR_ID_ADAPTEC2, 0x0802) |
6c223761 KB |
6700 | }, |
6701 | { | |
6702 | PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, | |
7eddabff | 6703 | PCI_VENDOR_ID_ADAPTEC2, 0x0803) |
6c223761 KB |
6704 | }, |
6705 | { | |
6706 | PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, | |
7eddabff | 6707 | PCI_VENDOR_ID_ADAPTEC2, 0x0804) |
6c223761 KB |
6708 | }, |
6709 | { | |
6710 | PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, | |
7eddabff | 6711 | PCI_VENDOR_ID_ADAPTEC2, 0x0805) |
6c223761 KB |
6712 | }, |
6713 | { | |
6714 | PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, | |
7eddabff | 6715 | PCI_VENDOR_ID_ADAPTEC2, 0x0806) |
6c223761 KB |
6716 | }, |
6717 | { | |
6718 | PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, | |
7eddabff | 6719 | PCI_VENDOR_ID_ADAPTEC2, 0x0900) |
6c223761 KB |
6720 | }, |
6721 | { | |
6722 | PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, | |
7eddabff | 6723 | PCI_VENDOR_ID_ADAPTEC2, 0x0901) |
6c223761 KB |
6724 | }, |
6725 | { | |
6726 | PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, | |
7eddabff | 6727 | PCI_VENDOR_ID_ADAPTEC2, 0x0902) |
6c223761 KB |
6728 | }, |
6729 | { | |
6730 | PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, | |
7eddabff | 6731 | PCI_VENDOR_ID_ADAPTEC2, 0x0903) |
6c223761 KB |
6732 | }, |
6733 | { | |
6734 | PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, | |
7eddabff | 6735 | PCI_VENDOR_ID_ADAPTEC2, 0x0904) |
6c223761 KB |
6736 | }, |
6737 | { | |
6738 | PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, | |
7eddabff | 6739 | PCI_VENDOR_ID_ADAPTEC2, 0x0905) |
6c223761 KB |
6740 | }, |
6741 | { | |
6742 | PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, | |
7eddabff | 6743 | PCI_VENDOR_ID_ADAPTEC2, 0x0906) |
6c223761 KB |
6744 | }, |
6745 | { | |
6746 | PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, | |
7eddabff | 6747 | PCI_VENDOR_ID_ADAPTEC2, 0x0907) |
6c223761 KB |
6748 | }, |
6749 | { | |
6750 | PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, | |
7eddabff | 6751 | PCI_VENDOR_ID_ADAPTEC2, 0x0908) |
6c223761 KB |
6752 | }, |
6753 | { | |
6754 | PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, | |
7eddabff | 6755 | PCI_VENDOR_ID_ADAPTEC2, 0x1200) |
6c223761 KB |
6756 | }, |
6757 | { | |
6758 | PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, | |
7eddabff | 6759 | PCI_VENDOR_ID_ADAPTEC2, 0x1201) |
6c223761 KB |
6760 | }, |
6761 | { | |
6762 | PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, | |
7eddabff | 6763 | PCI_VENDOR_ID_ADAPTEC2, 0x1202) |
6c223761 KB |
6764 | }, |
6765 | { | |
6766 | PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, | |
7eddabff | 6767 | PCI_VENDOR_ID_ADAPTEC2, 0x1280) |
6c223761 KB |
6768 | }, |
6769 | { | |
6770 | PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, | |
7eddabff | 6771 | PCI_VENDOR_ID_ADAPTEC2, 0x1281) |
6c223761 KB |
6772 | }, |
6773 | { | |
6774 | PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, | |
7eddabff | 6775 | PCI_VENDOR_ID_ADAPTEC2, 0x1300) |
6c223761 KB |
6776 | }, |
6777 | { | |
6778 | PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, | |
7eddabff | 6779 | PCI_VENDOR_ID_ADAPTEC2, 0x1301) |
6c223761 KB |
6780 | }, |
6781 | { | |
6782 | PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, | |
7eddabff KB |
6783 | PCI_VENDOR_ID_ADAPTEC2, 0x1380) |
6784 | }, | |
6785 | { | |
6786 | PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, | |
6787 | PCI_VENDOR_ID_HP, 0x0600) | |
6788 | }, | |
6789 | { | |
6790 | PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, | |
6791 | PCI_VENDOR_ID_HP, 0x0601) | |
6792 | }, | |
6793 | { | |
6794 | PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, | |
6795 | PCI_VENDOR_ID_HP, 0x0602) | |
6796 | }, | |
6797 | { | |
6798 | PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, | |
6799 | PCI_VENDOR_ID_HP, 0x0603) | |
6800 | }, | |
6801 | { | |
6802 | PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, | |
6803 | PCI_VENDOR_ID_HP, 0x0604) | |
6804 | }, | |
6805 | { | |
6806 | PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, | |
6807 | PCI_VENDOR_ID_HP, 0x0606) | |
6808 | }, | |
6809 | { | |
6810 | PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, | |
6811 | PCI_VENDOR_ID_HP, 0x0650) | |
6812 | }, | |
6813 | { | |
6814 | PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, | |
6815 | PCI_VENDOR_ID_HP, 0x0651) | |
6816 | }, | |
6817 | { | |
6818 | PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, | |
6819 | PCI_VENDOR_ID_HP, 0x0652) | |
6820 | }, | |
6821 | { | |
6822 | PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, | |
6823 | PCI_VENDOR_ID_HP, 0x0653) | |
6824 | }, | |
6825 | { | |
6826 | PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, | |
6827 | PCI_VENDOR_ID_HP, 0x0654) | |
6828 | }, | |
6829 | { | |
6830 | PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, | |
6831 | PCI_VENDOR_ID_HP, 0x0655) | |
6832 | }, | |
6833 | { | |
6834 | PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, | |
6835 | PCI_VENDOR_ID_HP, 0x0656) | |
6836 | }, | |
6837 | { | |
6838 | PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, | |
6839 | PCI_VENDOR_ID_HP, 0x0657) | |
6840 | }, | |
6841 | { | |
6842 | PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, | |
6843 | PCI_VENDOR_ID_HP, 0x0700) | |
6844 | }, | |
6845 | { | |
6846 | PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, | |
6847 | PCI_VENDOR_ID_HP, 0x0701) | |
6c223761 KB |
6848 | }, |
6849 | { | |
6850 | PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, | |
6851 | PCI_VENDOR_ID_HP, 0x1001) | |
6852 | }, | |
6853 | { | |
6854 | PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, | |
6855 | PCI_VENDOR_ID_HP, 0x1100) | |
6856 | }, | |
6857 | { | |
6858 | PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, | |
6859 | PCI_VENDOR_ID_HP, 0x1101) | |
6860 | }, | |
6861 | { | |
6862 | PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, | |
6863 | PCI_VENDOR_ID_HP, 0x1102) | |
6864 | }, | |
6865 | { | |
6866 | PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, | |
6867 | PCI_VENDOR_ID_HP, 0x1150) | |
6868 | }, | |
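/*
 * Catch-all entry: claim any 0x028f function regardless of subsystem
 * IDs. pqi_pci_probe() warns when a controller matches only through
 * these wildcards, and refuses the device entirely if wildcard matching
 * has been disabled.
 */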
6869 | { | |
6870 | PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, | |
6871 | PCI_ANY_ID, PCI_ANY_ID) | |
6872 | }, | |
6873 | { 0 } | |
6874 | }; | |
6875 | ||
6876 | MODULE_DEVICE_TABLE(pci, pqi_pci_id_table); | |
6877 | ||
6878 | static struct pci_driver pqi_pci_driver = { | |
6879 | .name = DRIVER_NAME_SHORT, | |
6880 | .id_table = pqi_pci_id_table, | |
6881 | .probe = pqi_pci_probe, | |
6882 | .remove = pqi_pci_remove, | |
6883 | .shutdown = pqi_shutdown, | |
061ef06a KB |
6884 | #if defined(CONFIG_PM) |
6885 | .suspend = pqi_suspend, | |
6886 | .resume = pqi_resume, | |
6887 | #endif | |
6c223761 KB |
6888 | }; |
6889 | ||
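/*
 * Module init: attach the SAS transport template before registering the
 * PCI driver, since the controller probe path depends on it, and release
 * the template again if PCI registration fails.
 */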
6890 | static int __init pqi_init(void) | |
6891 | { | |
6892 | int rc; | |
6893 | ||
6894 | pr_info(DRIVER_NAME "\n"); | |
6895 | ||
6896 | pqi_sas_transport_template = | |
6897 | sas_attach_transport(&pqi_sas_transport_functions); | |
6898 | if (!pqi_sas_transport_template) | |
6899 | return -ENODEV; | |
6900 | ||
3c50976f KB |
6901 | pqi_process_module_params(); |
6902 | ||
6c223761 KB |
6903 | rc = pci_register_driver(&pqi_pci_driver); |
6904 | if (rc) | |
6905 | sas_release_transport(pqi_sas_transport_template); | |
6906 | ||
6907 | return rc; | |
6908 | } | |
6909 | ||
6910 | static void __exit pqi_cleanup(void) | |
6911 | { | |
6912 | pci_unregister_driver(&pqi_pci_driver); | |
6913 | sas_release_transport(pqi_sas_transport_template); | |
6914 | } | |
6915 | ||
6916 | module_init(pqi_init); | |
6917 | module_exit(pqi_cleanup); | |
6918 | ||
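/*
 * Marked unused because it is never called; it exists solely so the
 * BUILD_BUG_ON() compile-time assertions below are evaluated. They pin
 * the layout of every on-wire PQI and BMIC structure to the offsets the
 * firmware expects, so an accidental layout change fails the build
 * instead of corrupting controller communication.
 */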
6919 | static void __attribute__((unused)) verify_structures(void) | |
6920 | { | |
6921 | BUILD_BUG_ON(offsetof(struct pqi_ctrl_registers, | |
6922 | sis_host_to_ctrl_doorbell) != 0x20); | |
6923 | BUILD_BUG_ON(offsetof(struct pqi_ctrl_registers, | |
6924 | sis_interrupt_mask) != 0x34); | |
6925 | BUILD_BUG_ON(offsetof(struct pqi_ctrl_registers, | |
6926 | sis_ctrl_to_host_doorbell) != 0x9c); | |
6927 | BUILD_BUG_ON(offsetof(struct pqi_ctrl_registers, | |
6928 | sis_ctrl_to_host_doorbell_clear) != 0xa0); | |
ff6abb73 KB |
6929 | BUILD_BUG_ON(offsetof(struct pqi_ctrl_registers, |
6930 | sis_driver_scratch) != 0xb0); | |
6c223761 KB |
6931 | BUILD_BUG_ON(offsetof(struct pqi_ctrl_registers, |
6932 | sis_firmware_status) != 0xbc); | |
6933 | BUILD_BUG_ON(offsetof(struct pqi_ctrl_registers, | |
6934 | sis_mailbox) != 0x1000); | |
6935 | BUILD_BUG_ON(offsetof(struct pqi_ctrl_registers, | |
6936 | pqi_registers) != 0x4000); | |
6937 | ||
6938 | BUILD_BUG_ON(offsetof(struct pqi_iu_header, | |
6939 | iu_type) != 0x0); | |
6940 | BUILD_BUG_ON(offsetof(struct pqi_iu_header, | |
6941 | iu_length) != 0x2); | |
6942 | BUILD_BUG_ON(offsetof(struct pqi_iu_header, | |
6943 | response_queue_id) != 0x4); | |
6944 | BUILD_BUG_ON(offsetof(struct pqi_iu_header, | |
6945 | work_area) != 0x6); | |
6946 | BUILD_BUG_ON(sizeof(struct pqi_iu_header) != 0x8); | |
6947 | ||
6948 | BUILD_BUG_ON(offsetof(struct pqi_aio_error_info, | |
6949 | status) != 0x0); | |
6950 | BUILD_BUG_ON(offsetof(struct pqi_aio_error_info, | |
6951 | service_response) != 0x1); | |
6952 | BUILD_BUG_ON(offsetof(struct pqi_aio_error_info, | |
6953 | data_present) != 0x2); | |
6954 | BUILD_BUG_ON(offsetof(struct pqi_aio_error_info, | |
6955 | reserved) != 0x3); | |
6956 | BUILD_BUG_ON(offsetof(struct pqi_aio_error_info, | |
6957 | residual_count) != 0x4); | |
6958 | BUILD_BUG_ON(offsetof(struct pqi_aio_error_info, | |
6959 | data_length) != 0x8); | |
6960 | BUILD_BUG_ON(offsetof(struct pqi_aio_error_info, | |
6961 | reserved1) != 0xa); | |
6962 | BUILD_BUG_ON(offsetof(struct pqi_aio_error_info, | |
6963 | data) != 0xc); | |
6964 | BUILD_BUG_ON(sizeof(struct pqi_aio_error_info) != 0x10c); | |
6965 | ||
6966 | BUILD_BUG_ON(offsetof(struct pqi_raid_error_info, | |
6967 | data_in_result) != 0x0); | |
6968 | BUILD_BUG_ON(offsetof(struct pqi_raid_error_info, | |
6969 | data_out_result) != 0x1); | |
6970 | BUILD_BUG_ON(offsetof(struct pqi_raid_error_info, | |
6971 | reserved) != 0x2); | |
6972 | BUILD_BUG_ON(offsetof(struct pqi_raid_error_info, | |
6973 | status) != 0x5); | |
6974 | BUILD_BUG_ON(offsetof(struct pqi_raid_error_info, | |
6975 | status_qualifier) != 0x6); | |
6976 | BUILD_BUG_ON(offsetof(struct pqi_raid_error_info, | |
6977 | sense_data_length) != 0x8); | |
6978 | BUILD_BUG_ON(offsetof(struct pqi_raid_error_info, | |
6979 | response_data_length) != 0xa); | |
6980 | BUILD_BUG_ON(offsetof(struct pqi_raid_error_info, | |
6981 | data_in_transferred) != 0xc); | |
6982 | BUILD_BUG_ON(offsetof(struct pqi_raid_error_info, | |
6983 | data_out_transferred) != 0x10); | |
6984 | BUILD_BUG_ON(offsetof(struct pqi_raid_error_info, | |
6985 | data) != 0x14); | |
6986 | BUILD_BUG_ON(sizeof(struct pqi_raid_error_info) != 0x114); | |
6987 | ||
6988 | BUILD_BUG_ON(offsetof(struct pqi_device_registers, | |
6989 | signature) != 0x0); | |
6990 | BUILD_BUG_ON(offsetof(struct pqi_device_registers, | |
6991 | function_and_status_code) != 0x8); | |
6992 | BUILD_BUG_ON(offsetof(struct pqi_device_registers, | |
6993 | max_admin_iq_elements) != 0x10); | |
6994 | BUILD_BUG_ON(offsetof(struct pqi_device_registers, | |
6995 | max_admin_oq_elements) != 0x11); | |
6996 | BUILD_BUG_ON(offsetof(struct pqi_device_registers, | |
6997 | admin_iq_element_length) != 0x12); | |
6998 | BUILD_BUG_ON(offsetof(struct pqi_device_registers, | |
6999 | admin_oq_element_length) != 0x13); | |
7000 | BUILD_BUG_ON(offsetof(struct pqi_device_registers, | |
7001 | max_reset_timeout) != 0x14); | |
7002 | BUILD_BUG_ON(offsetof(struct pqi_device_registers, | |
7003 | legacy_intx_status) != 0x18); | |
7004 | BUILD_BUG_ON(offsetof(struct pqi_device_registers, | |
7005 | legacy_intx_mask_set) != 0x1c); | |
7006 | BUILD_BUG_ON(offsetof(struct pqi_device_registers, | |
7007 | legacy_intx_mask_clear) != 0x20); | |
7008 | BUILD_BUG_ON(offsetof(struct pqi_device_registers, | |
7009 | device_status) != 0x40); | |
7010 | BUILD_BUG_ON(offsetof(struct pqi_device_registers, | |
7011 | admin_iq_pi_offset) != 0x48); | |
7012 | BUILD_BUG_ON(offsetof(struct pqi_device_registers, | |
7013 | admin_oq_ci_offset) != 0x50); | |
7014 | BUILD_BUG_ON(offsetof(struct pqi_device_registers, | |
7015 | admin_iq_element_array_addr) != 0x58); | |
7016 | BUILD_BUG_ON(offsetof(struct pqi_device_registers, | |
7017 | admin_oq_element_array_addr) != 0x60); | |
7018 | BUILD_BUG_ON(offsetof(struct pqi_device_registers, | |
7019 | admin_iq_ci_addr) != 0x68); | |
7020 | BUILD_BUG_ON(offsetof(struct pqi_device_registers, | |
7021 | admin_oq_pi_addr) != 0x70); | |
7022 | BUILD_BUG_ON(offsetof(struct pqi_device_registers, | |
7023 | admin_iq_num_elements) != 0x78); | |
7024 | BUILD_BUG_ON(offsetof(struct pqi_device_registers, | |
7025 | admin_oq_num_elements) != 0x79); | |
7026 | BUILD_BUG_ON(offsetof(struct pqi_device_registers, | |
7027 | admin_queue_int_msg_num) != 0x7a); | |
7028 | BUILD_BUG_ON(offsetof(struct pqi_device_registers, | |
7029 | device_error) != 0x80); | |
7030 | BUILD_BUG_ON(offsetof(struct pqi_device_registers, | |
7031 | error_details) != 0x88); | |
7032 | BUILD_BUG_ON(offsetof(struct pqi_device_registers, | |
7033 | device_reset) != 0x90); | |
7034 | BUILD_BUG_ON(offsetof(struct pqi_device_registers, | |
7035 | power_action) != 0x94); | |
7036 | BUILD_BUG_ON(sizeof(struct pqi_device_registers) != 0x100); | |
7037 | ||
7038 | BUILD_BUG_ON(offsetof(struct pqi_general_admin_request, | |
7039 | header.iu_type) != 0); | |
7040 | BUILD_BUG_ON(offsetof(struct pqi_general_admin_request, | |
7041 | header.iu_length) != 2); | |
7042 | BUILD_BUG_ON(offsetof(struct pqi_general_admin_request, | |
7043 | header.work_area) != 6); | |
7044 | BUILD_BUG_ON(offsetof(struct pqi_general_admin_request, | |
7045 | request_id) != 8); | |
7046 | BUILD_BUG_ON(offsetof(struct pqi_general_admin_request, | |
7047 | function_code) != 10); | |
7048 | BUILD_BUG_ON(offsetof(struct pqi_general_admin_request, | |
7049 | data.report_device_capability.buffer_length) != 44); | |
7050 | BUILD_BUG_ON(offsetof(struct pqi_general_admin_request, | |
7051 | data.report_device_capability.sg_descriptor) != 48); | |
7052 | BUILD_BUG_ON(offsetof(struct pqi_general_admin_request, | |
7053 | data.create_operational_iq.queue_id) != 12); | |
7054 | BUILD_BUG_ON(offsetof(struct pqi_general_admin_request, | |
7055 | data.create_operational_iq.element_array_addr) != 16); | |
7056 | BUILD_BUG_ON(offsetof(struct pqi_general_admin_request, | |
7057 | data.create_operational_iq.ci_addr) != 24); | |
7058 | BUILD_BUG_ON(offsetof(struct pqi_general_admin_request, | |
7059 | data.create_operational_iq.num_elements) != 32); | |
7060 | BUILD_BUG_ON(offsetof(struct pqi_general_admin_request, | |
7061 | data.create_operational_iq.element_length) != 34); | |
7062 | BUILD_BUG_ON(offsetof(struct pqi_general_admin_request, | |
7063 | data.create_operational_iq.queue_protocol) != 36); | |
7064 | BUILD_BUG_ON(offsetof(struct pqi_general_admin_request, | |
7065 | data.create_operational_oq.queue_id) != 12); | |
7066 | BUILD_BUG_ON(offsetof(struct pqi_general_admin_request, | |
7067 | data.create_operational_oq.element_array_addr) != 16); | |
7068 | BUILD_BUG_ON(offsetof(struct pqi_general_admin_request, | |
7069 | data.create_operational_oq.pi_addr) != 24); | |
7070 | BUILD_BUG_ON(offsetof(struct pqi_general_admin_request, | |
7071 | data.create_operational_oq.num_elements) != 32); | |
7072 | BUILD_BUG_ON(offsetof(struct pqi_general_admin_request, | |
7073 | data.create_operational_oq.element_length) != 34); | |
7074 | BUILD_BUG_ON(offsetof(struct pqi_general_admin_request, | |
7075 | data.create_operational_oq.queue_protocol) != 36); | |
7076 | BUILD_BUG_ON(offsetof(struct pqi_general_admin_request, | |
7077 | data.create_operational_oq.int_msg_num) != 40); | |
7078 | BUILD_BUG_ON(offsetof(struct pqi_general_admin_request, | |
7079 | data.create_operational_oq.coalescing_count) != 42); | |
7080 | BUILD_BUG_ON(offsetof(struct pqi_general_admin_request, | |
7081 | data.create_operational_oq.min_coalescing_time) != 44); | |
7082 | BUILD_BUG_ON(offsetof(struct pqi_general_admin_request, | |
7083 | data.create_operational_oq.max_coalescing_time) != 48); | |
7084 | BUILD_BUG_ON(offsetof(struct pqi_general_admin_request, | |
7085 | data.delete_operational_queue.queue_id) != 12); | |
7086 | BUILD_BUG_ON(sizeof(struct pqi_general_admin_request) != 64); | |
7087 | BUILD_BUG_ON(FIELD_SIZEOF(struct pqi_general_admin_request, | |
7088 | data.create_operational_iq) != 64 - 11); | |
7089 | BUILD_BUG_ON(FIELD_SIZEOF(struct pqi_general_admin_request, | |
7090 | data.create_operational_oq) != 64 - 11); | |
7091 | BUILD_BUG_ON(FIELD_SIZEOF(struct pqi_general_admin_request, | |
7092 | data.delete_operational_queue) != 64 - 11); | |
7093 | ||
7094 | BUILD_BUG_ON(offsetof(struct pqi_general_admin_response, | |
7095 | header.iu_type) != 0); | |
7096 | BUILD_BUG_ON(offsetof(struct pqi_general_admin_response, | |
7097 | header.iu_length) != 2); | |
7098 | BUILD_BUG_ON(offsetof(struct pqi_general_admin_response, | |
7099 | header.work_area) != 6); | |
7100 | BUILD_BUG_ON(offsetof(struct pqi_general_admin_response, | |
7101 | request_id) != 8); | |
7102 | BUILD_BUG_ON(offsetof(struct pqi_general_admin_response, | |
7103 | function_code) != 10); | |
7104 | BUILD_BUG_ON(offsetof(struct pqi_general_admin_response, | |
7105 | status) != 11); | |
7106 | BUILD_BUG_ON(offsetof(struct pqi_general_admin_response, | |
7107 | data.create_operational_iq.status_descriptor) != 12); | |
7108 | BUILD_BUG_ON(offsetof(struct pqi_general_admin_response, | |
7109 | data.create_operational_iq.iq_pi_offset) != 16); | |
7110 | BUILD_BUG_ON(offsetof(struct pqi_general_admin_response, | |
7111 | data.create_operational_oq.status_descriptor) != 12); | |
7112 | BUILD_BUG_ON(offsetof(struct pqi_general_admin_response, | |
7113 | data.create_operational_oq.oq_ci_offset) != 16); | |
7114 | BUILD_BUG_ON(sizeof(struct pqi_general_admin_response) != 64); | |
7115 | ||
7116 | BUILD_BUG_ON(offsetof(struct pqi_raid_path_request, | |
7117 | header.iu_type) != 0); | |
7118 | BUILD_BUG_ON(offsetof(struct pqi_raid_path_request, | |
7119 | header.iu_length) != 2); | |
7120 | BUILD_BUG_ON(offsetof(struct pqi_raid_path_request, | |
7121 | header.response_queue_id) != 4); | |
7122 | BUILD_BUG_ON(offsetof(struct pqi_raid_path_request, | |
7123 | header.work_area) != 6); | |
7124 | BUILD_BUG_ON(offsetof(struct pqi_raid_path_request, | |
7125 | request_id) != 8); | |
7126 | BUILD_BUG_ON(offsetof(struct pqi_raid_path_request, | |
7127 | nexus_id) != 10); | |
7128 | BUILD_BUG_ON(offsetof(struct pqi_raid_path_request, | |
7129 | buffer_length) != 12); | |
7130 | BUILD_BUG_ON(offsetof(struct pqi_raid_path_request, | |
7131 | lun_number) != 16); | |
7132 | BUILD_BUG_ON(offsetof(struct pqi_raid_path_request, | |
7133 | protocol_specific) != 24); | |
7134 | BUILD_BUG_ON(offsetof(struct pqi_raid_path_request, | |
7135 | error_index) != 27); | |
7136 | BUILD_BUG_ON(offsetof(struct pqi_raid_path_request, | |
7137 | cdb) != 32); | |
7138 | BUILD_BUG_ON(offsetof(struct pqi_raid_path_request, | |
7139 | sg_descriptors) != 64); | |
7140 | BUILD_BUG_ON(sizeof(struct pqi_raid_path_request) != | |
7141 | PQI_OPERATIONAL_IQ_ELEMENT_LENGTH); | |
7142 | ||
7143 | BUILD_BUG_ON(offsetof(struct pqi_aio_path_request, | |
7144 | header.iu_type) != 0); | |
7145 | BUILD_BUG_ON(offsetof(struct pqi_aio_path_request, | |
7146 | header.iu_length) != 2); | |
7147 | BUILD_BUG_ON(offsetof(struct pqi_aio_path_request, | |
7148 | header.response_queue_id) != 4); | |
7149 | BUILD_BUG_ON(offsetof(struct pqi_aio_path_request, | |
7150 | header.work_area) != 6); | |
7151 | BUILD_BUG_ON(offsetof(struct pqi_aio_path_request, | |
7152 | request_id) != 8); | |
7153 | BUILD_BUG_ON(offsetof(struct pqi_aio_path_request, | |
7154 | nexus_id) != 12); | |
7155 | BUILD_BUG_ON(offsetof(struct pqi_aio_path_request, | |
7156 | buffer_length) != 16); | |
7157 | BUILD_BUG_ON(offsetof(struct pqi_aio_path_request, | |
7158 | data_encryption_key_index) != 22); | |
7159 | BUILD_BUG_ON(offsetof(struct pqi_aio_path_request, | |
7160 | encrypt_tweak_lower) != 24); | |
7161 | BUILD_BUG_ON(offsetof(struct pqi_aio_path_request, | |
7162 | encrypt_tweak_upper) != 28); | |
7163 | BUILD_BUG_ON(offsetof(struct pqi_aio_path_request, | |
7164 | cdb) != 32); | |
7165 | BUILD_BUG_ON(offsetof(struct pqi_aio_path_request, | |
7166 | error_index) != 48); | |
7167 | BUILD_BUG_ON(offsetof(struct pqi_aio_path_request, | |
7168 | num_sg_descriptors) != 50); | |
7169 | BUILD_BUG_ON(offsetof(struct pqi_aio_path_request, | |
7170 | cdb_length) != 51); | |
7171 | BUILD_BUG_ON(offsetof(struct pqi_aio_path_request, | |
7172 | lun_number) != 52); | |
7173 | BUILD_BUG_ON(offsetof(struct pqi_aio_path_request, | |
7174 | sg_descriptors) != 64); | |
7175 | BUILD_BUG_ON(sizeof(struct pqi_aio_path_request) != | |
7176 | PQI_OPERATIONAL_IQ_ELEMENT_LENGTH); | |
7177 | ||
7178 | BUILD_BUG_ON(offsetof(struct pqi_io_response, | |
7179 | header.iu_type) != 0); | |
7180 | BUILD_BUG_ON(offsetof(struct pqi_io_response, | |
7181 | header.iu_length) != 2); | |
7182 | BUILD_BUG_ON(offsetof(struct pqi_io_response, | |
7183 | request_id) != 8); | |
7184 | BUILD_BUG_ON(offsetof(struct pqi_io_response, | |
7185 | error_index) != 10); | |
7186 | ||
7187 | BUILD_BUG_ON(offsetof(struct pqi_general_management_request, | |
7188 | header.iu_type) != 0); | |
7189 | BUILD_BUG_ON(offsetof(struct pqi_general_management_request, | |
7190 | header.iu_length) != 2); | |
7191 | BUILD_BUG_ON(offsetof(struct pqi_general_management_request, | |
7192 | header.response_queue_id) != 4); | |
7193 | BUILD_BUG_ON(offsetof(struct pqi_general_management_request, | |
7194 | request_id) != 8); | |
7195 | BUILD_BUG_ON(offsetof(struct pqi_general_management_request, | |
7196 | data.report_event_configuration.buffer_length) != 12); | |
7197 | BUILD_BUG_ON(offsetof(struct pqi_general_management_request, | |
7198 | data.report_event_configuration.sg_descriptors) != 16); | |
7199 | BUILD_BUG_ON(offsetof(struct pqi_general_management_request, | |
7200 | data.set_event_configuration.global_event_oq_id) != 10); | |
7201 | BUILD_BUG_ON(offsetof(struct pqi_general_management_request, | |
7202 | data.set_event_configuration.buffer_length) != 12); | |
7203 | BUILD_BUG_ON(offsetof(struct pqi_general_management_request, | |
7204 | data.set_event_configuration.sg_descriptors) != 16); | |
7205 | ||
7206 | BUILD_BUG_ON(offsetof(struct pqi_iu_layer_descriptor, | |
7207 | max_inbound_iu_length) != 6); | |
7208 | BUILD_BUG_ON(offsetof(struct pqi_iu_layer_descriptor, | |
7209 | max_outbound_iu_length) != 14); | |
7210 | BUILD_BUG_ON(sizeof(struct pqi_iu_layer_descriptor) != 16); | |
7211 | ||
7212 | BUILD_BUG_ON(offsetof(struct pqi_device_capability, | |
7213 | data_length) != 0); | |
7214 | BUILD_BUG_ON(offsetof(struct pqi_device_capability, | |
7215 | iq_arbitration_priority_support_bitmask) != 8); | |
7216 | BUILD_BUG_ON(offsetof(struct pqi_device_capability, | |
7217 | maximum_aw_a) != 9); | |
7218 | BUILD_BUG_ON(offsetof(struct pqi_device_capability, | |
7219 | maximum_aw_b) != 10); | |
7220 | BUILD_BUG_ON(offsetof(struct pqi_device_capability, | |
7221 | maximum_aw_c) != 11); | |
7222 | BUILD_BUG_ON(offsetof(struct pqi_device_capability, | |
7223 | max_inbound_queues) != 16); | |
7224 | BUILD_BUG_ON(offsetof(struct pqi_device_capability, | |
7225 | max_elements_per_iq) != 18); | |
7226 | BUILD_BUG_ON(offsetof(struct pqi_device_capability, | |
7227 | max_iq_element_length) != 24); | |
7228 | BUILD_BUG_ON(offsetof(struct pqi_device_capability, | |
7229 | min_iq_element_length) != 26); | |
7230 | BUILD_BUG_ON(offsetof(struct pqi_device_capability, | |
7231 | max_outbound_queues) != 30); | |
7232 | BUILD_BUG_ON(offsetof(struct pqi_device_capability, | |
7233 | max_elements_per_oq) != 32); | |
7234 | BUILD_BUG_ON(offsetof(struct pqi_device_capability, | |
7235 | intr_coalescing_time_granularity) != 34); | |
7236 | BUILD_BUG_ON(offsetof(struct pqi_device_capability, | |
7237 | max_oq_element_length) != 36); | |
7238 | BUILD_BUG_ON(offsetof(struct pqi_device_capability, | |
7239 | min_oq_element_length) != 38); | |
7240 | BUILD_BUG_ON(offsetof(struct pqi_device_capability, | |
7241 | iu_layer_descriptors) != 64); | |
7242 | BUILD_BUG_ON(sizeof(struct pqi_device_capability) != 576); | |
7243 | ||
7244 | BUILD_BUG_ON(offsetof(struct pqi_event_descriptor, | |
7245 | event_type) != 0); | |
7246 | BUILD_BUG_ON(offsetof(struct pqi_event_descriptor, | |
7247 | oq_id) != 2); | |
7248 | BUILD_BUG_ON(sizeof(struct pqi_event_descriptor) != 4); | |
7249 | ||
7250 | BUILD_BUG_ON(offsetof(struct pqi_event_config, | |
7251 | num_event_descriptors) != 2); | |
7252 | BUILD_BUG_ON(offsetof(struct pqi_event_config, | |
7253 | descriptors) != 4); | |
7254 | ||
061ef06a KB |
7255 | BUILD_BUG_ON(PQI_NUM_SUPPORTED_EVENTS != |
7256 | ARRAY_SIZE(pqi_supported_event_types)); | |
7257 | ||
6c223761 KB |
7258 | BUILD_BUG_ON(offsetof(struct pqi_event_response, |
7259 | header.iu_type) != 0); | |
7260 | BUILD_BUG_ON(offsetof(struct pqi_event_response, | |
7261 | header.iu_length) != 2); | |
7262 | BUILD_BUG_ON(offsetof(struct pqi_event_response, | |
7263 | event_type) != 8); | |
7264 | BUILD_BUG_ON(offsetof(struct pqi_event_response, | |
7265 | event_id) != 10); | |
7266 | BUILD_BUG_ON(offsetof(struct pqi_event_response, | |
7267 | additional_event_id) != 12); | |
7268 | BUILD_BUG_ON(offsetof(struct pqi_event_response, | |
7269 | data) != 16); | |
7270 | BUILD_BUG_ON(sizeof(struct pqi_event_response) != 32); | |
7271 | ||
7272 | BUILD_BUG_ON(offsetof(struct pqi_event_acknowledge_request, | |
7273 | header.iu_type) != 0); | |
7274 | BUILD_BUG_ON(offsetof(struct pqi_event_acknowledge_request, | |
7275 | header.iu_length) != 2); | |
7276 | BUILD_BUG_ON(offsetof(struct pqi_event_acknowledge_request, | |
7277 | event_type) != 8); | |
7278 | BUILD_BUG_ON(offsetof(struct pqi_event_acknowledge_request, | |
7279 | event_id) != 10); | |
7280 | BUILD_BUG_ON(offsetof(struct pqi_event_acknowledge_request, | |
7281 | additional_event_id) != 12); | |
7282 | BUILD_BUG_ON(sizeof(struct pqi_event_acknowledge_request) != 16); | |
7283 | ||
7284 | BUILD_BUG_ON(offsetof(struct pqi_task_management_request, | |
7285 | header.iu_type) != 0); | |
7286 | BUILD_BUG_ON(offsetof(struct pqi_task_management_request, | |
7287 | header.iu_length) != 2); | |
7288 | BUILD_BUG_ON(offsetof(struct pqi_task_management_request, | |
7289 | request_id) != 8); | |
7290 | BUILD_BUG_ON(offsetof(struct pqi_task_management_request, | |
7291 | nexus_id) != 10); | |
7292 | BUILD_BUG_ON(offsetof(struct pqi_task_management_request, | |
7293 | lun_number) != 16); | |
7294 | BUILD_BUG_ON(offsetof(struct pqi_task_management_request, | |
7295 | protocol_specific) != 24); | |
7296 | BUILD_BUG_ON(offsetof(struct pqi_task_management_request, | |
7297 | outbound_queue_id_to_manage) != 26); | |
7298 | BUILD_BUG_ON(offsetof(struct pqi_task_management_request, | |
7299 | request_id_to_manage) != 28); | |
7300 | BUILD_BUG_ON(offsetof(struct pqi_task_management_request, | |
7301 | task_management_function) != 30); | |
7302 | BUILD_BUG_ON(sizeof(struct pqi_task_management_request) != 32); | |
7303 | ||
7304 | BUILD_BUG_ON(offsetof(struct pqi_task_management_response, | |
7305 | header.iu_type) != 0); | |
7306 | BUILD_BUG_ON(offsetof(struct pqi_task_management_response, | |
7307 | header.iu_length) != 2); | |
7308 | BUILD_BUG_ON(offsetof(struct pqi_task_management_response, | |
7309 | request_id) != 8); | |
7310 | BUILD_BUG_ON(offsetof(struct pqi_task_management_response, | |
7311 | nexus_id) != 10); | |
7312 | BUILD_BUG_ON(offsetof(struct pqi_task_management_response, | |
7313 | additional_response_info) != 12); | |
7314 | BUILD_BUG_ON(offsetof(struct pqi_task_management_response, | |
7315 | response_code) != 15); | |
7316 | BUILD_BUG_ON(sizeof(struct pqi_task_management_response) != 16); | |
7317 | ||
7318 | BUILD_BUG_ON(offsetof(struct bmic_identify_controller, | |
7319 | configured_logical_drive_count) != 0); | |
7320 | BUILD_BUG_ON(offsetof(struct bmic_identify_controller, | |
7321 | configuration_signature) != 1); | |
7322 | BUILD_BUG_ON(offsetof(struct bmic_identify_controller, | |
7323 | firmware_version) != 5); | |
7324 | BUILD_BUG_ON(offsetof(struct bmic_identify_controller, | |
7325 | extended_logical_unit_count) != 154); | |
7326 | BUILD_BUG_ON(offsetof(struct bmic_identify_controller, | |
7327 | firmware_build_number) != 190); | |
7328 | BUILD_BUG_ON(offsetof(struct bmic_identify_controller, | |
7329 | controller_mode) != 292); | |
7330 | ||
1be42f46 KB |
7331 | BUILD_BUG_ON(offsetof(struct bmic_identify_physical_device, |
7332 | phys_bay_in_box) != 115); | |
7333 | BUILD_BUG_ON(offsetof(struct bmic_identify_physical_device, | |
7334 | device_type) != 120); | |
7335 | BUILD_BUG_ON(offsetof(struct bmic_identify_physical_device, | |
7336 | redundant_path_present_map) != 1736); | |
7337 | BUILD_BUG_ON(offsetof(struct bmic_identify_physical_device, | |
7338 | active_path_number) != 1738); | |
7339 | BUILD_BUG_ON(offsetof(struct bmic_identify_physical_device, | |
7340 | alternate_paths_phys_connector) != 1739); | |
7341 | BUILD_BUG_ON(offsetof(struct bmic_identify_physical_device, | |
7342 | alternate_paths_phys_box_on_port) != 1755); | |
7343 | BUILD_BUG_ON(offsetof(struct bmic_identify_physical_device, | |
7344 | current_queue_depth_limit) != 1796); | |
7345 | BUILD_BUG_ON(sizeof(struct bmic_identify_physical_device) != 2560); | |
7346 | ||
6c223761 KB |
7347 | BUILD_BUG_ON(PQI_ADMIN_IQ_NUM_ELEMENTS > 255); |
7348 | BUILD_BUG_ON(PQI_ADMIN_OQ_NUM_ELEMENTS > 255); | |
7349 | BUILD_BUG_ON(PQI_ADMIN_IQ_ELEMENT_LENGTH % | |
7350 | PQI_QUEUE_ELEMENT_LENGTH_ALIGNMENT != 0); | |
7351 | BUILD_BUG_ON(PQI_ADMIN_OQ_ELEMENT_LENGTH % | |
7352 | PQI_QUEUE_ELEMENT_LENGTH_ALIGNMENT != 0); | |
7353 | BUILD_BUG_ON(PQI_OPERATIONAL_IQ_ELEMENT_LENGTH > 1048560); | |
7354 | BUILD_BUG_ON(PQI_OPERATIONAL_IQ_ELEMENT_LENGTH % | |
7355 | PQI_QUEUE_ELEMENT_LENGTH_ALIGNMENT != 0); | |
7356 | BUILD_BUG_ON(PQI_OPERATIONAL_OQ_ELEMENT_LENGTH > 1048560); | |
7357 | BUILD_BUG_ON(PQI_OPERATIONAL_OQ_ELEMENT_LENGTH % | |
7358 | PQI_QUEUE_ELEMENT_LENGTH_ALIGNMENT != 0); | |
7359 | ||
7360 | BUILD_BUG_ON(PQI_RESERVED_IO_SLOTS >= PQI_MAX_OUTSTANDING_REQUESTS); | |
d727a776 KB |
7361 | BUILD_BUG_ON(PQI_RESERVED_IO_SLOTS >= |
7362 | PQI_MAX_OUTSTANDING_REQUESTS_KDUMP); | |
6c223761 | 7363 | } |