2 * ipr.c -- driver for IBM Power Linux RAID adapters
4 * Written By: Brian King <brking@us.ibm.com>, IBM Corporation
6 * Copyright (C) 2003, 2004 IBM Corporation
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License as published by
10 * the Free Software Foundation; either version 2 of the License, or
11 * (at your option) any later version.
13 * This program is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 * GNU General Public License for more details.
18 * You should have received a copy of the GNU General Public License
19 * along with this program; if not, write to the Free Software
20 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
27 * This driver is used to control the following SCSI adapters:
29 * IBM iSeries: 5702, 5703, 2780, 5709, 570A, 570B
31 * IBM pSeries: PCI-X Dual Channel Ultra 320 SCSI RAID Adapter
32 * PCI-X Dual Channel Ultra 320 SCSI Adapter
33 * PCI-X Dual Channel Ultra 320 SCSI RAID Enablement Card
34 * Embedded SCSI adapter on p615 and p655 systems
36 * Supported Hardware Features:
37 * - Ultra 320 SCSI controller
38 * - PCI-X host interface
39 * - Embedded PowerPC RISC Processor and Hardware XOR DMA Engine
40 * - Non-Volatile Write Cache
41 * - Supports attachment of non-RAID disks, tape, and optical devices
42 * - RAID Levels 0, 5, 10
44 * - Background Parity Checking
45 * - Background Data Scrubbing
46 * - Ability to increase the capacity of an existing RAID 5 disk array
50 * - Tagged command queuing
51 * - Adapter microcode download
53 * - SCSI device hot plug
58 #include <linux/init.h>
59 #include <linux/types.h>
60 #include <linux/errno.h>
61 #include <linux/kernel.h>
62 #include <linux/slab.h>
63 #include <linux/vmalloc.h>
64 #include <linux/ioport.h>
65 #include <linux/delay.h>
66 #include <linux/pci.h>
67 #include <linux/wait.h>
68 #include <linux/spinlock.h>
69 #include <linux/sched.h>
70 #include <linux/interrupt.h>
71 #include <linux/blkdev.h>
72 #include <linux/firmware.h>
73 #include <linux/module.h>
74 #include <linux/moduleparam.h>
75 #include <linux/libata.h>
76 #include <linux/hdreg.h>
77 #include <linux/reboot.h>
78 #include <linux/stringify.h>
81 #include <asm/processor.h>
82 #include <scsi/scsi.h>
83 #include <scsi/scsi_host.h>
84 #include <scsi/scsi_tcq.h>
85 #include <scsi/scsi_eh.h>
86 #include <scsi/scsi_cmnd.h>
/*
 * Driver-global state and tunables.  The ipr_* variables below are
 * exposed as module parameters further down in this file.
 */
static LIST_HEAD(ipr_ioa_head);
static unsigned int ipr_log_level = IPR_DEFAULT_LOG_LEVEL;
static unsigned int ipr_max_speed = 1;	/* index 0-2 into ipr_max_bus_speeds[] */
static int ipr_testmode = 0;
static unsigned int ipr_fastfail = 0;
static unsigned int ipr_transop_timeout = 0;
static unsigned int ipr_debug = 0;
static unsigned int ipr_max_devs = IPR_DEFAULT_SIS64_DEVS;
static unsigned int ipr_dual_ioa_raid = 1;
static unsigned int ipr_number_of_msix = 16;
static unsigned int ipr_fast_reboot;	/* implicitly zero-initialized */
/* NOTE(review): presumably serializes access to ipr_ioa_head -- confirm */
static DEFINE_SPINLOCK(ipr_driver_lock);
105 /* This table describes the differences between DMA controller chips */
106 static const struct ipr_chip_cfg_t ipr_chip_cfg
[] = {
107 { /* Gemstone, Citrine, Obsidian, and Obsidian-E */
110 .cache_line_size
= 0x20,
114 .set_interrupt_mask_reg
= 0x0022C,
115 .clr_interrupt_mask_reg
= 0x00230,
116 .clr_interrupt_mask_reg32
= 0x00230,
117 .sense_interrupt_mask_reg
= 0x0022C,
118 .sense_interrupt_mask_reg32
= 0x0022C,
119 .clr_interrupt_reg
= 0x00228,
120 .clr_interrupt_reg32
= 0x00228,
121 .sense_interrupt_reg
= 0x00224,
122 .sense_interrupt_reg32
= 0x00224,
123 .ioarrin_reg
= 0x00404,
124 .sense_uproc_interrupt_reg
= 0x00214,
125 .sense_uproc_interrupt_reg32
= 0x00214,
126 .set_uproc_interrupt_reg
= 0x00214,
127 .set_uproc_interrupt_reg32
= 0x00214,
128 .clr_uproc_interrupt_reg
= 0x00218,
129 .clr_uproc_interrupt_reg32
= 0x00218
132 { /* Snipe and Scamp */
135 .cache_line_size
= 0x20,
139 .set_interrupt_mask_reg
= 0x00288,
140 .clr_interrupt_mask_reg
= 0x0028C,
141 .clr_interrupt_mask_reg32
= 0x0028C,
142 .sense_interrupt_mask_reg
= 0x00288,
143 .sense_interrupt_mask_reg32
= 0x00288,
144 .clr_interrupt_reg
= 0x00284,
145 .clr_interrupt_reg32
= 0x00284,
146 .sense_interrupt_reg
= 0x00280,
147 .sense_interrupt_reg32
= 0x00280,
148 .ioarrin_reg
= 0x00504,
149 .sense_uproc_interrupt_reg
= 0x00290,
150 .sense_uproc_interrupt_reg32
= 0x00290,
151 .set_uproc_interrupt_reg
= 0x00290,
152 .set_uproc_interrupt_reg32
= 0x00290,
153 .clr_uproc_interrupt_reg
= 0x00294,
154 .clr_uproc_interrupt_reg32
= 0x00294
160 .cache_line_size
= 0x20,
164 .set_interrupt_mask_reg
= 0x00010,
165 .clr_interrupt_mask_reg
= 0x00018,
166 .clr_interrupt_mask_reg32
= 0x0001C,
167 .sense_interrupt_mask_reg
= 0x00010,
168 .sense_interrupt_mask_reg32
= 0x00014,
169 .clr_interrupt_reg
= 0x00008,
170 .clr_interrupt_reg32
= 0x0000C,
171 .sense_interrupt_reg
= 0x00000,
172 .sense_interrupt_reg32
= 0x00004,
173 .ioarrin_reg
= 0x00070,
174 .sense_uproc_interrupt_reg
= 0x00020,
175 .sense_uproc_interrupt_reg32
= 0x00024,
176 .set_uproc_interrupt_reg
= 0x00020,
177 .set_uproc_interrupt_reg32
= 0x00024,
178 .clr_uproc_interrupt_reg
= 0x00028,
179 .clr_uproc_interrupt_reg32
= 0x0002C,
180 .init_feedback_reg
= 0x0005C,
181 .dump_addr_reg
= 0x00064,
182 .dump_data_reg
= 0x00068,
183 .endian_swap_reg
= 0x00084
/*
 * Maps PCI vendor/device IDs to chip properties and the register-offset
 * layouts in ipr_chip_cfg[].  Per entry: vendor id, device id, a boolean
 * flag (NOTE(review): field name not visible in this chunk -- confirm its
 * meaning against struct ipr_chip_t), SIS interface level (32 vs 64 bit),
 * config access method (PCI config space vs MMIO), and cfg table pointer.
 */
static const struct ipr_chip_t ipr_chip[] = {
	{ PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE, false, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[0] },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE, false, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[0] },
	{ PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_OBSIDIAN, false, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[0] },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN, false, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[0] },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E, true, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[0] },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_SNIPE, false, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[1] },
	{ PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_SCAMP, false, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[1] },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2, true, IPR_SIS64, IPR_MMIO, &ipr_chip_cfg[2] },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE, true, IPR_SIS64, IPR_MMIO, &ipr_chip_cfg[2] },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_RATTLESNAKE, true, IPR_SIS64, IPR_MMIO, &ipr_chip_cfg[2] }
/* SCSI bus rate limits, indexed by the max_speed module parameter (0-2). */
static int ipr_max_bus_speeds[] = {
	IPR_80MBs_SCSI_RATE, IPR_U160_SCSI_RATE, IPR_U320_SCSI_RATE
/*
 * Module metadata and parameters.  Most parameters are load-time only
 * (perm 0); fastfail, debug and fast_reboot are additionally runtime
 * writable via sysfs (S_IRUGO | S_IWUSR).
 */
MODULE_AUTHOR("Brian King <brking@us.ibm.com>");
MODULE_DESCRIPTION("IBM Power RAID SCSI Adapter Driver");
module_param_named(max_speed, ipr_max_speed, uint, 0);
MODULE_PARM_DESC(max_speed, "Maximum bus speed (0-2). Default: 1=U160. Speeds: 0=80 MB/s, 1=U160, 2=U320");
module_param_named(log_level, ipr_log_level, uint, 0);
MODULE_PARM_DESC(log_level, "Set to 0 - 4 for increasing verbosity of device driver");
module_param_named(testmode, ipr_testmode, int, 0);
MODULE_PARM_DESC(testmode, "DANGEROUS!!! Allows unsupported configurations");
module_param_named(fastfail, ipr_fastfail, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(fastfail, "Reduce timeouts and retries");
module_param_named(transop_timeout, ipr_transop_timeout, int, 0);
MODULE_PARM_DESC(transop_timeout, "Time in seconds to wait for adapter to come operational (default: 300)");
module_param_named(debug, ipr_debug, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(debug, "Enable device driver debugging logging. Set to 1 to enable. (default: 0)");
module_param_named(dual_ioa_raid, ipr_dual_ioa_raid, int, 0);
MODULE_PARM_DESC(dual_ioa_raid, "Enable dual adapter RAID support. Set to 1 to enable. (default: 1)");
module_param_named(max_devs, ipr_max_devs, int, 0);
MODULE_PARM_DESC(max_devs, "Specify the maximum number of physical devices. "
		 "[Default=" __stringify(IPR_DEFAULT_SIS64_DEVS) "]");
module_param_named(number_of_msix, ipr_number_of_msix, int, 0);
MODULE_PARM_DESC(number_of_msix, "Specify the number of MSIX interrupts to use on capable adapters (1 - 16). (default:16)");
module_param_named(fast_reboot, ipr_fast_reboot, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(fast_reboot, "Skip adapter shutdown during reboot. Set to 1 to enable. (default: 0)");
MODULE_LICENSE("GPL");
MODULE_VERSION(IPR_DRIVER_VERSION);
231 /* A constant array of IOASCs/URCs/Error Messages */
233 struct ipr_error_table_t ipr_error_table
[] = {
234 {0x00000000, 1, IPR_DEFAULT_LOG_LEVEL
,
235 "8155: An unknown error was received"},
237 "Soft underlength error"},
239 "Command to be cancelled not found"},
241 "Qualified success"},
242 {0x01080000, 1, IPR_DEFAULT_LOG_LEVEL
,
243 "FFFE: Soft device bus error recovered by the IOA"},
244 {0x01088100, 0, IPR_DEFAULT_LOG_LEVEL
,
245 "4101: Soft device bus fabric error"},
246 {0x01100100, 0, IPR_DEFAULT_LOG_LEVEL
,
247 "FFFC: Logical block guard error recovered by the device"},
248 {0x01100300, 0, IPR_DEFAULT_LOG_LEVEL
,
249 "FFFC: Logical block reference tag error recovered by the device"},
250 {0x01108300, 0, IPR_DEFAULT_LOG_LEVEL
,
251 "4171: Recovered scatter list tag / sequence number error"},
252 {0x01109000, 0, IPR_DEFAULT_LOG_LEVEL
,
253 "FF3D: Recovered logical block CRC error on IOA to Host transfer"},
254 {0x01109200, 0, IPR_DEFAULT_LOG_LEVEL
,
255 "4171: Recovered logical block sequence number error on IOA to Host transfer"},
256 {0x0110A000, 0, IPR_DEFAULT_LOG_LEVEL
,
257 "FFFD: Recovered logical block reference tag error detected by the IOA"},
258 {0x0110A100, 0, IPR_DEFAULT_LOG_LEVEL
,
259 "FFFD: Logical block guard error recovered by the IOA"},
260 {0x01170600, 0, IPR_DEFAULT_LOG_LEVEL
,
261 "FFF9: Device sector reassign successful"},
262 {0x01170900, 0, IPR_DEFAULT_LOG_LEVEL
,
263 "FFF7: Media error recovered by device rewrite procedures"},
264 {0x01180200, 0, IPR_DEFAULT_LOG_LEVEL
,
265 "7001: IOA sector reassignment successful"},
266 {0x01180500, 0, IPR_DEFAULT_LOG_LEVEL
,
267 "FFF9: Soft media error. Sector reassignment recommended"},
268 {0x01180600, 0, IPR_DEFAULT_LOG_LEVEL
,
269 "FFF7: Media error recovered by IOA rewrite procedures"},
270 {0x01418000, 0, IPR_DEFAULT_LOG_LEVEL
,
271 "FF3D: Soft PCI bus error recovered by the IOA"},
272 {0x01440000, 1, IPR_DEFAULT_LOG_LEVEL
,
273 "FFF6: Device hardware error recovered by the IOA"},
274 {0x01448100, 0, IPR_DEFAULT_LOG_LEVEL
,
275 "FFF6: Device hardware error recovered by the device"},
276 {0x01448200, 1, IPR_DEFAULT_LOG_LEVEL
,
277 "FF3D: Soft IOA error recovered by the IOA"},
278 {0x01448300, 0, IPR_DEFAULT_LOG_LEVEL
,
279 "FFFA: Undefined device response recovered by the IOA"},
280 {0x014A0000, 1, IPR_DEFAULT_LOG_LEVEL
,
281 "FFF6: Device bus error, message or command phase"},
282 {0x014A8000, 0, IPR_DEFAULT_LOG_LEVEL
,
283 "FFFE: Task Management Function failed"},
284 {0x015D0000, 0, IPR_DEFAULT_LOG_LEVEL
,
285 "FFF6: Failure prediction threshold exceeded"},
286 {0x015D9200, 0, IPR_DEFAULT_LOG_LEVEL
,
287 "8009: Impending cache battery pack failure"},
289 "Logical Unit in process of becoming ready"},
291 "Initializing command required"},
293 "34FF: Disk device format in progress"},
295 "Logical unit not accessible, target port in unavailable state"},
296 {0x02048000, 0, IPR_DEFAULT_LOG_LEVEL
,
297 "9070: IOA requested reset"},
299 "Synchronization required"},
301 "IOA microcode download required"},
303 "Device bus connection is prohibited by host"},
305 "No ready, IOA shutdown"},
307 "Not ready, IOA has been shutdown"},
308 {0x02670100, 0, IPR_DEFAULT_LOG_LEVEL
,
309 "3020: Storage subsystem configuration error"},
311 "FFF5: Medium error, data unreadable, recommend reassign"},
313 "7000: Medium error, data unreadable, do not reassign"},
314 {0x03310000, 0, IPR_DEFAULT_LOG_LEVEL
,
315 "FFF3: Disk media format bad"},
316 {0x04050000, 0, IPR_DEFAULT_LOG_LEVEL
,
317 "3002: Addressed device failed to respond to selection"},
318 {0x04080000, 1, IPR_DEFAULT_LOG_LEVEL
,
319 "3100: Device bus error"},
320 {0x04080100, 0, IPR_DEFAULT_LOG_LEVEL
,
321 "3109: IOA timed out a device command"},
323 "3120: SCSI bus is not operational"},
324 {0x04088100, 0, IPR_DEFAULT_LOG_LEVEL
,
325 "4100: Hard device bus fabric error"},
326 {0x04100100, 0, IPR_DEFAULT_LOG_LEVEL
,
327 "310C: Logical block guard error detected by the device"},
328 {0x04100300, 0, IPR_DEFAULT_LOG_LEVEL
,
329 "310C: Logical block reference tag error detected by the device"},
330 {0x04108300, 1, IPR_DEFAULT_LOG_LEVEL
,
331 "4170: Scatter list tag / sequence number error"},
332 {0x04109000, 1, IPR_DEFAULT_LOG_LEVEL
,
333 "8150: Logical block CRC error on IOA to Host transfer"},
334 {0x04109200, 1, IPR_DEFAULT_LOG_LEVEL
,
335 "4170: Logical block sequence number error on IOA to Host transfer"},
336 {0x0410A000, 0, IPR_DEFAULT_LOG_LEVEL
,
337 "310D: Logical block reference tag error detected by the IOA"},
338 {0x0410A100, 0, IPR_DEFAULT_LOG_LEVEL
,
339 "310D: Logical block guard error detected by the IOA"},
340 {0x04118000, 0, IPR_DEFAULT_LOG_LEVEL
,
341 "9000: IOA reserved area data check"},
342 {0x04118100, 0, IPR_DEFAULT_LOG_LEVEL
,
343 "9001: IOA reserved area invalid data pattern"},
344 {0x04118200, 0, IPR_DEFAULT_LOG_LEVEL
,
345 "9002: IOA reserved area LRC error"},
346 {0x04118300, 1, IPR_DEFAULT_LOG_LEVEL
,
347 "Hardware Error, IOA metadata access error"},
348 {0x04320000, 0, IPR_DEFAULT_LOG_LEVEL
,
349 "102E: Out of alternate sectors for disk storage"},
350 {0x04330000, 1, IPR_DEFAULT_LOG_LEVEL
,
351 "FFF4: Data transfer underlength error"},
352 {0x04338000, 1, IPR_DEFAULT_LOG_LEVEL
,
353 "FFF4: Data transfer overlength error"},
354 {0x043E0100, 0, IPR_DEFAULT_LOG_LEVEL
,
355 "3400: Logical unit failure"},
356 {0x04408500, 0, IPR_DEFAULT_LOG_LEVEL
,
357 "FFF4: Device microcode is corrupt"},
358 {0x04418000, 1, IPR_DEFAULT_LOG_LEVEL
,
359 "8150: PCI bus error"},
361 "Unsupported device bus message received"},
362 {0x04440000, 1, IPR_DEFAULT_LOG_LEVEL
,
363 "FFF4: Disk device problem"},
364 {0x04448200, 1, IPR_DEFAULT_LOG_LEVEL
,
365 "8150: Permanent IOA failure"},
366 {0x04448300, 0, IPR_DEFAULT_LOG_LEVEL
,
367 "3010: Disk device returned wrong response to IOA"},
368 {0x04448400, 0, IPR_DEFAULT_LOG_LEVEL
,
369 "8151: IOA microcode error"},
371 "Device bus status error"},
372 {0x04448600, 0, IPR_DEFAULT_LOG_LEVEL
,
373 "8157: IOA error requiring IOA reset to recover"},
375 "ATA device status error"},
377 "Message reject received from the device"},
378 {0x04449200, 0, IPR_DEFAULT_LOG_LEVEL
,
379 "8008: A permanent cache battery pack failure occurred"},
380 {0x0444A000, 0, IPR_DEFAULT_LOG_LEVEL
,
381 "9090: Disk unit has been modified after the last known status"},
382 {0x0444A200, 0, IPR_DEFAULT_LOG_LEVEL
,
383 "9081: IOA detected device error"},
384 {0x0444A300, 0, IPR_DEFAULT_LOG_LEVEL
,
385 "9082: IOA detected device error"},
386 {0x044A0000, 1, IPR_DEFAULT_LOG_LEVEL
,
387 "3110: Device bus error, message or command phase"},
388 {0x044A8000, 1, IPR_DEFAULT_LOG_LEVEL
,
389 "3110: SAS Command / Task Management Function failed"},
390 {0x04670400, 0, IPR_DEFAULT_LOG_LEVEL
,
391 "9091: Incorrect hardware configuration change has been detected"},
392 {0x04678000, 0, IPR_DEFAULT_LOG_LEVEL
,
393 "9073: Invalid multi-adapter configuration"},
394 {0x04678100, 0, IPR_DEFAULT_LOG_LEVEL
,
395 "4010: Incorrect connection between cascaded expanders"},
396 {0x04678200, 0, IPR_DEFAULT_LOG_LEVEL
,
397 "4020: Connections exceed IOA design limits"},
398 {0x04678300, 0, IPR_DEFAULT_LOG_LEVEL
,
399 "4030: Incorrect multipath connection"},
400 {0x04679000, 0, IPR_DEFAULT_LOG_LEVEL
,
401 "4110: Unsupported enclosure function"},
402 {0x04679800, 0, IPR_DEFAULT_LOG_LEVEL
,
403 "4120: SAS cable VPD cannot be read"},
404 {0x046E0000, 0, IPR_DEFAULT_LOG_LEVEL
,
405 "FFF4: Command to logical unit failed"},
407 "Illegal request, invalid request type or request packet"},
409 "Illegal request, invalid resource handle"},
411 "Illegal request, commands not allowed to this device"},
413 "Illegal request, command not allowed to a secondary adapter"},
415 "Illegal request, command not allowed to a non-optimized resource"},
417 "Illegal request, invalid field in parameter list"},
419 "Illegal request, parameter not supported"},
421 "Illegal request, parameter value invalid"},
423 "Illegal request, command sequence error"},
425 "Illegal request, dual adapter support not enabled"},
427 "Illegal request, another cable connector was physically disabled"},
429 "Illegal request, inconsistent group id/group count"},
430 {0x06040500, 0, IPR_DEFAULT_LOG_LEVEL
,
431 "9031: Array protection temporarily suspended, protection resuming"},
432 {0x06040600, 0, IPR_DEFAULT_LOG_LEVEL
,
433 "9040: Array protection temporarily suspended, protection resuming"},
434 {0x060B0100, 0, IPR_DEFAULT_LOG_LEVEL
,
435 "4080: IOA exceeded maximum operating temperature"},
436 {0x060B8000, 0, IPR_DEFAULT_LOG_LEVEL
,
437 "4085: Service required"},
438 {0x060B8100, 0, IPR_DEFAULT_LOG_LEVEL
,
439 "4086: SAS Adapter Hardware Configuration Error"},
440 {0x06288000, 0, IPR_DEFAULT_LOG_LEVEL
,
441 "3140: Device bus not ready to ready transition"},
442 {0x06290000, 0, IPR_DEFAULT_LOG_LEVEL
,
443 "FFFB: SCSI bus was reset"},
445 "FFFE: SCSI bus transition to single ended"},
447 "FFFE: SCSI bus transition to LVD"},
448 {0x06298000, 0, IPR_DEFAULT_LOG_LEVEL
,
449 "FFFB: SCSI bus was reset by another initiator"},
450 {0x063F0300, 0, IPR_DEFAULT_LOG_LEVEL
,
451 "3029: A device replacement has occurred"},
452 {0x063F8300, 0, IPR_DEFAULT_LOG_LEVEL
,
453 "4102: Device bus fabric performance degradation"},
454 {0x064C8000, 0, IPR_DEFAULT_LOG_LEVEL
,
455 "9051: IOA cache data exists for a missing or failed device"},
456 {0x064C8100, 0, IPR_DEFAULT_LOG_LEVEL
,
457 "9055: Auxiliary cache IOA contains cache data needed by the primary IOA"},
458 {0x06670100, 0, IPR_DEFAULT_LOG_LEVEL
,
459 "9025: Disk unit is not supported at its physical location"},
460 {0x06670600, 0, IPR_DEFAULT_LOG_LEVEL
,
461 "3020: IOA detected a SCSI bus configuration error"},
462 {0x06678000, 0, IPR_DEFAULT_LOG_LEVEL
,
463 "3150: SCSI bus configuration error"},
464 {0x06678100, 0, IPR_DEFAULT_LOG_LEVEL
,
465 "9074: Asymmetric advanced function disk configuration"},
466 {0x06678300, 0, IPR_DEFAULT_LOG_LEVEL
,
467 "4040: Incomplete multipath connection between IOA and enclosure"},
468 {0x06678400, 0, IPR_DEFAULT_LOG_LEVEL
,
469 "4041: Incomplete multipath connection between enclosure and device"},
470 {0x06678500, 0, IPR_DEFAULT_LOG_LEVEL
,
471 "9075: Incomplete multipath connection between IOA and remote IOA"},
472 {0x06678600, 0, IPR_DEFAULT_LOG_LEVEL
,
473 "9076: Configuration error, missing remote IOA"},
474 {0x06679100, 0, IPR_DEFAULT_LOG_LEVEL
,
475 "4050: Enclosure does not support a required multipath function"},
476 {0x06679800, 0, IPR_DEFAULT_LOG_LEVEL
,
477 "4121: Configuration error, required cable is missing"},
478 {0x06679900, 0, IPR_DEFAULT_LOG_LEVEL
,
479 "4122: Cable is not plugged into the correct location on remote IOA"},
480 {0x06679A00, 0, IPR_DEFAULT_LOG_LEVEL
,
481 "4123: Configuration error, invalid cable vital product data"},
482 {0x06679B00, 0, IPR_DEFAULT_LOG_LEVEL
,
483 "4124: Configuration error, both cable ends are plugged into the same IOA"},
484 {0x06690000, 0, IPR_DEFAULT_LOG_LEVEL
,
485 "4070: Logically bad block written on device"},
486 {0x06690200, 0, IPR_DEFAULT_LOG_LEVEL
,
487 "9041: Array protection temporarily suspended"},
488 {0x06698200, 0, IPR_DEFAULT_LOG_LEVEL
,
489 "9042: Corrupt array parity detected on specified device"},
490 {0x066B0200, 0, IPR_DEFAULT_LOG_LEVEL
,
491 "9030: Array no longer protected due to missing or failed disk unit"},
492 {0x066B8000, 0, IPR_DEFAULT_LOG_LEVEL
,
493 "9071: Link operational transition"},
494 {0x066B8100, 0, IPR_DEFAULT_LOG_LEVEL
,
495 "9072: Link not operational transition"},
496 {0x066B8200, 0, IPR_DEFAULT_LOG_LEVEL
,
497 "9032: Array exposed but still protected"},
498 {0x066B8300, 0, IPR_DEBUG_LOG_LEVEL
,
499 "70DD: Device forced failed by disrupt device command"},
500 {0x066B9100, 0, IPR_DEFAULT_LOG_LEVEL
,
501 "4061: Multipath redundancy level got better"},
502 {0x066B9200, 0, IPR_DEFAULT_LOG_LEVEL
,
503 "4060: Multipath redundancy level got worse"},
504 {0x06808100, 0, IPR_DEBUG_LOG_LEVEL
,
505 "9083: Device raw mode enabled"},
506 {0x06808200, 0, IPR_DEBUG_LOG_LEVEL
,
507 "9084: Device raw mode disabled"},
509 "Failure due to other device"},
510 {0x07278000, 0, IPR_DEFAULT_LOG_LEVEL
,
511 "9008: IOA does not support functions expected by devices"},
512 {0x07278100, 0, IPR_DEFAULT_LOG_LEVEL
,
513 "9010: Cache data associated with attached devices cannot be found"},
514 {0x07278200, 0, IPR_DEFAULT_LOG_LEVEL
,
515 "9011: Cache data belongs to devices other than those attached"},
516 {0x07278400, 0, IPR_DEFAULT_LOG_LEVEL
,
517 "9020: Array missing 2 or more devices with only 1 device present"},
518 {0x07278500, 0, IPR_DEFAULT_LOG_LEVEL
,
519 "9021: Array missing 2 or more devices with 2 or more devices present"},
520 {0x07278600, 0, IPR_DEFAULT_LOG_LEVEL
,
521 "9022: Exposed array is missing a required device"},
522 {0x07278700, 0, IPR_DEFAULT_LOG_LEVEL
,
523 "9023: Array member(s) not at required physical locations"},
524 {0x07278800, 0, IPR_DEFAULT_LOG_LEVEL
,
525 "9024: Array not functional due to present hardware configuration"},
526 {0x07278900, 0, IPR_DEFAULT_LOG_LEVEL
,
527 "9026: Array not functional due to present hardware configuration"},
528 {0x07278A00, 0, IPR_DEFAULT_LOG_LEVEL
,
529 "9027: Array is missing a device and parity is out of sync"},
530 {0x07278B00, 0, IPR_DEFAULT_LOG_LEVEL
,
531 "9028: Maximum number of arrays already exist"},
532 {0x07278C00, 0, IPR_DEFAULT_LOG_LEVEL
,
533 "9050: Required cache data cannot be located for a disk unit"},
534 {0x07278D00, 0, IPR_DEFAULT_LOG_LEVEL
,
535 "9052: Cache data exists for a device that has been modified"},
536 {0x07278F00, 0, IPR_DEFAULT_LOG_LEVEL
,
537 "9054: IOA resources not available due to previous problems"},
538 {0x07279100, 0, IPR_DEFAULT_LOG_LEVEL
,
539 "9092: Disk unit requires initialization before use"},
540 {0x07279200, 0, IPR_DEFAULT_LOG_LEVEL
,
541 "9029: Incorrect hardware configuration change has been detected"},
542 {0x07279600, 0, IPR_DEFAULT_LOG_LEVEL
,
543 "9060: One or more disk pairs are missing from an array"},
544 {0x07279700, 0, IPR_DEFAULT_LOG_LEVEL
,
545 "9061: One or more disks are missing from an array"},
546 {0x07279800, 0, IPR_DEFAULT_LOG_LEVEL
,
547 "9062: One or more disks are missing from an array"},
548 {0x07279900, 0, IPR_DEFAULT_LOG_LEVEL
,
549 "9063: Maximum number of functional arrays has been exceeded"},
551 "Data protect, other volume set problem"},
553 "Aborted command, invalid descriptor"},
555 "Target operating conditions have changed, dual adapter takeover"},
557 "Aborted command, medium removal prevented"},
559 "Command terminated by host"},
561 "Aborted command, command terminated by host"}
/*
 * Known SES (SCSI enclosure services) backplanes.  Columns: product id
 * string, a 16-character compare pattern ('X'/'*' -- NOTE(review):
 * per-character match vs. wildcard semantics presumed, confirm against
 * the table lookup code), and a numeric limit (80/160 -- presumably the
 * maximum bus rate in MB/s, matching the comments' U2/U3 SCSI naming).
 */
static const struct ipr_ses_table_entry ipr_ses_table[] = {
	{ "2104-DL1 ", "XXXXXXXXXXXXXXXX", 80 },
	{ "2104-TL1 ", "XXXXXXXXXXXXXXXX", 80 },
	{ "HSBP07M P U2SCSI", "XXXXXXXXXXXXXXXX", 80 }, /* Hidive 7 slot */
	{ "HSBP05M P U2SCSI", "XXXXXXXXXXXXXXXX", 80 }, /* Hidive 5 slot */
	{ "HSBP05M S U2SCSI", "XXXXXXXXXXXXXXXX", 80 }, /* Bowtie */
	{ "HSBP06E ASU2SCSI", "XXXXXXXXXXXXXXXX", 80 }, /* MartinFenning */
	{ "2104-DU3 ", "XXXXXXXXXXXXXXXX", 160 },
	{ "2104-TU3 ", "XXXXXXXXXXXXXXXX", 160 },
	{ "HSBP04C RSU2SCSI", "XXXXXXX*XXXXXXXX", 160 },
	{ "HSBP06E RSU2SCSI", "XXXXXXX*XXXXXXXX", 160 },
	{ "St V1S2 ", "XXXXXXXXXXXXXXXX", 160 },
	{ "HSBPD4M PU3SCSI", "XXXXXXX*XXXXXXXX", 160 },
	{ "VSBPD1H U3SCSI", "XXXXXXX*XXXXXXXX", 160 }
581 * Function Prototypes
583 static int ipr_reset_alert(struct ipr_cmnd
*);
584 static void ipr_process_ccn(struct ipr_cmnd
*);
585 static void ipr_process_error(struct ipr_cmnd
*);
586 static void ipr_reset_ioa_job(struct ipr_cmnd
*);
587 static void ipr_initiate_ioa_reset(struct ipr_ioa_cfg
*,
588 enum ipr_shutdown_type
);
590 #ifdef CONFIG_SCSI_IPR_TRACE
592 * ipr_trc_hook - Add a trace entry to the driver trace
593 * @ipr_cmd: ipr command struct
595 * @add_data: additional data
600 static void ipr_trc_hook(struct ipr_cmnd
*ipr_cmd
,
601 u8 type
, u32 add_data
)
603 struct ipr_trace_entry
*trace_entry
;
604 struct ipr_ioa_cfg
*ioa_cfg
= ipr_cmd
->ioa_cfg
;
605 unsigned int trace_index
;
607 trace_index
= atomic_add_return(1, &ioa_cfg
->trace_index
) & IPR_TRACE_INDEX_MASK
;
608 trace_entry
= &ioa_cfg
->trace
[trace_index
];
609 trace_entry
->time
= jiffies
;
610 trace_entry
->op_code
= ipr_cmd
->ioarcb
.cmd_pkt
.cdb
[0];
611 trace_entry
->type
= type
;
612 if (ipr_cmd
->ioa_cfg
->sis64
)
613 trace_entry
->ata_op_code
= ipr_cmd
->i
.ata_ioadl
.regs
.command
;
615 trace_entry
->ata_op_code
= ipr_cmd
->ioarcb
.u
.add_data
.u
.regs
.command
;
616 trace_entry
->cmd_index
= ipr_cmd
->cmd_index
& 0xff;
617 trace_entry
->res_handle
= ipr_cmd
->ioarcb
.res_handle
;
618 trace_entry
->u
.add_data
= add_data
;
622 #define ipr_trc_hook(ipr_cmd, type, add_data) do { } while (0)
626 * ipr_lock_and_done - Acquire lock and complete command
627 * @ipr_cmd: ipr command struct
632 static void ipr_lock_and_done(struct ipr_cmnd
*ipr_cmd
)
634 unsigned long lock_flags
;
635 struct ipr_ioa_cfg
*ioa_cfg
= ipr_cmd
->ioa_cfg
;
637 spin_lock_irqsave(ioa_cfg
->host
->host_lock
, lock_flags
);
638 ipr_cmd
->done(ipr_cmd
);
639 spin_unlock_irqrestore(ioa_cfg
->host
->host_lock
, lock_flags
);
643 * ipr_reinit_ipr_cmnd - Re-initialize an IPR Cmnd block for reuse
644 * @ipr_cmd: ipr command struct
649 static void ipr_reinit_ipr_cmnd(struct ipr_cmnd
*ipr_cmd
)
651 struct ipr_ioarcb
*ioarcb
= &ipr_cmd
->ioarcb
;
652 struct ipr_ioasa
*ioasa
= &ipr_cmd
->s
.ioasa
;
653 struct ipr_ioasa64
*ioasa64
= &ipr_cmd
->s
.ioasa64
;
654 dma_addr_t dma_addr
= ipr_cmd
->dma_addr
;
657 hrrq_id
= ioarcb
->cmd_pkt
.hrrq_id
;
658 memset(&ioarcb
->cmd_pkt
, 0, sizeof(struct ipr_cmd_pkt
));
659 ioarcb
->cmd_pkt
.hrrq_id
= hrrq_id
;
660 ioarcb
->data_transfer_length
= 0;
661 ioarcb
->read_data_transfer_length
= 0;
662 ioarcb
->ioadl_len
= 0;
663 ioarcb
->read_ioadl_len
= 0;
665 if (ipr_cmd
->ioa_cfg
->sis64
) {
666 ioarcb
->u
.sis64_addr_data
.data_ioadl_addr
=
667 cpu_to_be64(dma_addr
+ offsetof(struct ipr_cmnd
, i
.ioadl64
));
668 ioasa64
->u
.gata
.status
= 0;
670 ioarcb
->write_ioadl_addr
=
671 cpu_to_be32(dma_addr
+ offsetof(struct ipr_cmnd
, i
.ioadl
));
672 ioarcb
->read_ioadl_addr
= ioarcb
->write_ioadl_addr
;
673 ioasa
->u
.gata
.status
= 0;
676 ioasa
->hdr
.ioasc
= 0;
677 ioasa
->hdr
.residual_data_len
= 0;
678 ipr_cmd
->scsi_cmd
= NULL
;
680 ipr_cmd
->sense_buffer
[0] = 0;
681 ipr_cmd
->dma_use_sg
= 0;
685 * ipr_init_ipr_cmnd - Initialize an IPR Cmnd block
686 * @ipr_cmd: ipr command struct
691 static void ipr_init_ipr_cmnd(struct ipr_cmnd
*ipr_cmd
,
692 void (*fast_done
) (struct ipr_cmnd
*))
694 ipr_reinit_ipr_cmnd(ipr_cmd
);
695 ipr_cmd
->u
.scratch
= 0;
696 ipr_cmd
->sibling
= NULL
;
697 ipr_cmd
->eh_comp
= NULL
;
698 ipr_cmd
->fast_done
= fast_done
;
699 timer_setup(&ipr_cmd
->timer
, NULL
, 0);
703 * __ipr_get_free_ipr_cmnd - Get a free IPR Cmnd block
704 * @ioa_cfg: ioa config struct
707 * pointer to ipr command struct
710 struct ipr_cmnd
*__ipr_get_free_ipr_cmnd(struct ipr_hrr_queue
*hrrq
)
712 struct ipr_cmnd
*ipr_cmd
= NULL
;
714 if (likely(!list_empty(&hrrq
->hrrq_free_q
))) {
715 ipr_cmd
= list_entry(hrrq
->hrrq_free_q
.next
,
716 struct ipr_cmnd
, queue
);
717 list_del(&ipr_cmd
->queue
);
725 * ipr_get_free_ipr_cmnd - Get a free IPR Cmnd block and initialize it
726 * @ioa_cfg: ioa config struct
729 * pointer to ipr command struct
732 struct ipr_cmnd
*ipr_get_free_ipr_cmnd(struct ipr_ioa_cfg
*ioa_cfg
)
734 struct ipr_cmnd
*ipr_cmd
=
735 __ipr_get_free_ipr_cmnd(&ioa_cfg
->hrrq
[IPR_INIT_HRRQ
]);
736 ipr_init_ipr_cmnd(ipr_cmd
, ipr_lock_and_done
);
741 * ipr_mask_and_clear_interrupts - Mask all and clear specified interrupts
742 * @ioa_cfg: ioa config struct
743 * @clr_ints: interrupts to clear
745 * This function masks all interrupts on the adapter, then clears the
746 * interrupts specified in the mask
751 static void ipr_mask_and_clear_interrupts(struct ipr_ioa_cfg
*ioa_cfg
,
754 volatile u32 int_reg
;
757 /* Stop new interrupts */
758 for (i
= 0; i
< ioa_cfg
->hrrq_num
; i
++) {
759 spin_lock(&ioa_cfg
->hrrq
[i
]._lock
);
760 ioa_cfg
->hrrq
[i
].allow_interrupts
= 0;
761 spin_unlock(&ioa_cfg
->hrrq
[i
]._lock
);
765 /* Set interrupt mask to stop all new interrupts */
767 writeq(~0, ioa_cfg
->regs
.set_interrupt_mask_reg
);
769 writel(~0, ioa_cfg
->regs
.set_interrupt_mask_reg
);
771 /* Clear any pending interrupts */
773 writel(~0, ioa_cfg
->regs
.clr_interrupt_reg
);
774 writel(clr_ints
, ioa_cfg
->regs
.clr_interrupt_reg32
);
775 int_reg
= readl(ioa_cfg
->regs
.sense_interrupt_reg
);
779 * ipr_save_pcix_cmd_reg - Save PCI-X command register
780 * @ioa_cfg: ioa config struct
783 * 0 on success / -EIO on failure
785 static int ipr_save_pcix_cmd_reg(struct ipr_ioa_cfg
*ioa_cfg
)
787 int pcix_cmd_reg
= pci_find_capability(ioa_cfg
->pdev
, PCI_CAP_ID_PCIX
);
789 if (pcix_cmd_reg
== 0)
792 if (pci_read_config_word(ioa_cfg
->pdev
, pcix_cmd_reg
+ PCI_X_CMD
,
793 &ioa_cfg
->saved_pcix_cmd_reg
) != PCIBIOS_SUCCESSFUL
) {
794 dev_err(&ioa_cfg
->pdev
->dev
, "Failed to save PCI-X command register\n");
798 ioa_cfg
->saved_pcix_cmd_reg
|= PCI_X_CMD_DPERR_E
| PCI_X_CMD_ERO
;
803 * ipr_set_pcix_cmd_reg - Setup PCI-X command register
804 * @ioa_cfg: ioa config struct
807 * 0 on success / -EIO on failure
809 static int ipr_set_pcix_cmd_reg(struct ipr_ioa_cfg
*ioa_cfg
)
811 int pcix_cmd_reg
= pci_find_capability(ioa_cfg
->pdev
, PCI_CAP_ID_PCIX
);
814 if (pci_write_config_word(ioa_cfg
->pdev
, pcix_cmd_reg
+ PCI_X_CMD
,
815 ioa_cfg
->saved_pcix_cmd_reg
) != PCIBIOS_SUCCESSFUL
) {
816 dev_err(&ioa_cfg
->pdev
->dev
, "Failed to setup PCI-X command register\n");
825 * __ipr_sata_eh_done - done function for aborted SATA commands
826 * @ipr_cmd: ipr command struct
828 * This function is invoked for ops generated to SATA
829 * devices which are being aborted.
834 static void __ipr_sata_eh_done(struct ipr_cmnd
*ipr_cmd
)
836 struct ata_queued_cmd
*qc
= ipr_cmd
->qc
;
837 struct ipr_sata_port
*sata_port
= qc
->ap
->private_data
;
839 qc
->err_mask
|= AC_ERR_OTHER
;
840 sata_port
->ioasa
.status
|= ATA_BUSY
;
842 if (ipr_cmd
->eh_comp
)
843 complete(ipr_cmd
->eh_comp
);
844 list_add_tail(&ipr_cmd
->queue
, &ipr_cmd
->hrrq
->hrrq_free_q
);
848 * ipr_sata_eh_done - done function for aborted SATA commands
849 * @ipr_cmd: ipr command struct
851 * This function is invoked for ops generated to SATA
852 * devices which are being aborted.
857 static void ipr_sata_eh_done(struct ipr_cmnd
*ipr_cmd
)
859 struct ipr_hrr_queue
*hrrq
= ipr_cmd
->hrrq
;
860 unsigned long hrrq_flags
;
862 spin_lock_irqsave(&hrrq
->_lock
, hrrq_flags
);
863 __ipr_sata_eh_done(ipr_cmd
);
864 spin_unlock_irqrestore(&hrrq
->_lock
, hrrq_flags
);
868 * __ipr_scsi_eh_done - mid-layer done function for aborted ops
869 * @ipr_cmd: ipr command struct
871 * This function is invoked by the interrupt handler for
872 * ops generated by the SCSI mid-layer which are being aborted.
877 static void __ipr_scsi_eh_done(struct ipr_cmnd
*ipr_cmd
)
879 struct scsi_cmnd
*scsi_cmd
= ipr_cmd
->scsi_cmd
;
881 scsi_cmd
->result
|= (DID_ERROR
<< 16);
883 scsi_dma_unmap(ipr_cmd
->scsi_cmd
);
884 scsi_cmd
->scsi_done(scsi_cmd
);
885 if (ipr_cmd
->eh_comp
)
886 complete(ipr_cmd
->eh_comp
);
887 list_add_tail(&ipr_cmd
->queue
, &ipr_cmd
->hrrq
->hrrq_free_q
);
891 * ipr_scsi_eh_done - mid-layer done function for aborted ops
892 * @ipr_cmd: ipr command struct
894 * This function is invoked by the interrupt handler for
895 * ops generated by the SCSI mid-layer which are being aborted.
900 static void ipr_scsi_eh_done(struct ipr_cmnd
*ipr_cmd
)
902 unsigned long hrrq_flags
;
903 struct ipr_hrr_queue
*hrrq
= ipr_cmd
->hrrq
;
905 spin_lock_irqsave(&hrrq
->_lock
, hrrq_flags
);
906 __ipr_scsi_eh_done(ipr_cmd
);
907 spin_unlock_irqrestore(&hrrq
->_lock
, hrrq_flags
);
911 * ipr_fail_all_ops - Fails all outstanding ops.
912 * @ioa_cfg: ioa config struct
914 * This function fails all outstanding ops.
919 static void ipr_fail_all_ops(struct ipr_ioa_cfg
*ioa_cfg
)
921 struct ipr_cmnd
*ipr_cmd
, *temp
;
922 struct ipr_hrr_queue
*hrrq
;
925 for_each_hrrq(hrrq
, ioa_cfg
) {
926 spin_lock(&hrrq
->_lock
);
927 list_for_each_entry_safe(ipr_cmd
,
928 temp
, &hrrq
->hrrq_pending_q
, queue
) {
929 list_del(&ipr_cmd
->queue
);
931 ipr_cmd
->s
.ioasa
.hdr
.ioasc
=
932 cpu_to_be32(IPR_IOASC_IOA_WAS_RESET
);
933 ipr_cmd
->s
.ioasa
.hdr
.ilid
=
934 cpu_to_be32(IPR_DRIVER_ILID
);
936 if (ipr_cmd
->scsi_cmd
)
937 ipr_cmd
->done
= __ipr_scsi_eh_done
;
938 else if (ipr_cmd
->qc
)
939 ipr_cmd
->done
= __ipr_sata_eh_done
;
941 ipr_trc_hook(ipr_cmd
, IPR_TRACE_FINISH
,
942 IPR_IOASC_IOA_WAS_RESET
);
943 del_timer(&ipr_cmd
->timer
);
944 ipr_cmd
->done(ipr_cmd
);
946 spin_unlock(&hrrq
->_lock
);
952 * ipr_send_command - Send driver initiated requests.
953 * @ipr_cmd: ipr command struct
955 * This function sends a command to the adapter using the correct write call.
956 * In the case of sis64, calculate the ioarcb size required. Then or in the
962 static void ipr_send_command(struct ipr_cmnd
*ipr_cmd
)
964 struct ipr_ioa_cfg
*ioa_cfg
= ipr_cmd
->ioa_cfg
;
965 dma_addr_t send_dma_addr
= ipr_cmd
->dma_addr
;
967 if (ioa_cfg
->sis64
) {
968 /* The default size is 256 bytes */
969 send_dma_addr
|= 0x1;
971 /* If the number of ioadls * size of ioadl > 128 bytes,
972 then use a 512 byte ioarcb */
973 if (ipr_cmd
->dma_use_sg
* sizeof(struct ipr_ioadl64_desc
) > 128 )
974 send_dma_addr
|= 0x4;
975 writeq(send_dma_addr
, ioa_cfg
->regs
.ioarrin_reg
);
977 writel(send_dma_addr
, ioa_cfg
->regs
.ioarrin_reg
);
981 * ipr_do_req - Send driver initiated requests.
982 * @ipr_cmd: ipr command struct
983 * @done: done function
984 * @timeout_func: timeout function
985 * @timeout: timeout value
987 * This function sends the specified command to the adapter with the
988 * timeout given. The done function is invoked on command completion.
993 static void ipr_do_req(struct ipr_cmnd
*ipr_cmd
,
994 void (*done
) (struct ipr_cmnd
*),
995 void (*timeout_func
) (struct timer_list
*), u32 timeout
)
997 list_add_tail(&ipr_cmd
->queue
, &ipr_cmd
->hrrq
->hrrq_pending_q
);
999 ipr_cmd
->done
= done
;
1001 ipr_cmd
->timer
.expires
= jiffies
+ timeout
;
1002 ipr_cmd
->timer
.function
= timeout_func
;
1004 add_timer(&ipr_cmd
->timer
);
1006 ipr_trc_hook(ipr_cmd
, IPR_TRACE_START
, 0);
1008 ipr_send_command(ipr_cmd
);
1012 * ipr_internal_cmd_done - Op done function for an internally generated op.
1013 * @ipr_cmd: ipr command struct
1015 * This function is the op done function for an internally generated,
1016 * blocking op. It simply wakes the sleeping thread.
1021 static void ipr_internal_cmd_done(struct ipr_cmnd
*ipr_cmd
)
1023 if (ipr_cmd
->sibling
)
1024 ipr_cmd
->sibling
= NULL
;
1026 complete(&ipr_cmd
->completion
);
1030 * ipr_init_ioadl - initialize the ioadl for the correct SIS type
1031 * @ipr_cmd: ipr command struct
1032 * @dma_addr: dma address
1033 * @len: transfer length
1034 * @flags: ioadl flag value
1036 * This function initializes an ioadl in the case where there is only a single
1042 static void ipr_init_ioadl(struct ipr_cmnd
*ipr_cmd
, dma_addr_t dma_addr
,
1045 struct ipr_ioadl_desc
*ioadl
= ipr_cmd
->i
.ioadl
;
1046 struct ipr_ioadl64_desc
*ioadl64
= ipr_cmd
->i
.ioadl64
;
1048 ipr_cmd
->dma_use_sg
= 1;
1050 if (ipr_cmd
->ioa_cfg
->sis64
) {
1051 ioadl64
->flags
= cpu_to_be32(flags
);
1052 ioadl64
->data_len
= cpu_to_be32(len
);
1053 ioadl64
->address
= cpu_to_be64(dma_addr
);
1055 ipr_cmd
->ioarcb
.ioadl_len
=
1056 cpu_to_be32(sizeof(struct ipr_ioadl64_desc
));
1057 ipr_cmd
->ioarcb
.data_transfer_length
= cpu_to_be32(len
);
1059 ioadl
->flags_and_data_len
= cpu_to_be32(flags
| len
);
1060 ioadl
->address
= cpu_to_be32(dma_addr
);
1062 if (flags
== IPR_IOADL_FLAGS_READ_LAST
) {
1063 ipr_cmd
->ioarcb
.read_ioadl_len
=
1064 cpu_to_be32(sizeof(struct ipr_ioadl_desc
));
1065 ipr_cmd
->ioarcb
.read_data_transfer_length
= cpu_to_be32(len
);
1067 ipr_cmd
->ioarcb
.ioadl_len
=
1068 cpu_to_be32(sizeof(struct ipr_ioadl_desc
));
1069 ipr_cmd
->ioarcb
.data_transfer_length
= cpu_to_be32(len
);
1075 * ipr_send_blocking_cmd - Send command and sleep on its completion.
1076 * @ipr_cmd: ipr command struct
1077 * @timeout_func: function to invoke if command times out
1083 static void ipr_send_blocking_cmd(struct ipr_cmnd
*ipr_cmd
,
1084 void (*timeout_func
) (struct timer_list
*),
1087 struct ipr_ioa_cfg
*ioa_cfg
= ipr_cmd
->ioa_cfg
;
1089 init_completion(&ipr_cmd
->completion
);
1090 ipr_do_req(ipr_cmd
, ipr_internal_cmd_done
, timeout_func
, timeout
);
1092 spin_unlock_irq(ioa_cfg
->host
->host_lock
);
1093 wait_for_completion(&ipr_cmd
->completion
);
1094 spin_lock_irq(ioa_cfg
->host
->host_lock
);
1097 static int ipr_get_hrrq_index(struct ipr_ioa_cfg
*ioa_cfg
)
1101 if (ioa_cfg
->hrrq_num
== 1)
1104 hrrq
= atomic_add_return(1, &ioa_cfg
->hrrq_index
);
1105 hrrq
= (hrrq
% (ioa_cfg
->hrrq_num
- 1)) + 1;
1111 * ipr_send_hcam - Send an HCAM to the adapter.
1112 * @ioa_cfg: ioa config struct
1114 * @hostrcb: hostrcb struct
1116 * This function will send a Host Controlled Async command to the adapter.
1117 * If HCAMs are currently not allowed to be issued to the adapter, it will
1118 * place the hostrcb on the free queue.
1123 static void ipr_send_hcam(struct ipr_ioa_cfg
*ioa_cfg
, u8 type
,
1124 struct ipr_hostrcb
*hostrcb
)
1126 struct ipr_cmnd
*ipr_cmd
;
1127 struct ipr_ioarcb
*ioarcb
;
1129 if (ioa_cfg
->hrrq
[IPR_INIT_HRRQ
].allow_cmds
) {
1130 ipr_cmd
= ipr_get_free_ipr_cmnd(ioa_cfg
);
1131 list_add_tail(&ipr_cmd
->queue
, &ipr_cmd
->hrrq
->hrrq_pending_q
);
1132 list_add_tail(&hostrcb
->queue
, &ioa_cfg
->hostrcb_pending_q
);
1134 ipr_cmd
->u
.hostrcb
= hostrcb
;
1135 ioarcb
= &ipr_cmd
->ioarcb
;
1137 ioarcb
->res_handle
= cpu_to_be32(IPR_IOA_RES_HANDLE
);
1138 ioarcb
->cmd_pkt
.request_type
= IPR_RQTYPE_HCAM
;
1139 ioarcb
->cmd_pkt
.cdb
[0] = IPR_HOST_CONTROLLED_ASYNC
;
1140 ioarcb
->cmd_pkt
.cdb
[1] = type
;
1141 ioarcb
->cmd_pkt
.cdb
[7] = (sizeof(hostrcb
->hcam
) >> 8) & 0xff;
1142 ioarcb
->cmd_pkt
.cdb
[8] = sizeof(hostrcb
->hcam
) & 0xff;
1144 ipr_init_ioadl(ipr_cmd
, hostrcb
->hostrcb_dma
,
1145 sizeof(hostrcb
->hcam
), IPR_IOADL_FLAGS_READ_LAST
);
1147 if (type
== IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE
)
1148 ipr_cmd
->done
= ipr_process_ccn
;
1150 ipr_cmd
->done
= ipr_process_error
;
1152 ipr_trc_hook(ipr_cmd
, IPR_TRACE_START
, IPR_IOA_RES_ADDR
);
1154 ipr_send_command(ipr_cmd
);
1156 list_add_tail(&hostrcb
->queue
, &ioa_cfg
->hostrcb_free_q
);
1161 * ipr_update_ata_class - Update the ata class in the resource entry
1162 * @res: resource entry struct
1163 * @proto: cfgte device bus protocol value
1168 static void ipr_update_ata_class(struct ipr_resource_entry
*res
, unsigned int proto
)
1171 case IPR_PROTO_SATA
:
1172 case IPR_PROTO_SAS_STP
:
1173 res
->ata_class
= ATA_DEV_ATA
;
1175 case IPR_PROTO_SATA_ATAPI
:
1176 case IPR_PROTO_SAS_STP_ATAPI
:
1177 res
->ata_class
= ATA_DEV_ATAPI
;
1180 res
->ata_class
= ATA_DEV_UNKNOWN
;
1186 * ipr_init_res_entry - Initialize a resource entry struct.
1187 * @res: resource entry struct
1188 * @cfgtew: config table entry wrapper struct
1193 static void ipr_init_res_entry(struct ipr_resource_entry
*res
,
1194 struct ipr_config_table_entry_wrapper
*cfgtew
)
1198 struct ipr_ioa_cfg
*ioa_cfg
= res
->ioa_cfg
;
1199 struct ipr_resource_entry
*gscsi_res
= NULL
;
1201 res
->needs_sync_complete
= 0;
1204 res
->del_from_ml
= 0;
1205 res
->resetting_device
= 0;
1206 res
->reset_occurred
= 0;
1208 res
->sata_port
= NULL
;
1210 if (ioa_cfg
->sis64
) {
1211 proto
= cfgtew
->u
.cfgte64
->proto
;
1212 res
->flags
= be16_to_cpu(cfgtew
->u
.cfgte64
->flags
);
1213 res
->res_flags
= be16_to_cpu(cfgtew
->u
.cfgte64
->res_flags
);
1214 res
->qmodel
= IPR_QUEUEING_MODEL64(res
);
1215 res
->type
= cfgtew
->u
.cfgte64
->res_type
;
1217 memcpy(res
->res_path
, &cfgtew
->u
.cfgte64
->res_path
,
1218 sizeof(res
->res_path
));
1221 memcpy(&res
->dev_lun
.scsi_lun
, &cfgtew
->u
.cfgte64
->lun
,
1222 sizeof(res
->dev_lun
.scsi_lun
));
1223 res
->lun
= scsilun_to_int(&res
->dev_lun
);
1225 if (res
->type
== IPR_RES_TYPE_GENERIC_SCSI
) {
1226 list_for_each_entry(gscsi_res
, &ioa_cfg
->used_res_q
, queue
) {
1227 if (gscsi_res
->dev_id
== cfgtew
->u
.cfgte64
->dev_id
) {
1229 res
->target
= gscsi_res
->target
;
1234 res
->target
= find_first_zero_bit(ioa_cfg
->target_ids
,
1235 ioa_cfg
->max_devs_supported
);
1236 set_bit(res
->target
, ioa_cfg
->target_ids
);
1238 } else if (res
->type
== IPR_RES_TYPE_IOAFP
) {
1239 res
->bus
= IPR_IOAFP_VIRTUAL_BUS
;
1241 } else if (res
->type
== IPR_RES_TYPE_ARRAY
) {
1242 res
->bus
= IPR_ARRAY_VIRTUAL_BUS
;
1243 res
->target
= find_first_zero_bit(ioa_cfg
->array_ids
,
1244 ioa_cfg
->max_devs_supported
);
1245 set_bit(res
->target
, ioa_cfg
->array_ids
);
1246 } else if (res
->type
== IPR_RES_TYPE_VOLUME_SET
) {
1247 res
->bus
= IPR_VSET_VIRTUAL_BUS
;
1248 res
->target
= find_first_zero_bit(ioa_cfg
->vset_ids
,
1249 ioa_cfg
->max_devs_supported
);
1250 set_bit(res
->target
, ioa_cfg
->vset_ids
);
1252 res
->target
= find_first_zero_bit(ioa_cfg
->target_ids
,
1253 ioa_cfg
->max_devs_supported
);
1254 set_bit(res
->target
, ioa_cfg
->target_ids
);
1257 proto
= cfgtew
->u
.cfgte
->proto
;
1258 res
->qmodel
= IPR_QUEUEING_MODEL(res
);
1259 res
->flags
= cfgtew
->u
.cfgte
->flags
;
1260 if (res
->flags
& IPR_IS_IOA_RESOURCE
)
1261 res
->type
= IPR_RES_TYPE_IOAFP
;
1263 res
->type
= cfgtew
->u
.cfgte
->rsvd_subtype
& 0x0f;
1265 res
->bus
= cfgtew
->u
.cfgte
->res_addr
.bus
;
1266 res
->target
= cfgtew
->u
.cfgte
->res_addr
.target
;
1267 res
->lun
= cfgtew
->u
.cfgte
->res_addr
.lun
;
1268 res
->lun_wwn
= get_unaligned_be64(cfgtew
->u
.cfgte
->lun_wwn
);
1271 ipr_update_ata_class(res
, proto
);
1275 * ipr_is_same_device - Determine if two devices are the same.
1276 * @res: resource entry struct
1277 * @cfgtew: config table entry wrapper struct
1280 * 1 if the devices are the same / 0 otherwise
1282 static int ipr_is_same_device(struct ipr_resource_entry
*res
,
1283 struct ipr_config_table_entry_wrapper
*cfgtew
)
1285 if (res
->ioa_cfg
->sis64
) {
1286 if (!memcmp(&res
->dev_id
, &cfgtew
->u
.cfgte64
->dev_id
,
1287 sizeof(cfgtew
->u
.cfgte64
->dev_id
)) &&
1288 !memcmp(&res
->dev_lun
.scsi_lun
, &cfgtew
->u
.cfgte64
->lun
,
1289 sizeof(cfgtew
->u
.cfgte64
->lun
))) {
1293 if (res
->bus
== cfgtew
->u
.cfgte
->res_addr
.bus
&&
1294 res
->target
== cfgtew
->u
.cfgte
->res_addr
.target
&&
1295 res
->lun
== cfgtew
->u
.cfgte
->res_addr
.lun
)
/**
 * __ipr_format_res_path - Format the resource path for printing.
 * @res_path:	resource path (0xff-terminated byte array)
 * @buffer:	buffer to fill
 * @len:	length of buffer provided
 *
 * Formats the path as "XX-YY-ZZ" hex pairs separated by dashes.
 *
 * Return value:
 *	pointer to buffer
 **/
static char *__ipr_format_res_path(u8 *res_path, char *buffer, int len)
{
	int i;
	char *p = buffer;

	*p = '\0';
	p += snprintf(p, buffer + len - p, "%02X", res_path[0]);
	for (i = 1; res_path[i] != 0xff && ((i * 3) < len); i++)
		p += snprintf(p, buffer + len - p, "-%02X", res_path[i]);

	return buffer;
}
1325 * ipr_format_res_path - Format the resource path for printing.
1326 * @ioa_cfg: ioa config struct
1327 * @res_path: resource path
1329 * @len: length of buffer provided
1334 static char *ipr_format_res_path(struct ipr_ioa_cfg
*ioa_cfg
,
1335 u8
*res_path
, char *buffer
, int len
)
1340 p
+= snprintf(p
, buffer
+ len
- p
, "%d/", ioa_cfg
->host
->host_no
);
1341 __ipr_format_res_path(res_path
, p
, len
- (buffer
- p
));
1346 * ipr_update_res_entry - Update the resource entry.
1347 * @res: resource entry struct
1348 * @cfgtew: config table entry wrapper struct
1353 static void ipr_update_res_entry(struct ipr_resource_entry
*res
,
1354 struct ipr_config_table_entry_wrapper
*cfgtew
)
1356 char buffer
[IPR_MAX_RES_PATH_LENGTH
];
1360 if (res
->ioa_cfg
->sis64
) {
1361 res
->flags
= be16_to_cpu(cfgtew
->u
.cfgte64
->flags
);
1362 res
->res_flags
= be16_to_cpu(cfgtew
->u
.cfgte64
->res_flags
);
1363 res
->type
= cfgtew
->u
.cfgte64
->res_type
;
1365 memcpy(&res
->std_inq_data
, &cfgtew
->u
.cfgte64
->std_inq_data
,
1366 sizeof(struct ipr_std_inq_data
));
1368 res
->qmodel
= IPR_QUEUEING_MODEL64(res
);
1369 proto
= cfgtew
->u
.cfgte64
->proto
;
1370 res
->res_handle
= cfgtew
->u
.cfgte64
->res_handle
;
1371 res
->dev_id
= cfgtew
->u
.cfgte64
->dev_id
;
1373 memcpy(&res
->dev_lun
.scsi_lun
, &cfgtew
->u
.cfgte64
->lun
,
1374 sizeof(res
->dev_lun
.scsi_lun
));
1376 if (memcmp(res
->res_path
, &cfgtew
->u
.cfgte64
->res_path
,
1377 sizeof(res
->res_path
))) {
1378 memcpy(res
->res_path
, &cfgtew
->u
.cfgte64
->res_path
,
1379 sizeof(res
->res_path
));
1383 if (res
->sdev
&& new_path
)
1384 sdev_printk(KERN_INFO
, res
->sdev
, "Resource path: %s\n",
1385 ipr_format_res_path(res
->ioa_cfg
,
1386 res
->res_path
, buffer
, sizeof(buffer
)));
1388 res
->flags
= cfgtew
->u
.cfgte
->flags
;
1389 if (res
->flags
& IPR_IS_IOA_RESOURCE
)
1390 res
->type
= IPR_RES_TYPE_IOAFP
;
1392 res
->type
= cfgtew
->u
.cfgte
->rsvd_subtype
& 0x0f;
1394 memcpy(&res
->std_inq_data
, &cfgtew
->u
.cfgte
->std_inq_data
,
1395 sizeof(struct ipr_std_inq_data
));
1397 res
->qmodel
= IPR_QUEUEING_MODEL(res
);
1398 proto
= cfgtew
->u
.cfgte
->proto
;
1399 res
->res_handle
= cfgtew
->u
.cfgte
->res_handle
;
1402 ipr_update_ata_class(res
, proto
);
1406 * ipr_clear_res_target - Clear the bit in the bit map representing the target
1408 * @res: resource entry struct
1409 * @cfgtew: config table entry wrapper struct
1414 static void ipr_clear_res_target(struct ipr_resource_entry
*res
)
1416 struct ipr_resource_entry
*gscsi_res
= NULL
;
1417 struct ipr_ioa_cfg
*ioa_cfg
= res
->ioa_cfg
;
1419 if (!ioa_cfg
->sis64
)
1422 if (res
->bus
== IPR_ARRAY_VIRTUAL_BUS
)
1423 clear_bit(res
->target
, ioa_cfg
->array_ids
);
1424 else if (res
->bus
== IPR_VSET_VIRTUAL_BUS
)
1425 clear_bit(res
->target
, ioa_cfg
->vset_ids
);
1426 else if (res
->bus
== 0 && res
->type
== IPR_RES_TYPE_GENERIC_SCSI
) {
1427 list_for_each_entry(gscsi_res
, &ioa_cfg
->used_res_q
, queue
)
1428 if (gscsi_res
->dev_id
== res
->dev_id
&& gscsi_res
!= res
)
1430 clear_bit(res
->target
, ioa_cfg
->target_ids
);
1432 } else if (res
->bus
== 0)
1433 clear_bit(res
->target
, ioa_cfg
->target_ids
);
1437 * ipr_handle_config_change - Handle a config change from the adapter
1438 * @ioa_cfg: ioa config struct
1444 static void ipr_handle_config_change(struct ipr_ioa_cfg
*ioa_cfg
,
1445 struct ipr_hostrcb
*hostrcb
)
1447 struct ipr_resource_entry
*res
= NULL
;
1448 struct ipr_config_table_entry_wrapper cfgtew
;
1449 __be32 cc_res_handle
;
1453 if (ioa_cfg
->sis64
) {
1454 cfgtew
.u
.cfgte64
= &hostrcb
->hcam
.u
.ccn
.u
.cfgte64
;
1455 cc_res_handle
= cfgtew
.u
.cfgte64
->res_handle
;
1457 cfgtew
.u
.cfgte
= &hostrcb
->hcam
.u
.ccn
.u
.cfgte
;
1458 cc_res_handle
= cfgtew
.u
.cfgte
->res_handle
;
1461 list_for_each_entry(res
, &ioa_cfg
->used_res_q
, queue
) {
1462 if (res
->res_handle
== cc_res_handle
) {
1469 if (list_empty(&ioa_cfg
->free_res_q
)) {
1470 ipr_send_hcam(ioa_cfg
,
1471 IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE
,
1476 res
= list_entry(ioa_cfg
->free_res_q
.next
,
1477 struct ipr_resource_entry
, queue
);
1479 list_del(&res
->queue
);
1480 ipr_init_res_entry(res
, &cfgtew
);
1481 list_add_tail(&res
->queue
, &ioa_cfg
->used_res_q
);
1484 ipr_update_res_entry(res
, &cfgtew
);
1486 if (hostrcb
->hcam
.notify_type
== IPR_HOST_RCB_NOTIF_TYPE_REM_ENTRY
) {
1488 res
->del_from_ml
= 1;
1489 res
->res_handle
= IPR_INVALID_RES_HANDLE
;
1490 schedule_work(&ioa_cfg
->work_q
);
1492 ipr_clear_res_target(res
);
1493 list_move_tail(&res
->queue
, &ioa_cfg
->free_res_q
);
1495 } else if (!res
->sdev
|| res
->del_from_ml
) {
1497 schedule_work(&ioa_cfg
->work_q
);
1500 ipr_send_hcam(ioa_cfg
, IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE
, hostrcb
);
1504 * ipr_process_ccn - Op done function for a CCN.
1505 * @ipr_cmd: ipr command struct
1507 * This function is the op done function for a configuration
1508 * change notification host controlled async from the adapter.
1513 static void ipr_process_ccn(struct ipr_cmnd
*ipr_cmd
)
1515 struct ipr_ioa_cfg
*ioa_cfg
= ipr_cmd
->ioa_cfg
;
1516 struct ipr_hostrcb
*hostrcb
= ipr_cmd
->u
.hostrcb
;
1517 u32 ioasc
= be32_to_cpu(ipr_cmd
->s
.ioasa
.hdr
.ioasc
);
1519 list_del_init(&hostrcb
->queue
);
1520 list_add_tail(&ipr_cmd
->queue
, &ipr_cmd
->hrrq
->hrrq_free_q
);
1523 if (ioasc
!= IPR_IOASC_IOA_WAS_RESET
&&
1524 ioasc
!= IPR_IOASC_ABORTED_CMD_TERM_BY_HOST
)
1525 dev_err(&ioa_cfg
->pdev
->dev
,
1526 "Host RCB failed with IOASC: 0x%08X\n", ioasc
);
1528 ipr_send_hcam(ioa_cfg
, IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE
, hostrcb
);
1530 ipr_handle_config_change(ioa_cfg
, hostrcb
);
/**
 * strip_and_pad_whitespace - Strip and pad trailing whitespace.
 * @i:		index into buffer
 * @buf:	string to modify
 *
 * This function will strip all trailing whitespace, pad the end
 * of the string with a single space, and NULL terminate the string.
 *
 * Return value:
 *	new length of string
 **/
static int strip_and_pad_whitespace(int i, char *buf)
{
	while (i && buf[i] == ' ')
		i--;
	buf[i+1] = ' ';
	buf[i+2] = '\0';
	return i + 2;
}
1555 * ipr_log_vpd_compact - Log the passed extended VPD compactly.
1556 * @prefix: string to print at start of printk
1557 * @hostrcb: hostrcb pointer
1558 * @vpd: vendor/product id/sn struct
1563 static void ipr_log_vpd_compact(char *prefix
, struct ipr_hostrcb
*hostrcb
,
1564 struct ipr_vpd
*vpd
)
1566 char buffer
[IPR_VENDOR_ID_LEN
+ IPR_PROD_ID_LEN
+ IPR_SERIAL_NUM_LEN
+ 3];
1569 memcpy(buffer
, vpd
->vpids
.vendor_id
, IPR_VENDOR_ID_LEN
);
1570 i
= strip_and_pad_whitespace(IPR_VENDOR_ID_LEN
- 1, buffer
);
1572 memcpy(&buffer
[i
], vpd
->vpids
.product_id
, IPR_PROD_ID_LEN
);
1573 i
= strip_and_pad_whitespace(i
+ IPR_PROD_ID_LEN
- 1, buffer
);
1575 memcpy(&buffer
[i
], vpd
->sn
, IPR_SERIAL_NUM_LEN
);
1576 buffer
[IPR_SERIAL_NUM_LEN
+ i
] = '\0';
1578 ipr_hcam_err(hostrcb
, "%s VPID/SN: %s\n", prefix
, buffer
);
1582 * ipr_log_vpd - Log the passed VPD to the error log.
1583 * @vpd: vendor/product id/sn struct
1588 static void ipr_log_vpd(struct ipr_vpd
*vpd
)
1590 char buffer
[IPR_VENDOR_ID_LEN
+ IPR_PROD_ID_LEN
1591 + IPR_SERIAL_NUM_LEN
];
1593 memcpy(buffer
, vpd
->vpids
.vendor_id
, IPR_VENDOR_ID_LEN
);
1594 memcpy(buffer
+ IPR_VENDOR_ID_LEN
, vpd
->vpids
.product_id
,
1596 buffer
[IPR_VENDOR_ID_LEN
+ IPR_PROD_ID_LEN
] = '\0';
1597 ipr_err("Vendor/Product ID: %s\n", buffer
);
1599 memcpy(buffer
, vpd
->sn
, IPR_SERIAL_NUM_LEN
);
1600 buffer
[IPR_SERIAL_NUM_LEN
] = '\0';
1601 ipr_err(" Serial Number: %s\n", buffer
);
1605 * ipr_log_ext_vpd_compact - Log the passed extended VPD compactly.
1606 * @prefix: string to print at start of printk
1607 * @hostrcb: hostrcb pointer
1608 * @vpd: vendor/product id/sn/wwn struct
1613 static void ipr_log_ext_vpd_compact(char *prefix
, struct ipr_hostrcb
*hostrcb
,
1614 struct ipr_ext_vpd
*vpd
)
1616 ipr_log_vpd_compact(prefix
, hostrcb
, &vpd
->vpd
);
1617 ipr_hcam_err(hostrcb
, "%s WWN: %08X%08X\n", prefix
,
1618 be32_to_cpu(vpd
->wwid
[0]), be32_to_cpu(vpd
->wwid
[1]));
1622 * ipr_log_ext_vpd - Log the passed extended VPD to the error log.
1623 * @vpd: vendor/product id/sn/wwn struct
1628 static void ipr_log_ext_vpd(struct ipr_ext_vpd
*vpd
)
1630 ipr_log_vpd(&vpd
->vpd
);
1631 ipr_err(" WWN: %08X%08X\n", be32_to_cpu(vpd
->wwid
[0]),
1632 be32_to_cpu(vpd
->wwid
[1]));
1636 * ipr_log_enhanced_cache_error - Log a cache error.
1637 * @ioa_cfg: ioa config struct
1638 * @hostrcb: hostrcb struct
1643 static void ipr_log_enhanced_cache_error(struct ipr_ioa_cfg
*ioa_cfg
,
1644 struct ipr_hostrcb
*hostrcb
)
1646 struct ipr_hostrcb_type_12_error
*error
;
1649 error
= &hostrcb
->hcam
.u
.error64
.u
.type_12_error
;
1651 error
= &hostrcb
->hcam
.u
.error
.u
.type_12_error
;
1653 ipr_err("-----Current Configuration-----\n");
1654 ipr_err("Cache Directory Card Information:\n");
1655 ipr_log_ext_vpd(&error
->ioa_vpd
);
1656 ipr_err("Adapter Card Information:\n");
1657 ipr_log_ext_vpd(&error
->cfc_vpd
);
1659 ipr_err("-----Expected Configuration-----\n");
1660 ipr_err("Cache Directory Card Information:\n");
1661 ipr_log_ext_vpd(&error
->ioa_last_attached_to_cfc_vpd
);
1662 ipr_err("Adapter Card Information:\n");
1663 ipr_log_ext_vpd(&error
->cfc_last_attached_to_ioa_vpd
);
1665 ipr_err("Additional IOA Data: %08X %08X %08X\n",
1666 be32_to_cpu(error
->ioa_data
[0]),
1667 be32_to_cpu(error
->ioa_data
[1]),
1668 be32_to_cpu(error
->ioa_data
[2]));
1672 * ipr_log_cache_error - Log a cache error.
1673 * @ioa_cfg: ioa config struct
1674 * @hostrcb: hostrcb struct
1679 static void ipr_log_cache_error(struct ipr_ioa_cfg
*ioa_cfg
,
1680 struct ipr_hostrcb
*hostrcb
)
1682 struct ipr_hostrcb_type_02_error
*error
=
1683 &hostrcb
->hcam
.u
.error
.u
.type_02_error
;
1685 ipr_err("-----Current Configuration-----\n");
1686 ipr_err("Cache Directory Card Information:\n");
1687 ipr_log_vpd(&error
->ioa_vpd
);
1688 ipr_err("Adapter Card Information:\n");
1689 ipr_log_vpd(&error
->cfc_vpd
);
1691 ipr_err("-----Expected Configuration-----\n");
1692 ipr_err("Cache Directory Card Information:\n");
1693 ipr_log_vpd(&error
->ioa_last_attached_to_cfc_vpd
);
1694 ipr_err("Adapter Card Information:\n");
1695 ipr_log_vpd(&error
->cfc_last_attached_to_ioa_vpd
);
1697 ipr_err("Additional IOA Data: %08X %08X %08X\n",
1698 be32_to_cpu(error
->ioa_data
[0]),
1699 be32_to_cpu(error
->ioa_data
[1]),
1700 be32_to_cpu(error
->ioa_data
[2]));
1704 * ipr_log_enhanced_config_error - Log a configuration error.
1705 * @ioa_cfg: ioa config struct
1706 * @hostrcb: hostrcb struct
1711 static void ipr_log_enhanced_config_error(struct ipr_ioa_cfg
*ioa_cfg
,
1712 struct ipr_hostrcb
*hostrcb
)
1714 int errors_logged
, i
;
1715 struct ipr_hostrcb_device_data_entry_enhanced
*dev_entry
;
1716 struct ipr_hostrcb_type_13_error
*error
;
1718 error
= &hostrcb
->hcam
.u
.error
.u
.type_13_error
;
1719 errors_logged
= be32_to_cpu(error
->errors_logged
);
1721 ipr_err("Device Errors Detected/Logged: %d/%d\n",
1722 be32_to_cpu(error
->errors_detected
), errors_logged
);
1724 dev_entry
= error
->dev
;
1726 for (i
= 0; i
< errors_logged
; i
++, dev_entry
++) {
1729 ipr_phys_res_err(ioa_cfg
, dev_entry
->dev_res_addr
, "Device %d", i
+ 1);
1730 ipr_log_ext_vpd(&dev_entry
->vpd
);
1732 ipr_err("-----New Device Information-----\n");
1733 ipr_log_ext_vpd(&dev_entry
->new_vpd
);
1735 ipr_err("Cache Directory Card Information:\n");
1736 ipr_log_ext_vpd(&dev_entry
->ioa_last_with_dev_vpd
);
1738 ipr_err("Adapter Card Information:\n");
1739 ipr_log_ext_vpd(&dev_entry
->cfc_last_with_dev_vpd
);
1744 * ipr_log_sis64_config_error - Log a device error.
1745 * @ioa_cfg: ioa config struct
1746 * @hostrcb: hostrcb struct
1751 static void ipr_log_sis64_config_error(struct ipr_ioa_cfg
*ioa_cfg
,
1752 struct ipr_hostrcb
*hostrcb
)
1754 int errors_logged
, i
;
1755 struct ipr_hostrcb64_device_data_entry_enhanced
*dev_entry
;
1756 struct ipr_hostrcb_type_23_error
*error
;
1757 char buffer
[IPR_MAX_RES_PATH_LENGTH
];
1759 error
= &hostrcb
->hcam
.u
.error64
.u
.type_23_error
;
1760 errors_logged
= be32_to_cpu(error
->errors_logged
);
1762 ipr_err("Device Errors Detected/Logged: %d/%d\n",
1763 be32_to_cpu(error
->errors_detected
), errors_logged
);
1765 dev_entry
= error
->dev
;
1767 for (i
= 0; i
< errors_logged
; i
++, dev_entry
++) {
1770 ipr_err("Device %d : %s", i
+ 1,
1771 __ipr_format_res_path(dev_entry
->res_path
,
1772 buffer
, sizeof(buffer
)));
1773 ipr_log_ext_vpd(&dev_entry
->vpd
);
1775 ipr_err("-----New Device Information-----\n");
1776 ipr_log_ext_vpd(&dev_entry
->new_vpd
);
1778 ipr_err("Cache Directory Card Information:\n");
1779 ipr_log_ext_vpd(&dev_entry
->ioa_last_with_dev_vpd
);
1781 ipr_err("Adapter Card Information:\n");
1782 ipr_log_ext_vpd(&dev_entry
->cfc_last_with_dev_vpd
);
1787 * ipr_log_config_error - Log a configuration error.
1788 * @ioa_cfg: ioa config struct
1789 * @hostrcb: hostrcb struct
1794 static void ipr_log_config_error(struct ipr_ioa_cfg
*ioa_cfg
,
1795 struct ipr_hostrcb
*hostrcb
)
1797 int errors_logged
, i
;
1798 struct ipr_hostrcb_device_data_entry
*dev_entry
;
1799 struct ipr_hostrcb_type_03_error
*error
;
1801 error
= &hostrcb
->hcam
.u
.error
.u
.type_03_error
;
1802 errors_logged
= be32_to_cpu(error
->errors_logged
);
1804 ipr_err("Device Errors Detected/Logged: %d/%d\n",
1805 be32_to_cpu(error
->errors_detected
), errors_logged
);
1807 dev_entry
= error
->dev
;
1809 for (i
= 0; i
< errors_logged
; i
++, dev_entry
++) {
1812 ipr_phys_res_err(ioa_cfg
, dev_entry
->dev_res_addr
, "Device %d", i
+ 1);
1813 ipr_log_vpd(&dev_entry
->vpd
);
1815 ipr_err("-----New Device Information-----\n");
1816 ipr_log_vpd(&dev_entry
->new_vpd
);
1818 ipr_err("Cache Directory Card Information:\n");
1819 ipr_log_vpd(&dev_entry
->ioa_last_with_dev_vpd
);
1821 ipr_err("Adapter Card Information:\n");
1822 ipr_log_vpd(&dev_entry
->cfc_last_with_dev_vpd
);
1824 ipr_err("Additional IOA Data: %08X %08X %08X %08X %08X\n",
1825 be32_to_cpu(dev_entry
->ioa_data
[0]),
1826 be32_to_cpu(dev_entry
->ioa_data
[1]),
1827 be32_to_cpu(dev_entry
->ioa_data
[2]),
1828 be32_to_cpu(dev_entry
->ioa_data
[3]),
1829 be32_to_cpu(dev_entry
->ioa_data
[4]));
1834 * ipr_log_enhanced_array_error - Log an array configuration error.
1835 * @ioa_cfg: ioa config struct
1836 * @hostrcb: hostrcb struct
1841 static void ipr_log_enhanced_array_error(struct ipr_ioa_cfg
*ioa_cfg
,
1842 struct ipr_hostrcb
*hostrcb
)
1845 struct ipr_hostrcb_type_14_error
*error
;
1846 struct ipr_hostrcb_array_data_entry_enhanced
*array_entry
;
1847 const u8 zero_sn
[IPR_SERIAL_NUM_LEN
] = { [0 ... IPR_SERIAL_NUM_LEN
-1] = '0' };
1849 error
= &hostrcb
->hcam
.u
.error
.u
.type_14_error
;
1853 ipr_err("RAID %s Array Configuration: %d:%d:%d:%d\n",
1854 error
->protection_level
,
1855 ioa_cfg
->host
->host_no
,
1856 error
->last_func_vset_res_addr
.bus
,
1857 error
->last_func_vset_res_addr
.target
,
1858 error
->last_func_vset_res_addr
.lun
);
1862 array_entry
= error
->array_member
;
1863 num_entries
= min_t(u32
, be32_to_cpu(error
->num_entries
),
1864 ARRAY_SIZE(error
->array_member
));
1866 for (i
= 0; i
< num_entries
; i
++, array_entry
++) {
1867 if (!memcmp(array_entry
->vpd
.vpd
.sn
, zero_sn
, IPR_SERIAL_NUM_LEN
))
1870 if (be32_to_cpu(error
->exposed_mode_adn
) == i
)
1871 ipr_err("Exposed Array Member %d:\n", i
);
1873 ipr_err("Array Member %d:\n", i
);
1875 ipr_log_ext_vpd(&array_entry
->vpd
);
1876 ipr_phys_res_err(ioa_cfg
, array_entry
->dev_res_addr
, "Current Location");
1877 ipr_phys_res_err(ioa_cfg
, array_entry
->expected_dev_res_addr
,
1878 "Expected Location");
1885 * ipr_log_array_error - Log an array configuration error.
1886 * @ioa_cfg: ioa config struct
1887 * @hostrcb: hostrcb struct
1892 static void ipr_log_array_error(struct ipr_ioa_cfg
*ioa_cfg
,
1893 struct ipr_hostrcb
*hostrcb
)
1896 struct ipr_hostrcb_type_04_error
*error
;
1897 struct ipr_hostrcb_array_data_entry
*array_entry
;
1898 const u8 zero_sn
[IPR_SERIAL_NUM_LEN
] = { [0 ... IPR_SERIAL_NUM_LEN
-1] = '0' };
1900 error
= &hostrcb
->hcam
.u
.error
.u
.type_04_error
;
1904 ipr_err("RAID %s Array Configuration: %d:%d:%d:%d\n",
1905 error
->protection_level
,
1906 ioa_cfg
->host
->host_no
,
1907 error
->last_func_vset_res_addr
.bus
,
1908 error
->last_func_vset_res_addr
.target
,
1909 error
->last_func_vset_res_addr
.lun
);
1913 array_entry
= error
->array_member
;
1915 for (i
= 0; i
< 18; i
++) {
1916 if (!memcmp(array_entry
->vpd
.sn
, zero_sn
, IPR_SERIAL_NUM_LEN
))
1919 if (be32_to_cpu(error
->exposed_mode_adn
) == i
)
1920 ipr_err("Exposed Array Member %d:\n", i
);
1922 ipr_err("Array Member %d:\n", i
);
1924 ipr_log_vpd(&array_entry
->vpd
);
1926 ipr_phys_res_err(ioa_cfg
, array_entry
->dev_res_addr
, "Current Location");
1927 ipr_phys_res_err(ioa_cfg
, array_entry
->expected_dev_res_addr
,
1928 "Expected Location");
1933 array_entry
= error
->array_member2
;
1940 * ipr_log_hex_data - Log additional hex IOA error data.
1941 * @ioa_cfg: ioa config struct
1942 * @data: IOA error data
1948 static void ipr_log_hex_data(struct ipr_ioa_cfg
*ioa_cfg
, __be32
*data
, int len
)
1955 if (ioa_cfg
->log_level
<= IPR_DEFAULT_LOG_LEVEL
)
1956 len
= min_t(int, len
, IPR_DEFAULT_MAX_ERROR_DUMP
);
1958 for (i
= 0; i
< len
/ 4; i
+= 4) {
1959 ipr_err("%08X: %08X %08X %08X %08X\n", i
*4,
1960 be32_to_cpu(data
[i
]),
1961 be32_to_cpu(data
[i
+1]),
1962 be32_to_cpu(data
[i
+2]),
1963 be32_to_cpu(data
[i
+3]));
1968 * ipr_log_enhanced_dual_ioa_error - Log an enhanced dual adapter error.
1969 * @ioa_cfg: ioa config struct
1970 * @hostrcb: hostrcb struct
1975 static void ipr_log_enhanced_dual_ioa_error(struct ipr_ioa_cfg
*ioa_cfg
,
1976 struct ipr_hostrcb
*hostrcb
)
1978 struct ipr_hostrcb_type_17_error
*error
;
1981 error
= &hostrcb
->hcam
.u
.error64
.u
.type_17_error
;
1983 error
= &hostrcb
->hcam
.u
.error
.u
.type_17_error
;
1985 error
->failure_reason
[sizeof(error
->failure_reason
) - 1] = '\0';
1986 strim(error
->failure_reason
);
1988 ipr_hcam_err(hostrcb
, "%s [PRC: %08X]\n", error
->failure_reason
,
1989 be32_to_cpu(hostrcb
->hcam
.u
.error
.prc
));
1990 ipr_log_ext_vpd_compact("Remote IOA", hostrcb
, &error
->vpd
);
1991 ipr_log_hex_data(ioa_cfg
, error
->data
,
1992 be32_to_cpu(hostrcb
->hcam
.length
) -
1993 (offsetof(struct ipr_hostrcb_error
, u
) +
1994 offsetof(struct ipr_hostrcb_type_17_error
, data
)));
1998 * ipr_log_dual_ioa_error - Log a dual adapter error.
1999 * @ioa_cfg: ioa config struct
2000 * @hostrcb: hostrcb struct
2005 static void ipr_log_dual_ioa_error(struct ipr_ioa_cfg
*ioa_cfg
,
2006 struct ipr_hostrcb
*hostrcb
)
2008 struct ipr_hostrcb_type_07_error
*error
;
2010 error
= &hostrcb
->hcam
.u
.error
.u
.type_07_error
;
2011 error
->failure_reason
[sizeof(error
->failure_reason
) - 1] = '\0';
2012 strim(error
->failure_reason
);
2014 ipr_hcam_err(hostrcb
, "%s [PRC: %08X]\n", error
->failure_reason
,
2015 be32_to_cpu(hostrcb
->hcam
.u
.error
.prc
));
2016 ipr_log_vpd_compact("Remote IOA", hostrcb
, &error
->vpd
);
2017 ipr_log_hex_data(ioa_cfg
, error
->data
,
2018 be32_to_cpu(hostrcb
->hcam
.length
) -
2019 (offsetof(struct ipr_hostrcb_error
, u
) +
2020 offsetof(struct ipr_hostrcb_type_07_error
, data
)));
2023 static const struct {
2026 } path_active_desc
[] = {
2027 { IPR_PATH_NO_INFO
, "Path" },
2028 { IPR_PATH_ACTIVE
, "Active path" },
2029 { IPR_PATH_NOT_ACTIVE
, "Inactive path" }
2032 static const struct {
2035 } path_state_desc
[] = {
2036 { IPR_PATH_STATE_NO_INFO
, "has no path state information available" },
2037 { IPR_PATH_HEALTHY
, "is healthy" },
2038 { IPR_PATH_DEGRADED
, "is degraded" },
2039 { IPR_PATH_FAILED
, "is failed" }
2043 * ipr_log_fabric_path - Log a fabric path error
2044 * @hostrcb: hostrcb struct
2045 * @fabric: fabric descriptor
2050 static void ipr_log_fabric_path(struct ipr_hostrcb
*hostrcb
,
2051 struct ipr_hostrcb_fabric_desc
*fabric
)
2054 u8 path_state
= fabric
->path_state
;
2055 u8 active
= path_state
& IPR_PATH_ACTIVE_MASK
;
2056 u8 state
= path_state
& IPR_PATH_STATE_MASK
;
2058 for (i
= 0; i
< ARRAY_SIZE(path_active_desc
); i
++) {
2059 if (path_active_desc
[i
].active
!= active
)
2062 for (j
= 0; j
< ARRAY_SIZE(path_state_desc
); j
++) {
2063 if (path_state_desc
[j
].state
!= state
)
2066 if (fabric
->cascaded_expander
== 0xff && fabric
->phy
== 0xff) {
2067 ipr_hcam_err(hostrcb
, "%s %s: IOA Port=%d\n",
2068 path_active_desc
[i
].desc
, path_state_desc
[j
].desc
,
2070 } else if (fabric
->cascaded_expander
== 0xff) {
2071 ipr_hcam_err(hostrcb
, "%s %s: IOA Port=%d, Phy=%d\n",
2072 path_active_desc
[i
].desc
, path_state_desc
[j
].desc
,
2073 fabric
->ioa_port
, fabric
->phy
);
2074 } else if (fabric
->phy
== 0xff) {
2075 ipr_hcam_err(hostrcb
, "%s %s: IOA Port=%d, Cascade=%d\n",
2076 path_active_desc
[i
].desc
, path_state_desc
[j
].desc
,
2077 fabric
->ioa_port
, fabric
->cascaded_expander
);
2079 ipr_hcam_err(hostrcb
, "%s %s: IOA Port=%d, Cascade=%d, Phy=%d\n",
2080 path_active_desc
[i
].desc
, path_state_desc
[j
].desc
,
2081 fabric
->ioa_port
, fabric
->cascaded_expander
, fabric
->phy
);
2087 ipr_err("Path state=%02X IOA Port=%d Cascade=%d Phy=%d\n", path_state
,
2088 fabric
->ioa_port
, fabric
->cascaded_expander
, fabric
->phy
);
2092 * ipr_log64_fabric_path - Log a fabric path error
2093 * @hostrcb: hostrcb struct
2094 * @fabric: fabric descriptor
2099 static void ipr_log64_fabric_path(struct ipr_hostrcb
*hostrcb
,
2100 struct ipr_hostrcb64_fabric_desc
*fabric
)
2103 u8 path_state
= fabric
->path_state
;
2104 u8 active
= path_state
& IPR_PATH_ACTIVE_MASK
;
2105 u8 state
= path_state
& IPR_PATH_STATE_MASK
;
2106 char buffer
[IPR_MAX_RES_PATH_LENGTH
];
2108 for (i
= 0; i
< ARRAY_SIZE(path_active_desc
); i
++) {
2109 if (path_active_desc
[i
].active
!= active
)
2112 for (j
= 0; j
< ARRAY_SIZE(path_state_desc
); j
++) {
2113 if (path_state_desc
[j
].state
!= state
)
2116 ipr_hcam_err(hostrcb
, "%s %s: Resource Path=%s\n",
2117 path_active_desc
[i
].desc
, path_state_desc
[j
].desc
,
2118 ipr_format_res_path(hostrcb
->ioa_cfg
,
2120 buffer
, sizeof(buffer
)));
2125 ipr_err("Path state=%02X Resource Path=%s\n", path_state
,
2126 ipr_format_res_path(hostrcb
->ioa_cfg
, fabric
->res_path
,
2127 buffer
, sizeof(buffer
)));
2130 static const struct {
2133 } path_type_desc
[] = {
2134 { IPR_PATH_CFG_IOA_PORT
, "IOA port" },
2135 { IPR_PATH_CFG_EXP_PORT
, "Expander port" },
2136 { IPR_PATH_CFG_DEVICE_PORT
, "Device port" },
2137 { IPR_PATH_CFG_DEVICE_LUN
, "Device LUN" }
2140 static const struct {
2143 } path_status_desc
[] = {
2144 { IPR_PATH_CFG_NO_PROB
, "Functional" },
2145 { IPR_PATH_CFG_DEGRADED
, "Degraded" },
2146 { IPR_PATH_CFG_FAILED
, "Failed" },
2147 { IPR_PATH_CFG_SUSPECT
, "Suspect" },
2148 { IPR_PATH_NOT_DETECTED
, "Missing" },
2149 { IPR_PATH_INCORRECT_CONN
, "Incorrectly connected" }
/*
 * Human-readable SAS phy link-rate strings, indexed by the 4-bit value
 * masked out with IPR_PHY_LINK_RATE_MASK.
 * NOTE(review): entries other than "phy reset problem" reconstructed from
 * the standard SAS negotiated link-rate encoding — confirm against the
 * original table.
 */
static const char *link_rate[] = {
	"unknown",
	"disabled",
	"phy reset problem",
	"spinup hold",
	"port selector",
	"add phy",
	"unknown",
	"unknown",
	"1.5Gbps",
	"3.0Gbps",
	"unknown",
	"unknown",
	"unknown",
	"unknown",
	"unknown",
	"unknown"
};
2172 * ipr_log_path_elem - Log a fabric path element.
2173 * @hostrcb: hostrcb struct
2174 * @cfg: fabric path element struct
2179 static void ipr_log_path_elem(struct ipr_hostrcb
*hostrcb
,
2180 struct ipr_hostrcb_config_element
*cfg
)
2183 u8 type
= cfg
->type_status
& IPR_PATH_CFG_TYPE_MASK
;
2184 u8 status
= cfg
->type_status
& IPR_PATH_CFG_STATUS_MASK
;
2186 if (type
== IPR_PATH_CFG_NOT_EXIST
)
2189 for (i
= 0; i
< ARRAY_SIZE(path_type_desc
); i
++) {
2190 if (path_type_desc
[i
].type
!= type
)
2193 for (j
= 0; j
< ARRAY_SIZE(path_status_desc
); j
++) {
2194 if (path_status_desc
[j
].status
!= status
)
2197 if (type
== IPR_PATH_CFG_IOA_PORT
) {
2198 ipr_hcam_err(hostrcb
, "%s %s: Phy=%d, Link rate=%s, WWN=%08X%08X\n",
2199 path_status_desc
[j
].desc
, path_type_desc
[i
].desc
,
2200 cfg
->phy
, link_rate
[cfg
->link_rate
& IPR_PHY_LINK_RATE_MASK
],
2201 be32_to_cpu(cfg
->wwid
[0]), be32_to_cpu(cfg
->wwid
[1]));
2203 if (cfg
->cascaded_expander
== 0xff && cfg
->phy
== 0xff) {
2204 ipr_hcam_err(hostrcb
, "%s %s: Link rate=%s, WWN=%08X%08X\n",
2205 path_status_desc
[j
].desc
, path_type_desc
[i
].desc
,
2206 link_rate
[cfg
->link_rate
& IPR_PHY_LINK_RATE_MASK
],
2207 be32_to_cpu(cfg
->wwid
[0]), be32_to_cpu(cfg
->wwid
[1]));
2208 } else if (cfg
->cascaded_expander
== 0xff) {
2209 ipr_hcam_err(hostrcb
, "%s %s: Phy=%d, Link rate=%s, "
2210 "WWN=%08X%08X\n", path_status_desc
[j
].desc
,
2211 path_type_desc
[i
].desc
, cfg
->phy
,
2212 link_rate
[cfg
->link_rate
& IPR_PHY_LINK_RATE_MASK
],
2213 be32_to_cpu(cfg
->wwid
[0]), be32_to_cpu(cfg
->wwid
[1]));
2214 } else if (cfg
->phy
== 0xff) {
2215 ipr_hcam_err(hostrcb
, "%s %s: Cascade=%d, Link rate=%s, "
2216 "WWN=%08X%08X\n", path_status_desc
[j
].desc
,
2217 path_type_desc
[i
].desc
, cfg
->cascaded_expander
,
2218 link_rate
[cfg
->link_rate
& IPR_PHY_LINK_RATE_MASK
],
2219 be32_to_cpu(cfg
->wwid
[0]), be32_to_cpu(cfg
->wwid
[1]));
2221 ipr_hcam_err(hostrcb
, "%s %s: Cascade=%d, Phy=%d, Link rate=%s "
2222 "WWN=%08X%08X\n", path_status_desc
[j
].desc
,
2223 path_type_desc
[i
].desc
, cfg
->cascaded_expander
, cfg
->phy
,
2224 link_rate
[cfg
->link_rate
& IPR_PHY_LINK_RATE_MASK
],
2225 be32_to_cpu(cfg
->wwid
[0]), be32_to_cpu(cfg
->wwid
[1]));
2232 ipr_hcam_err(hostrcb
, "Path element=%02X: Cascade=%d Phy=%d Link rate=%s "
2233 "WWN=%08X%08X\n", cfg
->type_status
, cfg
->cascaded_expander
, cfg
->phy
,
2234 link_rate
[cfg
->link_rate
& IPR_PHY_LINK_RATE_MASK
],
2235 be32_to_cpu(cfg
->wwid
[0]), be32_to_cpu(cfg
->wwid
[1]));
2239 * ipr_log64_path_elem - Log a fabric path element.
2240 * @hostrcb: hostrcb struct
2241 * @cfg: fabric path element struct
2246 static void ipr_log64_path_elem(struct ipr_hostrcb
*hostrcb
,
2247 struct ipr_hostrcb64_config_element
*cfg
)
2250 u8 desc_id
= cfg
->descriptor_id
& IPR_DESCRIPTOR_MASK
;
2251 u8 type
= cfg
->type_status
& IPR_PATH_CFG_TYPE_MASK
;
2252 u8 status
= cfg
->type_status
& IPR_PATH_CFG_STATUS_MASK
;
2253 char buffer
[IPR_MAX_RES_PATH_LENGTH
];
2255 if (type
== IPR_PATH_CFG_NOT_EXIST
|| desc_id
!= IPR_DESCRIPTOR_SIS64
)
2258 for (i
= 0; i
< ARRAY_SIZE(path_type_desc
); i
++) {
2259 if (path_type_desc
[i
].type
!= type
)
2262 for (j
= 0; j
< ARRAY_SIZE(path_status_desc
); j
++) {
2263 if (path_status_desc
[j
].status
!= status
)
2266 ipr_hcam_err(hostrcb
, "%s %s: Resource Path=%s, Link rate=%s, WWN=%08X%08X\n",
2267 path_status_desc
[j
].desc
, path_type_desc
[i
].desc
,
2268 ipr_format_res_path(hostrcb
->ioa_cfg
,
2269 cfg
->res_path
, buffer
, sizeof(buffer
)),
2270 link_rate
[cfg
->link_rate
& IPR_PHY_LINK_RATE_MASK
],
2271 be32_to_cpu(cfg
->wwid
[0]),
2272 be32_to_cpu(cfg
->wwid
[1]));
2276 ipr_hcam_err(hostrcb
, "Path element=%02X: Resource Path=%s, Link rate=%s "
2277 "WWN=%08X%08X\n", cfg
->type_status
,
2278 ipr_format_res_path(hostrcb
->ioa_cfg
,
2279 cfg
->res_path
, buffer
, sizeof(buffer
)),
2280 link_rate
[cfg
->link_rate
& IPR_PHY_LINK_RATE_MASK
],
2281 be32_to_cpu(cfg
->wwid
[0]), be32_to_cpu(cfg
->wwid
[1]));
2285 * ipr_log_fabric_error - Log a fabric error.
2286 * @ioa_cfg: ioa config struct
2287 * @hostrcb: hostrcb struct
2292 static void ipr_log_fabric_error(struct ipr_ioa_cfg
*ioa_cfg
,
2293 struct ipr_hostrcb
*hostrcb
)
2295 struct ipr_hostrcb_type_20_error
*error
;
2296 struct ipr_hostrcb_fabric_desc
*fabric
;
2297 struct ipr_hostrcb_config_element
*cfg
;
2300 error
= &hostrcb
->hcam
.u
.error
.u
.type_20_error
;
2301 error
->failure_reason
[sizeof(error
->failure_reason
) - 1] = '\0';
2302 ipr_hcam_err(hostrcb
, "%s\n", error
->failure_reason
);
2304 add_len
= be32_to_cpu(hostrcb
->hcam
.length
) -
2305 (offsetof(struct ipr_hostrcb_error
, u
) +
2306 offsetof(struct ipr_hostrcb_type_20_error
, desc
));
2308 for (i
= 0, fabric
= error
->desc
; i
< error
->num_entries
; i
++) {
2309 ipr_log_fabric_path(hostrcb
, fabric
);
2310 for_each_fabric_cfg(fabric
, cfg
)
2311 ipr_log_path_elem(hostrcb
, cfg
);
2313 add_len
-= be16_to_cpu(fabric
->length
);
2314 fabric
= (struct ipr_hostrcb_fabric_desc
*)
2315 ((unsigned long)fabric
+ be16_to_cpu(fabric
->length
));
2318 ipr_log_hex_data(ioa_cfg
, (__be32
*)fabric
, add_len
);
2322 * ipr_log_sis64_array_error - Log a sis64 array error.
2323 * @ioa_cfg: ioa config struct
2324 * @hostrcb: hostrcb struct
2329 static void ipr_log_sis64_array_error(struct ipr_ioa_cfg
*ioa_cfg
,
2330 struct ipr_hostrcb
*hostrcb
)
2333 struct ipr_hostrcb_type_24_error
*error
;
2334 struct ipr_hostrcb64_array_data_entry
*array_entry
;
2335 char buffer
[IPR_MAX_RES_PATH_LENGTH
];
2336 const u8 zero_sn
[IPR_SERIAL_NUM_LEN
] = { [0 ... IPR_SERIAL_NUM_LEN
-1] = '0' };
2338 error
= &hostrcb
->hcam
.u
.error64
.u
.type_24_error
;
2342 ipr_err("RAID %s Array Configuration: %s\n",
2343 error
->protection_level
,
2344 ipr_format_res_path(ioa_cfg
, error
->last_res_path
,
2345 buffer
, sizeof(buffer
)));
2349 array_entry
= error
->array_member
;
2350 num_entries
= min_t(u32
, error
->num_entries
,
2351 ARRAY_SIZE(error
->array_member
));
2353 for (i
= 0; i
< num_entries
; i
++, array_entry
++) {
2355 if (!memcmp(array_entry
->vpd
.vpd
.sn
, zero_sn
, IPR_SERIAL_NUM_LEN
))
2358 if (error
->exposed_mode_adn
== i
)
2359 ipr_err("Exposed Array Member %d:\n", i
);
2361 ipr_err("Array Member %d:\n", i
);
2363 ipr_err("Array Member %d:\n", i
);
2364 ipr_log_ext_vpd(&array_entry
->vpd
);
2365 ipr_err("Current Location: %s\n",
2366 ipr_format_res_path(ioa_cfg
, array_entry
->res_path
,
2367 buffer
, sizeof(buffer
)));
2368 ipr_err("Expected Location: %s\n",
2369 ipr_format_res_path(ioa_cfg
,
2370 array_entry
->expected_res_path
,
2371 buffer
, sizeof(buffer
)));
2378 * ipr_log_sis64_fabric_error - Log a sis64 fabric error.
2379 * @ioa_cfg: ioa config struct
2380 * @hostrcb: hostrcb struct
2385 static void ipr_log_sis64_fabric_error(struct ipr_ioa_cfg
*ioa_cfg
,
2386 struct ipr_hostrcb
*hostrcb
)
2388 struct ipr_hostrcb_type_30_error
*error
;
2389 struct ipr_hostrcb64_fabric_desc
*fabric
;
2390 struct ipr_hostrcb64_config_element
*cfg
;
2393 error
= &hostrcb
->hcam
.u
.error64
.u
.type_30_error
;
2395 error
->failure_reason
[sizeof(error
->failure_reason
) - 1] = '\0';
2396 ipr_hcam_err(hostrcb
, "%s\n", error
->failure_reason
);
2398 add_len
= be32_to_cpu(hostrcb
->hcam
.length
) -
2399 (offsetof(struct ipr_hostrcb64_error
, u
) +
2400 offsetof(struct ipr_hostrcb_type_30_error
, desc
));
2402 for (i
= 0, fabric
= error
->desc
; i
< error
->num_entries
; i
++) {
2403 ipr_log64_fabric_path(hostrcb
, fabric
);
2404 for_each_fabric_cfg(fabric
, cfg
)
2405 ipr_log64_path_elem(hostrcb
, cfg
);
2407 add_len
-= be16_to_cpu(fabric
->length
);
2408 fabric
= (struct ipr_hostrcb64_fabric_desc
*)
2409 ((unsigned long)fabric
+ be16_to_cpu(fabric
->length
));
2412 ipr_log_hex_data(ioa_cfg
, (__be32
*)fabric
, add_len
);
2416 * ipr_log_generic_error - Log an adapter error.
2417 * @ioa_cfg: ioa config struct
2418 * @hostrcb: hostrcb struct
2423 static void ipr_log_generic_error(struct ipr_ioa_cfg
*ioa_cfg
,
2424 struct ipr_hostrcb
*hostrcb
)
2426 ipr_log_hex_data(ioa_cfg
, hostrcb
->hcam
.u
.raw
.data
,
2427 be32_to_cpu(hostrcb
->hcam
.length
));
2431 * ipr_log_sis64_device_error - Log a cache error.
2432 * @ioa_cfg: ioa config struct
2433 * @hostrcb: hostrcb struct
2438 static void ipr_log_sis64_device_error(struct ipr_ioa_cfg
*ioa_cfg
,
2439 struct ipr_hostrcb
*hostrcb
)
2441 struct ipr_hostrcb_type_21_error
*error
;
2442 char buffer
[IPR_MAX_RES_PATH_LENGTH
];
2444 error
= &hostrcb
->hcam
.u
.error64
.u
.type_21_error
;
2446 ipr_err("-----Failing Device Information-----\n");
2447 ipr_err("World Wide Unique ID: %08X%08X%08X%08X\n",
2448 be32_to_cpu(error
->wwn
[0]), be32_to_cpu(error
->wwn
[1]),
2449 be32_to_cpu(error
->wwn
[2]), be32_to_cpu(error
->wwn
[3]));
2450 ipr_err("Device Resource Path: %s\n",
2451 __ipr_format_res_path(error
->res_path
,
2452 buffer
, sizeof(buffer
)));
2453 error
->primary_problem_desc
[sizeof(error
->primary_problem_desc
) - 1] = '\0';
2454 error
->second_problem_desc
[sizeof(error
->second_problem_desc
) - 1] = '\0';
2455 ipr_err("Primary Problem Description: %s\n", error
->primary_problem_desc
);
2456 ipr_err("Secondary Problem Description: %s\n", error
->second_problem_desc
);
2457 ipr_err("SCSI Sense Data:\n");
2458 ipr_log_hex_data(ioa_cfg
, error
->sense_data
, sizeof(error
->sense_data
));
2459 ipr_err("SCSI Command Descriptor Block: \n");
2460 ipr_log_hex_data(ioa_cfg
, error
->cdb
, sizeof(error
->cdb
));
2462 ipr_err("Additional IOA Data:\n");
2463 ipr_log_hex_data(ioa_cfg
, error
->ioa_data
, be32_to_cpu(error
->length_of_error
));
2467 * ipr_get_error - Find the specfied IOASC in the ipr_error_table.
2470 * This function will return the index of into the ipr_error_table
2471 * for the specified IOASC. If the IOASC is not in the table,
2472 * 0 will be returned, which points to the entry used for unknown errors.
2475 * index into the ipr_error_table
2477 static u32
ipr_get_error(u32 ioasc
)
2481 for (i
= 0; i
< ARRAY_SIZE(ipr_error_table
); i
++)
2482 if (ipr_error_table
[i
].ioasc
== (ioasc
& IPR_IOASC_IOASC_MASK
))
2489 * ipr_handle_log_data - Log an adapter error.
2490 * @ioa_cfg: ioa config struct
2491 * @hostrcb: hostrcb struct
2493 * This function logs an adapter error to the system.
2498 static void ipr_handle_log_data(struct ipr_ioa_cfg
*ioa_cfg
,
2499 struct ipr_hostrcb
*hostrcb
)
2503 struct ipr_hostrcb_type_21_error
*error
;
2505 if (hostrcb
->hcam
.notify_type
!= IPR_HOST_RCB_NOTIF_TYPE_ERROR_LOG_ENTRY
)
2508 if (hostrcb
->hcam
.notifications_lost
== IPR_HOST_RCB_NOTIFICATIONS_LOST
)
2509 dev_err(&ioa_cfg
->pdev
->dev
, "Error notifications lost\n");
2512 ioasc
= be32_to_cpu(hostrcb
->hcam
.u
.error64
.fd_ioasc
);
2514 ioasc
= be32_to_cpu(hostrcb
->hcam
.u
.error
.fd_ioasc
);
2516 if (!ioa_cfg
->sis64
&& (ioasc
== IPR_IOASC_BUS_WAS_RESET
||
2517 ioasc
== IPR_IOASC_BUS_WAS_RESET_BY_OTHER
)) {
2518 /* Tell the midlayer we had a bus reset so it will handle the UA properly */
2519 scsi_report_bus_reset(ioa_cfg
->host
,
2520 hostrcb
->hcam
.u
.error
.fd_res_addr
.bus
);
2523 error_index
= ipr_get_error(ioasc
);
2525 if (!ipr_error_table
[error_index
].log_hcam
)
2528 if (ioasc
== IPR_IOASC_HW_CMD_FAILED
&&
2529 hostrcb
->hcam
.overlay_id
== IPR_HOST_RCB_OVERLAY_ID_21
) {
2530 error
= &hostrcb
->hcam
.u
.error64
.u
.type_21_error
;
2532 if (((be32_to_cpu(error
->sense_data
[0]) & 0x0000ff00) >> 8) == ILLEGAL_REQUEST
&&
2533 ioa_cfg
->log_level
<= IPR_DEFAULT_LOG_LEVEL
)
2537 ipr_hcam_err(hostrcb
, "%s\n", ipr_error_table
[error_index
].error
);
2539 /* Set indication we have logged an error */
2540 ioa_cfg
->errors_logged
++;
2542 if (ioa_cfg
->log_level
< ipr_error_table
[error_index
].log_hcam
)
2544 if (be32_to_cpu(hostrcb
->hcam
.length
) > sizeof(hostrcb
->hcam
.u
.raw
))
2545 hostrcb
->hcam
.length
= cpu_to_be32(sizeof(hostrcb
->hcam
.u
.raw
));
2547 switch (hostrcb
->hcam
.overlay_id
) {
2548 case IPR_HOST_RCB_OVERLAY_ID_2
:
2549 ipr_log_cache_error(ioa_cfg
, hostrcb
);
2551 case IPR_HOST_RCB_OVERLAY_ID_3
:
2552 ipr_log_config_error(ioa_cfg
, hostrcb
);
2554 case IPR_HOST_RCB_OVERLAY_ID_4
:
2555 case IPR_HOST_RCB_OVERLAY_ID_6
:
2556 ipr_log_array_error(ioa_cfg
, hostrcb
);
2558 case IPR_HOST_RCB_OVERLAY_ID_7
:
2559 ipr_log_dual_ioa_error(ioa_cfg
, hostrcb
);
2561 case IPR_HOST_RCB_OVERLAY_ID_12
:
2562 ipr_log_enhanced_cache_error(ioa_cfg
, hostrcb
);
2564 case IPR_HOST_RCB_OVERLAY_ID_13
:
2565 ipr_log_enhanced_config_error(ioa_cfg
, hostrcb
);
2567 case IPR_HOST_RCB_OVERLAY_ID_14
:
2568 case IPR_HOST_RCB_OVERLAY_ID_16
:
2569 ipr_log_enhanced_array_error(ioa_cfg
, hostrcb
);
2571 case IPR_HOST_RCB_OVERLAY_ID_17
:
2572 ipr_log_enhanced_dual_ioa_error(ioa_cfg
, hostrcb
);
2574 case IPR_HOST_RCB_OVERLAY_ID_20
:
2575 ipr_log_fabric_error(ioa_cfg
, hostrcb
);
2577 case IPR_HOST_RCB_OVERLAY_ID_21
:
2578 ipr_log_sis64_device_error(ioa_cfg
, hostrcb
);
2580 case IPR_HOST_RCB_OVERLAY_ID_23
:
2581 ipr_log_sis64_config_error(ioa_cfg
, hostrcb
);
2583 case IPR_HOST_RCB_OVERLAY_ID_24
:
2584 case IPR_HOST_RCB_OVERLAY_ID_26
:
2585 ipr_log_sis64_array_error(ioa_cfg
, hostrcb
);
2587 case IPR_HOST_RCB_OVERLAY_ID_30
:
2588 ipr_log_sis64_fabric_error(ioa_cfg
, hostrcb
);
2590 case IPR_HOST_RCB_OVERLAY_ID_1
:
2591 case IPR_HOST_RCB_OVERLAY_ID_DEFAULT
:
2593 ipr_log_generic_error(ioa_cfg
, hostrcb
);
2598 static struct ipr_hostrcb
*ipr_get_free_hostrcb(struct ipr_ioa_cfg
*ioa
)
2600 struct ipr_hostrcb
*hostrcb
;
2602 hostrcb
= list_first_entry_or_null(&ioa
->hostrcb_free_q
,
2603 struct ipr_hostrcb
, queue
);
2605 if (unlikely(!hostrcb
)) {
2606 dev_info(&ioa
->pdev
->dev
, "Reclaiming async error buffers.");
2607 hostrcb
= list_first_entry_or_null(&ioa
->hostrcb_report_q
,
2608 struct ipr_hostrcb
, queue
);
2611 list_del_init(&hostrcb
->queue
);
2616 * ipr_process_error - Op done function for an adapter error log.
2617 * @ipr_cmd: ipr command struct
2619 * This function is the op done function for an error log host
2620 * controlled async from the adapter. It will log the error and
2621 * send the HCAM back to the adapter.
2626 static void ipr_process_error(struct ipr_cmnd
*ipr_cmd
)
2628 struct ipr_ioa_cfg
*ioa_cfg
= ipr_cmd
->ioa_cfg
;
2629 struct ipr_hostrcb
*hostrcb
= ipr_cmd
->u
.hostrcb
;
2630 u32 ioasc
= be32_to_cpu(ipr_cmd
->s
.ioasa
.hdr
.ioasc
);
2634 fd_ioasc
= be32_to_cpu(hostrcb
->hcam
.u
.error64
.fd_ioasc
);
2636 fd_ioasc
= be32_to_cpu(hostrcb
->hcam
.u
.error
.fd_ioasc
);
2638 list_del_init(&hostrcb
->queue
);
2639 list_add_tail(&ipr_cmd
->queue
, &ipr_cmd
->hrrq
->hrrq_free_q
);
2642 ipr_handle_log_data(ioa_cfg
, hostrcb
);
2643 if (fd_ioasc
== IPR_IOASC_NR_IOA_RESET_REQUIRED
)
2644 ipr_initiate_ioa_reset(ioa_cfg
, IPR_SHUTDOWN_ABBREV
);
2645 } else if (ioasc
!= IPR_IOASC_IOA_WAS_RESET
&&
2646 ioasc
!= IPR_IOASC_ABORTED_CMD_TERM_BY_HOST
) {
2647 dev_err(&ioa_cfg
->pdev
->dev
,
2648 "Host RCB failed with IOASC: 0x%08X\n", ioasc
);
2651 list_add_tail(&hostrcb
->queue
, &ioa_cfg
->hostrcb_report_q
);
2652 schedule_work(&ioa_cfg
->work_q
);
2653 hostrcb
= ipr_get_free_hostrcb(ioa_cfg
);
2655 ipr_send_hcam(ioa_cfg
, IPR_HCAM_CDB_OP_CODE_LOG_DATA
, hostrcb
);
2659 * ipr_timeout - An internally generated op has timed out.
2660 * @ipr_cmd: ipr command struct
2662 * This function blocks host requests and initiates an
2668 static void ipr_timeout(struct timer_list
*t
)
2670 struct ipr_cmnd
*ipr_cmd
= from_timer(ipr_cmd
, t
, timer
);
2671 unsigned long lock_flags
= 0;
2672 struct ipr_ioa_cfg
*ioa_cfg
= ipr_cmd
->ioa_cfg
;
2675 spin_lock_irqsave(ioa_cfg
->host
->host_lock
, lock_flags
);
2677 ioa_cfg
->errors_logged
++;
2678 dev_err(&ioa_cfg
->pdev
->dev
,
2679 "Adapter being reset due to command timeout.\n");
2681 if (WAIT_FOR_DUMP
== ioa_cfg
->sdt_state
)
2682 ioa_cfg
->sdt_state
= GET_DUMP
;
2684 if (!ioa_cfg
->in_reset_reload
|| ioa_cfg
->reset_cmd
== ipr_cmd
)
2685 ipr_initiate_ioa_reset(ioa_cfg
, IPR_SHUTDOWN_NONE
);
2687 spin_unlock_irqrestore(ioa_cfg
->host
->host_lock
, lock_flags
);
2692 * ipr_oper_timeout - Adapter timed out transitioning to operational
2693 * @ipr_cmd: ipr command struct
2695 * This function blocks host requests and initiates an
2701 static void ipr_oper_timeout(struct timer_list
*t
)
2703 struct ipr_cmnd
*ipr_cmd
= from_timer(ipr_cmd
, t
, timer
);
2704 unsigned long lock_flags
= 0;
2705 struct ipr_ioa_cfg
*ioa_cfg
= ipr_cmd
->ioa_cfg
;
2708 spin_lock_irqsave(ioa_cfg
->host
->host_lock
, lock_flags
);
2710 ioa_cfg
->errors_logged
++;
2711 dev_err(&ioa_cfg
->pdev
->dev
,
2712 "Adapter timed out transitioning to operational.\n");
2714 if (WAIT_FOR_DUMP
== ioa_cfg
->sdt_state
)
2715 ioa_cfg
->sdt_state
= GET_DUMP
;
2717 if (!ioa_cfg
->in_reset_reload
|| ioa_cfg
->reset_cmd
== ipr_cmd
) {
2719 ioa_cfg
->reset_retries
+= IPR_NUM_RESET_RELOAD_RETRIES
;
2720 ipr_initiate_ioa_reset(ioa_cfg
, IPR_SHUTDOWN_NONE
);
2723 spin_unlock_irqrestore(ioa_cfg
->host
->host_lock
, lock_flags
);
2728 * ipr_find_ses_entry - Find matching SES in SES table
2729 * @res: resource entry struct of SES
2732 * pointer to SES table entry / NULL on failure
2734 static const struct ipr_ses_table_entry
*
2735 ipr_find_ses_entry(struct ipr_resource_entry
*res
)
2738 struct ipr_std_inq_vpids
*vpids
;
2739 const struct ipr_ses_table_entry
*ste
= ipr_ses_table
;
2741 for (i
= 0; i
< ARRAY_SIZE(ipr_ses_table
); i
++, ste
++) {
2742 for (j
= 0, matches
= 0; j
< IPR_PROD_ID_LEN
; j
++) {
2743 if (ste
->compare_product_id_byte
[j
] == 'X') {
2744 vpids
= &res
->std_inq_data
.vpids
;
2745 if (vpids
->product_id
[j
] == ste
->product_id
[j
])
2753 if (matches
== IPR_PROD_ID_LEN
)
2761 * ipr_get_max_scsi_speed - Determine max SCSI speed for a given bus
2762 * @ioa_cfg: ioa config struct
2764 * @bus_width: bus width
2767 * SCSI bus speed in units of 100KHz, 1600 is 160 MHz
2768 * For a 2-byte wide SCSI bus, the maximum transfer speed is
2769 * twice the maximum transfer rate (e.g. for a wide enabled bus,
2770 * max 160MHz = max 320MB/sec).
2772 static u32
ipr_get_max_scsi_speed(struct ipr_ioa_cfg
*ioa_cfg
, u8 bus
, u8 bus_width
)
2774 struct ipr_resource_entry
*res
;
2775 const struct ipr_ses_table_entry
*ste
;
2776 u32 max_xfer_rate
= IPR_MAX_SCSI_RATE(bus_width
);
2778 /* Loop through each config table entry in the config table buffer */
2779 list_for_each_entry(res
, &ioa_cfg
->used_res_q
, queue
) {
2780 if (!(IPR_IS_SES_DEVICE(res
->std_inq_data
)))
2783 if (bus
!= res
->bus
)
2786 if (!(ste
= ipr_find_ses_entry(res
)))
2789 max_xfer_rate
= (ste
->max_bus_speed_limit
* 10) / (bus_width
/ 8);
2792 return max_xfer_rate
;
2796 * ipr_wait_iodbg_ack - Wait for an IODEBUG ACK from the IOA
2797 * @ioa_cfg: ioa config struct
2798 * @max_delay: max delay in micro-seconds to wait
2800 * Waits for an IODEBUG ACK from the IOA, doing busy looping.
2803 * 0 on success / other on failure
2805 static int ipr_wait_iodbg_ack(struct ipr_ioa_cfg
*ioa_cfg
, int max_delay
)
2807 volatile u32 pcii_reg
;
2810 /* Read interrupt reg until IOA signals IO Debug Acknowledge */
2811 while (delay
< max_delay
) {
2812 pcii_reg
= readl(ioa_cfg
->regs
.sense_interrupt_reg
);
2814 if (pcii_reg
& IPR_PCII_IO_DEBUG_ACKNOWLEDGE
)
2817 /* udelay cannot be used if delay is more than a few milliseconds */
2818 if ((delay
/ 1000) > MAX_UDELAY_MS
)
2819 mdelay(delay
/ 1000);
2829 * ipr_get_sis64_dump_data_section - Dump IOA memory
2830 * @ioa_cfg: ioa config struct
2831 * @start_addr: adapter address to dump
2832 * @dest: destination kernel buffer
2833 * @length_in_words: length to dump in 4 byte words
2838 static int ipr_get_sis64_dump_data_section(struct ipr_ioa_cfg
*ioa_cfg
,
2840 __be32
*dest
, u32 length_in_words
)
2844 for (i
= 0; i
< length_in_words
; i
++) {
2845 writel(start_addr
+(i
*4), ioa_cfg
->regs
.dump_addr_reg
);
2846 *dest
= cpu_to_be32(readl(ioa_cfg
->regs
.dump_data_reg
));
2854 * ipr_get_ldump_data_section - Dump IOA memory
2855 * @ioa_cfg: ioa config struct
2856 * @start_addr: adapter address to dump
2857 * @dest: destination kernel buffer
2858 * @length_in_words: length to dump in 4 byte words
2861 * 0 on success / -EIO on failure
2863 static int ipr_get_ldump_data_section(struct ipr_ioa_cfg
*ioa_cfg
,
2865 __be32
*dest
, u32 length_in_words
)
2867 volatile u32 temp_pcii_reg
;
2871 return ipr_get_sis64_dump_data_section(ioa_cfg
, start_addr
,
2872 dest
, length_in_words
);
2874 /* Write IOA interrupt reg starting LDUMP state */
2875 writel((IPR_UPROCI_RESET_ALERT
| IPR_UPROCI_IO_DEBUG_ALERT
),
2876 ioa_cfg
->regs
.set_uproc_interrupt_reg32
);
2878 /* Wait for IO debug acknowledge */
2879 if (ipr_wait_iodbg_ack(ioa_cfg
,
2880 IPR_LDUMP_MAX_LONG_ACK_DELAY_IN_USEC
)) {
2881 dev_err(&ioa_cfg
->pdev
->dev
,
2882 "IOA dump long data transfer timeout\n");
2886 /* Signal LDUMP interlocked - clear IO debug ack */
2887 writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE
,
2888 ioa_cfg
->regs
.clr_interrupt_reg
);
2890 /* Write Mailbox with starting address */
2891 writel(start_addr
, ioa_cfg
->ioa_mailbox
);
2893 /* Signal address valid - clear IOA Reset alert */
2894 writel(IPR_UPROCI_RESET_ALERT
,
2895 ioa_cfg
->regs
.clr_uproc_interrupt_reg32
);
2897 for (i
= 0; i
< length_in_words
; i
++) {
2898 /* Wait for IO debug acknowledge */
2899 if (ipr_wait_iodbg_ack(ioa_cfg
,
2900 IPR_LDUMP_MAX_SHORT_ACK_DELAY_IN_USEC
)) {
2901 dev_err(&ioa_cfg
->pdev
->dev
,
2902 "IOA dump short data transfer timeout\n");
2906 /* Read data from mailbox and increment destination pointer */
2907 *dest
= cpu_to_be32(readl(ioa_cfg
->ioa_mailbox
));
2910 /* For all but the last word of data, signal data received */
2911 if (i
< (length_in_words
- 1)) {
2912 /* Signal dump data received - Clear IO debug Ack */
2913 writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE
,
2914 ioa_cfg
->regs
.clr_interrupt_reg
);
2918 /* Signal end of block transfer. Set reset alert then clear IO debug ack */
2919 writel(IPR_UPROCI_RESET_ALERT
,
2920 ioa_cfg
->regs
.set_uproc_interrupt_reg32
);
2922 writel(IPR_UPROCI_IO_DEBUG_ALERT
,
2923 ioa_cfg
->regs
.clr_uproc_interrupt_reg32
);
2925 /* Signal dump data received - Clear IO debug Ack */
2926 writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE
,
2927 ioa_cfg
->regs
.clr_interrupt_reg
);
2929 /* Wait for IOA to signal LDUMP exit - IOA reset alert will be cleared */
2930 while (delay
< IPR_LDUMP_MAX_SHORT_ACK_DELAY_IN_USEC
) {
2932 readl(ioa_cfg
->regs
.sense_uproc_interrupt_reg32
);
2934 if (!(temp_pcii_reg
& IPR_UPROCI_RESET_ALERT
))
2944 #ifdef CONFIG_SCSI_IPR_DUMP
2946 * ipr_sdt_copy - Copy Smart Dump Table to kernel buffer
2947 * @ioa_cfg: ioa config struct
2948 * @pci_address: adapter address
2949 * @length: length of data to copy
2951 * Copy data from PCI adapter to kernel buffer.
2952 * Note: length MUST be a 4 byte multiple
2954 * 0 on success / other on failure
2956 static int ipr_sdt_copy(struct ipr_ioa_cfg
*ioa_cfg
,
2957 unsigned long pci_address
, u32 length
)
2959 int bytes_copied
= 0;
2960 int cur_len
, rc
, rem_len
, rem_page_len
, max_dump_size
;
2962 unsigned long lock_flags
= 0;
2963 struct ipr_ioa_dump
*ioa_dump
= &ioa_cfg
->dump
->ioa_dump
;
2966 max_dump_size
= IPR_FMT3_MAX_IOA_DUMP_SIZE
;
2968 max_dump_size
= IPR_FMT2_MAX_IOA_DUMP_SIZE
;
2970 while (bytes_copied
< length
&&
2971 (ioa_dump
->hdr
.len
+ bytes_copied
) < max_dump_size
) {
2972 if (ioa_dump
->page_offset
>= PAGE_SIZE
||
2973 ioa_dump
->page_offset
== 0) {
2974 page
= (__be32
*)__get_free_page(GFP_ATOMIC
);
2978 return bytes_copied
;
2981 ioa_dump
->page_offset
= 0;
2982 ioa_dump
->ioa_data
[ioa_dump
->next_page_index
] = page
;
2983 ioa_dump
->next_page_index
++;
2985 page
= ioa_dump
->ioa_data
[ioa_dump
->next_page_index
- 1];
2987 rem_len
= length
- bytes_copied
;
2988 rem_page_len
= PAGE_SIZE
- ioa_dump
->page_offset
;
2989 cur_len
= min(rem_len
, rem_page_len
);
2991 spin_lock_irqsave(ioa_cfg
->host
->host_lock
, lock_flags
);
2992 if (ioa_cfg
->sdt_state
== ABORT_DUMP
) {
2995 rc
= ipr_get_ldump_data_section(ioa_cfg
,
2996 pci_address
+ bytes_copied
,
2997 &page
[ioa_dump
->page_offset
/ 4],
2998 (cur_len
/ sizeof(u32
)));
3000 spin_unlock_irqrestore(ioa_cfg
->host
->host_lock
, lock_flags
);
3003 ioa_dump
->page_offset
+= cur_len
;
3004 bytes_copied
+= cur_len
;
3012 return bytes_copied
;
3016 * ipr_init_dump_entry_hdr - Initialize a dump entry header.
3017 * @hdr: dump entry header struct
3022 static void ipr_init_dump_entry_hdr(struct ipr_dump_entry_header
*hdr
)
3024 hdr
->eye_catcher
= IPR_DUMP_EYE_CATCHER
;
3026 hdr
->offset
= sizeof(*hdr
);
3027 hdr
->status
= IPR_DUMP_STATUS_SUCCESS
;
3031 * ipr_dump_ioa_type_data - Fill in the adapter type in the dump.
3032 * @ioa_cfg: ioa config struct
3033 * @driver_dump: driver dump struct
3038 static void ipr_dump_ioa_type_data(struct ipr_ioa_cfg
*ioa_cfg
,
3039 struct ipr_driver_dump
*driver_dump
)
3041 struct ipr_inquiry_page3
*ucode_vpd
= &ioa_cfg
->vpd_cbs
->page3_data
;
3043 ipr_init_dump_entry_hdr(&driver_dump
->ioa_type_entry
.hdr
);
3044 driver_dump
->ioa_type_entry
.hdr
.len
=
3045 sizeof(struct ipr_dump_ioa_type_entry
) -
3046 sizeof(struct ipr_dump_entry_header
);
3047 driver_dump
->ioa_type_entry
.hdr
.data_type
= IPR_DUMP_DATA_TYPE_BINARY
;
3048 driver_dump
->ioa_type_entry
.hdr
.id
= IPR_DUMP_DRIVER_TYPE_ID
;
3049 driver_dump
->ioa_type_entry
.type
= ioa_cfg
->type
;
3050 driver_dump
->ioa_type_entry
.fw_version
= (ucode_vpd
->major_release
<< 24) |
3051 (ucode_vpd
->card_type
<< 16) | (ucode_vpd
->minor_release
[0] << 8) |
3052 ucode_vpd
->minor_release
[1];
3053 driver_dump
->hdr
.num_entries
++;
3057 * ipr_dump_version_data - Fill in the driver version in the dump.
3058 * @ioa_cfg: ioa config struct
3059 * @driver_dump: driver dump struct
3064 static void ipr_dump_version_data(struct ipr_ioa_cfg
*ioa_cfg
,
3065 struct ipr_driver_dump
*driver_dump
)
3067 ipr_init_dump_entry_hdr(&driver_dump
->version_entry
.hdr
);
3068 driver_dump
->version_entry
.hdr
.len
=
3069 sizeof(struct ipr_dump_version_entry
) -
3070 sizeof(struct ipr_dump_entry_header
);
3071 driver_dump
->version_entry
.hdr
.data_type
= IPR_DUMP_DATA_TYPE_ASCII
;
3072 driver_dump
->version_entry
.hdr
.id
= IPR_DUMP_DRIVER_VERSION_ID
;
3073 strcpy(driver_dump
->version_entry
.version
, IPR_DRIVER_VERSION
);
3074 driver_dump
->hdr
.num_entries
++;
3078 * ipr_dump_trace_data - Fill in the IOA trace in the dump.
3079 * @ioa_cfg: ioa config struct
3080 * @driver_dump: driver dump struct
3085 static void ipr_dump_trace_data(struct ipr_ioa_cfg
*ioa_cfg
,
3086 struct ipr_driver_dump
*driver_dump
)
3088 ipr_init_dump_entry_hdr(&driver_dump
->trace_entry
.hdr
);
3089 driver_dump
->trace_entry
.hdr
.len
=
3090 sizeof(struct ipr_dump_trace_entry
) -
3091 sizeof(struct ipr_dump_entry_header
);
3092 driver_dump
->trace_entry
.hdr
.data_type
= IPR_DUMP_DATA_TYPE_BINARY
;
3093 driver_dump
->trace_entry
.hdr
.id
= IPR_DUMP_TRACE_ID
;
3094 memcpy(driver_dump
->trace_entry
.trace
, ioa_cfg
->trace
, IPR_TRACE_SIZE
);
3095 driver_dump
->hdr
.num_entries
++;
3099 * ipr_dump_location_data - Fill in the IOA location in the dump.
3100 * @ioa_cfg: ioa config struct
3101 * @driver_dump: driver dump struct
3106 static void ipr_dump_location_data(struct ipr_ioa_cfg
*ioa_cfg
,
3107 struct ipr_driver_dump
*driver_dump
)
3109 ipr_init_dump_entry_hdr(&driver_dump
->location_entry
.hdr
);
3110 driver_dump
->location_entry
.hdr
.len
=
3111 sizeof(struct ipr_dump_location_entry
) -
3112 sizeof(struct ipr_dump_entry_header
);
3113 driver_dump
->location_entry
.hdr
.data_type
= IPR_DUMP_DATA_TYPE_ASCII
;
3114 driver_dump
->location_entry
.hdr
.id
= IPR_DUMP_LOCATION_ID
;
3115 strcpy(driver_dump
->location_entry
.location
, dev_name(&ioa_cfg
->pdev
->dev
));
3116 driver_dump
->hdr
.num_entries
++;
3120 * ipr_get_ioa_dump - Perform a dump of the driver and adapter.
3121 * @ioa_cfg: ioa config struct
3122 * @dump: dump struct
3127 static void ipr_get_ioa_dump(struct ipr_ioa_cfg
*ioa_cfg
, struct ipr_dump
*dump
)
3129 unsigned long start_addr
, sdt_word
;
3130 unsigned long lock_flags
= 0;
3131 struct ipr_driver_dump
*driver_dump
= &dump
->driver_dump
;
3132 struct ipr_ioa_dump
*ioa_dump
= &dump
->ioa_dump
;
3133 u32 num_entries
, max_num_entries
, start_off
, end_off
;
3134 u32 max_dump_size
, bytes_to_copy
, bytes_copied
, rc
;
3135 struct ipr_sdt
*sdt
;
3141 spin_lock_irqsave(ioa_cfg
->host
->host_lock
, lock_flags
);
3143 if (ioa_cfg
->sdt_state
!= READ_DUMP
) {
3144 spin_unlock_irqrestore(ioa_cfg
->host
->host_lock
, lock_flags
);
3148 if (ioa_cfg
->sis64
) {
3149 spin_unlock_irqrestore(ioa_cfg
->host
->host_lock
, lock_flags
);
3150 ssleep(IPR_DUMP_DELAY_SECONDS
);
3151 spin_lock_irqsave(ioa_cfg
->host
->host_lock
, lock_flags
);
3154 start_addr
= readl(ioa_cfg
->ioa_mailbox
);
3156 if (!ioa_cfg
->sis64
&& !ipr_sdt_is_fmt2(start_addr
)) {
3157 dev_err(&ioa_cfg
->pdev
->dev
,
3158 "Invalid dump table format: %lx\n", start_addr
);
3159 spin_unlock_irqrestore(ioa_cfg
->host
->host_lock
, lock_flags
);
3163 dev_err(&ioa_cfg
->pdev
->dev
, "Dump of IOA initiated\n");
3165 driver_dump
->hdr
.eye_catcher
= IPR_DUMP_EYE_CATCHER
;
3167 /* Initialize the overall dump header */
3168 driver_dump
->hdr
.len
= sizeof(struct ipr_driver_dump
);
3169 driver_dump
->hdr
.num_entries
= 1;
3170 driver_dump
->hdr
.first_entry_offset
= sizeof(struct ipr_dump_header
);
3171 driver_dump
->hdr
.status
= IPR_DUMP_STATUS_SUCCESS
;
3172 driver_dump
->hdr
.os
= IPR_DUMP_OS_LINUX
;
3173 driver_dump
->hdr
.driver_name
= IPR_DUMP_DRIVER_NAME
;
3175 ipr_dump_version_data(ioa_cfg
, driver_dump
);
3176 ipr_dump_location_data(ioa_cfg
, driver_dump
);
3177 ipr_dump_ioa_type_data(ioa_cfg
, driver_dump
);
3178 ipr_dump_trace_data(ioa_cfg
, driver_dump
);
3180 /* Update dump_header */
3181 driver_dump
->hdr
.len
+= sizeof(struct ipr_dump_entry_header
);
3183 /* IOA Dump entry */
3184 ipr_init_dump_entry_hdr(&ioa_dump
->hdr
);
3185 ioa_dump
->hdr
.len
= 0;
3186 ioa_dump
->hdr
.data_type
= IPR_DUMP_DATA_TYPE_BINARY
;
3187 ioa_dump
->hdr
.id
= IPR_DUMP_IOA_DUMP_ID
;
3189 /* First entries in sdt are actually a list of dump addresses and
3190 lengths to gather the real dump data. sdt represents the pointer
3191 to the ioa generated dump table. Dump data will be extracted based
3192 on entries in this table */
3193 sdt
= &ioa_dump
->sdt
;
3195 if (ioa_cfg
->sis64
) {
3196 max_num_entries
= IPR_FMT3_NUM_SDT_ENTRIES
;
3197 max_dump_size
= IPR_FMT3_MAX_IOA_DUMP_SIZE
;
3199 max_num_entries
= IPR_FMT2_NUM_SDT_ENTRIES
;
3200 max_dump_size
= IPR_FMT2_MAX_IOA_DUMP_SIZE
;
3203 bytes_to_copy
= offsetof(struct ipr_sdt
, entry
) +
3204 (max_num_entries
* sizeof(struct ipr_sdt_entry
));
3205 rc
= ipr_get_ldump_data_section(ioa_cfg
, start_addr
, (__be32
*)sdt
,
3206 bytes_to_copy
/ sizeof(__be32
));
3208 /* Smart Dump table is ready to use and the first entry is valid */
3209 if (rc
|| ((be32_to_cpu(sdt
->hdr
.state
) != IPR_FMT3_SDT_READY_TO_USE
) &&
3210 (be32_to_cpu(sdt
->hdr
.state
) != IPR_FMT2_SDT_READY_TO_USE
))) {
3211 dev_err(&ioa_cfg
->pdev
->dev
,
3212 "Dump of IOA failed. Dump table not valid: %d, %X.\n",
3213 rc
, be32_to_cpu(sdt
->hdr
.state
));
3214 driver_dump
->hdr
.status
= IPR_DUMP_STATUS_FAILED
;
3215 ioa_cfg
->sdt_state
= DUMP_OBTAINED
;
3216 spin_unlock_irqrestore(ioa_cfg
->host
->host_lock
, lock_flags
);
3220 num_entries
= be32_to_cpu(sdt
->hdr
.num_entries_used
);
3222 if (num_entries
> max_num_entries
)
3223 num_entries
= max_num_entries
;
3225 /* Update dump length to the actual data to be copied */
3226 dump
->driver_dump
.hdr
.len
+= sizeof(struct ipr_sdt_header
);
3228 dump
->driver_dump
.hdr
.len
+= num_entries
* sizeof(struct ipr_sdt_entry
);
3230 dump
->driver_dump
.hdr
.len
+= max_num_entries
* sizeof(struct ipr_sdt_entry
);
3232 spin_unlock_irqrestore(ioa_cfg
->host
->host_lock
, lock_flags
);
3234 for (i
= 0; i
< num_entries
; i
++) {
3235 if (ioa_dump
->hdr
.len
> max_dump_size
) {
3236 driver_dump
->hdr
.status
= IPR_DUMP_STATUS_QUAL_SUCCESS
;
3240 if (sdt
->entry
[i
].flags
& IPR_SDT_VALID_ENTRY
) {
3241 sdt_word
= be32_to_cpu(sdt
->entry
[i
].start_token
);
3243 bytes_to_copy
= be32_to_cpu(sdt
->entry
[i
].end_token
);
3245 start_off
= sdt_word
& IPR_FMT2_MBX_ADDR_MASK
;
3246 end_off
= be32_to_cpu(sdt
->entry
[i
].end_token
);
3248 if (ipr_sdt_is_fmt2(sdt_word
) && sdt_word
)
3249 bytes_to_copy
= end_off
- start_off
;
3254 if (bytes_to_copy
> max_dump_size
) {
3255 sdt
->entry
[i
].flags
&= ~IPR_SDT_VALID_ENTRY
;
3259 /* Copy data from adapter to driver buffers */
3260 bytes_copied
= ipr_sdt_copy(ioa_cfg
, sdt_word
,
3263 ioa_dump
->hdr
.len
+= bytes_copied
;
3265 if (bytes_copied
!= bytes_to_copy
) {
3266 driver_dump
->hdr
.status
= IPR_DUMP_STATUS_QUAL_SUCCESS
;
3273 dev_err(&ioa_cfg
->pdev
->dev
, "Dump of IOA completed.\n");
3275 /* Update dump_header */
3276 driver_dump
->hdr
.len
+= ioa_dump
->hdr
.len
;
3278 ioa_cfg
->sdt_state
= DUMP_OBTAINED
;
3283 #define ipr_get_ioa_dump(ioa_cfg, dump) do { } while (0)
3287 * ipr_release_dump - Free adapter dump memory
3288 * @kref: kref struct
3293 static void ipr_release_dump(struct kref
*kref
)
3295 struct ipr_dump
*dump
= container_of(kref
, struct ipr_dump
, kref
);
3296 struct ipr_ioa_cfg
*ioa_cfg
= dump
->ioa_cfg
;
3297 unsigned long lock_flags
= 0;
3301 spin_lock_irqsave(ioa_cfg
->host
->host_lock
, lock_flags
);
3302 ioa_cfg
->dump
= NULL
;
3303 ioa_cfg
->sdt_state
= INACTIVE
;
3304 spin_unlock_irqrestore(ioa_cfg
->host
->host_lock
, lock_flags
);
3306 for (i
= 0; i
< dump
->ioa_dump
.next_page_index
; i
++)
3307 free_page((unsigned long) dump
->ioa_dump
.ioa_data
[i
]);
3309 vfree(dump
->ioa_dump
.ioa_data
);
3315 * ipr_worker_thread - Worker thread
3316 * @work: ioa config struct
3318 * Called at task level from a work thread. This function takes care
3319 * of adding and removing device from the mid-layer as configuration
3320 * changes are detected by the adapter.
3325 static void ipr_worker_thread(struct work_struct
*work
)
3327 unsigned long lock_flags
;
3328 struct ipr_resource_entry
*res
;
3329 struct scsi_device
*sdev
;
3330 struct ipr_dump
*dump
;
3331 struct ipr_ioa_cfg
*ioa_cfg
=
3332 container_of(work
, struct ipr_ioa_cfg
, work_q
);
3333 u8 bus
, target
, lun
;
3337 spin_lock_irqsave(ioa_cfg
->host
->host_lock
, lock_flags
);
3339 if (ioa_cfg
->sdt_state
== READ_DUMP
) {
3340 dump
= ioa_cfg
->dump
;
3342 spin_unlock_irqrestore(ioa_cfg
->host
->host_lock
, lock_flags
);
3345 kref_get(&dump
->kref
);
3346 spin_unlock_irqrestore(ioa_cfg
->host
->host_lock
, lock_flags
);
3347 ipr_get_ioa_dump(ioa_cfg
, dump
);
3348 kref_put(&dump
->kref
, ipr_release_dump
);
3350 spin_lock_irqsave(ioa_cfg
->host
->host_lock
, lock_flags
);
3351 if (ioa_cfg
->sdt_state
== DUMP_OBTAINED
&& !ioa_cfg
->dump_timeout
)
3352 ipr_initiate_ioa_reset(ioa_cfg
, IPR_SHUTDOWN_NONE
);
3353 spin_unlock_irqrestore(ioa_cfg
->host
->host_lock
, lock_flags
);
3357 if (ioa_cfg
->scsi_unblock
) {
3358 ioa_cfg
->scsi_unblock
= 0;
3359 ioa_cfg
->scsi_blocked
= 0;
3360 spin_unlock_irqrestore(ioa_cfg
->host
->host_lock
, lock_flags
);
3361 scsi_unblock_requests(ioa_cfg
->host
);
3362 spin_lock_irqsave(ioa_cfg
->host
->host_lock
, lock_flags
);
3363 if (ioa_cfg
->scsi_blocked
)
3364 scsi_block_requests(ioa_cfg
->host
);
3367 if (!ioa_cfg
->scan_enabled
) {
3368 spin_unlock_irqrestore(ioa_cfg
->host
->host_lock
, lock_flags
);
3375 if (!ioa_cfg
->hrrq
[IPR_INIT_HRRQ
].allow_cmds
) {
3376 spin_unlock_irqrestore(ioa_cfg
->host
->host_lock
, lock_flags
);
3380 list_for_each_entry(res
, &ioa_cfg
->used_res_q
, queue
) {
3381 if (res
->del_from_ml
&& res
->sdev
) {
3384 if (!scsi_device_get(sdev
)) {
3385 if (!res
->add_to_ml
)
3386 list_move_tail(&res
->queue
, &ioa_cfg
->free_res_q
);
3388 res
->del_from_ml
= 0;
3389 spin_unlock_irqrestore(ioa_cfg
->host
->host_lock
, lock_flags
);
3390 scsi_remove_device(sdev
);
3391 scsi_device_put(sdev
);
3392 spin_lock_irqsave(ioa_cfg
->host
->host_lock
, lock_flags
);
3399 list_for_each_entry(res
, &ioa_cfg
->used_res_q
, queue
) {
3400 if (res
->add_to_ml
) {
3402 target
= res
->target
;
3405 spin_unlock_irqrestore(ioa_cfg
->host
->host_lock
, lock_flags
);
3406 scsi_add_device(ioa_cfg
->host
, bus
, target
, lun
);
3407 spin_lock_irqsave(ioa_cfg
->host
->host_lock
, lock_flags
);
3412 ioa_cfg
->scan_done
= 1;
3413 spin_unlock_irqrestore(ioa_cfg
->host
->host_lock
, lock_flags
);
3414 kobject_uevent(&ioa_cfg
->host
->shost_dev
.kobj
, KOBJ_CHANGE
);
3418 #ifdef CONFIG_SCSI_IPR_TRACE
3420 * ipr_read_trace - Dump the adapter trace
3421 * @filp: open sysfs file
3422 * @kobj: kobject struct
3423 * @bin_attr: bin_attribute struct
3426 * @count: buffer size
3429 * number of bytes printed to buffer
3431 static ssize_t
ipr_read_trace(struct file
*filp
, struct kobject
*kobj
,
3432 struct bin_attribute
*bin_attr
,
3433 char *buf
, loff_t off
, size_t count
)
3435 struct device
*dev
= container_of(kobj
, struct device
, kobj
);
3436 struct Scsi_Host
*shost
= class_to_shost(dev
);
3437 struct ipr_ioa_cfg
*ioa_cfg
= (struct ipr_ioa_cfg
*)shost
->hostdata
;
3438 unsigned long lock_flags
= 0;
3441 spin_lock_irqsave(ioa_cfg
->host
->host_lock
, lock_flags
);
3442 ret
= memory_read_from_buffer(buf
, count
, &off
, ioa_cfg
->trace
,
3444 spin_unlock_irqrestore(ioa_cfg
->host
->host_lock
, lock_flags
);
3449 static struct bin_attribute ipr_trace_attr
= {
3455 .read
= ipr_read_trace
,
3460 * ipr_show_fw_version - Show the firmware version
3461 * @dev: class device struct
3465 * number of bytes printed to buffer
3467 static ssize_t
ipr_show_fw_version(struct device
*dev
,
3468 struct device_attribute
*attr
, char *buf
)
3470 struct Scsi_Host
*shost
= class_to_shost(dev
);
3471 struct ipr_ioa_cfg
*ioa_cfg
= (struct ipr_ioa_cfg
*)shost
->hostdata
;
3472 struct ipr_inquiry_page3
*ucode_vpd
= &ioa_cfg
->vpd_cbs
->page3_data
;
3473 unsigned long lock_flags
= 0;
3476 spin_lock_irqsave(ioa_cfg
->host
->host_lock
, lock_flags
);
3477 len
= snprintf(buf
, PAGE_SIZE
, "%02X%02X%02X%02X\n",
3478 ucode_vpd
->major_release
, ucode_vpd
->card_type
,
3479 ucode_vpd
->minor_release
[0],
3480 ucode_vpd
->minor_release
[1]);
3481 spin_unlock_irqrestore(ioa_cfg
->host
->host_lock
, lock_flags
);
3485 static struct device_attribute ipr_fw_version_attr
= {
3487 .name
= "fw_version",
3490 .show
= ipr_show_fw_version
,
3494 * ipr_show_log_level - Show the adapter's error logging level
3495 * @dev: class device struct
3499 * number of bytes printed to buffer
3501 static ssize_t
ipr_show_log_level(struct device
*dev
,
3502 struct device_attribute
*attr
, char *buf
)
3504 struct Scsi_Host
*shost
= class_to_shost(dev
);
3505 struct ipr_ioa_cfg
*ioa_cfg
= (struct ipr_ioa_cfg
*)shost
->hostdata
;
3506 unsigned long lock_flags
= 0;
3509 spin_lock_irqsave(ioa_cfg
->host
->host_lock
, lock_flags
);
3510 len
= snprintf(buf
, PAGE_SIZE
, "%d\n", ioa_cfg
->log_level
);
3511 spin_unlock_irqrestore(ioa_cfg
->host
->host_lock
, lock_flags
);
3516 * ipr_store_log_level - Change the adapter's error logging level
3517 * @dev: class device struct
3521 * number of bytes printed to buffer
3523 static ssize_t
ipr_store_log_level(struct device
*dev
,
3524 struct device_attribute
*attr
,
3525 const char *buf
, size_t count
)
3527 struct Scsi_Host
*shost
= class_to_shost(dev
);
3528 struct ipr_ioa_cfg
*ioa_cfg
= (struct ipr_ioa_cfg
*)shost
->hostdata
;
3529 unsigned long lock_flags
= 0;
3531 spin_lock_irqsave(ioa_cfg
->host
->host_lock
, lock_flags
);
3532 ioa_cfg
->log_level
= simple_strtoul(buf
, NULL
, 10);
3533 spin_unlock_irqrestore(ioa_cfg
->host
->host_lock
, lock_flags
);
3537 static struct device_attribute ipr_log_level_attr
= {
3539 .name
= "log_level",
3540 .mode
= S_IRUGO
| S_IWUSR
,
3542 .show
= ipr_show_log_level
,
3543 .store
= ipr_store_log_level
3547 * ipr_store_diagnostics - IOA Diagnostics interface
3548 * @dev: device struct
3550 * @count: buffer size
3552 * This function will reset the adapter and wait a reasonable
3553 * amount of time for any errors that the adapter might log.
3556 * count on success / other on failure
3558 static ssize_t
ipr_store_diagnostics(struct device
*dev
,
3559 struct device_attribute
*attr
,
3560 const char *buf
, size_t count
)
3562 struct Scsi_Host
*shost
= class_to_shost(dev
);
3563 struct ipr_ioa_cfg
*ioa_cfg
= (struct ipr_ioa_cfg
*)shost
->hostdata
;
3564 unsigned long lock_flags
= 0;
3567 if (!capable(CAP_SYS_ADMIN
))
3570 spin_lock_irqsave(ioa_cfg
->host
->host_lock
, lock_flags
);
3571 while (ioa_cfg
->in_reset_reload
) {
3572 spin_unlock_irqrestore(ioa_cfg
->host
->host_lock
, lock_flags
);
3573 wait_event(ioa_cfg
->reset_wait_q
, !ioa_cfg
->in_reset_reload
);
3574 spin_lock_irqsave(ioa_cfg
->host
->host_lock
, lock_flags
);
3577 ioa_cfg
->errors_logged
= 0;
3578 ipr_initiate_ioa_reset(ioa_cfg
, IPR_SHUTDOWN_NORMAL
);
3580 if (ioa_cfg
->in_reset_reload
) {
3581 spin_unlock_irqrestore(ioa_cfg
->host
->host_lock
, lock_flags
);
3582 wait_event(ioa_cfg
->reset_wait_q
, !ioa_cfg
->in_reset_reload
);
3584 /* Wait for a second for any errors to be logged */
3587 spin_unlock_irqrestore(ioa_cfg
->host
->host_lock
, lock_flags
);
3591 spin_lock_irqsave(ioa_cfg
->host
->host_lock
, lock_flags
);
3592 if (ioa_cfg
->in_reset_reload
|| ioa_cfg
->errors_logged
)
3594 spin_unlock_irqrestore(ioa_cfg
->host
->host_lock
, lock_flags
);
3599 static struct device_attribute ipr_diagnostics_attr
= {
3601 .name
= "run_diagnostics",
3604 .store
= ipr_store_diagnostics
3608 * ipr_show_adapter_state - Show the adapter's state
3609 * @class_dev: device struct
3613 * number of bytes printed to buffer
3615 static ssize_t
ipr_show_adapter_state(struct device
*dev
,
3616 struct device_attribute
*attr
, char *buf
)
3618 struct Scsi_Host
*shost
= class_to_shost(dev
);
3619 struct ipr_ioa_cfg
*ioa_cfg
= (struct ipr_ioa_cfg
*)shost
->hostdata
;
3620 unsigned long lock_flags
= 0;
3623 spin_lock_irqsave(ioa_cfg
->host
->host_lock
, lock_flags
);
3624 if (ioa_cfg
->hrrq
[IPR_INIT_HRRQ
].ioa_is_dead
)
3625 len
= snprintf(buf
, PAGE_SIZE
, "offline\n");
3627 len
= snprintf(buf
, PAGE_SIZE
, "online\n");
3628 spin_unlock_irqrestore(ioa_cfg
->host
->host_lock
, lock_flags
);
3633 * ipr_store_adapter_state - Change adapter state
3634 * @dev: device struct
3636 * @count: buffer size
3638 * This function will change the adapter's state.
3641 * count on success / other on failure
3643 static ssize_t
ipr_store_adapter_state(struct device
*dev
,
3644 struct device_attribute
*attr
,
3645 const char *buf
, size_t count
)
3647 struct Scsi_Host
*shost
= class_to_shost(dev
);
3648 struct ipr_ioa_cfg
*ioa_cfg
= (struct ipr_ioa_cfg
*)shost
->hostdata
;
3649 unsigned long lock_flags
;
3650 int result
= count
, i
;
3652 if (!capable(CAP_SYS_ADMIN
))
3655 spin_lock_irqsave(ioa_cfg
->host
->host_lock
, lock_flags
);
3656 if (ioa_cfg
->hrrq
[IPR_INIT_HRRQ
].ioa_is_dead
&&
3657 !strncmp(buf
, "online", 6)) {
3658 for (i
= 0; i
< ioa_cfg
->hrrq_num
; i
++) {
3659 spin_lock(&ioa_cfg
->hrrq
[i
]._lock
);
3660 ioa_cfg
->hrrq
[i
].ioa_is_dead
= 0;
3661 spin_unlock(&ioa_cfg
->hrrq
[i
]._lock
);
3664 ioa_cfg
->reset_retries
= 0;
3665 ioa_cfg
->in_ioa_bringdown
= 0;
3666 ipr_initiate_ioa_reset(ioa_cfg
, IPR_SHUTDOWN_NONE
);
3668 spin_unlock_irqrestore(ioa_cfg
->host
->host_lock
, lock_flags
);
3669 wait_event(ioa_cfg
->reset_wait_q
, !ioa_cfg
->in_reset_reload
);
3674 static struct device_attribute ipr_ioa_state_attr
= {
3676 .name
= "online_state",
3677 .mode
= S_IRUGO
| S_IWUSR
,
3679 .show
= ipr_show_adapter_state
,
3680 .store
= ipr_store_adapter_state
3684 * ipr_store_reset_adapter - Reset the adapter
3685 * @dev: device struct
3687 * @count: buffer size
3689 * This function will reset the adapter.
3692 * count on success / other on failure
3694 static ssize_t
ipr_store_reset_adapter(struct device
*dev
,
3695 struct device_attribute
*attr
,
3696 const char *buf
, size_t count
)
3698 struct Scsi_Host
*shost
= class_to_shost(dev
);
3699 struct ipr_ioa_cfg
*ioa_cfg
= (struct ipr_ioa_cfg
*)shost
->hostdata
;
3700 unsigned long lock_flags
;
3703 if (!capable(CAP_SYS_ADMIN
))
3706 spin_lock_irqsave(ioa_cfg
->host
->host_lock
, lock_flags
);
3707 if (!ioa_cfg
->in_reset_reload
)
3708 ipr_initiate_ioa_reset(ioa_cfg
, IPR_SHUTDOWN_NORMAL
);
3709 spin_unlock_irqrestore(ioa_cfg
->host
->host_lock
, lock_flags
);
3710 wait_event(ioa_cfg
->reset_wait_q
, !ioa_cfg
->in_reset_reload
);
3715 static struct device_attribute ipr_ioa_reset_attr
= {
3717 .name
= "reset_host",
3720 .store
= ipr_store_reset_adapter
3723 static int ipr_iopoll(struct irq_poll
*iop
, int budget
);
3725 * ipr_show_iopoll_weight - Show ipr polling mode
3726 * @dev: class device struct
3730 * number of bytes printed to buffer
3732 static ssize_t
ipr_show_iopoll_weight(struct device
*dev
,
3733 struct device_attribute
*attr
, char *buf
)
3735 struct Scsi_Host
*shost
= class_to_shost(dev
);
3736 struct ipr_ioa_cfg
*ioa_cfg
= (struct ipr_ioa_cfg
*)shost
->hostdata
;
3737 unsigned long lock_flags
= 0;
3740 spin_lock_irqsave(shost
->host_lock
, lock_flags
);
3741 len
= snprintf(buf
, PAGE_SIZE
, "%d\n", ioa_cfg
->iopoll_weight
);
3742 spin_unlock_irqrestore(shost
->host_lock
, lock_flags
);
3748 * ipr_store_iopoll_weight - Change the adapter's polling mode
3749 * @dev: class device struct
3753 * number of bytes printed to buffer
3755 static ssize_t
ipr_store_iopoll_weight(struct device
*dev
,
3756 struct device_attribute
*attr
,
3757 const char *buf
, size_t count
)
3759 struct Scsi_Host
*shost
= class_to_shost(dev
);
3760 struct ipr_ioa_cfg
*ioa_cfg
= (struct ipr_ioa_cfg
*)shost
->hostdata
;
3761 unsigned long user_iopoll_weight
;
3762 unsigned long lock_flags
= 0;
3765 if (!ioa_cfg
->sis64
) {
3766 dev_info(&ioa_cfg
->pdev
->dev
, "irq_poll not supported on this adapter\n");
3769 if (kstrtoul(buf
, 10, &user_iopoll_weight
))
3772 if (user_iopoll_weight
> 256) {
3773 dev_info(&ioa_cfg
->pdev
->dev
, "Invalid irq_poll weight. It must be less than 256\n");
3777 if (user_iopoll_weight
== ioa_cfg
->iopoll_weight
) {
3778 dev_info(&ioa_cfg
->pdev
->dev
, "Current irq_poll weight has the same weight\n");
3782 if (ioa_cfg
->iopoll_weight
&& ioa_cfg
->sis64
&& ioa_cfg
->nvectors
> 1) {
3783 for (i
= 1; i
< ioa_cfg
->hrrq_num
; i
++)
3784 irq_poll_disable(&ioa_cfg
->hrrq
[i
].iopoll
);
3787 spin_lock_irqsave(shost
->host_lock
, lock_flags
);
3788 ioa_cfg
->iopoll_weight
= user_iopoll_weight
;
3789 if (ioa_cfg
->iopoll_weight
&& ioa_cfg
->sis64
&& ioa_cfg
->nvectors
> 1) {
3790 for (i
= 1; i
< ioa_cfg
->hrrq_num
; i
++) {
3791 irq_poll_init(&ioa_cfg
->hrrq
[i
].iopoll
,
3792 ioa_cfg
->iopoll_weight
, ipr_iopoll
);
3795 spin_unlock_irqrestore(shost
->host_lock
, lock_flags
);
3800 static struct device_attribute ipr_iopoll_weight_attr
= {
3802 .name
= "iopoll_weight",
3803 .mode
= S_IRUGO
| S_IWUSR
,
3805 .show
= ipr_show_iopoll_weight
,
3806 .store
= ipr_store_iopoll_weight
3810 * ipr_alloc_ucode_buffer - Allocates a microcode download buffer
3811 * @buf_len: buffer length
3813 * Allocates a DMA'able buffer in chunks and assembles a scatter/gather
3814 * list to use for microcode download
3817 * pointer to sglist / NULL on failure
3819 static struct ipr_sglist
*ipr_alloc_ucode_buffer(int buf_len
)
3822 struct ipr_sglist
*sglist
;
3824 /* Get the minimum size per scatter/gather element */
3825 sg_size
= buf_len
/ (IPR_MAX_SGLIST
- 1);
3827 /* Get the actual size per element */
3828 order
= get_order(sg_size
);
3830 /* Allocate a scatter/gather list for the DMA */
3831 sglist
= kzalloc(sizeof(struct ipr_sglist
), GFP_KERNEL
);
3832 if (sglist
== NULL
) {
3836 sglist
->order
= order
;
3837 sglist
->scatterlist
= sgl_alloc_order(buf_len
, order
, false, GFP_KERNEL
,
3839 if (!sglist
->scatterlist
) {
3848 * ipr_free_ucode_buffer - Frees a microcode download buffer
3849 * @p_dnld: scatter/gather list pointer
3851 * Free a DMA'able ucode download buffer previously allocated with
3852 * ipr_alloc_ucode_buffer
3857 static void ipr_free_ucode_buffer(struct ipr_sglist
*sglist
)
3859 sgl_free_order(sglist
->scatterlist
, sglist
->order
);
3864 * ipr_copy_ucode_buffer - Copy user buffer to kernel buffer
3865 * @sglist: scatter/gather list pointer
3866 * @buffer: buffer pointer
3867 * @len: buffer length
3869 * Copy a microcode image from a user buffer into a buffer allocated by
3870 * ipr_alloc_ucode_buffer
3873 * 0 on success / other on failure
3875 static int ipr_copy_ucode_buffer(struct ipr_sglist
*sglist
,
3876 u8
*buffer
, u32 len
)
3878 int bsize_elem
, i
, result
= 0;
3879 struct scatterlist
*scatterlist
;
3882 /* Determine the actual number of bytes per element */
3883 bsize_elem
= PAGE_SIZE
* (1 << sglist
->order
);
3885 scatterlist
= sglist
->scatterlist
;
3887 for (i
= 0; i
< (len
/ bsize_elem
); i
++, buffer
+= bsize_elem
) {
3888 struct page
*page
= sg_page(&scatterlist
[i
]);
3891 memcpy(kaddr
, buffer
, bsize_elem
);
3894 scatterlist
[i
].length
= bsize_elem
;
3902 if (len
% bsize_elem
) {
3903 struct page
*page
= sg_page(&scatterlist
[i
]);
3906 memcpy(kaddr
, buffer
, len
% bsize_elem
);
3909 scatterlist
[i
].length
= len
% bsize_elem
;
3912 sglist
->buffer_len
= len
;
3917 * ipr_build_ucode_ioadl64 - Build a microcode download IOADL
3918 * @ipr_cmd: ipr command struct
3919 * @sglist: scatter/gather list
3921 * Builds a microcode download IOA data list (IOADL).
3924 static void ipr_build_ucode_ioadl64(struct ipr_cmnd
*ipr_cmd
,
3925 struct ipr_sglist
*sglist
)
3927 struct ipr_ioarcb
*ioarcb
= &ipr_cmd
->ioarcb
;
3928 struct ipr_ioadl64_desc
*ioadl64
= ipr_cmd
->i
.ioadl64
;
3929 struct scatterlist
*scatterlist
= sglist
->scatterlist
;
3932 ipr_cmd
->dma_use_sg
= sglist
->num_dma_sg
;
3933 ioarcb
->cmd_pkt
.flags_hi
|= IPR_FLAGS_HI_WRITE_NOT_READ
;
3934 ioarcb
->data_transfer_length
= cpu_to_be32(sglist
->buffer_len
);
3937 cpu_to_be32(sizeof(struct ipr_ioadl64_desc
) * ipr_cmd
->dma_use_sg
);
3938 for (i
= 0; i
< ipr_cmd
->dma_use_sg
; i
++) {
3939 ioadl64
[i
].flags
= cpu_to_be32(IPR_IOADL_FLAGS_WRITE
);
3940 ioadl64
[i
].data_len
= cpu_to_be32(sg_dma_len(&scatterlist
[i
]));
3941 ioadl64
[i
].address
= cpu_to_be64(sg_dma_address(&scatterlist
[i
]));
3944 ioadl64
[i
-1].flags
|= cpu_to_be32(IPR_IOADL_FLAGS_LAST
);
3948 * ipr_build_ucode_ioadl - Build a microcode download IOADL
3949 * @ipr_cmd: ipr command struct
3950 * @sglist: scatter/gather list
3952 * Builds a microcode download IOA data list (IOADL).
3955 static void ipr_build_ucode_ioadl(struct ipr_cmnd
*ipr_cmd
,
3956 struct ipr_sglist
*sglist
)
3958 struct ipr_ioarcb
*ioarcb
= &ipr_cmd
->ioarcb
;
3959 struct ipr_ioadl_desc
*ioadl
= ipr_cmd
->i
.ioadl
;
3960 struct scatterlist
*scatterlist
= sglist
->scatterlist
;
3963 ipr_cmd
->dma_use_sg
= sglist
->num_dma_sg
;
3964 ioarcb
->cmd_pkt
.flags_hi
|= IPR_FLAGS_HI_WRITE_NOT_READ
;
3965 ioarcb
->data_transfer_length
= cpu_to_be32(sglist
->buffer_len
);
3968 cpu_to_be32(sizeof(struct ipr_ioadl_desc
) * ipr_cmd
->dma_use_sg
);
3970 for (i
= 0; i
< ipr_cmd
->dma_use_sg
; i
++) {
3971 ioadl
[i
].flags_and_data_len
=
3972 cpu_to_be32(IPR_IOADL_FLAGS_WRITE
| sg_dma_len(&scatterlist
[i
]));
3974 cpu_to_be32(sg_dma_address(&scatterlist
[i
]));
3977 ioadl
[i
-1].flags_and_data_len
|=
3978 cpu_to_be32(IPR_IOADL_FLAGS_LAST
);
3982 * ipr_update_ioa_ucode - Update IOA's microcode
3983 * @ioa_cfg: ioa config struct
3984 * @sglist: scatter/gather list
3986 * Initiate an adapter reset to update the IOA's microcode
3989 * 0 on success / -EIO on failure
3991 static int ipr_update_ioa_ucode(struct ipr_ioa_cfg
*ioa_cfg
,
3992 struct ipr_sglist
*sglist
)
3994 unsigned long lock_flags
;
3996 spin_lock_irqsave(ioa_cfg
->host
->host_lock
, lock_flags
);
3997 while (ioa_cfg
->in_reset_reload
) {
3998 spin_unlock_irqrestore(ioa_cfg
->host
->host_lock
, lock_flags
);
3999 wait_event(ioa_cfg
->reset_wait_q
, !ioa_cfg
->in_reset_reload
);
4000 spin_lock_irqsave(ioa_cfg
->host
->host_lock
, lock_flags
);
4003 if (ioa_cfg
->ucode_sglist
) {
4004 spin_unlock_irqrestore(ioa_cfg
->host
->host_lock
, lock_flags
);
4005 dev_err(&ioa_cfg
->pdev
->dev
,
4006 "Microcode download already in progress\n");
4010 sglist
->num_dma_sg
= dma_map_sg(&ioa_cfg
->pdev
->dev
,
4011 sglist
->scatterlist
, sglist
->num_sg
,
4014 if (!sglist
->num_dma_sg
) {
4015 spin_unlock_irqrestore(ioa_cfg
->host
->host_lock
, lock_flags
);
4016 dev_err(&ioa_cfg
->pdev
->dev
,
4017 "Failed to map microcode download buffer!\n");
4021 ioa_cfg
->ucode_sglist
= sglist
;
4022 ipr_initiate_ioa_reset(ioa_cfg
, IPR_SHUTDOWN_NORMAL
);
4023 spin_unlock_irqrestore(ioa_cfg
->host
->host_lock
, lock_flags
);
4024 wait_event(ioa_cfg
->reset_wait_q
, !ioa_cfg
->in_reset_reload
);
4026 spin_lock_irqsave(ioa_cfg
->host
->host_lock
, lock_flags
);
4027 ioa_cfg
->ucode_sglist
= NULL
;
4028 spin_unlock_irqrestore(ioa_cfg
->host
->host_lock
, lock_flags
);
4033 * ipr_store_update_fw - Update the firmware on the adapter
4034 * @class_dev: device struct
4036 * @count: buffer size
4038 * This function will update the firmware on the adapter.
4041 * count on success / other on failure
4043 static ssize_t
ipr_store_update_fw(struct device
*dev
,
4044 struct device_attribute
*attr
,
4045 const char *buf
, size_t count
)
4047 struct Scsi_Host
*shost
= class_to_shost(dev
);
4048 struct ipr_ioa_cfg
*ioa_cfg
= (struct ipr_ioa_cfg
*)shost
->hostdata
;
4049 struct ipr_ucode_image_header
*image_hdr
;
4050 const struct firmware
*fw_entry
;
4051 struct ipr_sglist
*sglist
;
4055 int result
, dnld_size
;
4057 if (!capable(CAP_SYS_ADMIN
))
4060 snprintf(fname
, sizeof(fname
), "%s", buf
);
4062 endline
= strchr(fname
, '\n');
4066 if (request_firmware(&fw_entry
, fname
, &ioa_cfg
->pdev
->dev
)) {
4067 dev_err(&ioa_cfg
->pdev
->dev
, "Firmware file %s not found\n", fname
);
4071 image_hdr
= (struct ipr_ucode_image_header
*)fw_entry
->data
;
4073 src
= (u8
*)image_hdr
+ be32_to_cpu(image_hdr
->header_length
);
4074 dnld_size
= fw_entry
->size
- be32_to_cpu(image_hdr
->header_length
);
4075 sglist
= ipr_alloc_ucode_buffer(dnld_size
);
4078 dev_err(&ioa_cfg
->pdev
->dev
, "Microcode buffer allocation failed\n");
4079 release_firmware(fw_entry
);
4083 result
= ipr_copy_ucode_buffer(sglist
, src
, dnld_size
);
4086 dev_err(&ioa_cfg
->pdev
->dev
,
4087 "Microcode buffer copy to DMA buffer failed\n");
4091 ipr_info("Updating microcode, please be patient. This may take up to 30 minutes.\n");
4093 result
= ipr_update_ioa_ucode(ioa_cfg
, sglist
);
4098 ipr_free_ucode_buffer(sglist
);
4099 release_firmware(fw_entry
);
4103 static struct device_attribute ipr_update_fw_attr
= {
4105 .name
= "update_fw",
4108 .store
= ipr_store_update_fw
4112 * ipr_show_fw_type - Show the adapter's firmware type.
4113 * @dev: class device struct
4117 * number of bytes printed to buffer
4119 static ssize_t
ipr_show_fw_type(struct device
*dev
,
4120 struct device_attribute
*attr
, char *buf
)
4122 struct Scsi_Host
*shost
= class_to_shost(dev
);
4123 struct ipr_ioa_cfg
*ioa_cfg
= (struct ipr_ioa_cfg
*)shost
->hostdata
;
4124 unsigned long lock_flags
= 0;
4127 spin_lock_irqsave(ioa_cfg
->host
->host_lock
, lock_flags
);
4128 len
= snprintf(buf
, PAGE_SIZE
, "%d\n", ioa_cfg
->sis64
);
4129 spin_unlock_irqrestore(ioa_cfg
->host
->host_lock
, lock_flags
);
4133 static struct device_attribute ipr_ioa_fw_type_attr
= {
4138 .show
= ipr_show_fw_type
4141 static ssize_t
ipr_read_async_err_log(struct file
*filep
, struct kobject
*kobj
,
4142 struct bin_attribute
*bin_attr
, char *buf
,
4143 loff_t off
, size_t count
)
4145 struct device
*cdev
= container_of(kobj
, struct device
, kobj
);
4146 struct Scsi_Host
*shost
= class_to_shost(cdev
);
4147 struct ipr_ioa_cfg
*ioa_cfg
= (struct ipr_ioa_cfg
*)shost
->hostdata
;
4148 struct ipr_hostrcb
*hostrcb
;
4149 unsigned long lock_flags
= 0;
4152 spin_lock_irqsave(ioa_cfg
->host
->host_lock
, lock_flags
);
4153 hostrcb
= list_first_entry_or_null(&ioa_cfg
->hostrcb_report_q
,
4154 struct ipr_hostrcb
, queue
);
4156 spin_unlock_irqrestore(ioa_cfg
->host
->host_lock
, lock_flags
);
4159 ret
= memory_read_from_buffer(buf
, count
, &off
, &hostrcb
->hcam
,
4160 sizeof(hostrcb
->hcam
));
4161 spin_unlock_irqrestore(ioa_cfg
->host
->host_lock
, lock_flags
);
4165 static ssize_t
ipr_next_async_err_log(struct file
*filep
, struct kobject
*kobj
,
4166 struct bin_attribute
*bin_attr
, char *buf
,
4167 loff_t off
, size_t count
)
4169 struct device
*cdev
= container_of(kobj
, struct device
, kobj
);
4170 struct Scsi_Host
*shost
= class_to_shost(cdev
);
4171 struct ipr_ioa_cfg
*ioa_cfg
= (struct ipr_ioa_cfg
*)shost
->hostdata
;
4172 struct ipr_hostrcb
*hostrcb
;
4173 unsigned long lock_flags
= 0;
4175 spin_lock_irqsave(ioa_cfg
->host
->host_lock
, lock_flags
);
4176 hostrcb
= list_first_entry_or_null(&ioa_cfg
->hostrcb_report_q
,
4177 struct ipr_hostrcb
, queue
);
4179 spin_unlock_irqrestore(ioa_cfg
->host
->host_lock
, lock_flags
);
4183 /* Reclaim hostrcb before exit */
4184 list_move_tail(&hostrcb
->queue
, &ioa_cfg
->hostrcb_free_q
);
4185 spin_unlock_irqrestore(ioa_cfg
->host
->host_lock
, lock_flags
);
4189 static struct bin_attribute ipr_ioa_async_err_log
= {
4191 .name
= "async_err_log",
4192 .mode
= S_IRUGO
| S_IWUSR
,
4195 .read
= ipr_read_async_err_log
,
4196 .write
= ipr_next_async_err_log
4199 static struct device_attribute
*ipr_ioa_attrs
[] = {
4200 &ipr_fw_version_attr
,
4201 &ipr_log_level_attr
,
4202 &ipr_diagnostics_attr
,
4203 &ipr_ioa_state_attr
,
4204 &ipr_ioa_reset_attr
,
4205 &ipr_update_fw_attr
,
4206 &ipr_ioa_fw_type_attr
,
4207 &ipr_iopoll_weight_attr
,
4211 #ifdef CONFIG_SCSI_IPR_DUMP
4213 * ipr_read_dump - Dump the adapter
4214 * @filp: open sysfs file
4215 * @kobj: kobject struct
4216 * @bin_attr: bin_attribute struct
4219 * @count: buffer size
4222 * number of bytes printed to buffer
4224 static ssize_t
ipr_read_dump(struct file
*filp
, struct kobject
*kobj
,
4225 struct bin_attribute
*bin_attr
,
4226 char *buf
, loff_t off
, size_t count
)
4228 struct device
*cdev
= container_of(kobj
, struct device
, kobj
);
4229 struct Scsi_Host
*shost
= class_to_shost(cdev
);
4230 struct ipr_ioa_cfg
*ioa_cfg
= (struct ipr_ioa_cfg
*)shost
->hostdata
;
4231 struct ipr_dump
*dump
;
4232 unsigned long lock_flags
= 0;
4237 if (!capable(CAP_SYS_ADMIN
))
4240 spin_lock_irqsave(ioa_cfg
->host
->host_lock
, lock_flags
);
4241 dump
= ioa_cfg
->dump
;
4243 if (ioa_cfg
->sdt_state
!= DUMP_OBTAINED
|| !dump
) {
4244 spin_unlock_irqrestore(ioa_cfg
->host
->host_lock
, lock_flags
);
4247 kref_get(&dump
->kref
);
4248 spin_unlock_irqrestore(ioa_cfg
->host
->host_lock
, lock_flags
);
4250 if (off
> dump
->driver_dump
.hdr
.len
) {
4251 kref_put(&dump
->kref
, ipr_release_dump
);
4255 if (off
+ count
> dump
->driver_dump
.hdr
.len
) {
4256 count
= dump
->driver_dump
.hdr
.len
- off
;
4260 if (count
&& off
< sizeof(dump
->driver_dump
)) {
4261 if (off
+ count
> sizeof(dump
->driver_dump
))
4262 len
= sizeof(dump
->driver_dump
) - off
;
4265 src
= (u8
*)&dump
->driver_dump
+ off
;
4266 memcpy(buf
, src
, len
);
4272 off
-= sizeof(dump
->driver_dump
);
4275 sdt_end
= offsetof(struct ipr_ioa_dump
, sdt
.entry
) +
4276 (be32_to_cpu(dump
->ioa_dump
.sdt
.hdr
.num_entries_used
) *
4277 sizeof(struct ipr_sdt_entry
));
4279 sdt_end
= offsetof(struct ipr_ioa_dump
, sdt
.entry
) +
4280 (IPR_FMT2_NUM_SDT_ENTRIES
* sizeof(struct ipr_sdt_entry
));
4282 if (count
&& off
< sdt_end
) {
4283 if (off
+ count
> sdt_end
)
4284 len
= sdt_end
- off
;
4287 src
= (u8
*)&dump
->ioa_dump
+ off
;
4288 memcpy(buf
, src
, len
);
4297 if ((off
& PAGE_MASK
) != ((off
+ count
) & PAGE_MASK
))
4298 len
= PAGE_ALIGN(off
) - off
;
4301 src
= (u8
*)dump
->ioa_dump
.ioa_data
[(off
& PAGE_MASK
) >> PAGE_SHIFT
];
4302 src
+= off
& ~PAGE_MASK
;
4303 memcpy(buf
, src
, len
);
4309 kref_put(&dump
->kref
, ipr_release_dump
);
4314 * ipr_alloc_dump - Prepare for adapter dump
4315 * @ioa_cfg: ioa config struct
4318 * 0 on success / other on failure
4320 static int ipr_alloc_dump(struct ipr_ioa_cfg
*ioa_cfg
)
4322 struct ipr_dump
*dump
;
4324 unsigned long lock_flags
= 0;
4326 dump
= kzalloc(sizeof(struct ipr_dump
), GFP_KERNEL
);
4329 ipr_err("Dump memory allocation failed\n");
4334 ioa_data
= vmalloc(array_size(IPR_FMT3_MAX_NUM_DUMP_PAGES
,
4337 ioa_data
= vmalloc(array_size(IPR_FMT2_MAX_NUM_DUMP_PAGES
,
4341 ipr_err("Dump memory allocation failed\n");
4346 dump
->ioa_dump
.ioa_data
= ioa_data
;
4348 kref_init(&dump
->kref
);
4349 dump
->ioa_cfg
= ioa_cfg
;
4351 spin_lock_irqsave(ioa_cfg
->host
->host_lock
, lock_flags
);
4353 if (INACTIVE
!= ioa_cfg
->sdt_state
) {
4354 spin_unlock_irqrestore(ioa_cfg
->host
->host_lock
, lock_flags
);
4355 vfree(dump
->ioa_dump
.ioa_data
);
4360 ioa_cfg
->dump
= dump
;
4361 ioa_cfg
->sdt_state
= WAIT_FOR_DUMP
;
4362 if (ioa_cfg
->hrrq
[IPR_INIT_HRRQ
].ioa_is_dead
&& !ioa_cfg
->dump_taken
) {
4363 ioa_cfg
->dump_taken
= 1;
4364 schedule_work(&ioa_cfg
->work_q
);
4366 spin_unlock_irqrestore(ioa_cfg
->host
->host_lock
, lock_flags
);
4372 * ipr_free_dump - Free adapter dump memory
4373 * @ioa_cfg: ioa config struct
4376 * 0 on success / other on failure
4378 static int ipr_free_dump(struct ipr_ioa_cfg
*ioa_cfg
)
4380 struct ipr_dump
*dump
;
4381 unsigned long lock_flags
= 0;
4385 spin_lock_irqsave(ioa_cfg
->host
->host_lock
, lock_flags
);
4386 dump
= ioa_cfg
->dump
;
4388 spin_unlock_irqrestore(ioa_cfg
->host
->host_lock
, lock_flags
);
4392 ioa_cfg
->dump
= NULL
;
4393 spin_unlock_irqrestore(ioa_cfg
->host
->host_lock
, lock_flags
);
4395 kref_put(&dump
->kref
, ipr_release_dump
);
4402 * ipr_write_dump - Setup dump state of adapter
4403 * @filp: open sysfs file
4404 * @kobj: kobject struct
4405 * @bin_attr: bin_attribute struct
4408 * @count: buffer size
4411 * number of bytes printed to buffer
4413 static ssize_t
ipr_write_dump(struct file
*filp
, struct kobject
*kobj
,
4414 struct bin_attribute
*bin_attr
,
4415 char *buf
, loff_t off
, size_t count
)
4417 struct device
*cdev
= container_of(kobj
, struct device
, kobj
);
4418 struct Scsi_Host
*shost
= class_to_shost(cdev
);
4419 struct ipr_ioa_cfg
*ioa_cfg
= (struct ipr_ioa_cfg
*)shost
->hostdata
;
4422 if (!capable(CAP_SYS_ADMIN
))
4426 rc
= ipr_alloc_dump(ioa_cfg
);
4427 else if (buf
[0] == '0')
4428 rc
= ipr_free_dump(ioa_cfg
);
4438 static struct bin_attribute ipr_dump_attr
= {
4441 .mode
= S_IRUSR
| S_IWUSR
,
4444 .read
= ipr_read_dump
,
4445 .write
= ipr_write_dump
4448 static int ipr_free_dump(struct ipr_ioa_cfg
*ioa_cfg
) { return 0; };
4452 * ipr_change_queue_depth - Change the device's queue depth
4453 * @sdev: scsi device struct
4454 * @qdepth: depth to set
4455 * @reason: calling context
4460 static int ipr_change_queue_depth(struct scsi_device
*sdev
, int qdepth
)
4462 struct ipr_ioa_cfg
*ioa_cfg
= (struct ipr_ioa_cfg
*)sdev
->host
->hostdata
;
4463 struct ipr_resource_entry
*res
;
4464 unsigned long lock_flags
= 0;
4466 spin_lock_irqsave(ioa_cfg
->host
->host_lock
, lock_flags
);
4467 res
= (struct ipr_resource_entry
*)sdev
->hostdata
;
4469 if (res
&& ipr_is_gata(res
) && qdepth
> IPR_MAX_CMD_PER_ATA_LUN
)
4470 qdepth
= IPR_MAX_CMD_PER_ATA_LUN
;
4471 spin_unlock_irqrestore(ioa_cfg
->host
->host_lock
, lock_flags
);
4473 scsi_change_queue_depth(sdev
, qdepth
);
4474 return sdev
->queue_depth
;
4478 * ipr_show_adapter_handle - Show the adapter's resource handle for this device
4479 * @dev: device struct
4480 * @attr: device attribute structure
4484 * number of bytes printed to buffer
4486 static ssize_t
ipr_show_adapter_handle(struct device
*dev
, struct device_attribute
*attr
, char *buf
)
4488 struct scsi_device
*sdev
= to_scsi_device(dev
);
4489 struct ipr_ioa_cfg
*ioa_cfg
= (struct ipr_ioa_cfg
*)sdev
->host
->hostdata
;
4490 struct ipr_resource_entry
*res
;
4491 unsigned long lock_flags
= 0;
4492 ssize_t len
= -ENXIO
;
4494 spin_lock_irqsave(ioa_cfg
->host
->host_lock
, lock_flags
);
4495 res
= (struct ipr_resource_entry
*)sdev
->hostdata
;
4497 len
= snprintf(buf
, PAGE_SIZE
, "%08X\n", res
->res_handle
);
4498 spin_unlock_irqrestore(ioa_cfg
->host
->host_lock
, lock_flags
);
4502 static struct device_attribute ipr_adapter_handle_attr
= {
4504 .name
= "adapter_handle",
4507 .show
= ipr_show_adapter_handle
4511 * ipr_show_resource_path - Show the resource path or the resource address for
4513 * @dev: device struct
4514 * @attr: device attribute structure
4518 * number of bytes printed to buffer
4520 static ssize_t
ipr_show_resource_path(struct device
*dev
, struct device_attribute
*attr
, char *buf
)
4522 struct scsi_device
*sdev
= to_scsi_device(dev
);
4523 struct ipr_ioa_cfg
*ioa_cfg
= (struct ipr_ioa_cfg
*)sdev
->host
->hostdata
;
4524 struct ipr_resource_entry
*res
;
4525 unsigned long lock_flags
= 0;
4526 ssize_t len
= -ENXIO
;
4527 char buffer
[IPR_MAX_RES_PATH_LENGTH
];
4529 spin_lock_irqsave(ioa_cfg
->host
->host_lock
, lock_flags
);
4530 res
= (struct ipr_resource_entry
*)sdev
->hostdata
;
4531 if (res
&& ioa_cfg
->sis64
)
4532 len
= snprintf(buf
, PAGE_SIZE
, "%s\n",
4533 __ipr_format_res_path(res
->res_path
, buffer
,
4536 len
= snprintf(buf
, PAGE_SIZE
, "%d:%d:%d:%d\n", ioa_cfg
->host
->host_no
,
4537 res
->bus
, res
->target
, res
->lun
);
4539 spin_unlock_irqrestore(ioa_cfg
->host
->host_lock
, lock_flags
);
4543 static struct device_attribute ipr_resource_path_attr
= {
4545 .name
= "resource_path",
4548 .show
= ipr_show_resource_path
4552 * ipr_show_device_id - Show the device_id for this device.
4553 * @dev: device struct
4554 * @attr: device attribute structure
4558 * number of bytes printed to buffer
4560 static ssize_t
ipr_show_device_id(struct device
*dev
, struct device_attribute
*attr
, char *buf
)
4562 struct scsi_device
*sdev
= to_scsi_device(dev
);
4563 struct ipr_ioa_cfg
*ioa_cfg
= (struct ipr_ioa_cfg
*)sdev
->host
->hostdata
;
4564 struct ipr_resource_entry
*res
;
4565 unsigned long lock_flags
= 0;
4566 ssize_t len
= -ENXIO
;
4568 spin_lock_irqsave(ioa_cfg
->host
->host_lock
, lock_flags
);
4569 res
= (struct ipr_resource_entry
*)sdev
->hostdata
;
4570 if (res
&& ioa_cfg
->sis64
)
4571 len
= snprintf(buf
, PAGE_SIZE
, "0x%llx\n", be64_to_cpu(res
->dev_id
));
4573 len
= snprintf(buf
, PAGE_SIZE
, "0x%llx\n", res
->lun_wwn
);
4575 spin_unlock_irqrestore(ioa_cfg
->host
->host_lock
, lock_flags
);
4579 static struct device_attribute ipr_device_id_attr
= {
4581 .name
= "device_id",
4584 .show
= ipr_show_device_id
4588 * ipr_show_resource_type - Show the resource type for this device.
4589 * @dev: device struct
4590 * @attr: device attribute structure
4594 * number of bytes printed to buffer
4596 static ssize_t
ipr_show_resource_type(struct device
*dev
, struct device_attribute
*attr
, char *buf
)
4598 struct scsi_device
*sdev
= to_scsi_device(dev
);
4599 struct ipr_ioa_cfg
*ioa_cfg
= (struct ipr_ioa_cfg
*)sdev
->host
->hostdata
;
4600 struct ipr_resource_entry
*res
;
4601 unsigned long lock_flags
= 0;
4602 ssize_t len
= -ENXIO
;
4604 spin_lock_irqsave(ioa_cfg
->host
->host_lock
, lock_flags
);
4605 res
= (struct ipr_resource_entry
*)sdev
->hostdata
;
4608 len
= snprintf(buf
, PAGE_SIZE
, "%x\n", res
->type
);
4610 spin_unlock_irqrestore(ioa_cfg
->host
->host_lock
, lock_flags
);
4614 static struct device_attribute ipr_resource_type_attr
= {
4616 .name
= "resource_type",
4619 .show
= ipr_show_resource_type
4623 * ipr_show_raw_mode - Show the adapter's raw mode
4624 * @dev: class device struct
4628 * number of bytes printed to buffer
4630 static ssize_t
ipr_show_raw_mode(struct device
*dev
,
4631 struct device_attribute
*attr
, char *buf
)
4633 struct scsi_device
*sdev
= to_scsi_device(dev
);
4634 struct ipr_ioa_cfg
*ioa_cfg
= (struct ipr_ioa_cfg
*)sdev
->host
->hostdata
;
4635 struct ipr_resource_entry
*res
;
4636 unsigned long lock_flags
= 0;
4639 spin_lock_irqsave(ioa_cfg
->host
->host_lock
, lock_flags
);
4640 res
= (struct ipr_resource_entry
*)sdev
->hostdata
;
4642 len
= snprintf(buf
, PAGE_SIZE
, "%d\n", res
->raw_mode
);
4645 spin_unlock_irqrestore(ioa_cfg
->host
->host_lock
, lock_flags
);
4650 * ipr_store_raw_mode - Change the adapter's raw mode
4651 * @dev: class device struct
4655 * number of bytes printed to buffer
4657 static ssize_t
ipr_store_raw_mode(struct device
*dev
,
4658 struct device_attribute
*attr
,
4659 const char *buf
, size_t count
)
4661 struct scsi_device
*sdev
= to_scsi_device(dev
);
4662 struct ipr_ioa_cfg
*ioa_cfg
= (struct ipr_ioa_cfg
*)sdev
->host
->hostdata
;
4663 struct ipr_resource_entry
*res
;
4664 unsigned long lock_flags
= 0;
4667 spin_lock_irqsave(ioa_cfg
->host
->host_lock
, lock_flags
);
4668 res
= (struct ipr_resource_entry
*)sdev
->hostdata
;
4670 if (ipr_is_af_dasd_device(res
)) {
4671 res
->raw_mode
= simple_strtoul(buf
, NULL
, 10);
4674 sdev_printk(KERN_INFO
, res
->sdev
, "raw mode is %s\n",
4675 res
->raw_mode
? "enabled" : "disabled");
4680 spin_unlock_irqrestore(ioa_cfg
->host
->host_lock
, lock_flags
);
4684 static struct device_attribute ipr_raw_mode_attr
= {
4687 .mode
= S_IRUGO
| S_IWUSR
,
4689 .show
= ipr_show_raw_mode
,
4690 .store
= ipr_store_raw_mode
4693 static struct device_attribute
*ipr_dev_attrs
[] = {
4694 &ipr_adapter_handle_attr
,
4695 &ipr_resource_path_attr
,
4696 &ipr_device_id_attr
,
4697 &ipr_resource_type_attr
,
4703 * ipr_biosparam - Return the HSC mapping
4704 * @sdev: scsi device struct
4705 * @block_device: block device pointer
4706 * @capacity: capacity of the device
4707 * @parm: Array containing returned HSC values.
4709 * This function generates the HSC parms that fdisk uses.
4710 * We want to make sure we return something that places partitions
4711 * on 4k boundaries for best performance with the IOA.
4716 static int ipr_biosparam(struct scsi_device
*sdev
,
4717 struct block_device
*block_device
,
4718 sector_t capacity
, int *parm
)
4726 cylinders
= capacity
;
4727 sector_div(cylinders
, (128 * 32));
4732 parm
[2] = cylinders
;
4738 * ipr_find_starget - Find target based on bus/target.
4739 * @starget: scsi target struct
4742 * resource entry pointer if found / NULL if not found
4744 static struct ipr_resource_entry
*ipr_find_starget(struct scsi_target
*starget
)
4746 struct Scsi_Host
*shost
= dev_to_shost(&starget
->dev
);
4747 struct ipr_ioa_cfg
*ioa_cfg
= (struct ipr_ioa_cfg
*) shost
->hostdata
;
4748 struct ipr_resource_entry
*res
;
4750 list_for_each_entry(res
, &ioa_cfg
->used_res_q
, queue
) {
4751 if ((res
->bus
== starget
->channel
) &&
4752 (res
->target
== starget
->id
)) {
4760 static struct ata_port_info sata_port_info
;
4763 * ipr_target_alloc - Prepare for commands to a SCSI target
4764 * @starget: scsi target struct
4766 * If the device is a SATA device, this function allocates an
4767 * ATA port with libata, else it does nothing.
4770 * 0 on success / non-0 on failure
4772 static int ipr_target_alloc(struct scsi_target
*starget
)
4774 struct Scsi_Host
*shost
= dev_to_shost(&starget
->dev
);
4775 struct ipr_ioa_cfg
*ioa_cfg
= (struct ipr_ioa_cfg
*) shost
->hostdata
;
4776 struct ipr_sata_port
*sata_port
;
4777 struct ata_port
*ap
;
4778 struct ipr_resource_entry
*res
;
4779 unsigned long lock_flags
;
4781 spin_lock_irqsave(ioa_cfg
->host
->host_lock
, lock_flags
);
4782 res
= ipr_find_starget(starget
);
4783 starget
->hostdata
= NULL
;
4785 if (res
&& ipr_is_gata(res
)) {
4786 spin_unlock_irqrestore(ioa_cfg
->host
->host_lock
, lock_flags
);
4787 sata_port
= kzalloc(sizeof(*sata_port
), GFP_KERNEL
);
4791 ap
= ata_sas_port_alloc(&ioa_cfg
->ata_host
, &sata_port_info
, shost
);
4793 spin_lock_irqsave(ioa_cfg
->host
->host_lock
, lock_flags
);
4794 sata_port
->ioa_cfg
= ioa_cfg
;
4796 sata_port
->res
= res
;
4798 res
->sata_port
= sata_port
;
4799 ap
->private_data
= sata_port
;
4800 starget
->hostdata
= sata_port
;
4806 spin_unlock_irqrestore(ioa_cfg
->host
->host_lock
, lock_flags
);
4812 * ipr_target_destroy - Destroy a SCSI target
4813 * @starget: scsi target struct
4815 * If the device was a SATA device, this function frees the libata
4816 * ATA port, else it does nothing.
4819 static void ipr_target_destroy(struct scsi_target
*starget
)
4821 struct ipr_sata_port
*sata_port
= starget
->hostdata
;
4822 struct Scsi_Host
*shost
= dev_to_shost(&starget
->dev
);
4823 struct ipr_ioa_cfg
*ioa_cfg
= (struct ipr_ioa_cfg
*) shost
->hostdata
;
4825 if (ioa_cfg
->sis64
) {
4826 if (!ipr_find_starget(starget
)) {
4827 if (starget
->channel
== IPR_ARRAY_VIRTUAL_BUS
)
4828 clear_bit(starget
->id
, ioa_cfg
->array_ids
);
4829 else if (starget
->channel
== IPR_VSET_VIRTUAL_BUS
)
4830 clear_bit(starget
->id
, ioa_cfg
->vset_ids
);
4831 else if (starget
->channel
== 0)
4832 clear_bit(starget
->id
, ioa_cfg
->target_ids
);
4837 starget
->hostdata
= NULL
;
4838 ata_sas_port_destroy(sata_port
->ap
);
4844 * ipr_find_sdev - Find device based on bus/target/lun.
4845 * @sdev: scsi device struct
4848 * resource entry pointer if found / NULL if not found
4850 static struct ipr_resource_entry
*ipr_find_sdev(struct scsi_device
*sdev
)
4852 struct ipr_ioa_cfg
*ioa_cfg
= (struct ipr_ioa_cfg
*) sdev
->host
->hostdata
;
4853 struct ipr_resource_entry
*res
;
4855 list_for_each_entry(res
, &ioa_cfg
->used_res_q
, queue
) {
4856 if ((res
->bus
== sdev
->channel
) &&
4857 (res
->target
== sdev
->id
) &&
4858 (res
->lun
== sdev
->lun
))
4866 * ipr_slave_destroy - Unconfigure a SCSI device
4867 * @sdev: scsi device struct
4872 static void ipr_slave_destroy(struct scsi_device
*sdev
)
4874 struct ipr_resource_entry
*res
;
4875 struct ipr_ioa_cfg
*ioa_cfg
;
4876 unsigned long lock_flags
= 0;
4878 ioa_cfg
= (struct ipr_ioa_cfg
*) sdev
->host
->hostdata
;
4880 spin_lock_irqsave(ioa_cfg
->host
->host_lock
, lock_flags
);
4881 res
= (struct ipr_resource_entry
*) sdev
->hostdata
;
4884 res
->sata_port
->ap
->link
.device
[0].class = ATA_DEV_NONE
;
4885 sdev
->hostdata
= NULL
;
4887 res
->sata_port
= NULL
;
4889 spin_unlock_irqrestore(ioa_cfg
->host
->host_lock
, lock_flags
);
4893 * ipr_slave_configure - Configure a SCSI device
4894 * @sdev: scsi device struct
4896 * This function configures the specified scsi device.
4901 static int ipr_slave_configure(struct scsi_device
*sdev
)
4903 struct ipr_ioa_cfg
*ioa_cfg
= (struct ipr_ioa_cfg
*) sdev
->host
->hostdata
;
4904 struct ipr_resource_entry
*res
;
4905 struct ata_port
*ap
= NULL
;
4906 unsigned long lock_flags
= 0;
4907 char buffer
[IPR_MAX_RES_PATH_LENGTH
];
4909 spin_lock_irqsave(ioa_cfg
->host
->host_lock
, lock_flags
);
4910 res
= sdev
->hostdata
;
4912 if (ipr_is_af_dasd_device(res
))
4913 sdev
->type
= TYPE_RAID
;
4914 if (ipr_is_af_dasd_device(res
) || ipr_is_ioa_resource(res
)) {
4915 sdev
->scsi_level
= 4;
4916 sdev
->no_uld_attach
= 1;
4918 if (ipr_is_vset_device(res
)) {
4919 sdev
->scsi_level
= SCSI_SPC_3
;
4920 sdev
->no_report_opcodes
= 1;
4921 blk_queue_rq_timeout(sdev
->request_queue
,
4922 IPR_VSET_RW_TIMEOUT
);
4923 blk_queue_max_hw_sectors(sdev
->request_queue
, IPR_VSET_MAX_SECTORS
);
4925 if (ipr_is_gata(res
) && res
->sata_port
)
4926 ap
= res
->sata_port
->ap
;
4927 spin_unlock_irqrestore(ioa_cfg
->host
->host_lock
, lock_flags
);
4930 scsi_change_queue_depth(sdev
, IPR_MAX_CMD_PER_ATA_LUN
);
4931 ata_sas_slave_configure(sdev
, ap
);
4935 sdev_printk(KERN_INFO
, sdev
, "Resource path: %s\n",
4936 ipr_format_res_path(ioa_cfg
,
4937 res
->res_path
, buffer
, sizeof(buffer
)));
4940 spin_unlock_irqrestore(ioa_cfg
->host
->host_lock
, lock_flags
);
4945 * ipr_ata_slave_alloc - Prepare for commands to a SATA device
4946 * @sdev: scsi device struct
4948 * This function initializes an ATA port so that future commands
4949 * sent through queuecommand will work.
4954 static int ipr_ata_slave_alloc(struct scsi_device
*sdev
)
4956 struct ipr_sata_port
*sata_port
= NULL
;
4960 if (sdev
->sdev_target
)
4961 sata_port
= sdev
->sdev_target
->hostdata
;
4963 rc
= ata_sas_port_init(sata_port
->ap
);
4965 rc
= ata_sas_sync_probe(sata_port
->ap
);
4969 ipr_slave_destroy(sdev
);
4976 * ipr_slave_alloc - Prepare for commands to a device.
4977 * @sdev: scsi device struct
4979 * This function saves a pointer to the resource entry
4980 * in the scsi device struct if the device exists. We
4981 * can then use this pointer in ipr_queuecommand when
4982 * handling new commands.
4985 * 0 on success / -ENXIO if device does not exist
4987 static int ipr_slave_alloc(struct scsi_device
*sdev
)
4989 struct ipr_ioa_cfg
*ioa_cfg
= (struct ipr_ioa_cfg
*) sdev
->host
->hostdata
;
4990 struct ipr_resource_entry
*res
;
4991 unsigned long lock_flags
;
4994 sdev
->hostdata
= NULL
;
4996 spin_lock_irqsave(ioa_cfg
->host
->host_lock
, lock_flags
);
4998 res
= ipr_find_sdev(sdev
);
5003 sdev
->hostdata
= res
;
5004 if (!ipr_is_naca_model(res
))
5005 res
->needs_sync_complete
= 1;
5007 if (ipr_is_gata(res
)) {
5008 spin_unlock_irqrestore(ioa_cfg
->host
->host_lock
, lock_flags
);
5009 return ipr_ata_slave_alloc(sdev
);
5013 spin_unlock_irqrestore(ioa_cfg
->host
->host_lock
, lock_flags
);
5019 * ipr_match_lun - Match function for specified LUN
5020 * @ipr_cmd: ipr command struct
5021 * @device: device to match (sdev)
5024 * 1 if command matches sdev / 0 if command does not match sdev
5026 static int ipr_match_lun(struct ipr_cmnd
*ipr_cmd
, void *device
)
5028 if (ipr_cmd
->scsi_cmd
&& ipr_cmd
->scsi_cmd
->device
== device
)
5034 * ipr_cmnd_is_free - Check if a command is free or not
5035 * @ipr_cmd ipr command struct
5040 static bool ipr_cmnd_is_free(struct ipr_cmnd
*ipr_cmd
)
5042 struct ipr_cmnd
*loop_cmd
;
5044 list_for_each_entry(loop_cmd
, &ipr_cmd
->hrrq
->hrrq_free_q
, queue
) {
5045 if (loop_cmd
== ipr_cmd
)
5053 * ipr_match_res - Match function for specified resource entry
5054 * @ipr_cmd: ipr command struct
5055 * @resource: resource entry to match
5058 * 1 if command matches sdev / 0 if command does not match sdev
5060 static int ipr_match_res(struct ipr_cmnd
*ipr_cmd
, void *resource
)
5062 struct ipr_resource_entry
*res
= resource
;
5064 if (res
&& ipr_cmd
->ioarcb
.res_handle
== res
->res_handle
)
5070 * ipr_wait_for_ops - Wait for matching commands to complete
5071 * @ipr_cmd: ipr command struct
5072 * @device: device to match (sdev)
5073 * @match: match function to use
5078 static int ipr_wait_for_ops(struct ipr_ioa_cfg
*ioa_cfg
, void *device
,
5079 int (*match
)(struct ipr_cmnd
*, void *))
5081 struct ipr_cmnd
*ipr_cmd
;
5083 unsigned long flags
;
5084 struct ipr_hrr_queue
*hrrq
;
5085 signed long timeout
= IPR_ABORT_TASK_TIMEOUT
;
5086 DECLARE_COMPLETION_ONSTACK(comp
);
5092 for_each_hrrq(hrrq
, ioa_cfg
) {
5093 spin_lock_irqsave(hrrq
->lock
, flags
);
5094 for (i
= hrrq
->min_cmd_id
; i
<= hrrq
->max_cmd_id
; i
++) {
5095 ipr_cmd
= ioa_cfg
->ipr_cmnd_list
[i
];
5096 if (!ipr_cmnd_is_free(ipr_cmd
)) {
5097 if (match(ipr_cmd
, device
)) {
5098 ipr_cmd
->eh_comp
= &comp
;
5103 spin_unlock_irqrestore(hrrq
->lock
, flags
);
5107 timeout
= wait_for_completion_timeout(&comp
, timeout
);
5112 for_each_hrrq(hrrq
, ioa_cfg
) {
5113 spin_lock_irqsave(hrrq
->lock
, flags
);
5114 for (i
= hrrq
->min_cmd_id
; i
<= hrrq
->max_cmd_id
; i
++) {
5115 ipr_cmd
= ioa_cfg
->ipr_cmnd_list
[i
];
5116 if (!ipr_cmnd_is_free(ipr_cmd
)) {
5117 if (match(ipr_cmd
, device
)) {
5118 ipr_cmd
->eh_comp
= NULL
;
5123 spin_unlock_irqrestore(hrrq
->lock
, flags
);
5127 dev_err(&ioa_cfg
->pdev
->dev
, "Timed out waiting for aborted commands\n");
5129 return wait
? FAILED
: SUCCESS
;
5138 static int ipr_eh_host_reset(struct scsi_cmnd
*cmd
)
5140 struct ipr_ioa_cfg
*ioa_cfg
;
5141 unsigned long lock_flags
= 0;
5145 ioa_cfg
= (struct ipr_ioa_cfg
*) cmd
->device
->host
->hostdata
;
5146 spin_lock_irqsave(ioa_cfg
->host
->host_lock
, lock_flags
);
5148 if (!ioa_cfg
->in_reset_reload
&& !ioa_cfg
->hrrq
[IPR_INIT_HRRQ
].ioa_is_dead
) {
5149 ipr_initiate_ioa_reset(ioa_cfg
, IPR_SHUTDOWN_ABBREV
);
5150 dev_err(&ioa_cfg
->pdev
->dev
,
5151 "Adapter being reset as a result of error recovery.\n");
5153 if (WAIT_FOR_DUMP
== ioa_cfg
->sdt_state
)
5154 ioa_cfg
->sdt_state
= GET_DUMP
;
5157 spin_unlock_irqrestore(ioa_cfg
->host
->host_lock
, lock_flags
);
5158 wait_event(ioa_cfg
->reset_wait_q
, !ioa_cfg
->in_reset_reload
);
5159 spin_lock_irqsave(ioa_cfg
->host
->host_lock
, lock_flags
);
5161 /* If we got hit with a host reset while we were already resetting
5162 the adapter for some reason, and the reset failed. */
5163 if (ioa_cfg
->hrrq
[IPR_INIT_HRRQ
].ioa_is_dead
) {
5168 spin_unlock_irqrestore(ioa_cfg
->host
->host_lock
, lock_flags
);
5174 * ipr_device_reset - Reset the device
5175 * @ioa_cfg: ioa config struct
5176 * @res: resource entry struct
5178 * This function issues a device reset to the affected device.
5179 * If the device is a SCSI device, a LUN reset will be sent
5180 * to the device first. If that does not work, a target reset
5181 * will be sent. If the device is a SATA device, a PHY reset will
5185 * 0 on success / non-zero on failure
5187 static int ipr_device_reset(struct ipr_ioa_cfg
*ioa_cfg
,
5188 struct ipr_resource_entry
*res
)
5190 struct ipr_cmnd
*ipr_cmd
;
5191 struct ipr_ioarcb
*ioarcb
;
5192 struct ipr_cmd_pkt
*cmd_pkt
;
5193 struct ipr_ioarcb_ata_regs
*regs
;
5197 ipr_cmd
= ipr_get_free_ipr_cmnd(ioa_cfg
);
5198 ioarcb
= &ipr_cmd
->ioarcb
;
5199 cmd_pkt
= &ioarcb
->cmd_pkt
;
5201 if (ipr_cmd
->ioa_cfg
->sis64
) {
5202 regs
= &ipr_cmd
->i
.ata_ioadl
.regs
;
5203 ioarcb
->add_cmd_parms_offset
= cpu_to_be16(sizeof(*ioarcb
));
5205 regs
= &ioarcb
->u
.add_data
.u
.regs
;
5207 ioarcb
->res_handle
= res
->res_handle
;
5208 cmd_pkt
->request_type
= IPR_RQTYPE_IOACMD
;
5209 cmd_pkt
->cdb
[0] = IPR_RESET_DEVICE
;
5210 if (ipr_is_gata(res
)) {
5211 cmd_pkt
->cdb
[2] = IPR_ATA_PHY_RESET
;
5212 ioarcb
->add_cmd_parms_len
= cpu_to_be16(sizeof(regs
->flags
));
5213 regs
->flags
|= IPR_ATA_FLAG_STATUS_ON_GOOD_COMPLETION
;
5216 ipr_send_blocking_cmd(ipr_cmd
, ipr_timeout
, IPR_DEVICE_RESET_TIMEOUT
);
5217 ioasc
= be32_to_cpu(ipr_cmd
->s
.ioasa
.hdr
.ioasc
);
5218 list_add_tail(&ipr_cmd
->queue
, &ipr_cmd
->hrrq
->hrrq_free_q
);
5219 if (ipr_is_gata(res
) && res
->sata_port
&& ioasc
!= IPR_IOASC_IOA_WAS_RESET
) {
5220 if (ipr_cmd
->ioa_cfg
->sis64
)
5221 memcpy(&res
->sata_port
->ioasa
, &ipr_cmd
->s
.ioasa64
.u
.gata
,
5222 sizeof(struct ipr_ioasa_gata
));
5224 memcpy(&res
->sata_port
->ioasa
, &ipr_cmd
->s
.ioasa
.u
.gata
,
5225 sizeof(struct ipr_ioasa_gata
));
5229 return IPR_IOASC_SENSE_KEY(ioasc
) ? -EIO
: 0;
5233 * ipr_sata_reset - Reset the SATA port
5234 * @link: SATA link to reset
5235 * @classes: class of the attached device
5237 * This function issues a SATA phy reset to the affected ATA link.
5240 * 0 on success / non-zero on failure
5242 static int ipr_sata_reset(struct ata_link
*link
, unsigned int *classes
,
5243 unsigned long deadline
)
5245 struct ipr_sata_port
*sata_port
= link
->ap
->private_data
;
5246 struct ipr_ioa_cfg
*ioa_cfg
= sata_port
->ioa_cfg
;
5247 struct ipr_resource_entry
*res
;
5248 unsigned long lock_flags
= 0;
5249 int rc
= -ENXIO
, ret
;
5252 spin_lock_irqsave(ioa_cfg
->host
->host_lock
, lock_flags
);
5253 while (ioa_cfg
->in_reset_reload
) {
5254 spin_unlock_irqrestore(ioa_cfg
->host
->host_lock
, lock_flags
);
5255 wait_event(ioa_cfg
->reset_wait_q
, !ioa_cfg
->in_reset_reload
);
5256 spin_lock_irqsave(ioa_cfg
->host
->host_lock
, lock_flags
);
5259 res
= sata_port
->res
;
5261 rc
= ipr_device_reset(ioa_cfg
, res
);
5262 *classes
= res
->ata_class
;
5263 spin_unlock_irqrestore(ioa_cfg
->host
->host_lock
, lock_flags
);
5265 ret
= ipr_wait_for_ops(ioa_cfg
, res
, ipr_match_res
);
5266 if (ret
!= SUCCESS
) {
5267 spin_lock_irqsave(ioa_cfg
->host
->host_lock
, lock_flags
);
5268 ipr_initiate_ioa_reset(ioa_cfg
, IPR_SHUTDOWN_ABBREV
);
5269 spin_unlock_irqrestore(ioa_cfg
->host
->host_lock
, lock_flags
);
5271 wait_event(ioa_cfg
->reset_wait_q
, !ioa_cfg
->in_reset_reload
);
5274 spin_unlock_irqrestore(ioa_cfg
->host
->host_lock
, lock_flags
);
5281 * ipr_eh_dev_reset - Reset the device
5282 * @scsi_cmd: scsi command struct
5284 * This function issues a device reset to the affected device.
5285 * A LUN reset will be sent to the device first. If that does
5286 * not work, a target reset will be sent.
5291 static int __ipr_eh_dev_reset(struct scsi_cmnd
*scsi_cmd
)
5293 struct ipr_cmnd
*ipr_cmd
;
5294 struct ipr_ioa_cfg
*ioa_cfg
;
5295 struct ipr_resource_entry
*res
;
5296 struct ata_port
*ap
;
5298 struct ipr_hrr_queue
*hrrq
;
5301 ioa_cfg
= (struct ipr_ioa_cfg
*) scsi_cmd
->device
->host
->hostdata
;
5302 res
= scsi_cmd
->device
->hostdata
;
5305 * If we are currently going through reset/reload, return failed. This will force the
5306 * mid-layer to call ipr_eh_host_reset, which will then go to sleep and wait for the
5309 if (ioa_cfg
->in_reset_reload
)
5311 if (ioa_cfg
->hrrq
[IPR_INIT_HRRQ
].ioa_is_dead
)
5314 for_each_hrrq(hrrq
, ioa_cfg
) {
5315 spin_lock(&hrrq
->_lock
);
5316 for (i
= hrrq
->min_cmd_id
; i
<= hrrq
->max_cmd_id
; i
++) {
5317 ipr_cmd
= ioa_cfg
->ipr_cmnd_list
[i
];
5319 if (ipr_cmd
->ioarcb
.res_handle
== res
->res_handle
) {
5322 if (ipr_cmnd_is_free(ipr_cmd
))
5325 ipr_cmd
->done
= ipr_sata_eh_done
;
5326 if (!(ipr_cmd
->qc
->flags
& ATA_QCFLAG_FAILED
)) {
5327 ipr_cmd
->qc
->err_mask
|= AC_ERR_TIMEOUT
;
5328 ipr_cmd
->qc
->flags
|= ATA_QCFLAG_FAILED
;
5332 spin_unlock(&hrrq
->_lock
);
5334 res
->resetting_device
= 1;
5335 scmd_printk(KERN_ERR
, scsi_cmd
, "Resetting device\n");
5337 if (ipr_is_gata(res
) && res
->sata_port
) {
5338 ap
= res
->sata_port
->ap
;
5339 spin_unlock_irq(scsi_cmd
->device
->host
->host_lock
);
5340 ata_std_error_handler(ap
);
5341 spin_lock_irq(scsi_cmd
->device
->host
->host_lock
);
5343 rc
= ipr_device_reset(ioa_cfg
, res
);
5344 res
->resetting_device
= 0;
5345 res
->reset_occurred
= 1;
5348 return rc
? FAILED
: SUCCESS
;
5351 static int ipr_eh_dev_reset(struct scsi_cmnd
*cmd
)
5354 struct ipr_ioa_cfg
*ioa_cfg
;
5355 struct ipr_resource_entry
*res
;
5357 ioa_cfg
= (struct ipr_ioa_cfg
*) cmd
->device
->host
->hostdata
;
5358 res
= cmd
->device
->hostdata
;
5363 spin_lock_irq(cmd
->device
->host
->host_lock
);
5364 rc
= __ipr_eh_dev_reset(cmd
);
5365 spin_unlock_irq(cmd
->device
->host
->host_lock
);
5367 if (rc
== SUCCESS
) {
5368 if (ipr_is_gata(res
) && res
->sata_port
)
5369 rc
= ipr_wait_for_ops(ioa_cfg
, res
, ipr_match_res
);
5371 rc
= ipr_wait_for_ops(ioa_cfg
, cmd
->device
, ipr_match_lun
);
5378 * ipr_bus_reset_done - Op done function for bus reset.
5379 * @ipr_cmd: ipr command struct
5381 * This function is the op done function for a bus reset
5386 static void ipr_bus_reset_done(struct ipr_cmnd
*ipr_cmd
)
5388 struct ipr_ioa_cfg
*ioa_cfg
= ipr_cmd
->ioa_cfg
;
5389 struct ipr_resource_entry
*res
;
5392 if (!ioa_cfg
->sis64
)
5393 list_for_each_entry(res
, &ioa_cfg
->used_res_q
, queue
) {
5394 if (res
->res_handle
== ipr_cmd
->ioarcb
.res_handle
) {
5395 scsi_report_bus_reset(ioa_cfg
->host
, res
->bus
);
5401 * If abort has not completed, indicate the reset has, else call the
5402 * abort's done function to wake the sleeping eh thread
5404 if (ipr_cmd
->sibling
->sibling
)
5405 ipr_cmd
->sibling
->sibling
= NULL
;
5407 ipr_cmd
->sibling
->done(ipr_cmd
->sibling
);
5409 list_add_tail(&ipr_cmd
->queue
, &ipr_cmd
->hrrq
->hrrq_free_q
);
5414 * ipr_abort_timeout - An abort task has timed out
5415 * @ipr_cmd: ipr command struct
5417 * This function handles when an abort task times out. If this
5418 * happens we issue a bus reset since we have resources tied
5419 * up that must be freed before returning to the midlayer.
5424 static void ipr_abort_timeout(struct timer_list
*t
)
5426 struct ipr_cmnd
*ipr_cmd
= from_timer(ipr_cmd
, t
, timer
);
5427 struct ipr_cmnd
*reset_cmd
;
5428 struct ipr_ioa_cfg
*ioa_cfg
= ipr_cmd
->ioa_cfg
;
5429 struct ipr_cmd_pkt
*cmd_pkt
;
5430 unsigned long lock_flags
= 0;
5433 spin_lock_irqsave(ioa_cfg
->host
->host_lock
, lock_flags
);
5434 if (ipr_cmd
->completion
.done
|| ioa_cfg
->in_reset_reload
) {
5435 spin_unlock_irqrestore(ioa_cfg
->host
->host_lock
, lock_flags
);
5439 sdev_printk(KERN_ERR
, ipr_cmd
->u
.sdev
, "Abort timed out. Resetting bus.\n");
5440 reset_cmd
= ipr_get_free_ipr_cmnd(ioa_cfg
);
5441 ipr_cmd
->sibling
= reset_cmd
;
5442 reset_cmd
->sibling
= ipr_cmd
;
5443 reset_cmd
->ioarcb
.res_handle
= ipr_cmd
->ioarcb
.res_handle
;
5444 cmd_pkt
= &reset_cmd
->ioarcb
.cmd_pkt
;
5445 cmd_pkt
->request_type
= IPR_RQTYPE_IOACMD
;
5446 cmd_pkt
->cdb
[0] = IPR_RESET_DEVICE
;
5447 cmd_pkt
->cdb
[2] = IPR_RESET_TYPE_SELECT
| IPR_BUS_RESET
;
5449 ipr_do_req(reset_cmd
, ipr_bus_reset_done
, ipr_timeout
, IPR_DEVICE_RESET_TIMEOUT
);
5450 spin_unlock_irqrestore(ioa_cfg
->host
->host_lock
, lock_flags
);
5455 * ipr_cancel_op - Cancel specified op
5456 * @scsi_cmd: scsi command struct
5458 * This function cancels specified op.
5463 static int ipr_cancel_op(struct scsi_cmnd
*scsi_cmd
)
5465 struct ipr_cmnd
*ipr_cmd
;
5466 struct ipr_ioa_cfg
*ioa_cfg
;
5467 struct ipr_resource_entry
*res
;
5468 struct ipr_cmd_pkt
*cmd_pkt
;
5470 int i
, op_found
= 0;
5471 struct ipr_hrr_queue
*hrrq
;
5474 ioa_cfg
= (struct ipr_ioa_cfg
*)scsi_cmd
->device
->host
->hostdata
;
5475 res
= scsi_cmd
->device
->hostdata
;
5477 /* If we are currently going through reset/reload, return failed.
5478 * This will force the mid-layer to call ipr_eh_host_reset,
5479 * which will then go to sleep and wait for the reset to complete
5481 if (ioa_cfg
->in_reset_reload
||
5482 ioa_cfg
->hrrq
[IPR_INIT_HRRQ
].ioa_is_dead
)
5488 * If we are aborting a timed out op, chances are that the timeout was caused
5489 * by a still not detected EEH error. In such cases, reading a register will
5490 * trigger the EEH recovery infrastructure.
5492 int_reg
= readl(ioa_cfg
->regs
.sense_interrupt_reg
);
5494 if (!ipr_is_gscsi(res
))
5497 for_each_hrrq(hrrq
, ioa_cfg
) {
5498 spin_lock(&hrrq
->_lock
);
5499 for (i
= hrrq
->min_cmd_id
; i
<= hrrq
->max_cmd_id
; i
++) {
5500 if (ioa_cfg
->ipr_cmnd_list
[i
]->scsi_cmd
== scsi_cmd
) {
5501 if (!ipr_cmnd_is_free(ioa_cfg
->ipr_cmnd_list
[i
])) {
5507 spin_unlock(&hrrq
->_lock
);
5513 ipr_cmd
= ipr_get_free_ipr_cmnd(ioa_cfg
);
5514 ipr_cmd
->ioarcb
.res_handle
= res
->res_handle
;
5515 cmd_pkt
= &ipr_cmd
->ioarcb
.cmd_pkt
;
5516 cmd_pkt
->request_type
= IPR_RQTYPE_IOACMD
;
5517 cmd_pkt
->cdb
[0] = IPR_CANCEL_ALL_REQUESTS
;
5518 ipr_cmd
->u
.sdev
= scsi_cmd
->device
;
5520 scmd_printk(KERN_ERR
, scsi_cmd
, "Aborting command: %02X\n",
5522 ipr_send_blocking_cmd(ipr_cmd
, ipr_abort_timeout
, IPR_CANCEL_ALL_TIMEOUT
);
5523 ioasc
= be32_to_cpu(ipr_cmd
->s
.ioasa
.hdr
.ioasc
);
5526 * If the abort task timed out and we sent a bus reset, we will get
5527 * one the following responses to the abort
5529 if (ioasc
== IPR_IOASC_BUS_WAS_RESET
|| ioasc
== IPR_IOASC_SYNC_REQUIRED
) {
5534 list_add_tail(&ipr_cmd
->queue
, &ipr_cmd
->hrrq
->hrrq_free_q
);
5535 if (!ipr_is_naca_model(res
))
5536 res
->needs_sync_complete
= 1;
5539 return IPR_IOASC_SENSE_KEY(ioasc
) ? FAILED
: SUCCESS
;
5543 * ipr_eh_abort - Abort a single op
5544 * @scsi_cmd: scsi command struct
5547 * 0 if scan in progress / 1 if scan is complete
5549 static int ipr_scan_finished(struct Scsi_Host
*shost
, unsigned long elapsed_time
)
5551 unsigned long lock_flags
;
5552 struct ipr_ioa_cfg
*ioa_cfg
= (struct ipr_ioa_cfg
*) shost
->hostdata
;
5555 spin_lock_irqsave(shost
->host_lock
, lock_flags
);
5556 if (ioa_cfg
->hrrq
[IPR_INIT_HRRQ
].ioa_is_dead
|| ioa_cfg
->scan_done
)
5558 if ((elapsed_time
/HZ
) > (ioa_cfg
->transop_timeout
* 2))
5560 spin_unlock_irqrestore(shost
->host_lock
, lock_flags
);
5565 * ipr_eh_host_reset - Reset the host adapter
5566 * @scsi_cmd: scsi command struct
5571 static int ipr_eh_abort(struct scsi_cmnd
*scsi_cmd
)
5573 unsigned long flags
;
5575 struct ipr_ioa_cfg
*ioa_cfg
;
5579 ioa_cfg
= (struct ipr_ioa_cfg
*) scsi_cmd
->device
->host
->hostdata
;
5581 spin_lock_irqsave(scsi_cmd
->device
->host
->host_lock
, flags
);
5582 rc
= ipr_cancel_op(scsi_cmd
);
5583 spin_unlock_irqrestore(scsi_cmd
->device
->host
->host_lock
, flags
);
5586 rc
= ipr_wait_for_ops(ioa_cfg
, scsi_cmd
->device
, ipr_match_lun
);
5592 * ipr_handle_other_interrupt - Handle "other" interrupts
5593 * @ioa_cfg: ioa config struct
5594 * @int_reg: interrupt register
5597 * IRQ_NONE / IRQ_HANDLED
5599 static irqreturn_t
ipr_handle_other_interrupt(struct ipr_ioa_cfg
*ioa_cfg
,
5602 irqreturn_t rc
= IRQ_HANDLED
;
5605 int_mask_reg
= readl(ioa_cfg
->regs
.sense_interrupt_mask_reg32
);
5606 int_reg
&= ~int_mask_reg
;
5608 /* If an interrupt on the adapter did not occur, ignore it.
5609 * Or in the case of SIS 64, check for a stage change interrupt.
5611 if ((int_reg
& IPR_PCII_OPER_INTERRUPTS
) == 0) {
5612 if (ioa_cfg
->sis64
) {
5613 int_mask_reg
= readl(ioa_cfg
->regs
.sense_interrupt_mask_reg
);
5614 int_reg
= readl(ioa_cfg
->regs
.sense_interrupt_reg
) & ~int_mask_reg
;
5615 if (int_reg
& IPR_PCII_IPL_STAGE_CHANGE
) {
5617 /* clear stage change */
5618 writel(IPR_PCII_IPL_STAGE_CHANGE
, ioa_cfg
->regs
.clr_interrupt_reg
);
5619 int_reg
= readl(ioa_cfg
->regs
.sense_interrupt_reg
) & ~int_mask_reg
;
5620 list_del(&ioa_cfg
->reset_cmd
->queue
);
5621 del_timer(&ioa_cfg
->reset_cmd
->timer
);
5622 ipr_reset_ioa_job(ioa_cfg
->reset_cmd
);
5630 if (int_reg
& IPR_PCII_IOA_TRANS_TO_OPER
) {
5631 /* Mask the interrupt */
5632 writel(IPR_PCII_IOA_TRANS_TO_OPER
, ioa_cfg
->regs
.set_interrupt_mask_reg
);
5633 int_reg
= readl(ioa_cfg
->regs
.sense_interrupt_reg
);
5635 list_del(&ioa_cfg
->reset_cmd
->queue
);
5636 del_timer(&ioa_cfg
->reset_cmd
->timer
);
5637 ipr_reset_ioa_job(ioa_cfg
->reset_cmd
);
5638 } else if ((int_reg
& IPR_PCII_HRRQ_UPDATED
) == int_reg
) {
5639 if (ioa_cfg
->clear_isr
) {
5640 if (ipr_debug
&& printk_ratelimit())
5641 dev_err(&ioa_cfg
->pdev
->dev
,
5642 "Spurious interrupt detected. 0x%08X\n", int_reg
);
5643 writel(IPR_PCII_HRRQ_UPDATED
, ioa_cfg
->regs
.clr_interrupt_reg32
);
5644 int_reg
= readl(ioa_cfg
->regs
.sense_interrupt_reg32
);
5648 if (int_reg
& IPR_PCII_IOA_UNIT_CHECKED
)
5649 ioa_cfg
->ioa_unit_checked
= 1;
5650 else if (int_reg
& IPR_PCII_NO_HOST_RRQ
)
5651 dev_err(&ioa_cfg
->pdev
->dev
,
5652 "No Host RRQ. 0x%08X\n", int_reg
);
5654 dev_err(&ioa_cfg
->pdev
->dev
,
5655 "Permanent IOA failure. 0x%08X\n", int_reg
);
5657 if (WAIT_FOR_DUMP
== ioa_cfg
->sdt_state
)
5658 ioa_cfg
->sdt_state
= GET_DUMP
;
5660 ipr_mask_and_clear_interrupts(ioa_cfg
, ~0);
5661 ipr_initiate_ioa_reset(ioa_cfg
, IPR_SHUTDOWN_NONE
);
5668 * ipr_isr_eh - Interrupt service routine error handler
5669 * @ioa_cfg: ioa config struct
5670 * @msg: message to log
5675 static void ipr_isr_eh(struct ipr_ioa_cfg
*ioa_cfg
, char *msg
, u16 number
)
5677 ioa_cfg
->errors_logged
++;
5678 dev_err(&ioa_cfg
->pdev
->dev
, "%s %d\n", msg
, number
);
5680 if (WAIT_FOR_DUMP
== ioa_cfg
->sdt_state
)
5681 ioa_cfg
->sdt_state
= GET_DUMP
;
5683 ipr_initiate_ioa_reset(ioa_cfg
, IPR_SHUTDOWN_NONE
);
/*
 * ipr_process_hrrq - Drain completed commands from one host request
 * response queue (HRRQ) onto @doneq, up to @budget entries
 * (@budget <= 0 appears to mean "no limit" — see the budget check below).
 *
 * NOTE(review): the declarations of num_hrrq, cmd_index and ioasc, and
 * the function's return statements, are not visible in this extract.
 */
5686 static int ipr_process_hrrq(struct ipr_hrr_queue
*hrr_queue
, int budget
,
5687 struct list_head
*doneq
)
5691 struct ipr_cmnd
*ipr_cmd
;
5692 struct ipr_ioa_cfg
*ioa_cfg
= hrr_queue
->ioa_cfg
;
5695 /* If interrupts are disabled, ignore the interrupt */
5696 if (!hrr_queue
->allow_interrupts
)
/* An entry is valid while its toggle bit matches the queue's. */
5699 while ((be32_to_cpu(*hrr_queue
->hrrq_curr
) & IPR_HRRQ_TOGGLE_BIT
) ==
5700 hrr_queue
->toggle_bit
) {
/* Extract the response handle (index into ipr_cmnd_list). */
5702 cmd_index
= (be32_to_cpu(*hrr_queue
->hrrq_curr
) &
5703 IPR_HRRQ_REQ_RESP_HANDLE_MASK
) >>
5704 IPR_HRRQ_REQ_RESP_HANDLE_SHIFT
;
/* Reject handles outside this queue's command-id window. */
5706 if (unlikely(cmd_index
> hrr_queue
->max_cmd_id
||
5707 cmd_index
< hrr_queue
->min_cmd_id
)) {
5709 "Invalid response handle from IOA: ",
5714 ipr_cmd
= ioa_cfg
->ipr_cmnd_list
[cmd_index
];
5715 ioasc
= be32_to_cpu(ipr_cmd
->s
.ioasa
.hdr
.ioasc
);
5717 ipr_trc_hook(ipr_cmd
, IPR_TRACE_FINISH
, ioasc
);
5719 list_move_tail(&ipr_cmd
->queue
, doneq
);
/* Advance the queue pointer, wrapping and flipping the toggle bit
 * when the end of the ring is reached. */
5721 if (hrr_queue
->hrrq_curr
< hrr_queue
->hrrq_end
) {
5722 hrr_queue
->hrrq_curr
++;
5724 hrr_queue
->hrrq_curr
= hrr_queue
->hrrq_start
;
5725 hrr_queue
->toggle_bit
^= 1u;
/* Stop once a positive budget has been consumed. */
5728 if (budget
> 0 && num_hrrq
>= budget
)
/*
 * ipr_iopoll - irq_poll callback: process up to @budget HRRQ completions
 * under the queue lock, then invoke each command's fast_done outside it.
 *
 * NOTE(review): the declarations of completed_ops and doneq (likely a
 * LIST_HEAD) are not visible in this extract.
 */
5735 static int ipr_iopoll(struct irq_poll
*iop
, int budget
)
5737 struct ipr_ioa_cfg
*ioa_cfg
;
5738 struct ipr_hrr_queue
*hrrq
;
5739 struct ipr_cmnd
*ipr_cmd
, *temp
;
5740 unsigned long hrrq_flags
;
5744 hrrq
= container_of(iop
, struct ipr_hrr_queue
, iopoll
);
5745 ioa_cfg
= hrrq
->ioa_cfg
;
5747 spin_lock_irqsave(hrrq
->lock
, hrrq_flags
);
5748 completed_ops
= ipr_process_hrrq(hrrq
, budget
, &doneq
);
/* Fewer completions than budget: polling is done for this round. */
5750 if (completed_ops
< budget
)
5751 irq_poll_complete(iop
);
5752 spin_unlock_irqrestore(hrrq
->lock
, hrrq_flags
);
/* Complete the drained commands with the queue lock dropped. */
5754 list_for_each_entry_safe(ipr_cmd
, temp
, &doneq
, queue
) {
5755 list_del(&ipr_cmd
->queue
);
5756 del_timer(&ipr_cmd
->timer
);
5757 ipr_cmd
->fast_done(ipr_cmd
);
5760 return completed_ops
;
5764 * ipr_isr - Interrupt service routine
5766 * @devp: pointer to ioa config struct
5769 * IRQ_NONE / IRQ_HANDLED
5771 static irqreturn_t
ipr_isr(int irq
, void *devp
)
5773 struct ipr_hrr_queue
*hrrq
= (struct ipr_hrr_queue
*)devp
;
5774 struct ipr_ioa_cfg
*ioa_cfg
= hrrq
->ioa_cfg
;
5775 unsigned long hrrq_flags
= 0;
5779 struct ipr_cmnd
*ipr_cmd
, *temp
;
5780 irqreturn_t rc
= IRQ_NONE
;
/* NOTE(review): declarations of int_reg, num_hrrq, irq_none and doneq
 * are not visible in this extract. */
5783 spin_lock_irqsave(hrrq
->lock
, hrrq_flags
);
5784 /* If interrupts are disabled, ignore the interrupt */
5785 if (!hrrq
->allow_interrupts
) {
5786 spin_unlock_irqrestore(hrrq
->lock
, hrrq_flags
);
/* Main loop: drain completions; if any were found, optionally ack the
 * PCI interrupt and re-check until the HRRQ-updated bit clears or the
 * retry limit is hit. */
5791 if (ipr_process_hrrq(hrrq
, -1, &doneq
)) {
5794 if (!ioa_cfg
->clear_isr
)
5797 /* Clear the PCI interrupt */
5800 writel(IPR_PCII_HRRQ_UPDATED
,
5801 ioa_cfg
->regs
.clr_interrupt_reg32
);
5802 int_reg
= readl(ioa_cfg
->regs
.sense_interrupt_reg32
);
5803 } while (int_reg
& IPR_PCII_HRRQ_UPDATED
&&
5804 num_hrrq
++ < IPR_MAX_HRRQ_RETRIES
);
/* Nothing completed yet on this pass: sample the interrupt register. */
5806 } else if (rc
== IRQ_NONE
&& irq_none
== 0) {
5807 int_reg
= readl(ioa_cfg
->regs
.sense_interrupt_reg32
);
/* HRRQ-updated still set after the retry limit: escalate. */
5809 } else if (num_hrrq
== IPR_MAX_HRRQ_RETRIES
&&
5810 int_reg
& IPR_PCII_HRRQ_UPDATED
) {
5812 "Error clearing HRRQ: ", num_hrrq
);
/* Not one of ours so far: let the other-interrupt handler decide. */
5819 if (unlikely(rc
== IRQ_NONE
))
5820 rc
= ipr_handle_other_interrupt(ioa_cfg
, int_reg
);
5822 spin_unlock_irqrestore(hrrq
->lock
, hrrq_flags
);
/* Complete drained commands with the queue lock dropped. */
5823 list_for_each_entry_safe(ipr_cmd
, temp
, &doneq
, queue
) {
5824 list_del(&ipr_cmd
->queue
);
5825 del_timer(&ipr_cmd
->timer
);
5826 ipr_cmd
->fast_done(ipr_cmd
);
5832 * ipr_isr_mhrrq - Interrupt service routine
5834 * @devp: pointer to ioa config struct
5837 * IRQ_NONE / IRQ_HANDLED
5839 static irqreturn_t
ipr_isr_mhrrq(int irq
, void *devp
)
5841 struct ipr_hrr_queue
*hrrq
= (struct ipr_hrr_queue
*)devp
;
5842 struct ipr_ioa_cfg
*ioa_cfg
= hrrq
->ioa_cfg
;
5843 unsigned long hrrq_flags
= 0;
5844 struct ipr_cmnd
*ipr_cmd
, *temp
;
5845 irqreturn_t rc
= IRQ_NONE
;
/* NOTE(review): the doneq declaration is not visible in this extract. */
5848 spin_lock_irqsave(hrrq
->lock
, hrrq_flags
);
5850 /* If interrupts are disabled, ignore the interrupt */
5851 if (!hrrq
->allow_interrupts
) {
5852 spin_unlock_irqrestore(hrrq
->lock
, hrrq_flags
);
/* With iopoll enabled on SIS-64 multi-vector adapters, defer the work
 * to irq_poll instead of processing inline. */
5856 if (ioa_cfg
->iopoll_weight
&& ioa_cfg
->sis64
&& ioa_cfg
->nvectors
> 1) {
5857 if ((be32_to_cpu(*hrrq
->hrrq_curr
) & IPR_HRRQ_TOGGLE_BIT
) ==
5859 irq_poll_sched(&hrrq
->iopoll
);
5860 spin_unlock_irqrestore(hrrq
->lock
, hrrq_flags
);
/* Otherwise drain the queue inline if a valid entry is present. */
5864 if ((be32_to_cpu(*hrrq
->hrrq_curr
) & IPR_HRRQ_TOGGLE_BIT
) ==
5867 if (ipr_process_hrrq(hrrq
, -1, &doneq
))
5871 spin_unlock_irqrestore(hrrq
->lock
, hrrq_flags
);
/* Complete drained commands with the queue lock dropped. */
5873 list_for_each_entry_safe(ipr_cmd
, temp
, &doneq
, queue
) {
5874 list_del(&ipr_cmd
->queue
);
5875 del_timer(&ipr_cmd
->timer
);
5876 ipr_cmd
->fast_done(ipr_cmd
);
5882 * ipr_build_ioadl64 - Build a scatter/gather list and map the buffer
5883 * @ioa_cfg: ioa config struct
5884 * @ipr_cmd: ipr command struct
5887 * 0 on success / -1 on failure
5889 static int ipr_build_ioadl64(struct ipr_ioa_cfg
*ioa_cfg
,
5890 struct ipr_cmnd
*ipr_cmd
)
5893 struct scatterlist
*sg
;
5895 u32 ioadl_flags
= 0;
5896 struct scsi_cmnd
*scsi_cmd
= ipr_cmd
->scsi_cmd
;
5897 struct ipr_ioarcb
*ioarcb
= &ipr_cmd
->ioarcb
;
5898 struct ipr_ioadl64_desc
*ioadl64
= ipr_cmd
->i
.ioadl64
;
/* NOTE(review): declarations of length, nseg and i, plus the zero-length
 * early exit and failure return, are not visible in this extract. */
5900 length
= scsi_bufflen(scsi_cmd
);
5904 nseg
= scsi_dma_map(scsi_cmd
);
5906 if (printk_ratelimit())
5907 dev_err(&ioa_cfg
->pdev
->dev
, "scsi_dma_map failed!\n");
5911 ipr_cmd
->dma_use_sg
= nseg
;
5913 ioarcb
->data_transfer_length
= cpu_to_be32(length
);
5915 cpu_to_be32(sizeof(struct ipr_ioadl64_desc
) * ipr_cmd
->dma_use_sg
);
/* Pick descriptor flags from the DMA direction. */
5917 if (scsi_cmd
->sc_data_direction
== DMA_TO_DEVICE
) {
5918 ioadl_flags
= IPR_IOADL_FLAGS_WRITE
;
5919 ioarcb
->cmd_pkt
.flags_hi
|= IPR_FLAGS_HI_WRITE_NOT_READ
;
5920 } else if (scsi_cmd
->sc_data_direction
== DMA_FROM_DEVICE
)
5921 ioadl_flags
= IPR_IOADL_FLAGS_READ
;
/* Fill one 64-bit descriptor per mapped segment. */
5923 scsi_for_each_sg(scsi_cmd
, sg
, ipr_cmd
->dma_use_sg
, i
) {
5924 ioadl64
[i
].flags
= cpu_to_be32(ioadl_flags
);
5925 ioadl64
[i
].data_len
= cpu_to_be32(sg_dma_len(sg
));
5926 ioadl64
[i
].address
= cpu_to_be64(sg_dma_address(sg
));
/* Mark the final descriptor. */
5929 ioadl64
[i
-1].flags
|= cpu_to_be32(IPR_IOADL_FLAGS_LAST
);
5934 * ipr_build_ioadl - Build a scatter/gather list and map the buffer
5935 * @ioa_cfg: ioa config struct
5936 * @ipr_cmd: ipr command struct
5939 * 0 on success / -1 on failure
5941 static int ipr_build_ioadl(struct ipr_ioa_cfg
*ioa_cfg
,
5942 struct ipr_cmnd
*ipr_cmd
)
5945 struct scatterlist
*sg
;
5947 u32 ioadl_flags
= 0;
5948 struct scsi_cmnd
*scsi_cmd
= ipr_cmd
->scsi_cmd
;
5949 struct ipr_ioarcb
*ioarcb
= &ipr_cmd
->ioarcb
;
5950 struct ipr_ioadl_desc
*ioadl
= ipr_cmd
->i
.ioadl
;
/* NOTE(review): declarations of length, nseg and i, plus the zero-length
 * early exit and failure return, are not visible in this extract. */
5952 length
= scsi_bufflen(scsi_cmd
);
5956 nseg
= scsi_dma_map(scsi_cmd
);
5958 dev_err(&ioa_cfg
->pdev
->dev
, "scsi_dma_map failed!\n");
5962 ipr_cmd
->dma_use_sg
= nseg
;
/* Set direction-specific flags and transfer lengths (32-bit IOARCB
 * keeps separate read/write length fields). */
5964 if (scsi_cmd
->sc_data_direction
== DMA_TO_DEVICE
) {
5965 ioadl_flags
= IPR_IOADL_FLAGS_WRITE
;
5966 ioarcb
->cmd_pkt
.flags_hi
|= IPR_FLAGS_HI_WRITE_NOT_READ
;
5967 ioarcb
->data_transfer_length
= cpu_to_be32(length
);
5969 cpu_to_be32(sizeof(struct ipr_ioadl_desc
) * ipr_cmd
->dma_use_sg
);
5970 } else if (scsi_cmd
->sc_data_direction
== DMA_FROM_DEVICE
) {
5971 ioadl_flags
= IPR_IOADL_FLAGS_READ
;
5972 ioarcb
->read_data_transfer_length
= cpu_to_be32(length
);
5973 ioarcb
->read_ioadl_len
=
5974 cpu_to_be32(sizeof(struct ipr_ioadl_desc
) * ipr_cmd
->dma_use_sg
);
/* Small lists fit in the IOARCB's inline add_data area. */
5977 if (ipr_cmd
->dma_use_sg
<= ARRAY_SIZE(ioarcb
->u
.add_data
.u
.ioadl
)) {
5978 ioadl
= ioarcb
->u
.add_data
.u
.ioadl
;
5979 ioarcb
->write_ioadl_addr
= cpu_to_be32((ipr_cmd
->dma_addr
) +
5980 offsetof(struct ipr_ioarcb
, u
.add_data
));
5981 ioarcb
->read_ioadl_addr
= ioarcb
->write_ioadl_addr
;
/* Fill one descriptor per mapped segment. */
5984 scsi_for_each_sg(scsi_cmd
, sg
, ipr_cmd
->dma_use_sg
, i
) {
5985 ioadl
[i
].flags_and_data_len
=
5986 cpu_to_be32(ioadl_flags
| sg_dma_len(sg
));
5987 ioadl
[i
].address
= cpu_to_be32(sg_dma_address(sg
));
/* Mark the final descriptor. */
5990 ioadl
[i
-1].flags_and_data_len
|= cpu_to_be32(IPR_IOADL_FLAGS_LAST
);
5995 * __ipr_erp_done - Process completion of ERP for a device
5996 * @ipr_cmd: ipr command struct
5998 * This function copies the sense buffer into the scsi_cmd
5999 * struct and pushes the scsi_done function.
/* Caller is expected to hold the hrrq lock — the locked wrapper is
 * ipr_erp_done(). */
6004 static void __ipr_erp_done(struct ipr_cmnd
*ipr_cmd
)
6006 struct scsi_cmnd
*scsi_cmd
= ipr_cmd
->scsi_cmd
;
6007 struct ipr_resource_entry
*res
= scsi_cmd
->device
->hostdata
;
6008 u32 ioasc
= be32_to_cpu(ipr_cmd
->s
.ioasa
.hdr
.ioasc
);
/* Request sense itself failed: report DID_ERROR instead of sense. */
6010 if (IPR_IOASC_SENSE_KEY(ioasc
) > 0) {
6011 scsi_cmd
->result
|= (DID_ERROR
<< 16);
6012 scmd_printk(KERN_ERR
, scsi_cmd
,
6013 "Request Sense failed with IOASC: 0x%08X\n", ioasc
);
/* Otherwise hand the gathered sense data to the mid-layer. */
6015 memcpy(scsi_cmd
->sense_buffer
, ipr_cmd
->sense_buffer
,
6016 SCSI_SENSE_BUFFERSIZE
);
6020 if (!ipr_is_naca_model(res
))
6021 res
->needs_sync_complete
= 1;
/* Complete the command and return the block to the free queue. */
6024 scsi_dma_unmap(ipr_cmd
->scsi_cmd
);
6025 scsi_cmd
->scsi_done(scsi_cmd
);
6026 if (ipr_cmd
->eh_comp
)
6027 complete(ipr_cmd
->eh_comp
);
6028 list_add_tail(&ipr_cmd
->queue
, &ipr_cmd
->hrrq
->hrrq_free_q
);
6032 * ipr_erp_done - Process completion of ERP for a device
6033 * @ipr_cmd: ipr command struct
6035 * This function copies the sense buffer into the scsi_cmd
6036 * struct and pushes the scsi_done function.
6041 static void ipr_erp_done(struct ipr_cmnd
*ipr_cmd
)
6043 struct ipr_hrr_queue
*hrrq
= ipr_cmd
->hrrq
;
6044 unsigned long hrrq_flags
;
6046 spin_lock_irqsave(&hrrq
->_lock
, hrrq_flags
);
6047 __ipr_erp_done(ipr_cmd
);
6048 spin_unlock_irqrestore(&hrrq
->_lock
, hrrq_flags
);
6052 * ipr_reinit_ipr_cmnd_for_erp - Re-initialize a cmnd block to be used for ERP
6053 * @ipr_cmd: ipr command struct
6058 static void ipr_reinit_ipr_cmnd_for_erp(struct ipr_cmnd
*ipr_cmd
)
6060 struct ipr_ioarcb
*ioarcb
= &ipr_cmd
->ioarcb
;
6061 struct ipr_ioasa
*ioasa
= &ipr_cmd
->s
.ioasa
;
6062 dma_addr_t dma_addr
= ipr_cmd
->dma_addr
;
6064 memset(&ioarcb
->cmd_pkt
, 0, sizeof(struct ipr_cmd_pkt
));
6065 ioarcb
->data_transfer_length
= 0;
6066 ioarcb
->read_data_transfer_length
= 0;
6067 ioarcb
->ioadl_len
= 0;
6068 ioarcb
->read_ioadl_len
= 0;
6069 ioasa
->hdr
.ioasc
= 0;
6070 ioasa
->hdr
.residual_data_len
= 0;
6072 if (ipr_cmd
->ioa_cfg
->sis64
)
6073 ioarcb
->u
.sis64_addr_data
.data_ioadl_addr
=
6074 cpu_to_be64(dma_addr
+ offsetof(struct ipr_cmnd
, i
.ioadl64
));
6076 ioarcb
->write_ioadl_addr
=
6077 cpu_to_be32(dma_addr
+ offsetof(struct ipr_cmnd
, i
.ioadl
));
6078 ioarcb
->read_ioadl_addr
= ioarcb
->write_ioadl_addr
;
6083 * __ipr_erp_request_sense - Send request sense to a device
6084 * @ipr_cmd: ipr command struct
6086 * This function sends a request sense to a device as a result
6087 * of a check condition.
/* Caller is expected to hold the hrrq lock — the locked wrapper is
 * ipr_erp_request_sense(). */
6092 static void __ipr_erp_request_sense(struct ipr_cmnd
*ipr_cmd
)
6094 struct ipr_cmd_pkt
*cmd_pkt
= &ipr_cmd
->ioarcb
.cmd_pkt
;
6095 u32 ioasc
= be32_to_cpu(ipr_cmd
->s
.ioasa
.hdr
.ioasc
);
/* A sense key is already available: just finish the ERP.
 * NOTE(review): the early return after this call is not visible in
 * this extract. */
6097 if (IPR_IOASC_SENSE_KEY(ioasc
) > 0) {
6098 __ipr_erp_done(ipr_cmd
);
6102 ipr_reinit_ipr_cmnd_for_erp(ipr_cmd
);
/* Build a REQUEST SENSE CDB reading SCSI_SENSE_BUFFERSIZE bytes. */
6104 cmd_pkt
->request_type
= IPR_RQTYPE_SCSICDB
;
6105 cmd_pkt
->cdb
[0] = REQUEST_SENSE
;
6106 cmd_pkt
->cdb
[4] = SCSI_SENSE_BUFFERSIZE
;
6107 cmd_pkt
->flags_hi
|= IPR_FLAGS_HI_SYNC_OVERRIDE
;
6108 cmd_pkt
->flags_hi
|= IPR_FLAGS_HI_NO_ULEN_CHK
;
6109 cmd_pkt
->timeout
= cpu_to_be16(IPR_REQUEST_SENSE_TIMEOUT
/ HZ
);
/* DMA the sense data into this command's dedicated sense buffer. */
6111 ipr_init_ioadl(ipr_cmd
, ipr_cmd
->sense_buffer_dma
,
6112 SCSI_SENSE_BUFFERSIZE
, IPR_IOADL_FLAGS_READ_LAST
);
6114 ipr_do_req(ipr_cmd
, ipr_erp_done
, ipr_timeout
,
6115 IPR_REQUEST_SENSE_TIMEOUT
* 2);
6119 * ipr_erp_request_sense - Send request sense to a device
6120 * @ipr_cmd: ipr command struct
6122 * This function sends a request sense to a device as a result
6123 * of a check condition.
6128 static void ipr_erp_request_sense(struct ipr_cmnd
*ipr_cmd
)
6130 struct ipr_hrr_queue
*hrrq
= ipr_cmd
->hrrq
;
6131 unsigned long hrrq_flags
;
6133 spin_lock_irqsave(&hrrq
->_lock
, hrrq_flags
);
6134 __ipr_erp_request_sense(ipr_cmd
);
6135 spin_unlock_irqrestore(&hrrq
->_lock
, hrrq_flags
);
6139 * ipr_erp_cancel_all - Send cancel all to a device
6140 * @ipr_cmd: ipr command struct
6142 * This function sends a cancel all to a device to clear the
6143 * queue. If we are running TCQ on the device, QERR is set to 1,
6144 * which means all outstanding ops have been dropped on the floor.
6145 * Cancel all will return them to us.
6150 static void ipr_erp_cancel_all(struct ipr_cmnd
*ipr_cmd
)
6152 struct scsi_cmnd
*scsi_cmd
= ipr_cmd
->scsi_cmd
;
6153 struct ipr_resource_entry
*res
= scsi_cmd
->device
->hostdata
;
6154 struct ipr_cmd_pkt
*cmd_pkt
;
/* NOTE(review): one or more statements between the declarations and
 * this reinit (original lines 6155-6157) are not visible here. */
6158 ipr_reinit_ipr_cmnd_for_erp(ipr_cmd
);
/* Untagged device: skip cancel-all and go straight to request sense.
 * NOTE(review): the early return after this call is not visible. */
6160 if (!scsi_cmd
->device
->simple_tags
) {
6161 __ipr_erp_request_sense(ipr_cmd
);
/* Tagged device: issue an IOA cancel-all, then request sense on
 * completion. */
6165 cmd_pkt
= &ipr_cmd
->ioarcb
.cmd_pkt
;
6166 cmd_pkt
->request_type
= IPR_RQTYPE_IOACMD
;
6167 cmd_pkt
->cdb
[0] = IPR_CANCEL_ALL_REQUESTS
;
6169 ipr_do_req(ipr_cmd
, ipr_erp_request_sense
, ipr_timeout
,
6170 IPR_CANCEL_ALL_TIMEOUT
);
6174 * ipr_dump_ioasa - Dump contents of IOASA
6175 * @ioa_cfg: ioa config struct
6176 * @ipr_cmd: ipr command struct
6177 * @res: resource entry struct
6179 * This function is invoked by the interrupt handler when ops
6180 * fail. It will log the IOASA if appropriate. Only called
6186 static void ipr_dump_ioasa(struct ipr_ioa_cfg
*ioa_cfg
,
6187 struct ipr_cmnd
*ipr_cmd
, struct ipr_resource_entry
*res
)
/* NOTE(review): declarations of i, data_len and error_index are not
 * visible in this extract. */
6191 u32 ioasc
, fd_ioasc
;
6192 struct ipr_ioasa
*ioasa
= &ipr_cmd
->s
.ioasa
;
6193 __be32
*ioasa_data
= (__be32
*)ioasa
;
6196 ioasc
= be32_to_cpu(ioasa
->hdr
.ioasc
) & IPR_IOASC_IOASC_MASK
;
6197 fd_ioasc
= be32_to_cpu(ioasa
->hdr
.fd_ioasc
) & IPR_IOASC_IOASC_MASK
;
/* Log nothing below the default log level. */
6202 if (ioa_cfg
->log_level
< IPR_DEFAULT_LOG_LEVEL
)
/* Prefer the failing-device IOASC for bus-reset errors. */
6205 if (ioasc
== IPR_IOASC_BUS_WAS_RESET
&& fd_ioasc
)
6206 error_index
= ipr_get_error(fd_ioasc
);
6208 error_index
= ipr_get_error(ioasc
);
6210 if (ioa_cfg
->log_level
< IPR_MAX_LOG_LEVEL
) {
6211 /* Don't log an error if the IOA already logged one */
6212 if (ioasa
->hdr
.ilid
!= 0)
6215 if (!ipr_is_gscsi(res
))
6218 if (ipr_error_table
[error_index
].log_ioasa
== 0)
6222 ipr_res_err(ioa_cfg
, res
, "%s\n", ipr_error_table
[error_index
].error
);
/* Clamp the dump length to the IOASA structure size for this SIS. */
6224 data_len
= be16_to_cpu(ioasa
->hdr
.ret_stat_len
);
6225 if (ioa_cfg
->sis64
&& sizeof(struct ipr_ioasa64
) < data_len
)
6226 data_len
= sizeof(struct ipr_ioasa64
);
6227 else if (!ioa_cfg
->sis64
&& sizeof(struct ipr_ioasa
) < data_len
)
6228 data_len
= sizeof(struct ipr_ioasa
);
6230 ipr_err("IOASA Dump:\n");
/* Hex-dump the IOASA four 32-bit words per line. */
6232 for (i
= 0; i
< data_len
/ 4; i
+= 4) {
6233 ipr_err("%08X: %08X %08X %08X %08X\n", i
*4,
6234 be32_to_cpu(ioasa_data
[i
]),
6235 be32_to_cpu(ioasa_data
[i
+1]),
6236 be32_to_cpu(ioasa_data
[i
+2]),
6237 be32_to_cpu(ioasa_data
[i
+3]));
6242 * ipr_gen_sense - Generate SCSI sense data from an IOASA
6244 * @sense_buf: sense data buffer
6249 static void ipr_gen_sense(struct ipr_cmnd
*ipr_cmd
)
/* NOTE(review): the failing_lba declaration and several structural
 * lines (braces/else) are not visible in this extract. */
6252 u8
*sense_buf
= ipr_cmd
->scsi_cmd
->sense_buffer
;
6253 struct ipr_resource_entry
*res
= ipr_cmd
->scsi_cmd
->device
->hostdata
;
6254 struct ipr_ioasa
*ioasa
= &ipr_cmd
->s
.ioasa
;
6255 u32 ioasc
= be32_to_cpu(ioasa
->hdr
.ioasc
);
6257 memset(sense_buf
, 0, SCSI_SENSE_BUFFERSIZE
);
/* Driver-generated IOASCs carry no device sense data. */
6259 if (ioasc
>= IPR_FIRST_DRIVER_IOASC
)
6262 ipr_cmd
->scsi_cmd
->result
= SAM_STAT_CHECK_CONDITION
;
/* VSET device with a 64-bit failing LBA: build descriptor-format
 * (0x72) sense with an information descriptor. */
6264 if (ipr_is_vset_device(res
) &&
6265 ioasc
== IPR_IOASC_MED_DO_NOT_REALLOC
&&
6266 ioasa
->u
.vset
.failing_lba_hi
!= 0) {
6267 sense_buf
[0] = 0x72;
6268 sense_buf
[1] = IPR_IOASC_SENSE_KEY(ioasc
);
6269 sense_buf
[2] = IPR_IOASC_SENSE_CODE(ioasc
);
6270 sense_buf
[3] = IPR_IOASC_SENSE_QUAL(ioasc
);
6274 sense_buf
[9] = 0x0A;
6275 sense_buf
[10] = 0x80;
/* High 32 bits of the failing LBA, big-endian into bytes 12-15. */
6277 failing_lba
= be32_to_cpu(ioasa
->u
.vset
.failing_lba_hi
);
6279 sense_buf
[12] = (failing_lba
& 0xff000000) >> 24;
6280 sense_buf
[13] = (failing_lba
& 0x00ff0000) >> 16;
6281 sense_buf
[14] = (failing_lba
& 0x0000ff00) >> 8;
6282 sense_buf
[15] = failing_lba
& 0x000000ff;
/* Low 32 bits into bytes 16-19. */
6284 failing_lba
= be32_to_cpu(ioasa
->u
.vset
.failing_lba_lo
);
6286 sense_buf
[16] = (failing_lba
& 0xff000000) >> 24;
6287 sense_buf
[17] = (failing_lba
& 0x00ff0000) >> 16;
6288 sense_buf
[18] = (failing_lba
& 0x0000ff00) >> 8;
6289 sense_buf
[19] = failing_lba
& 0x000000ff;
/* Otherwise build fixed-format (0x70) sense. */
6291 sense_buf
[0] = 0x70;
6292 sense_buf
[2] = IPR_IOASC_SENSE_KEY(ioasc
);
6293 sense_buf
[12] = IPR_IOASC_SENSE_CODE(ioasc
);
6294 sense_buf
[13] = IPR_IOASC_SENSE_QUAL(ioasc
);
6296 /* Illegal request */
6297 if ((IPR_IOASC_SENSE_KEY(ioasc
) == 0x05) &&
6298 (be32_to_cpu(ioasa
->hdr
.ioasc_specific
) & IPR_FIELD_POINTER_VALID
)) {
6299 sense_buf
[7] = 10; /* additional length */
6301 /* IOARCB was in error */
6302 if (IPR_IOASC_SENSE_CODE(ioasc
) == 0x24)
6303 sense_buf
[15] = 0xC0;
6304 else /* Parameter data was invalid */
6305 sense_buf
[15] = 0x80;
/* Field pointer (sense-key specific bytes). */
6308 ((IPR_FIELD_POINTER_MASK
&
6309 be32_to_cpu(ioasa
->hdr
.ioasc_specific
)) >> 8) & 0xff;
6311 (IPR_FIELD_POINTER_MASK
&
6312 be32_to_cpu(ioasa
->hdr
.ioasc_specific
)) & 0xff;
/* Medium error: report the failing LBA in the information field. */
6314 if (ioasc
== IPR_IOASC_MED_DO_NOT_REALLOC
) {
6315 if (ipr_is_vset_device(res
))
6316 failing_lba
= be32_to_cpu(ioasa
->u
.vset
.failing_lba_lo
);
6318 failing_lba
= be32_to_cpu(ioasa
->u
.dasd
.failing_lba
);
6320 sense_buf
[0] |= 0x80; /* Or in the Valid bit */
6321 sense_buf
[3] = (failing_lba
& 0xff000000) >> 24;
6322 sense_buf
[4] = (failing_lba
& 0x00ff0000) >> 16;
6323 sense_buf
[5] = (failing_lba
& 0x0000ff00) >> 8;
6324 sense_buf
[6] = failing_lba
& 0x000000ff;
6327 sense_buf
[7] = 6; /* additional length */
6333 * ipr_get_autosense - Copy autosense data to sense buffer
6334 * @ipr_cmd: ipr command struct
6336 * This function copies the autosense buffer to the buffer
6337 * in the scsi_cmd, if there is autosense available.
6340 * 1 if autosense was available / 0 if not
6342 static int ipr_get_autosense(struct ipr_cmnd
*ipr_cmd
)
6344 struct ipr_ioasa
*ioasa
= &ipr_cmd
->s
.ioasa
;
6345 struct ipr_ioasa64
*ioasa64
= &ipr_cmd
->s
.ioasa64
;
/* No autosense in the IOASA: nothing to copy.
 * NOTE(review): the return statements are not visible in this
 * extract. */
6347 if ((be32_to_cpu(ioasa
->hdr
.ioasc_specific
) & IPR_AUTOSENSE_VALID
) == 0)
/* Copy from the SIS-64 or SIS-32 IOASA layout, clamped to the
 * mid-layer sense buffer size. */
6350 if (ipr_cmd
->ioa_cfg
->sis64
)
6351 memcpy(ipr_cmd
->scsi_cmd
->sense_buffer
, ioasa64
->auto_sense
.data
,
6352 min_t(u16
, be16_to_cpu(ioasa64
->auto_sense
.auto_sense_len
),
6353 SCSI_SENSE_BUFFERSIZE
));
6355 memcpy(ipr_cmd
->scsi_cmd
->sense_buffer
, ioasa
->auto_sense
.data
,
6356 min_t(u16
, be16_to_cpu(ioasa
->auto_sense
.auto_sense_len
),
6357 SCSI_SENSE_BUFFERSIZE
));
6362 * ipr_erp_start - Process an error response for a SCSI op
6363 * @ioa_cfg: ioa config struct
6364 * @ipr_cmd: ipr command struct
6366 * This function determines whether or not to initiate ERP
6367 * on the affected device.
6372 static void ipr_erp_start(struct ipr_ioa_cfg
*ioa_cfg
,
6373 struct ipr_cmnd
*ipr_cmd
)
6375 struct scsi_cmnd
*scsi_cmd
= ipr_cmd
->scsi_cmd
;
6376 struct ipr_resource_entry
*res
= scsi_cmd
->device
->hostdata
;
6377 u32 ioasc
= be32_to_cpu(ipr_cmd
->s
.ioasa
.hdr
.ioasc
);
6378 u32 masked_ioasc
= ioasc
& IPR_IOASC_IOASC_MASK
;
/* NOTE(review): the guard (likely a !res check) preceding this call,
 * the switch's break statements, and a possible early return are not
 * visible in this extract. */
6381 __ipr_scsi_eh_done(ipr_cmd
);
/* Non-GSCSI devices get synthesized sense data (except for device
 * bus status, which carries real status). */
6385 if (!ipr_is_gscsi(res
) && masked_ioasc
!= IPR_IOASC_HW_DEV_BUS_STATUS
)
6386 ipr_gen_sense(ipr_cmd
);
6388 ipr_dump_ioasa(ioa_cfg
, ipr_cmd
, res
);
/* Map the masked IOASC to a mid-layer result / recovery action. */
6390 switch (masked_ioasc
) {
6391 case IPR_IOASC_ABORTED_CMD_TERM_BY_HOST
:
6392 if (ipr_is_naca_model(res
))
6393 scsi_cmd
->result
|= (DID_ABORT
<< 16);
6395 scsi_cmd
->result
|= (DID_IMM_RETRY
<< 16);
6397 case IPR_IOASC_IR_RESOURCE_HANDLE
:
6398 case IPR_IOASC_IR_NO_CMDS_TO_2ND_IOA
:
6399 scsi_cmd
->result
|= (DID_NO_CONNECT
<< 16);
6401 case IPR_IOASC_HW_SEL_TIMEOUT
:
6402 scsi_cmd
->result
|= (DID_NO_CONNECT
<< 16);
6403 if (!ipr_is_naca_model(res
))
6404 res
->needs_sync_complete
= 1;
6406 case IPR_IOASC_SYNC_REQUIRED
:
6408 res
->needs_sync_complete
= 1;
6409 scsi_cmd
->result
|= (DID_IMM_RETRY
<< 16);
6411 case IPR_IOASC_MED_DO_NOT_REALLOC
: /* prevent retries */
6412 case IPR_IOASA_IR_DUAL_IOA_DISABLED
:
6414 * exception: do not set DID_PASSTHROUGH on CHECK CONDITION
6415 * so SCSI mid-layer and upper layers handle it accordingly.
6417 if (scsi_cmd
->result
!= SAM_STAT_CHECK_CONDITION
)
6418 scsi_cmd
->result
|= (DID_PASSTHROUGH
<< 16);
6420 case IPR_IOASC_BUS_WAS_RESET
:
6421 case IPR_IOASC_BUS_WAS_RESET_BY_OTHER
:
6423 * Report the bus reset and ask for a retry. The device
6424 * will give CC/UA the next command.
6426 if (!res
->resetting_device
)
6427 scsi_report_bus_reset(ioa_cfg
->host
, scsi_cmd
->device
->channel
);
6428 scsi_cmd
->result
|= (DID_ERROR
<< 16);
6429 if (!ipr_is_naca_model(res
))
6430 res
->needs_sync_complete
= 1;
6432 case IPR_IOASC_HW_DEV_BUS_STATUS
:
6433 scsi_cmd
->result
|= IPR_IOASC_SENSE_STATUS(ioasc
);
6434 if (IPR_IOASC_SENSE_STATUS(ioasc
) == SAM_STAT_CHECK_CONDITION
) {
/* No autosense available: start full ERP (cancel-all then
 * request sense) instead of completing here. */
6435 if (!ipr_get_autosense(ipr_cmd
)) {
6436 if (!ipr_is_naca_model(res
)) {
6437 ipr_erp_cancel_all(ipr_cmd
);
6442 if (!ipr_is_naca_model(res
))
6443 res
->needs_sync_complete
= 1;
6445 case IPR_IOASC_NR_INIT_CMD_REQUIRED
:
6447 case IPR_IOASC_IR_NON_OPTIMIZED
:
6448 if (res
->raw_mode
) {
6450 scsi_cmd
->result
|= (DID_IMM_RETRY
<< 16);
6452 scsi_cmd
->result
|= (DID_ERROR
<< 16);
/* Default: anything above RECOVERED_ERROR is a hard error. */
6455 if (IPR_IOASC_SENSE_KEY(ioasc
) > RECOVERED_ERROR
)
6456 scsi_cmd
->result
|= (DID_ERROR
<< 16);
6457 if (!ipr_is_vset_device(res
) && !ipr_is_naca_model(res
))
6458 res
->needs_sync_complete
= 1;
/* Complete the command and return the block to the free queue. */
6462 scsi_dma_unmap(ipr_cmd
->scsi_cmd
);
6463 scsi_cmd
->scsi_done(scsi_cmd
);
6464 if (ipr_cmd
->eh_comp
)
6465 complete(ipr_cmd
->eh_comp
);
6466 list_add_tail(&ipr_cmd
->queue
, &ipr_cmd
->hrrq
->hrrq_free_q
);
6470 * ipr_scsi_done - mid-layer done function
6471 * @ipr_cmd: ipr command struct
6473 * This function is invoked by the interrupt handler for
6474 * ops generated by the SCSI mid-layer
6479 static void ipr_scsi_done(struct ipr_cmnd
*ipr_cmd
)
6481 struct ipr_ioa_cfg
*ioa_cfg
= ipr_cmd
->ioa_cfg
;
6482 struct scsi_cmnd
*scsi_cmd
= ipr_cmd
->scsi_cmd
;
6483 u32 ioasc
= be32_to_cpu(ipr_cmd
->s
.ioasa
.hdr
.ioasc
);
6484 unsigned long lock_flags
;
6486 scsi_set_resid(scsi_cmd
, be32_to_cpu(ipr_cmd
->s
.ioasa
.hdr
.residual_data_len
));
/* Fast path: no sense key — complete directly under the hrrq lock. */
6488 if (likely(IPR_IOASC_SENSE_KEY(ioasc
) == 0)) {
6489 scsi_dma_unmap(scsi_cmd
);
6491 spin_lock_irqsave(ipr_cmd
->hrrq
->lock
, lock_flags
);
6492 scsi_cmd
->scsi_done(scsi_cmd
);
6493 if (ipr_cmd
->eh_comp
)
6494 complete(ipr_cmd
->eh_comp
);
6495 list_add_tail(&ipr_cmd
->queue
, &ipr_cmd
->hrrq
->hrrq_free_q
);
6496 spin_unlock_irqrestore(ipr_cmd
->hrrq
->lock
, lock_flags
);
/* Error path: start ERP under both the host lock and the hrrq lock
 * (taken in that order). */
6498 spin_lock_irqsave(ioa_cfg
->host
->host_lock
, lock_flags
);
6499 spin_lock(&ipr_cmd
->hrrq
->_lock
);
6500 ipr_erp_start(ioa_cfg
, ipr_cmd
);
6501 spin_unlock(&ipr_cmd
->hrrq
->_lock
);
6502 spin_unlock_irqrestore(ioa_cfg
->host
->host_lock
, lock_flags
);
6507 * ipr_queuecommand - Queue a mid-layer request
6508 * @shost: scsi host struct
6509 * @scsi_cmd: scsi command struct
6511 * This function queues a request generated by the mid-layer.
6515 * SCSI_MLQUEUE_DEVICE_BUSY if device is busy
6516 * SCSI_MLQUEUE_HOST_BUSY if host is busy
6518 static int ipr_queuecommand(struct Scsi_Host
*shost
,
6519 struct scsi_cmnd
*scsi_cmd
)
6521 struct ipr_ioa_cfg
*ioa_cfg
;
6522 struct ipr_resource_entry
*res
;
6523 struct ipr_ioarcb
*ioarcb
;
6524 struct ipr_cmnd
*ipr_cmd
;
6525 unsigned long hrrq_flags
, lock_flags
;
6527 struct ipr_hrr_queue
*hrrq
;
/* NOTE(review): declarations of rc and hrrq_id, plus several return
 * statements and closing braces, are not visible in this extract. */
6530 ioa_cfg
= (struct ipr_ioa_cfg
*)shost
->hostdata
;
6532 scsi_cmd
->result
= (DID_OK
<< 16);
6533 res
= scsi_cmd
->device
->hostdata
;
/* SATA devices are routed through libata. */
6535 if (ipr_is_gata(res
) && res
->sata_port
) {
6536 spin_lock_irqsave(ioa_cfg
->host
->host_lock
, lock_flags
);
6537 rc
= ata_sas_queuecmd(scsi_cmd
, res
->sata_port
->ap
);
6538 spin_unlock_irqrestore(ioa_cfg
->host
->host_lock
, lock_flags
);
/* Spread commands across HRRQs. */
6542 hrrq_id
= ipr_get_hrrq_index(ioa_cfg
);
6543 hrrq
= &ioa_cfg
->hrrq
[hrrq_id
];
6545 spin_lock_irqsave(hrrq
->lock
, hrrq_flags
);
6547 * We are currently blocking all devices due to a host reset
6548 * We have told the host to stop giving us new requests, but
6549 * ERP ops don't count. FIXME
6551 if (unlikely(!hrrq
->allow_cmds
&& !hrrq
->ioa_is_dead
&& !hrrq
->removing_ioa
)) {
6552 spin_unlock_irqrestore(hrrq
->lock
, hrrq_flags
);
6553 return SCSI_MLQUEUE_HOST_BUSY
;
6557 * FIXME - Create scsi_set_host_offline interface
6558 * and the ioa_is_dead check can be removed
6560 if (unlikely(hrrq
->ioa_is_dead
|| hrrq
->removing_ioa
|| !res
)) {
6561 spin_unlock_irqrestore(hrrq
->lock
, hrrq_flags
);
6565 ipr_cmd
= __ipr_get_free_ipr_cmnd(hrrq
);
6566 if (ipr_cmd
== NULL
) {
6567 spin_unlock_irqrestore(hrrq
->lock
, hrrq_flags
);
6568 return SCSI_MLQUEUE_HOST_BUSY
;
6570 spin_unlock_irqrestore(hrrq
->lock
, hrrq_flags
);
/* Build the IOARCB from the mid-layer command. */
6572 ipr_init_ipr_cmnd(ipr_cmd
, ipr_scsi_done
);
6573 ioarcb
= &ipr_cmd
->ioarcb
;
6575 memcpy(ioarcb
->cmd_pkt
.cdb
, scsi_cmd
->cmnd
, scsi_cmd
->cmd_len
);
6576 ipr_cmd
->scsi_cmd
= scsi_cmd
;
6577 ipr_cmd
->done
= ipr_scsi_eh_done
;
6579 if (ipr_is_gscsi(res
)) {
6580 if (scsi_cmd
->underflow
== 0)
6581 ioarcb
->cmd_pkt
.flags_hi
|= IPR_FLAGS_HI_NO_ULEN_CHK
;
/* First command after a reset gets an extra delay. */
6583 if (res
->reset_occurred
) {
6584 res
->reset_occurred
= 0;
6585 ioarcb
->cmd_pkt
.flags_lo
|= IPR_FLAGS_LO_DELAY_AFTER_RST
;
6589 if (ipr_is_gscsi(res
) || ipr_is_vset_device(res
)) {
6590 ioarcb
->cmd_pkt
.flags_hi
|= IPR_FLAGS_HI_NO_LINK_DESC
;
6592 ioarcb
->cmd_pkt
.flags_lo
|= IPR_FLAGS_LO_ALIGNED_BFR
;
6593 if (scsi_cmd
->flags
& SCMD_TAGGED
)
6594 ioarcb
->cmd_pkt
.flags_lo
|= IPR_FLAGS_LO_SIMPLE_TASK
;
6596 ioarcb
->cmd_pkt
.flags_lo
|= IPR_FLAGS_LO_UNTAGGED_TASK
;
/* Vendor-range opcodes are IOA commands, not SCSI CDBs. */
6599 if (scsi_cmd
->cmnd
[0] >= 0xC0 &&
6600 (!ipr_is_gscsi(res
) || scsi_cmd
->cmnd
[0] == IPR_QUERY_RSRC_STATE
)) {
6601 ioarcb
->cmd_pkt
.request_type
= IPR_RQTYPE_IOACMD
;
6603 if (res
->raw_mode
&& ipr_is_af_dasd_device(res
)) {
6604 ioarcb
->cmd_pkt
.request_type
= IPR_RQTYPE_PIPE
;
6606 if (scsi_cmd
->underflow
== 0)
6607 ioarcb
->cmd_pkt
.flags_hi
|= IPR_FLAGS_HI_NO_ULEN_CHK
;
/* Build the scatter/gather list for the adapter's SIS level. */
6611 rc
= ipr_build_ioadl64(ioa_cfg
, ipr_cmd
);
6613 rc
= ipr_build_ioadl(ioa_cfg
, ipr_cmd
);
/* Re-check queue state under the lock before sending. */
6615 spin_lock_irqsave(hrrq
->lock
, hrrq_flags
);
6616 if (unlikely(rc
|| (!hrrq
->allow_cmds
&& !hrrq
->ioa_is_dead
))) {
6617 list_add_tail(&ipr_cmd
->queue
, &hrrq
->hrrq_free_q
);
6618 spin_unlock_irqrestore(hrrq
->lock
, hrrq_flags
);
6620 scsi_dma_unmap(scsi_cmd
);
6621 return SCSI_MLQUEUE_HOST_BUSY
;
6624 if (unlikely(hrrq
->ioa_is_dead
)) {
6625 list_add_tail(&ipr_cmd
->queue
, &hrrq
->hrrq_free_q
);
6626 spin_unlock_irqrestore(hrrq
->lock
, hrrq_flags
);
6627 scsi_dma_unmap(scsi_cmd
);
/* Issue the command to the adapter. */
6631 ioarcb
->res_handle
= res
->res_handle
;
6632 if (res
->needs_sync_complete
) {
6633 ioarcb
->cmd_pkt
.flags_hi
|= IPR_FLAGS_HI_SYNC_COMPLETE
;
6634 res
->needs_sync_complete
= 0;
6636 list_add_tail(&ipr_cmd
->queue
, &hrrq
->hrrq_pending_q
);
6637 ipr_trc_hook(ipr_cmd
, IPR_TRACE_START
, IPR_GET_RES_PHYS_LOC(res
));
6638 ipr_send_command(ipr_cmd
);
6639 spin_unlock_irqrestore(hrrq
->lock
, hrrq_flags
);
/* Dead-IOA completion path: fail the command as DID_NO_CONNECT. */
6643 spin_lock_irqsave(hrrq
->lock
, hrrq_flags
);
6644 memset(scsi_cmd
->sense_buffer
, 0, SCSI_SENSE_BUFFERSIZE
);
6645 scsi_cmd
->result
= (DID_NO_CONNECT
<< 16);
6646 scsi_cmd
->scsi_done(scsi_cmd
);
6647 spin_unlock_irqrestore(hrrq
->lock
, hrrq_flags
);
6652 * ipr_ioctl - IOCTL handler
6653 * @sdev: scsi device struct
6658 * 0 on success / other on failure
6660 static int ipr_ioctl(struct scsi_device
*sdev
, int cmd
, void __user
*arg
)
6662 struct ipr_resource_entry
*res
;
6664 res
= (struct ipr_resource_entry
*)sdev
->hostdata
;
/* SATA devices: forward to libata's SCSI ioctl handler.
 * NOTE(review): the return values for the HDIO_GET_IDENTITY case and
 * the non-GATA fall-through are not visible in this extract. */
6665 if (res
&& ipr_is_gata(res
)) {
6666 if (cmd
== HDIO_GET_IDENTITY
)
6668 return ata_sas_scsi_ioctl(res
->sata_port
->ap
, sdev
, cmd
, arg
);
6675 * ipr_info - Get information about the card/driver
6676 * @scsi_host: scsi host struct
6679 * pointer to buffer with description string
6681 static const char *ipr_ioa_info(struct Scsi_Host
*host
)
6683 static char buffer
[512];
6684 struct ipr_ioa_cfg
*ioa_cfg
;
6685 unsigned long lock_flags
= 0;
6687 ioa_cfg
= (struct ipr_ioa_cfg
*) host
->hostdata
;
6689 spin_lock_irqsave(host
->host_lock
, lock_flags
);
6690 sprintf(buffer
, "IBM %X Storage Adapter", ioa_cfg
->type
);
6691 spin_unlock_irqrestore(host
->host_lock
, lock_flags
);
/* SCSI host template: wires the ipr entry points into the mid-layer.
 * NOTE(review): some members (e.g. original lines 6698, 6714) are not
 * visible in this extract. */
6696 static struct scsi_host_template driver_template
= {
6697 .module
= THIS_MODULE
,
6699 .info
= ipr_ioa_info
,
6701 .queuecommand
= ipr_queuecommand
,
6702 .eh_abort_handler
= ipr_eh_abort
,
6703 .eh_device_reset_handler
= ipr_eh_dev_reset
,
6704 .eh_host_reset_handler
= ipr_eh_host_reset
,
6705 .slave_alloc
= ipr_slave_alloc
,
6706 .slave_configure
= ipr_slave_configure
,
6707 .slave_destroy
= ipr_slave_destroy
,
6708 .scan_finished
= ipr_scan_finished
,
6709 .target_alloc
= ipr_target_alloc
,
6710 .target_destroy
= ipr_target_destroy
,
6711 .change_queue_depth
= ipr_change_queue_depth
,
6712 .bios_param
= ipr_biosparam
,
6713 .can_queue
= IPR_MAX_COMMANDS
,
6715 .sg_tablesize
= IPR_MAX_SGLIST
,
6716 .max_sectors
= IPR_IOA_MAX_SECTORS
,
6717 .cmd_per_lun
= IPR_MAX_CMD_PER_LUN
,
6718 .use_clustering
= ENABLE_CLUSTERING
,
6719 .shost_attrs
= ipr_ioa_attrs
,
6720 .sdev_attrs
= ipr_dev_attrs
,
6721 .proc_name
= IPR_NAME
,
6725 * ipr_ata_phy_reset - libata phy_reset handler
6726 * @ap: ata port to reset
6729 static void ipr_ata_phy_reset(struct ata_port
*ap
)
6731 unsigned long flags
;
6732 struct ipr_sata_port
*sata_port
= ap
->private_data
;
6733 struct ipr_resource_entry
*res
= sata_port
->res
;
6734 struct ipr_ioa_cfg
*ioa_cfg
= sata_port
->ioa_cfg
;
/* NOTE(review): the rc declaration and the error/goto paths between
 * the reset call and the class assignments are not visible in this
 * extract. */
/* Wait out any in-progress adapter reset, dropping the host lock
 * while sleeping. */
6738 spin_lock_irqsave(ioa_cfg
->host
->host_lock
, flags
);
6739 while (ioa_cfg
->in_reset_reload
) {
6740 spin_unlock_irqrestore(ioa_cfg
->host
->host_lock
, flags
);
6741 wait_event(ioa_cfg
->reset_wait_q
, !ioa_cfg
->in_reset_reload
);
6742 spin_lock_irqsave(ioa_cfg
->host
->host_lock
, flags
);
6745 if (!ioa_cfg
->hrrq
[IPR_INIT_HRRQ
].allow_cmds
)
6748 rc
= ipr_device_reset(ioa_cfg
, res
);
6751 ap
->link
.device
[0].class = ATA_DEV_NONE
;
/* Publish the device class discovered by the reset; unknown maps to
 * "no device". */
6755 ap
->link
.device
[0].class = res
->ata_class
;
6756 if (ap
->link
.device
[0].class == ATA_DEV_UNKNOWN
)
6757 ap
->link
.device
[0].class = ATA_DEV_NONE
;
6760 spin_unlock_irqrestore(ioa_cfg
->host
->host_lock
, flags
);
6765 * ipr_ata_post_internal - Cleanup after an internal command
6766 * @qc: ATA queued command
6771 static void ipr_ata_post_internal(struct ata_queued_cmd
*qc
)
6773 struct ipr_sata_port
*sata_port
= qc
->ap
->private_data
;
6774 struct ipr_ioa_cfg
*ioa_cfg
= sata_port
->ioa_cfg
;
6775 struct ipr_cmnd
*ipr_cmd
;
6776 struct ipr_hrr_queue
*hrrq
;
6777 unsigned long flags
;
/* Wait out any in-progress adapter reset, dropping the host lock
 * while sleeping. */
6779 spin_lock_irqsave(ioa_cfg
->host
->host_lock
, flags
);
6780 while (ioa_cfg
->in_reset_reload
) {
6781 spin_unlock_irqrestore(ioa_cfg
->host
->host_lock
, flags
);
6782 wait_event(ioa_cfg
->reset_wait_q
, !ioa_cfg
->in_reset_reload
);
6783 spin_lock_irqsave(ioa_cfg
->host
->host_lock
, flags
);
/* If the internal command is still pending on any HRRQ, reset the
 * device to flush it.
 * NOTE(review): the loop's break/closing braces are not visible in
 * this extract. */
6786 for_each_hrrq(hrrq
, ioa_cfg
) {
6787 spin_lock(&hrrq
->_lock
);
6788 list_for_each_entry(ipr_cmd
, &hrrq
->hrrq_pending_q
, queue
) {
6789 if (ipr_cmd
->qc
== qc
) {
6790 ipr_device_reset(ioa_cfg
, sata_port
->res
);
6794 spin_unlock(&hrrq
->_lock
);
6796 spin_unlock_irqrestore(ioa_cfg
->host
->host_lock
, flags
);
6800 * ipr_copy_sata_tf - Copy a SATA taskfile to an IOA data structure
6801 * @regs: destination
6802 * @tf: source ATA taskfile
6807 static void ipr_copy_sata_tf(struct ipr_ioarcb_ata_regs
*regs
,
6808 struct ata_taskfile
*tf
)
6810 regs
->feature
= tf
->feature
;
6811 regs
->nsect
= tf
->nsect
;
6812 regs
->lbal
= tf
->lbal
;
6813 regs
->lbam
= tf
->lbam
;
6814 regs
->lbah
= tf
->lbah
;
6815 regs
->device
= tf
->device
;
6816 regs
->command
= tf
->command
;
6817 regs
->hob_feature
= tf
->hob_feature
;
6818 regs
->hob_nsect
= tf
->hob_nsect
;
6819 regs
->hob_lbal
= tf
->hob_lbal
;
6820 regs
->hob_lbam
= tf
->hob_lbam
;
6821 regs
->hob_lbah
= tf
->hob_lbah
;
6822 regs
->ctl
= tf
->ctl
;
6826 * ipr_sata_done - done function for SATA commands
6827 * @ipr_cmd: ipr command struct
6829 * This function is invoked by the interrupt handler for
6830 * ops generated by the SCSI mid-layer to SATA devices
6835 static void ipr_sata_done(struct ipr_cmnd
*ipr_cmd
)
6837 struct ipr_ioa_cfg
*ioa_cfg
= ipr_cmd
->ioa_cfg
;
6838 struct ata_queued_cmd
*qc
= ipr_cmd
->qc
;
6839 struct ipr_sata_port
*sata_port
= qc
->ap
->private_data
;
6840 struct ipr_resource_entry
*res
= sata_port
->res
;
6841 u32 ioasc
= be32_to_cpu(ipr_cmd
->s
.ioasa
.hdr
.ioasc
);
6843 spin_lock(&ipr_cmd
->hrrq
->_lock
);
/* Capture the GATA status from the SIS-64 or SIS-32 IOASA layout. */
6844 if (ipr_cmd
->ioa_cfg
->sis64
)
6845 memcpy(&sata_port
->ioasa
, &ipr_cmd
->s
.ioasa64
.u
.gata
,
6846 sizeof(struct ipr_ioasa_gata
));
6848 memcpy(&sata_port
->ioasa
, &ipr_cmd
->s
.ioasa
.u
.gata
,
6849 sizeof(struct ipr_ioasa_gata
));
6850 ipr_dump_ioasa(ioa_cfg
, ipr_cmd
, res
);
6852 if (be32_to_cpu(ipr_cmd
->s
.ioasa
.hdr
.ioasc_specific
) & IPR_ATA_DEVICE_WAS_RESET
)
6853 scsi_report_device_reset(ioa_cfg
->host
, res
->bus
, res
->target
);
/* Translate ATA status to a libata error mask; force an error bit
 * when the sense key indicates more than a recovered error. */
6855 if (IPR_IOASC_SENSE_KEY(ioasc
) > RECOVERED_ERROR
)
6856 qc
->err_mask
|= __ac_err_mask(sata_port
->ioasa
.status
);
6858 qc
->err_mask
|= ac_err_mask(sata_port
->ioasa
.status
);
/* Return the block to the free queue, then complete to libata with
 * the hrrq lock dropped. */
6859 list_add_tail(&ipr_cmd
->queue
, &ipr_cmd
->hrrq
->hrrq_free_q
);
6860 spin_unlock(&ipr_cmd
->hrrq
->_lock
);
6861 ata_qc_complete(qc
);
6865 * ipr_build_ata_ioadl64 - Build an ATA scatter/gather list
6866 * @ipr_cmd: ipr command struct
6867 * @qc: ATA queued command
6870 static void ipr_build_ata_ioadl64(struct ipr_cmnd
*ipr_cmd
,
6871 struct ata_queued_cmd
*qc
)
6873 u32 ioadl_flags
= 0;
6874 struct ipr_ioarcb
*ioarcb
= &ipr_cmd
->ioarcb
;
6875 struct ipr_ioadl64_desc
*ioadl64
= ipr_cmd
->i
.ata_ioadl
.ioadl64
;
6876 struct ipr_ioadl64_desc
*last_ioadl64
= NULL
;
6877 int len
= qc
->nbytes
;
6878 struct scatterlist
*sg
;
6880 dma_addr_t dma_addr
= ipr_cmd
->dma_addr
;
6885 if (qc
->dma_dir
== DMA_TO_DEVICE
) {
6886 ioadl_flags
= IPR_IOADL_FLAGS_WRITE
;
6887 ioarcb
->cmd_pkt
.flags_hi
|= IPR_FLAGS_HI_WRITE_NOT_READ
;
6888 } else if (qc
->dma_dir
== DMA_FROM_DEVICE
)
6889 ioadl_flags
= IPR_IOADL_FLAGS_READ
;
6891 ioarcb
->data_transfer_length
= cpu_to_be32(len
);
6893 cpu_to_be32(sizeof(struct ipr_ioadl64_desc
) * ipr_cmd
->dma_use_sg
);
6894 ioarcb
->u
.sis64_addr_data
.data_ioadl_addr
=
6895 cpu_to_be64(dma_addr
+ offsetof(struct ipr_cmnd
, i
.ata_ioadl
.ioadl64
));
6897 for_each_sg(qc
->sg
, sg
, qc
->n_elem
, si
) {
6898 ioadl64
->flags
= cpu_to_be32(ioadl_flags
);
6899 ioadl64
->data_len
= cpu_to_be32(sg_dma_len(sg
));
6900 ioadl64
->address
= cpu_to_be64(sg_dma_address(sg
));
6902 last_ioadl64
= ioadl64
;
6906 if (likely(last_ioadl64
))
6907 last_ioadl64
->flags
|= cpu_to_be32(IPR_IOADL_FLAGS_LAST
);
6911 * ipr_build_ata_ioadl - Build an ATA scatter/gather list
6912 * @ipr_cmd: ipr command struct
6913 * @qc: ATA queued command
6916 static void ipr_build_ata_ioadl(struct ipr_cmnd
*ipr_cmd
,
6917 struct ata_queued_cmd
*qc
)
6919 u32 ioadl_flags
= 0;
6920 struct ipr_ioarcb
*ioarcb
= &ipr_cmd
->ioarcb
;
6921 struct ipr_ioadl_desc
*ioadl
= ipr_cmd
->i
.ioadl
;
6922 struct ipr_ioadl_desc
*last_ioadl
= NULL
;
6923 int len
= qc
->nbytes
;
6924 struct scatterlist
*sg
;
6930 if (qc
->dma_dir
== DMA_TO_DEVICE
) {
6931 ioadl_flags
= IPR_IOADL_FLAGS_WRITE
;
6932 ioarcb
->cmd_pkt
.flags_hi
|= IPR_FLAGS_HI_WRITE_NOT_READ
;
6933 ioarcb
->data_transfer_length
= cpu_to_be32(len
);
6935 cpu_to_be32(sizeof(struct ipr_ioadl_desc
) * ipr_cmd
->dma_use_sg
);
6936 } else if (qc
->dma_dir
== DMA_FROM_DEVICE
) {
6937 ioadl_flags
= IPR_IOADL_FLAGS_READ
;
6938 ioarcb
->read_data_transfer_length
= cpu_to_be32(len
);
6939 ioarcb
->read_ioadl_len
=
6940 cpu_to_be32(sizeof(struct ipr_ioadl_desc
) * ipr_cmd
->dma_use_sg
);
6943 for_each_sg(qc
->sg
, sg
, qc
->n_elem
, si
) {
6944 ioadl
->flags_and_data_len
= cpu_to_be32(ioadl_flags
| sg_dma_len(sg
));
6945 ioadl
->address
= cpu_to_be32(sg_dma_address(sg
));
6951 if (likely(last_ioadl
))
6952 last_ioadl
->flags_and_data_len
|= cpu_to_be32(IPR_IOADL_FLAGS_LAST
);
6956 * ipr_qc_defer - Get a free ipr_cmd
6957 * @qc: queued command
6962 static int ipr_qc_defer(struct ata_queued_cmd
*qc
)
6964 struct ata_port
*ap
= qc
->ap
;
6965 struct ipr_sata_port
*sata_port
= ap
->private_data
;
6966 struct ipr_ioa_cfg
*ioa_cfg
= sata_port
->ioa_cfg
;
6967 struct ipr_cmnd
*ipr_cmd
;
6968 struct ipr_hrr_queue
*hrrq
;
6971 hrrq_id
= ipr_get_hrrq_index(ioa_cfg
);
6972 hrrq
= &ioa_cfg
->hrrq
[hrrq_id
];
6974 qc
->lldd_task
= NULL
;
6975 spin_lock(&hrrq
->_lock
);
6976 if (unlikely(hrrq
->ioa_is_dead
)) {
6977 spin_unlock(&hrrq
->_lock
);
6981 if (unlikely(!hrrq
->allow_cmds
)) {
6982 spin_unlock(&hrrq
->_lock
);
6983 return ATA_DEFER_LINK
;
6986 ipr_cmd
= __ipr_get_free_ipr_cmnd(hrrq
);
6987 if (ipr_cmd
== NULL
) {
6988 spin_unlock(&hrrq
->_lock
);
6989 return ATA_DEFER_LINK
;
6992 qc
->lldd_task
= ipr_cmd
;
6993 spin_unlock(&hrrq
->_lock
);
6998 * ipr_qc_issue - Issue a SATA qc to a device
6999 * @qc: queued command
7004 static unsigned int ipr_qc_issue(struct ata_queued_cmd
*qc
)
7006 struct ata_port
*ap
= qc
->ap
;
7007 struct ipr_sata_port
*sata_port
= ap
->private_data
;
7008 struct ipr_resource_entry
*res
= sata_port
->res
;
7009 struct ipr_ioa_cfg
*ioa_cfg
= sata_port
->ioa_cfg
;
7010 struct ipr_cmnd
*ipr_cmd
;
7011 struct ipr_ioarcb
*ioarcb
;
7012 struct ipr_ioarcb_ata_regs
*regs
;
7014 if (qc
->lldd_task
== NULL
)
7017 ipr_cmd
= qc
->lldd_task
;
7018 if (ipr_cmd
== NULL
)
7019 return AC_ERR_SYSTEM
;
7021 qc
->lldd_task
= NULL
;
7022 spin_lock(&ipr_cmd
->hrrq
->_lock
);
7023 if (unlikely(!ipr_cmd
->hrrq
->allow_cmds
||
7024 ipr_cmd
->hrrq
->ioa_is_dead
)) {
7025 list_add_tail(&ipr_cmd
->queue
, &ipr_cmd
->hrrq
->hrrq_free_q
);
7026 spin_unlock(&ipr_cmd
->hrrq
->_lock
);
7027 return AC_ERR_SYSTEM
;
7030 ipr_init_ipr_cmnd(ipr_cmd
, ipr_lock_and_done
);
7031 ioarcb
= &ipr_cmd
->ioarcb
;
7033 if (ioa_cfg
->sis64
) {
7034 regs
= &ipr_cmd
->i
.ata_ioadl
.regs
;
7035 ioarcb
->add_cmd_parms_offset
= cpu_to_be16(sizeof(*ioarcb
));
7037 regs
= &ioarcb
->u
.add_data
.u
.regs
;
7039 memset(regs
, 0, sizeof(*regs
));
7040 ioarcb
->add_cmd_parms_len
= cpu_to_be16(sizeof(*regs
));
7042 list_add_tail(&ipr_cmd
->queue
, &ipr_cmd
->hrrq
->hrrq_pending_q
);
7044 ipr_cmd
->done
= ipr_sata_done
;
7045 ipr_cmd
->ioarcb
.res_handle
= res
->res_handle
;
7046 ioarcb
->cmd_pkt
.request_type
= IPR_RQTYPE_ATA_PASSTHRU
;
7047 ioarcb
->cmd_pkt
.flags_hi
|= IPR_FLAGS_HI_NO_LINK_DESC
;
7048 ioarcb
->cmd_pkt
.flags_hi
|= IPR_FLAGS_HI_NO_ULEN_CHK
;
7049 ipr_cmd
->dma_use_sg
= qc
->n_elem
;
7052 ipr_build_ata_ioadl64(ipr_cmd
, qc
);
7054 ipr_build_ata_ioadl(ipr_cmd
, qc
);
7056 regs
->flags
|= IPR_ATA_FLAG_STATUS_ON_GOOD_COMPLETION
;
7057 ipr_copy_sata_tf(regs
, &qc
->tf
);
7058 memcpy(ioarcb
->cmd_pkt
.cdb
, qc
->cdb
, IPR_MAX_CDB_LEN
);
7059 ipr_trc_hook(ipr_cmd
, IPR_TRACE_START
, IPR_GET_RES_PHYS_LOC(res
));
7061 switch (qc
->tf
.protocol
) {
7062 case ATA_PROT_NODATA
:
7067 regs
->flags
|= IPR_ATA_FLAG_XFER_TYPE_DMA
;
7070 case ATAPI_PROT_PIO
:
7071 case ATAPI_PROT_NODATA
:
7072 regs
->flags
|= IPR_ATA_FLAG_PACKET_CMD
;
7075 case ATAPI_PROT_DMA
:
7076 regs
->flags
|= IPR_ATA_FLAG_PACKET_CMD
;
7077 regs
->flags
|= IPR_ATA_FLAG_XFER_TYPE_DMA
;
7082 spin_unlock(&ipr_cmd
->hrrq
->_lock
);
7083 return AC_ERR_INVALID
;
7086 ipr_send_command(ipr_cmd
);
7087 spin_unlock(&ipr_cmd
->hrrq
->_lock
);
7093 * ipr_qc_fill_rtf - Read result TF
7094 * @qc: ATA queued command
7099 static bool ipr_qc_fill_rtf(struct ata_queued_cmd
*qc
)
7101 struct ipr_sata_port
*sata_port
= qc
->ap
->private_data
;
7102 struct ipr_ioasa_gata
*g
= &sata_port
->ioasa
;
7103 struct ata_taskfile
*tf
= &qc
->result_tf
;
7105 tf
->feature
= g
->error
;
7106 tf
->nsect
= g
->nsect
;
7110 tf
->device
= g
->device
;
7111 tf
->command
= g
->status
;
7112 tf
->hob_nsect
= g
->hob_nsect
;
7113 tf
->hob_lbal
= g
->hob_lbal
;
7114 tf
->hob_lbam
= g
->hob_lbam
;
7115 tf
->hob_lbah
= g
->hob_lbah
;
7120 static struct ata_port_operations ipr_sata_ops
= {
7121 .phy_reset
= ipr_ata_phy_reset
,
7122 .hardreset
= ipr_sata_reset
,
7123 .post_internal_cmd
= ipr_ata_post_internal
,
7124 .qc_prep
= ata_noop_qc_prep
,
7125 .qc_defer
= ipr_qc_defer
,
7126 .qc_issue
= ipr_qc_issue
,
7127 .qc_fill_rtf
= ipr_qc_fill_rtf
,
7128 .port_start
= ata_sas_port_start
,
7129 .port_stop
= ata_sas_port_stop
7132 static struct ata_port_info sata_port_info
= {
7133 .flags
= ATA_FLAG_SATA
| ATA_FLAG_PIO_DMA
|
7135 .pio_mask
= ATA_PIO4_ONLY
,
7136 .mwdma_mask
= ATA_MWDMA2
,
7137 .udma_mask
= ATA_UDMA6
,
7138 .port_ops
= &ipr_sata_ops
7141 #ifdef CONFIG_PPC_PSERIES
7142 static const u16 ipr_blocked_processors
[] = {
7154 * ipr_invalid_adapter - Determine if this adapter is supported on this hardware
7155 * @ioa_cfg: ioa cfg struct
7157 * Adapters that use Gemstone revision < 3.1 do not work reliably on
7158 * certain pSeries hardware. This function determines if the given
7159 * adapter is in one of these confgurations or not.
7162 * 1 if adapter is not supported / 0 if adapter is supported
7164 static int ipr_invalid_adapter(struct ipr_ioa_cfg
*ioa_cfg
)
7168 if ((ioa_cfg
->type
== 0x5702) && (ioa_cfg
->pdev
->revision
< 4)) {
7169 for (i
= 0; i
< ARRAY_SIZE(ipr_blocked_processors
); i
++) {
7170 if (pvr_version_is(ipr_blocked_processors
[i
]))
7177 #define ipr_invalid_adapter(ioa_cfg) 0
7181 * ipr_ioa_bringdown_done - IOA bring down completion.
7182 * @ipr_cmd: ipr command struct
7184 * This function processes the completion of an adapter bring down.
7185 * It wakes any reset sleepers.
7190 static int ipr_ioa_bringdown_done(struct ipr_cmnd
*ipr_cmd
)
7192 struct ipr_ioa_cfg
*ioa_cfg
= ipr_cmd
->ioa_cfg
;
7196 if (!ioa_cfg
->hrrq
[IPR_INIT_HRRQ
].removing_ioa
) {
7198 ioa_cfg
->scsi_unblock
= 1;
7199 schedule_work(&ioa_cfg
->work_q
);
7202 ioa_cfg
->in_reset_reload
= 0;
7203 ioa_cfg
->reset_retries
= 0;
7204 for (i
= 0; i
< ioa_cfg
->hrrq_num
; i
++) {
7205 spin_lock(&ioa_cfg
->hrrq
[i
]._lock
);
7206 ioa_cfg
->hrrq
[i
].ioa_is_dead
= 1;
7207 spin_unlock(&ioa_cfg
->hrrq
[i
]._lock
);
7211 list_add_tail(&ipr_cmd
->queue
, &ipr_cmd
->hrrq
->hrrq_free_q
);
7212 wake_up_all(&ioa_cfg
->reset_wait_q
);
7215 return IPR_RC_JOB_RETURN
;
7219 * ipr_ioa_reset_done - IOA reset completion.
7220 * @ipr_cmd: ipr command struct
7222 * This function processes the completion of an adapter reset.
7223 * It schedules any necessary mid-layer add/removes and
7224 * wakes any reset sleepers.
7229 static int ipr_ioa_reset_done(struct ipr_cmnd
*ipr_cmd
)
7231 struct ipr_ioa_cfg
*ioa_cfg
= ipr_cmd
->ioa_cfg
;
7232 struct ipr_resource_entry
*res
;
7236 ioa_cfg
->in_reset_reload
= 0;
7237 for (j
= 0; j
< ioa_cfg
->hrrq_num
; j
++) {
7238 spin_lock(&ioa_cfg
->hrrq
[j
]._lock
);
7239 ioa_cfg
->hrrq
[j
].allow_cmds
= 1;
7240 spin_unlock(&ioa_cfg
->hrrq
[j
]._lock
);
7243 ioa_cfg
->reset_cmd
= NULL
;
7244 ioa_cfg
->doorbell
|= IPR_RUNTIME_RESET
;
7246 list_for_each_entry(res
, &ioa_cfg
->used_res_q
, queue
) {
7247 if (res
->add_to_ml
|| res
->del_from_ml
) {
7252 schedule_work(&ioa_cfg
->work_q
);
7254 for (j
= 0; j
< IPR_NUM_HCAMS
; j
++) {
7255 list_del_init(&ioa_cfg
->hostrcb
[j
]->queue
);
7256 if (j
< IPR_NUM_LOG_HCAMS
)
7257 ipr_send_hcam(ioa_cfg
,
7258 IPR_HCAM_CDB_OP_CODE_LOG_DATA
,
7259 ioa_cfg
->hostrcb
[j
]);
7261 ipr_send_hcam(ioa_cfg
,
7262 IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE
,
7263 ioa_cfg
->hostrcb
[j
]);
7266 scsi_report_bus_reset(ioa_cfg
->host
, IPR_VSET_BUS
);
7267 dev_info(&ioa_cfg
->pdev
->dev
, "IOA initialized.\n");
7269 ioa_cfg
->reset_retries
= 0;
7270 list_add_tail(&ipr_cmd
->queue
, &ipr_cmd
->hrrq
->hrrq_free_q
);
7271 wake_up_all(&ioa_cfg
->reset_wait_q
);
7273 ioa_cfg
->scsi_unblock
= 1;
7274 schedule_work(&ioa_cfg
->work_q
);
7276 return IPR_RC_JOB_RETURN
;
7280 * ipr_set_sup_dev_dflt - Initialize a Set Supported Device buffer
7281 * @supported_dev: supported device struct
7282 * @vpids: vendor product id struct
7287 static void ipr_set_sup_dev_dflt(struct ipr_supported_device
*supported_dev
,
7288 struct ipr_std_inq_vpids
*vpids
)
7290 memset(supported_dev
, 0, sizeof(struct ipr_supported_device
));
7291 memcpy(&supported_dev
->vpids
, vpids
, sizeof(struct ipr_std_inq_vpids
));
7292 supported_dev
->num_records
= 1;
7293 supported_dev
->data_length
=
7294 cpu_to_be16(sizeof(struct ipr_supported_device
));
7295 supported_dev
->reserved
= 0;
7299 * ipr_set_supported_devs - Send Set Supported Devices for a device
7300 * @ipr_cmd: ipr command struct
7302 * This function sends a Set Supported Devices to the adapter
7305 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
7307 static int ipr_set_supported_devs(struct ipr_cmnd
*ipr_cmd
)
7309 struct ipr_ioa_cfg
*ioa_cfg
= ipr_cmd
->ioa_cfg
;
7310 struct ipr_supported_device
*supp_dev
= &ioa_cfg
->vpd_cbs
->supp_dev
;
7311 struct ipr_ioarcb
*ioarcb
= &ipr_cmd
->ioarcb
;
7312 struct ipr_resource_entry
*res
= ipr_cmd
->u
.res
;
7314 ipr_cmd
->job_step
= ipr_ioa_reset_done
;
7316 list_for_each_entry_continue(res
, &ioa_cfg
->used_res_q
, queue
) {
7317 if (!ipr_is_scsi_disk(res
))
7320 ipr_cmd
->u
.res
= res
;
7321 ipr_set_sup_dev_dflt(supp_dev
, &res
->std_inq_data
.vpids
);
7323 ioarcb
->res_handle
= cpu_to_be32(IPR_IOA_RES_HANDLE
);
7324 ioarcb
->cmd_pkt
.flags_hi
|= IPR_FLAGS_HI_WRITE_NOT_READ
;
7325 ioarcb
->cmd_pkt
.request_type
= IPR_RQTYPE_IOACMD
;
7327 ioarcb
->cmd_pkt
.cdb
[0] = IPR_SET_SUPPORTED_DEVICES
;
7328 ioarcb
->cmd_pkt
.cdb
[1] = IPR_SET_ALL_SUPPORTED_DEVICES
;
7329 ioarcb
->cmd_pkt
.cdb
[7] = (sizeof(struct ipr_supported_device
) >> 8) & 0xff;
7330 ioarcb
->cmd_pkt
.cdb
[8] = sizeof(struct ipr_supported_device
) & 0xff;
7332 ipr_init_ioadl(ipr_cmd
,
7333 ioa_cfg
->vpd_cbs_dma
+
7334 offsetof(struct ipr_misc_cbs
, supp_dev
),
7335 sizeof(struct ipr_supported_device
),
7336 IPR_IOADL_FLAGS_WRITE_LAST
);
7338 ipr_do_req(ipr_cmd
, ipr_reset_ioa_job
, ipr_timeout
,
7339 IPR_SET_SUP_DEVICE_TIMEOUT
);
7341 if (!ioa_cfg
->sis64
)
7342 ipr_cmd
->job_step
= ipr_set_supported_devs
;
7344 return IPR_RC_JOB_RETURN
;
7348 return IPR_RC_JOB_CONTINUE
;
7352 * ipr_get_mode_page - Locate specified mode page
7353 * @mode_pages: mode page buffer
7354 * @page_code: page code to find
7355 * @len: minimum required length for mode page
7358 * pointer to mode page / NULL on failure
7360 static void *ipr_get_mode_page(struct ipr_mode_pages
*mode_pages
,
7361 u32 page_code
, u32 len
)
7363 struct ipr_mode_page_hdr
*mode_hdr
;
7367 if (!mode_pages
|| (mode_pages
->hdr
.length
== 0))
7370 length
= (mode_pages
->hdr
.length
+ 1) - 4 - mode_pages
->hdr
.block_desc_len
;
7371 mode_hdr
= (struct ipr_mode_page_hdr
*)
7372 (mode_pages
->data
+ mode_pages
->hdr
.block_desc_len
);
7375 if (IPR_GET_MODE_PAGE_CODE(mode_hdr
) == page_code
) {
7376 if (mode_hdr
->page_length
>= (len
- sizeof(struct ipr_mode_page_hdr
)))
7380 page_length
= (sizeof(struct ipr_mode_page_hdr
) +
7381 mode_hdr
->page_length
);
7382 length
-= page_length
;
7383 mode_hdr
= (struct ipr_mode_page_hdr
*)
7384 ((unsigned long)mode_hdr
+ page_length
);
7391 * ipr_check_term_power - Check for term power errors
7392 * @ioa_cfg: ioa config struct
7393 * @mode_pages: IOAFP mode pages buffer
7395 * Check the IOAFP's mode page 28 for term power errors
7400 static void ipr_check_term_power(struct ipr_ioa_cfg
*ioa_cfg
,
7401 struct ipr_mode_pages
*mode_pages
)
7405 struct ipr_dev_bus_entry
*bus
;
7406 struct ipr_mode_page28
*mode_page
;
7408 mode_page
= ipr_get_mode_page(mode_pages
, 0x28,
7409 sizeof(struct ipr_mode_page28
));
7411 entry_length
= mode_page
->entry_length
;
7413 bus
= mode_page
->bus
;
7415 for (i
= 0; i
< mode_page
->num_entries
; i
++) {
7416 if (bus
->flags
& IPR_SCSI_ATTR_NO_TERM_PWR
) {
7417 dev_err(&ioa_cfg
->pdev
->dev
,
7418 "Term power is absent on scsi bus %d\n",
7422 bus
= (struct ipr_dev_bus_entry
*)((char *)bus
+ entry_length
);
7427 * ipr_scsi_bus_speed_limit - Limit the SCSI speed based on SES table
7428 * @ioa_cfg: ioa config struct
7430 * Looks through the config table checking for SES devices. If
7431 * the SES device is in the SES table indicating a maximum SCSI
7432 * bus speed, the speed is limited for the bus.
7437 static void ipr_scsi_bus_speed_limit(struct ipr_ioa_cfg
*ioa_cfg
)
7442 for (i
= 0; i
< IPR_MAX_NUM_BUSES
; i
++) {
7443 max_xfer_rate
= ipr_get_max_scsi_speed(ioa_cfg
, i
,
7444 ioa_cfg
->bus_attr
[i
].bus_width
);
7446 if (max_xfer_rate
< ioa_cfg
->bus_attr
[i
].max_xfer_rate
)
7447 ioa_cfg
->bus_attr
[i
].max_xfer_rate
= max_xfer_rate
;
7452 * ipr_modify_ioafp_mode_page_28 - Modify IOAFP Mode Page 28
7453 * @ioa_cfg: ioa config struct
7454 * @mode_pages: mode page 28 buffer
7456 * Updates mode page 28 based on driver configuration
7461 static void ipr_modify_ioafp_mode_page_28(struct ipr_ioa_cfg
*ioa_cfg
,
7462 struct ipr_mode_pages
*mode_pages
)
7464 int i
, entry_length
;
7465 struct ipr_dev_bus_entry
*bus
;
7466 struct ipr_bus_attributes
*bus_attr
;
7467 struct ipr_mode_page28
*mode_page
;
7469 mode_page
= ipr_get_mode_page(mode_pages
, 0x28,
7470 sizeof(struct ipr_mode_page28
));
7472 entry_length
= mode_page
->entry_length
;
7474 /* Loop for each device bus entry */
7475 for (i
= 0, bus
= mode_page
->bus
;
7476 i
< mode_page
->num_entries
;
7477 i
++, bus
= (struct ipr_dev_bus_entry
*)((u8
*)bus
+ entry_length
)) {
7478 if (bus
->res_addr
.bus
> IPR_MAX_NUM_BUSES
) {
7479 dev_err(&ioa_cfg
->pdev
->dev
,
7480 "Invalid resource address reported: 0x%08X\n",
7481 IPR_GET_PHYS_LOC(bus
->res_addr
));
7485 bus_attr
= &ioa_cfg
->bus_attr
[i
];
7486 bus
->extended_reset_delay
= IPR_EXTENDED_RESET_DELAY
;
7487 bus
->bus_width
= bus_attr
->bus_width
;
7488 bus
->max_xfer_rate
= cpu_to_be32(bus_attr
->max_xfer_rate
);
7489 bus
->flags
&= ~IPR_SCSI_ATTR_QAS_MASK
;
7490 if (bus_attr
->qas_enabled
)
7491 bus
->flags
|= IPR_SCSI_ATTR_ENABLE_QAS
;
7493 bus
->flags
|= IPR_SCSI_ATTR_DISABLE_QAS
;
7498 * ipr_build_mode_select - Build a mode select command
7499 * @ipr_cmd: ipr command struct
7500 * @res_handle: resource handle to send command to
7501 * @parm: Byte 2 of Mode Sense command
7502 * @dma_addr: DMA buffer address
7503 * @xfer_len: data transfer length
7508 static void ipr_build_mode_select(struct ipr_cmnd
*ipr_cmd
,
7509 __be32 res_handle
, u8 parm
,
7510 dma_addr_t dma_addr
, u8 xfer_len
)
7512 struct ipr_ioarcb
*ioarcb
= &ipr_cmd
->ioarcb
;
7514 ioarcb
->res_handle
= res_handle
;
7515 ioarcb
->cmd_pkt
.request_type
= IPR_RQTYPE_SCSICDB
;
7516 ioarcb
->cmd_pkt
.flags_hi
|= IPR_FLAGS_HI_WRITE_NOT_READ
;
7517 ioarcb
->cmd_pkt
.cdb
[0] = MODE_SELECT
;
7518 ioarcb
->cmd_pkt
.cdb
[1] = parm
;
7519 ioarcb
->cmd_pkt
.cdb
[4] = xfer_len
;
7521 ipr_init_ioadl(ipr_cmd
, dma_addr
, xfer_len
, IPR_IOADL_FLAGS_WRITE_LAST
);
7525 * ipr_ioafp_mode_select_page28 - Issue Mode Select Page 28 to IOA
7526 * @ipr_cmd: ipr command struct
7528 * This function sets up the SCSI bus attributes and sends
7529 * a Mode Select for Page 28 to activate them.
7534 static int ipr_ioafp_mode_select_page28(struct ipr_cmnd
*ipr_cmd
)
7536 struct ipr_ioa_cfg
*ioa_cfg
= ipr_cmd
->ioa_cfg
;
7537 struct ipr_mode_pages
*mode_pages
= &ioa_cfg
->vpd_cbs
->mode_pages
;
7541 ipr_scsi_bus_speed_limit(ioa_cfg
);
7542 ipr_check_term_power(ioa_cfg
, mode_pages
);
7543 ipr_modify_ioafp_mode_page_28(ioa_cfg
, mode_pages
);
7544 length
= mode_pages
->hdr
.length
+ 1;
7545 mode_pages
->hdr
.length
= 0;
7547 ipr_build_mode_select(ipr_cmd
, cpu_to_be32(IPR_IOA_RES_HANDLE
), 0x11,
7548 ioa_cfg
->vpd_cbs_dma
+ offsetof(struct ipr_misc_cbs
, mode_pages
),
7551 ipr_cmd
->job_step
= ipr_set_supported_devs
;
7552 ipr_cmd
->u
.res
= list_entry(ioa_cfg
->used_res_q
.next
,
7553 struct ipr_resource_entry
, queue
);
7554 ipr_do_req(ipr_cmd
, ipr_reset_ioa_job
, ipr_timeout
, IPR_INTERNAL_TIMEOUT
);
7557 return IPR_RC_JOB_RETURN
;
7561 * ipr_build_mode_sense - Builds a mode sense command
7562 * @ipr_cmd: ipr command struct
7563 * @res: resource entry struct
7564 * @parm: Byte 2 of mode sense command
7565 * @dma_addr: DMA address of mode sense buffer
7566 * @xfer_len: Size of DMA buffer
7571 static void ipr_build_mode_sense(struct ipr_cmnd
*ipr_cmd
,
7573 u8 parm
, dma_addr_t dma_addr
, u8 xfer_len
)
7575 struct ipr_ioarcb
*ioarcb
= &ipr_cmd
->ioarcb
;
7577 ioarcb
->res_handle
= res_handle
;
7578 ioarcb
->cmd_pkt
.cdb
[0] = MODE_SENSE
;
7579 ioarcb
->cmd_pkt
.cdb
[2] = parm
;
7580 ioarcb
->cmd_pkt
.cdb
[4] = xfer_len
;
7581 ioarcb
->cmd_pkt
.request_type
= IPR_RQTYPE_SCSICDB
;
7583 ipr_init_ioadl(ipr_cmd
, dma_addr
, xfer_len
, IPR_IOADL_FLAGS_READ_LAST
);
7587 * ipr_reset_cmd_failed - Handle failure of IOA reset command
7588 * @ipr_cmd: ipr command struct
7590 * This function handles the failure of an IOA bringup command.
7595 static int ipr_reset_cmd_failed(struct ipr_cmnd
*ipr_cmd
)
7597 struct ipr_ioa_cfg
*ioa_cfg
= ipr_cmd
->ioa_cfg
;
7598 u32 ioasc
= be32_to_cpu(ipr_cmd
->s
.ioasa
.hdr
.ioasc
);
7600 dev_err(&ioa_cfg
->pdev
->dev
,
7601 "0x%02X failed with IOASC: 0x%08X\n",
7602 ipr_cmd
->ioarcb
.cmd_pkt
.cdb
[0], ioasc
);
7604 ipr_initiate_ioa_reset(ioa_cfg
, IPR_SHUTDOWN_NONE
);
7605 list_add_tail(&ipr_cmd
->queue
, &ipr_cmd
->hrrq
->hrrq_free_q
);
7606 return IPR_RC_JOB_RETURN
;
7610 * ipr_reset_mode_sense_failed - Handle failure of IOAFP mode sense
7611 * @ipr_cmd: ipr command struct
7613 * This function handles the failure of a Mode Sense to the IOAFP.
7614 * Some adapters do not handle all mode pages.
7617 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
7619 static int ipr_reset_mode_sense_failed(struct ipr_cmnd
*ipr_cmd
)
7621 struct ipr_ioa_cfg
*ioa_cfg
= ipr_cmd
->ioa_cfg
;
7622 u32 ioasc
= be32_to_cpu(ipr_cmd
->s
.ioasa
.hdr
.ioasc
);
7624 if (ioasc
== IPR_IOASC_IR_INVALID_REQ_TYPE_OR_PKT
) {
7625 ipr_cmd
->job_step
= ipr_set_supported_devs
;
7626 ipr_cmd
->u
.res
= list_entry(ioa_cfg
->used_res_q
.next
,
7627 struct ipr_resource_entry
, queue
);
7628 return IPR_RC_JOB_CONTINUE
;
7631 return ipr_reset_cmd_failed(ipr_cmd
);
7635 * ipr_ioafp_mode_sense_page28 - Issue Mode Sense Page 28 to IOA
7636 * @ipr_cmd: ipr command struct
7638 * This function send a Page 28 mode sense to the IOA to
7639 * retrieve SCSI bus attributes.
7644 static int ipr_ioafp_mode_sense_page28(struct ipr_cmnd
*ipr_cmd
)
7646 struct ipr_ioa_cfg
*ioa_cfg
= ipr_cmd
->ioa_cfg
;
7649 ipr_build_mode_sense(ipr_cmd
, cpu_to_be32(IPR_IOA_RES_HANDLE
),
7650 0x28, ioa_cfg
->vpd_cbs_dma
+
7651 offsetof(struct ipr_misc_cbs
, mode_pages
),
7652 sizeof(struct ipr_mode_pages
));
7654 ipr_cmd
->job_step
= ipr_ioafp_mode_select_page28
;
7655 ipr_cmd
->job_step_failed
= ipr_reset_mode_sense_failed
;
7657 ipr_do_req(ipr_cmd
, ipr_reset_ioa_job
, ipr_timeout
, IPR_INTERNAL_TIMEOUT
);
7660 return IPR_RC_JOB_RETURN
;
7664 * ipr_ioafp_mode_select_page24 - Issue Mode Select to IOA
7665 * @ipr_cmd: ipr command struct
7667 * This function enables dual IOA RAID support if possible.
7672 static int ipr_ioafp_mode_select_page24(struct ipr_cmnd
*ipr_cmd
)
7674 struct ipr_ioa_cfg
*ioa_cfg
= ipr_cmd
->ioa_cfg
;
7675 struct ipr_mode_pages
*mode_pages
= &ioa_cfg
->vpd_cbs
->mode_pages
;
7676 struct ipr_mode_page24
*mode_page
;
7680 mode_page
= ipr_get_mode_page(mode_pages
, 0x24,
7681 sizeof(struct ipr_mode_page24
));
7684 mode_page
->flags
|= IPR_ENABLE_DUAL_IOA_AF
;
7686 length
= mode_pages
->hdr
.length
+ 1;
7687 mode_pages
->hdr
.length
= 0;
7689 ipr_build_mode_select(ipr_cmd
, cpu_to_be32(IPR_IOA_RES_HANDLE
), 0x11,
7690 ioa_cfg
->vpd_cbs_dma
+ offsetof(struct ipr_misc_cbs
, mode_pages
),
7693 ipr_cmd
->job_step
= ipr_ioafp_mode_sense_page28
;
7694 ipr_do_req(ipr_cmd
, ipr_reset_ioa_job
, ipr_timeout
, IPR_INTERNAL_TIMEOUT
);
7697 return IPR_RC_JOB_RETURN
;
7701 * ipr_reset_mode_sense_page24_failed - Handle failure of IOAFP mode sense
7702 * @ipr_cmd: ipr command struct
7704 * This function handles the failure of a Mode Sense to the IOAFP.
7705 * Some adapters do not handle all mode pages.
7708 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
7710 static int ipr_reset_mode_sense_page24_failed(struct ipr_cmnd
*ipr_cmd
)
7712 u32 ioasc
= be32_to_cpu(ipr_cmd
->s
.ioasa
.hdr
.ioasc
);
7714 if (ioasc
== IPR_IOASC_IR_INVALID_REQ_TYPE_OR_PKT
) {
7715 ipr_cmd
->job_step
= ipr_ioafp_mode_sense_page28
;
7716 return IPR_RC_JOB_CONTINUE
;
7719 return ipr_reset_cmd_failed(ipr_cmd
);
7723 * ipr_ioafp_mode_sense_page24 - Issue Page 24 Mode Sense to IOA
7724 * @ipr_cmd: ipr command struct
7726 * This function send a mode sense to the IOA to retrieve
7727 * the IOA Advanced Function Control mode page.
7732 static int ipr_ioafp_mode_sense_page24(struct ipr_cmnd
*ipr_cmd
)
7734 struct ipr_ioa_cfg
*ioa_cfg
= ipr_cmd
->ioa_cfg
;
7737 ipr_build_mode_sense(ipr_cmd
, cpu_to_be32(IPR_IOA_RES_HANDLE
),
7738 0x24, ioa_cfg
->vpd_cbs_dma
+
7739 offsetof(struct ipr_misc_cbs
, mode_pages
),
7740 sizeof(struct ipr_mode_pages
));
7742 ipr_cmd
->job_step
= ipr_ioafp_mode_select_page24
;
7743 ipr_cmd
->job_step_failed
= ipr_reset_mode_sense_page24_failed
;
7745 ipr_do_req(ipr_cmd
, ipr_reset_ioa_job
, ipr_timeout
, IPR_INTERNAL_TIMEOUT
);
7748 return IPR_RC_JOB_RETURN
;
7752 * ipr_init_res_table - Initialize the resource table
7753 * @ipr_cmd: ipr command struct
7755 * This function looks through the existing resource table, comparing
7756 * it with the config table. This function will take care of old/new
7757 * devices and schedule adding/removing them from the mid-layer
7761 * IPR_RC_JOB_CONTINUE
7763 static int ipr_init_res_table(struct ipr_cmnd
*ipr_cmd
)
7765 struct ipr_ioa_cfg
*ioa_cfg
= ipr_cmd
->ioa_cfg
;
7766 struct ipr_resource_entry
*res
, *temp
;
7767 struct ipr_config_table_entry_wrapper cfgtew
;
7768 int entries
, found
, flag
, i
;
7773 flag
= ioa_cfg
->u
.cfg_table64
->hdr64
.flags
;
7775 flag
= ioa_cfg
->u
.cfg_table
->hdr
.flags
;
7777 if (flag
& IPR_UCODE_DOWNLOAD_REQ
)
7778 dev_err(&ioa_cfg
->pdev
->dev
, "Microcode download required\n");
7780 list_for_each_entry_safe(res
, temp
, &ioa_cfg
->used_res_q
, queue
)
7781 list_move_tail(&res
->queue
, &old_res
);
7784 entries
= be16_to_cpu(ioa_cfg
->u
.cfg_table64
->hdr64
.num_entries
);
7786 entries
= ioa_cfg
->u
.cfg_table
->hdr
.num_entries
;
7788 for (i
= 0; i
< entries
; i
++) {
7790 cfgtew
.u
.cfgte64
= &ioa_cfg
->u
.cfg_table64
->dev
[i
];
7792 cfgtew
.u
.cfgte
= &ioa_cfg
->u
.cfg_table
->dev
[i
];
7795 list_for_each_entry_safe(res
, temp
, &old_res
, queue
) {
7796 if (ipr_is_same_device(res
, &cfgtew
)) {
7797 list_move_tail(&res
->queue
, &ioa_cfg
->used_res_q
);
7804 if (list_empty(&ioa_cfg
->free_res_q
)) {
7805 dev_err(&ioa_cfg
->pdev
->dev
, "Too many devices attached\n");
7810 res
= list_entry(ioa_cfg
->free_res_q
.next
,
7811 struct ipr_resource_entry
, queue
);
7812 list_move_tail(&res
->queue
, &ioa_cfg
->used_res_q
);
7813 ipr_init_res_entry(res
, &cfgtew
);
7815 } else if (res
->sdev
&& (ipr_is_vset_device(res
) || ipr_is_scsi_disk(res
)))
7816 res
->sdev
->allow_restart
= 1;
7819 ipr_update_res_entry(res
, &cfgtew
);
7822 list_for_each_entry_safe(res
, temp
, &old_res
, queue
) {
7824 res
->del_from_ml
= 1;
7825 res
->res_handle
= IPR_INVALID_RES_HANDLE
;
7826 list_move_tail(&res
->queue
, &ioa_cfg
->used_res_q
);
7830 list_for_each_entry_safe(res
, temp
, &old_res
, queue
) {
7831 ipr_clear_res_target(res
);
7832 list_move_tail(&res
->queue
, &ioa_cfg
->free_res_q
);
7835 if (ioa_cfg
->dual_raid
&& ipr_dual_ioa_raid
)
7836 ipr_cmd
->job_step
= ipr_ioafp_mode_sense_page24
;
7838 ipr_cmd
->job_step
= ipr_ioafp_mode_sense_page28
;
7841 return IPR_RC_JOB_CONTINUE
;
7845 * ipr_ioafp_query_ioa_cfg - Send a Query IOA Config to the adapter.
7846 * @ipr_cmd: ipr command struct
7848 * This function sends a Query IOA Configuration command
7849 * to the adapter to retrieve the IOA configuration table.
7854 static int ipr_ioafp_query_ioa_cfg(struct ipr_cmnd
*ipr_cmd
)
7856 struct ipr_ioa_cfg
*ioa_cfg
= ipr_cmd
->ioa_cfg
;
7857 struct ipr_ioarcb
*ioarcb
= &ipr_cmd
->ioarcb
;
7858 struct ipr_inquiry_page3
*ucode_vpd
= &ioa_cfg
->vpd_cbs
->page3_data
;
7859 struct ipr_inquiry_cap
*cap
= &ioa_cfg
->vpd_cbs
->cap
;
7862 if (cap
->cap
& IPR_CAP_DUAL_IOA_RAID
)
7863 ioa_cfg
->dual_raid
= 1;
7864 dev_info(&ioa_cfg
->pdev
->dev
, "Adapter firmware version: %02X%02X%02X%02X\n",
7865 ucode_vpd
->major_release
, ucode_vpd
->card_type
,
7866 ucode_vpd
->minor_release
[0], ucode_vpd
->minor_release
[1]);
7867 ioarcb
->cmd_pkt
.request_type
= IPR_RQTYPE_IOACMD
;
7868 ioarcb
->res_handle
= cpu_to_be32(IPR_IOA_RES_HANDLE
);
7870 ioarcb
->cmd_pkt
.cdb
[0] = IPR_QUERY_IOA_CONFIG
;
7871 ioarcb
->cmd_pkt
.cdb
[6] = (ioa_cfg
->cfg_table_size
>> 16) & 0xff;
7872 ioarcb
->cmd_pkt
.cdb
[7] = (ioa_cfg
->cfg_table_size
>> 8) & 0xff;
7873 ioarcb
->cmd_pkt
.cdb
[8] = ioa_cfg
->cfg_table_size
& 0xff;
7875 ipr_init_ioadl(ipr_cmd
, ioa_cfg
->cfg_table_dma
, ioa_cfg
->cfg_table_size
,
7876 IPR_IOADL_FLAGS_READ_LAST
);
7878 ipr_cmd
->job_step
= ipr_init_res_table
;
7880 ipr_do_req(ipr_cmd
, ipr_reset_ioa_job
, ipr_timeout
, IPR_INTERNAL_TIMEOUT
);
7883 return IPR_RC_JOB_RETURN
;
7886 static int ipr_ioa_service_action_failed(struct ipr_cmnd
*ipr_cmd
)
7888 u32 ioasc
= be32_to_cpu(ipr_cmd
->s
.ioasa
.hdr
.ioasc
);
7890 if (ioasc
== IPR_IOASC_IR_INVALID_REQ_TYPE_OR_PKT
)
7891 return IPR_RC_JOB_CONTINUE
;
7893 return ipr_reset_cmd_failed(ipr_cmd
);
7896 static void ipr_build_ioa_service_action(struct ipr_cmnd
*ipr_cmd
,
7897 __be32 res_handle
, u8 sa_code
)
7899 struct ipr_ioarcb
*ioarcb
= &ipr_cmd
->ioarcb
;
7901 ioarcb
->res_handle
= res_handle
;
7902 ioarcb
->cmd_pkt
.cdb
[0] = IPR_IOA_SERVICE_ACTION
;
7903 ioarcb
->cmd_pkt
.cdb
[1] = sa_code
;
7904 ioarcb
->cmd_pkt
.request_type
= IPR_RQTYPE_IOACMD
;
7908 * ipr_ioafp_set_caching_parameters - Issue Set Cache parameters service
7914 static int ipr_ioafp_set_caching_parameters(struct ipr_cmnd
*ipr_cmd
)
7916 struct ipr_ioarcb
*ioarcb
= &ipr_cmd
->ioarcb
;
7917 struct ipr_ioa_cfg
*ioa_cfg
= ipr_cmd
->ioa_cfg
;
7918 struct ipr_inquiry_pageC4
*pageC4
= &ioa_cfg
->vpd_cbs
->pageC4_data
;
7922 ipr_cmd
->job_step
= ipr_ioafp_query_ioa_cfg
;
7924 if (pageC4
->cache_cap
[0] & IPR_CAP_SYNC_CACHE
) {
7925 ipr_build_ioa_service_action(ipr_cmd
,
7926 cpu_to_be32(IPR_IOA_RES_HANDLE
),
7927 IPR_IOA_SA_CHANGE_CACHE_PARAMS
);
7929 ioarcb
->cmd_pkt
.cdb
[2] = 0x40;
7931 ipr_cmd
->job_step_failed
= ipr_ioa_service_action_failed
;
7932 ipr_do_req(ipr_cmd
, ipr_reset_ioa_job
, ipr_timeout
,
7933 IPR_SET_SUP_DEVICE_TIMEOUT
);
7936 return IPR_RC_JOB_RETURN
;
7940 return IPR_RC_JOB_CONTINUE
;
7944 * ipr_ioafp_inquiry - Send an Inquiry to the adapter.
7945 * @ipr_cmd: ipr command struct
7947 * This utility function sends an inquiry to the adapter.
7952 static void ipr_ioafp_inquiry(struct ipr_cmnd
*ipr_cmd
, u8 flags
, u8 page
,
7953 dma_addr_t dma_addr
, u8 xfer_len
)
7955 struct ipr_ioarcb
*ioarcb
= &ipr_cmd
->ioarcb
;
7958 ioarcb
->cmd_pkt
.request_type
= IPR_RQTYPE_SCSICDB
;
7959 ioarcb
->res_handle
= cpu_to_be32(IPR_IOA_RES_HANDLE
);
7961 ioarcb
->cmd_pkt
.cdb
[0] = INQUIRY
;
7962 ioarcb
->cmd_pkt
.cdb
[1] = flags
;
7963 ioarcb
->cmd_pkt
.cdb
[2] = page
;
7964 ioarcb
->cmd_pkt
.cdb
[4] = xfer_len
;
7966 ipr_init_ioadl(ipr_cmd
, dma_addr
, xfer_len
, IPR_IOADL_FLAGS_READ_LAST
);
7968 ipr_do_req(ipr_cmd
, ipr_reset_ioa_job
, ipr_timeout
, IPR_INTERNAL_TIMEOUT
);
7973 * ipr_inquiry_page_supported - Is the given inquiry page supported
7974 * @page0: inquiry page 0 buffer
7977 * This function determines if the specified inquiry page is supported.
7980 * 1 if page is supported / 0 if not
7982 static int ipr_inquiry_page_supported(struct ipr_inquiry_page0
*page0
, u8 page
)
7986 for (i
= 0; i
< min_t(u8
, page0
->len
, IPR_INQUIRY_PAGE0_ENTRIES
); i
++)
7987 if (page0
->page
[i
] == page
)
/**
 * ipr_ioafp_pageC4_inquiry - Send a Page 0xC4 Inquiry to the adapter.
 * @ipr_cmd:	ipr command struct
 *
 * This function sends a Page 0xC4 inquiry to the adapter
 * to retrieve software VPD information, but only if page 0
 * reported that page 0xC4 is supported.
 *
 * Return value:
 *	IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
 **/
static int ipr_ioafp_pageC4_inquiry(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
	struct ipr_inquiry_page0 *page0 = &ioa_cfg->vpd_cbs->page0_data;
	struct ipr_inquiry_pageC4 *pageC4 = &ioa_cfg->vpd_cbs->pageC4_data;

	ENTER;
	/* Next step in the init chain regardless of page support */
	ipr_cmd->job_step = ipr_ioafp_set_caching_parameters;
	memset(pageC4, 0, sizeof(*pageC4));

	if (ipr_inquiry_page_supported(page0, 0xC4)) {
		ipr_ioafp_inquiry(ipr_cmd, 1, 0xC4,
				  (ioa_cfg->vpd_cbs_dma
				   + offsetof(struct ipr_misc_cbs,
					      pageC4_data)),
				  sizeof(struct ipr_inquiry_pageC4));
		return IPR_RC_JOB_RETURN;
	}

	LEAVE;
	return IPR_RC_JOB_CONTINUE;
}
/**
 * ipr_ioafp_cap_inquiry - Send a Page 0xD0 Inquiry to the adapter.
 * @ipr_cmd:	ipr command struct
 *
 * This function sends a Page 0xD0 inquiry to the adapter
 * to retrieve adapter capabilities, but only if page 0
 * reported that page 0xD0 is supported.
 *
 * Return value:
 *	IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
 **/
static int ipr_ioafp_cap_inquiry(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
	struct ipr_inquiry_page0 *page0 = &ioa_cfg->vpd_cbs->page0_data;
	struct ipr_inquiry_cap *cap = &ioa_cfg->vpd_cbs->cap;

	ENTER;
	/* Next step in the init chain regardless of page support */
	ipr_cmd->job_step = ipr_ioafp_pageC4_inquiry;
	memset(cap, 0, sizeof(*cap));

	if (ipr_inquiry_page_supported(page0, 0xD0)) {
		ipr_ioafp_inquiry(ipr_cmd, 1, 0xD0,
				  ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, cap),
				  sizeof(struct ipr_inquiry_cap));
		return IPR_RC_JOB_RETURN;
	}

	LEAVE;
	return IPR_RC_JOB_CONTINUE;
}
/**
 * ipr_ioafp_page3_inquiry - Send a Page 3 Inquiry to the adapter.
 * @ipr_cmd:	ipr command struct
 *
 * This function sends a Page 3 inquiry to the adapter
 * to retrieve software VPD information.
 *
 * Return value:
 *	IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
 **/
static int ipr_ioafp_page3_inquiry(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;

	ENTER;

	ipr_cmd->job_step = ipr_ioafp_cap_inquiry;

	/* Page 3 is unconditionally requested; response goes into vpd_cbs */
	ipr_ioafp_inquiry(ipr_cmd, 1, 3,
			  ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, page3_data),
			  sizeof(struct ipr_inquiry_page3));

	LEAVE;
	return IPR_RC_JOB_RETURN;
}
/**
 * ipr_ioafp_page0_inquiry - Send a Page 0 Inquiry to the adapter.
 * @ipr_cmd:	ipr command struct
 *
 * This function sends a Page 0 inquiry to the adapter
 * to retrieve supported inquiry pages.  It also validates the
 * adapter type from the standard inquiry data fetched in the
 * previous step and aborts the bring-up if the adapter is not
 * supported in this configuration.
 *
 * Return value:
 *	IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
 **/
static int ipr_ioafp_page0_inquiry(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
	char type[5] = "";

	ENTER;

	/* Grab the type out of the VPD and store it away */
	memcpy(type, ioa_cfg->vpd_cbs->ioa_vpd.std_inq_data.vpids.product_id, 4);
	type[4] = '\0';
	/* product_id encodes the adapter type as hex ASCII */
	ioa_cfg->type = simple_strtoul((char *)type, NULL, 16);

	if (ipr_invalid_adapter(ioa_cfg)) {
		dev_err(&ioa_cfg->pdev->dev,
			"Adapter not supported in this hardware configuration.\n");

		if (!ipr_testmode) {
			/* Keep the adapter held in reset rather than using it */
			ioa_cfg->reset_retries += IPR_NUM_RESET_RELOAD_RETRIES;
			ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
			list_add_tail(&ipr_cmd->queue,
					&ioa_cfg->hrrq->hrrq_free_q);
			return IPR_RC_JOB_RETURN;
		}
	}

	ipr_cmd->job_step = ipr_ioafp_page3_inquiry;

	ipr_ioafp_inquiry(ipr_cmd, 1, 0,
			  ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, page0_data),
			  sizeof(struct ipr_inquiry_page0));

	LEAVE;
	return IPR_RC_JOB_RETURN;
}
/**
 * ipr_ioafp_std_inquiry - Send a Standard Inquiry to the adapter.
 * @ipr_cmd:	ipr command struct
 *
 * This function sends a standard inquiry to the adapter to fetch
 * its VPD (used by the next step to validate the adapter type).
 *
 * Return value:
 *	IPR_RC_JOB_RETURN
 **/
static int ipr_ioafp_std_inquiry(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;

	ENTER;
	ipr_cmd->job_step = ipr_ioafp_page0_inquiry;

	/* flags == 0, page == 0: standard (non-EVPD) inquiry */
	ipr_ioafp_inquiry(ipr_cmd, 0, 0,
			  ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, ioa_vpd),
			  sizeof(struct ipr_ioa_vpd));

	LEAVE;
	return IPR_RC_JOB_RETURN;
}
/**
 * ipr_ioafp_identify_hrrq - Send Identify Host RRQ.
 * @ipr_cmd:	ipr command struct
 *
 * This function send an Identify Host Request Response Queue
 * command to establish the HRRQ with the adapter.  One command is
 * sent per queue; the job step re-enters this function until every
 * queue in ioa_cfg->hrrq[] has been identified.
 *
 * Return value:
 *	IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
 **/
static int ipr_ioafp_identify_hrrq(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
	struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
	struct ipr_hrr_queue *hrrq;

	ENTER;
	/* Default next step; overridden below while queues remain */
	ipr_cmd->job_step = ipr_ioafp_std_inquiry;
	if (ioa_cfg->identify_hrrq_index == 0)
		dev_info(&ioa_cfg->pdev->dev, "Starting IOA initialization sequence.\n");

	if (ioa_cfg->identify_hrrq_index < ioa_cfg->hrrq_num) {
		hrrq = &ioa_cfg->hrrq[ioa_cfg->identify_hrrq_index];

		ioarcb->cmd_pkt.cdb[0] = IPR_ID_HOST_RR_Q;
		ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);

		ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
		if (ioa_cfg->sis64)
			ioarcb->cmd_pkt.cdb[1] = 0x1;

		/* Multi-queue selection only makes sense with >1 vector */
		if (ioa_cfg->nvectors == 1)
			ioarcb->cmd_pkt.cdb[1] &= ~IPR_ID_HRRQ_SELE_ENABLE;
		else
			ioarcb->cmd_pkt.cdb[1] |= IPR_ID_HRRQ_SELE_ENABLE;

		/* Low 32 bits of the queue DMA address, MSB first */
		ioarcb->cmd_pkt.cdb[2] =
			((u64) hrrq->host_rrq_dma >> 24) & 0xff;
		ioarcb->cmd_pkt.cdb[3] =
			((u64) hrrq->host_rrq_dma >> 16) & 0xff;
		ioarcb->cmd_pkt.cdb[4] =
			((u64) hrrq->host_rrq_dma >> 8) & 0xff;
		ioarcb->cmd_pkt.cdb[5] =
			((u64) hrrq->host_rrq_dma) & 0xff;
		/* Queue length in bytes (entries are u32) */
		ioarcb->cmd_pkt.cdb[7] =
			((sizeof(u32) * hrrq->size) >> 8) & 0xff;
		ioarcb->cmd_pkt.cdb[8] =
			(sizeof(u32) * hrrq->size) & 0xff;

		if (ioarcb->cmd_pkt.cdb[1] & IPR_ID_HRRQ_SELE_ENABLE)
			ioarcb->cmd_pkt.cdb[9] =
				ioa_cfg->identify_hrrq_index;

		if (ioa_cfg->sis64) {
			/* Upper 32 bits of the 64-bit queue DMA address */
			ioarcb->cmd_pkt.cdb[10] =
				((u64) hrrq->host_rrq_dma >> 56) & 0xff;
			ioarcb->cmd_pkt.cdb[11] =
				((u64) hrrq->host_rrq_dma >> 48) & 0xff;
			ioarcb->cmd_pkt.cdb[12] =
				((u64) hrrq->host_rrq_dma >> 40) & 0xff;
			ioarcb->cmd_pkt.cdb[13] =
				((u64) hrrq->host_rrq_dma >> 32) & 0xff;
		}

		if (ioarcb->cmd_pkt.cdb[1] & IPR_ID_HRRQ_SELE_ENABLE)
			ioarcb->cmd_pkt.cdb[14] =
				ioa_cfg->identify_hrrq_index;

		ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout,
			   IPR_INTERNAL_TIMEOUT);

		/* More queues to identify?  Loop back into this step. */
		if (++ioa_cfg->identify_hrrq_index < ioa_cfg->hrrq_num)
			ipr_cmd->job_step = ipr_ioafp_identify_hrrq;

		LEAVE;
		return IPR_RC_JOB_RETURN;
	}

	LEAVE;
	return IPR_RC_JOB_CONTINUE;
}
/**
 * ipr_reset_timer_done - Adapter reset timer function
 * @t:	timer_list embedded in the ipr command struct
 *
 * Description: This function is used in adapter reset processing
 * for timing events. If the reset_cmd pointer in the IOA
 * config struct is not this adapter's we are doing nested
 * resets and fail_all_ops will take care of freeing the
 * command block.
 *
 * Return value:
 * 	none
 **/
static void ipr_reset_timer_done(struct timer_list *t)
{
	struct ipr_cmnd *ipr_cmd = from_timer(ipr_cmd, t, timer);
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
	unsigned long lock_flags = 0;

	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);

	/* Only advance the job if we are still the active reset command */
	if (ioa_cfg->reset_cmd == ipr_cmd) {
		list_del(&ipr_cmd->queue);
		ipr_cmd->done(ipr_cmd);
	}

	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
}
/**
 * ipr_reset_start_timer - Start a timer for adapter reset job
 * @ipr_cmd:	ipr command struct
 * @timeout:	timeout value (in jiffies)
 *
 * Description: This function is used in adapter reset processing
 * for timing events. If the reset_cmd pointer in the IOA
 * config struct is not this adapter's we are doing nested
 * resets and fail_all_ops will take care of freeing the
 * command block.  When the timer fires, ipr_reset_timer_done
 * re-enters the reset job via ipr_cmd->done.
 *
 * Return value:
 * 	none
 **/
static void ipr_reset_start_timer(struct ipr_cmnd *ipr_cmd,
				  unsigned long timeout)
{

	ENTER;
	/* Park the command on the pending queue while the timer runs */
	list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_pending_q);
	ipr_cmd->done = ipr_reset_ioa_job;

	ipr_cmd->timer.expires = jiffies + timeout;
	ipr_cmd->timer.function = ipr_reset_timer_done;
	add_timer(&ipr_cmd->timer);
}
/**
 * ipr_init_ioa_mem - Initialize ioa_cfg control block
 * @ioa_cfg:	ioa cfg struct
 *
 * Resets every host request/response queue to its empty initial
 * state and zeroes the config table, ready for the adapter to be
 * (re-)enabled.
 *
 * Return value:
 * 	none
 **/
static void ipr_init_ioa_mem(struct ipr_ioa_cfg *ioa_cfg)
{
	struct ipr_hrr_queue *hrrq;

	for_each_hrrq(hrrq, ioa_cfg) {
		spin_lock(&hrrq->_lock);
		memset(hrrq->host_rrq, 0, sizeof(u32) * hrrq->size);

		/* Initialize Host RRQ pointers */
		hrrq->hrrq_start = hrrq->host_rrq;
		hrrq->hrrq_end = &hrrq->host_rrq[hrrq->size - 1];
		hrrq->hrrq_curr = hrrq->hrrq_start;
		/* toggle_bit distinguishes new entries from stale ones */
		hrrq->toggle_bit = 1;
		spin_unlock(&hrrq->_lock);
	}
	wmb();

	ioa_cfg->identify_hrrq_index = 0;
	if (ioa_cfg->hrrq_num == 1)
		atomic_set(&ioa_cfg->hrrq_index, 0);
	else
		/* Queue 0 is reserved for internal use; start dispatch at 1 */
		atomic_set(&ioa_cfg->hrrq_index, 1);

	/* Zero out config table */
	memset(ioa_cfg->u.cfg_table, 0, ioa_cfg->cfg_table_size);
}
/**
 * ipr_reset_next_stage - Process IPL stage change based on feedback register.
 * @ipr_cmd:	ipr command struct
 *
 * Reads the init feedback register to find the adapter's current IPL
 * stage and how long to allow for it, then either proceeds directly
 * to HRRQ identification or arms a stage-change timer/interrupt.
 *
 * Return value:
 * 	IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
 **/
static int ipr_reset_next_stage(struct ipr_cmnd *ipr_cmd)
{
	unsigned long stage, stage_time;
	u32 feedback;
	volatile u32 int_reg;
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
	u64 maskval = 0;

	feedback = readl(ioa_cfg->regs.init_feedback_reg);
	stage = feedback & IPR_IPL_INIT_STAGE_MASK;
	stage_time = feedback & IPR_IPL_INIT_STAGE_TIME_MASK;

	ipr_dbg("IPL stage = 0x%lx, IPL stage time = %ld\n", stage, stage_time);

	/* sanity check the stage_time value */
	if (stage_time == 0)
		stage_time = IPR_IPL_INIT_DEFAULT_STAGE_TIME;
	else if (stage_time < IPR_IPL_INIT_MIN_STAGE_TIME)
		stage_time = IPR_IPL_INIT_MIN_STAGE_TIME;
	else if (stage_time > IPR_LONG_OPERATIONAL_TIMEOUT)
		stage_time = IPR_LONG_OPERATIONAL_TIMEOUT;

	if (stage == IPR_IPL_INIT_STAGE_UNKNOWN) {
		writel(IPR_PCII_IPL_STAGE_CHANGE, ioa_cfg->regs.set_interrupt_mask_reg);
		/* readback flushes the posted write before proceeding */
		int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
		stage_time = ioa_cfg->transop_timeout;
		ipr_cmd->job_step = ipr_ioafp_identify_hrrq;
	} else if (stage == IPR_IPL_INIT_STAGE_TRANSOP) {
		int_reg = readl(ioa_cfg->regs.sense_interrupt_reg32);
		if (int_reg & IPR_PCII_IOA_TRANS_TO_OPER) {
			/* Already operational: mask stage-change interrupts
			 * and continue the job synchronously. */
			ipr_cmd->job_step = ipr_ioafp_identify_hrrq;
			maskval = IPR_PCII_IPL_STAGE_CHANGE;
			maskval = (maskval << 32) | IPR_PCII_IOA_TRANS_TO_OPER;
			writeq(maskval, ioa_cfg->regs.set_interrupt_mask_reg);
			int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
			return IPR_RC_JOB_CONTINUE;
		}
	}

	/* Wait for either the stage-change interrupt or the timeout */
	ipr_cmd->timer.expires = jiffies + stage_time * HZ;
	ipr_cmd->timer.function = ipr_oper_timeout;
	ipr_cmd->done = ipr_reset_ioa_job;
	add_timer(&ipr_cmd->timer);

	list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_pending_q);

	return IPR_RC_JOB_RETURN;
}
/**
 * ipr_reset_enable_ioa - Enable the IOA following a reset.
 * @ipr_cmd:	ipr command struct
 *
 * This function reinitializes some control blocks and
 * enables destructive diagnostics on the adapter, then either
 * continues the job (adapter already operational / sis64 staged
 * init) or arms a timer to wait for the transition to operational.
 *
 * Return value:
 * 	IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
 **/
static int ipr_reset_enable_ioa(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
	volatile u32 int_reg;
	volatile u64 maskval;
	int i;

	ENTER;
	ipr_cmd->job_step = ipr_ioafp_identify_hrrq;
	ipr_init_ioa_mem(ioa_cfg);

	for (i = 0; i < ioa_cfg->hrrq_num; i++) {
		spin_lock(&ioa_cfg->hrrq[i]._lock);
		ioa_cfg->hrrq[i].allow_interrupts = 1;
		spin_unlock(&ioa_cfg->hrrq[i]._lock);
	}

	if (ioa_cfg->sis64) {
		/* Set the adapter to the correct endian mode. */
		writel(IPR_ENDIAN_SWAP_KEY, ioa_cfg->regs.endian_swap_reg);
		/* readback flushes the posted write */
		int_reg = readl(ioa_cfg->regs.endian_swap_reg);
	}

	int_reg = readl(ioa_cfg->regs.sense_interrupt_reg32);

	if (int_reg & IPR_PCII_IOA_TRANS_TO_OPER) {
		/* Adapter is already operational; just unmask and continue */
		writel((IPR_PCII_ERROR_INTERRUPTS | IPR_PCII_HRRQ_UPDATED),
		       ioa_cfg->regs.clr_interrupt_mask_reg32);
		int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
		return IPR_RC_JOB_CONTINUE;
	}

	/* Enable destructive diagnostics on IOA */
	writel(ioa_cfg->doorbell, ioa_cfg->regs.set_uproc_interrupt_reg32);

	if (ioa_cfg->sis64) {
		maskval = IPR_PCII_IPL_STAGE_CHANGE;
		maskval = (maskval << 32) | IPR_PCII_OPER_INTERRUPTS;
		writeq(maskval, ioa_cfg->regs.clr_interrupt_mask_reg);
	} else
		writel(IPR_PCII_OPER_INTERRUPTS, ioa_cfg->regs.clr_interrupt_mask_reg32);

	int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);

	dev_info(&ioa_cfg->pdev->dev, "Initializing IOA.\n");

	if (ioa_cfg->sis64) {
		/* sis64 adapters report IPL progress through stages */
		ipr_cmd->job_step = ipr_reset_next_stage;
		return IPR_RC_JOB_CONTINUE;
	}

	/* Otherwise wait for operational (or the oper timeout) */
	ipr_cmd->timer.expires = jiffies + (ioa_cfg->transop_timeout * HZ);
	ipr_cmd->timer.function = ipr_oper_timeout;
	ipr_cmd->done = ipr_reset_ioa_job;
	add_timer(&ipr_cmd->timer);
	list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_pending_q);

	LEAVE;
	return IPR_RC_JOB_RETURN;
}
/**
 * ipr_reset_wait_for_dump - Wait for a dump to timeout.
 * @ipr_cmd:	ipr command struct
 *
 * This function is invoked when an adapter dump has run out
 * of processing time.  It demotes the dump state so the dump
 * is deferred or aborted, then proceeds with the reset alert.
 *
 * Return value:
 * 	IPR_RC_JOB_CONTINUE
 **/
static int ipr_reset_wait_for_dump(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;

	if (ioa_cfg->sdt_state == GET_DUMP)
		ioa_cfg->sdt_state = WAIT_FOR_DUMP;
	else if (ioa_cfg->sdt_state == READ_DUMP)
		ioa_cfg->sdt_state = ABORT_DUMP;

	ioa_cfg->dump_timeout = 1;
	ipr_cmd->job_step = ipr_reset_alert;

	return IPR_RC_JOB_CONTINUE;
}
8477 * ipr_unit_check_no_data - Log a unit check/no data error log
8478 * @ioa_cfg: ioa config struct
8480 * Logs an error indicating the adapter unit checked, but for some
8481 * reason, we were unable to fetch the unit check buffer.
8486 static void ipr_unit_check_no_data(struct ipr_ioa_cfg
*ioa_cfg
)
8488 ioa_cfg
->errors_logged
++;
8489 dev_err(&ioa_cfg
->pdev
->dev
, "IOA unit check with no data\n");
/**
 * ipr_get_unit_check_buffer - Get the unit check buffer from the IOA
 * @ioa_cfg:	ioa config struct
 *
 * Fetches the unit check buffer from the adapter by clocking the data
 * through the mailbox register: the mailbox points at an SDT whose
 * first entry locates the unit check data, which is copied into a
 * host rcb and run through the normal error-log handler.
 *
 * Return value:
 * 	none
 **/
static void ipr_get_unit_check_buffer(struct ipr_ioa_cfg *ioa_cfg)
{
	unsigned long mailbox;
	struct ipr_hostrcb *hostrcb;
	struct ipr_uc_sdt sdt;
	int rc, length;
	u32 ioasc;

	mailbox = readl(ioa_cfg->ioa_mailbox);

	/* 32-bit adapters must present a format-2 SDT address */
	if (!ioa_cfg->sis64 && !ipr_sdt_is_fmt2(mailbox)) {
		ipr_unit_check_no_data(ioa_cfg);
		return;
	}

	memset(&sdt, 0, sizeof(struct ipr_uc_sdt));
	rc = ipr_get_ldump_data_section(ioa_cfg, mailbox, (__be32 *) &sdt,
					(sizeof(struct ipr_uc_sdt)) / sizeof(__be32));

	/* Validate the SDT header/first entry before trusting it */
	if (rc || !(sdt.entry[0].flags & IPR_SDT_VALID_ENTRY) ||
	    ((be32_to_cpu(sdt.hdr.state) != IPR_FMT3_SDT_READY_TO_USE) &&
	    (be32_to_cpu(sdt.hdr.state) != IPR_FMT2_SDT_READY_TO_USE))) {
		ipr_unit_check_no_data(ioa_cfg);
		return;
	}

	/* Find length of the first sdt entry (UC buffer) */
	if (be32_to_cpu(sdt.hdr.state) == IPR_FMT3_SDT_READY_TO_USE)
		length = be32_to_cpu(sdt.entry[0].end_token);
	else
		length = (be32_to_cpu(sdt.entry[0].end_token) -
			  be32_to_cpu(sdt.entry[0].start_token)) &
			  IPR_FMT2_MBX_ADDR_MASK;

	hostrcb = list_entry(ioa_cfg->hostrcb_free_q.next,
			     struct ipr_hostrcb, queue);
	list_del_init(&hostrcb->queue);
	memset(&hostrcb->hcam, 0, sizeof(hostrcb->hcam));

	rc = ipr_get_ldump_data_section(ioa_cfg,
					be32_to_cpu(sdt.entry[0].start_token),
					(__be32 *)&hostrcb->hcam,
					min(length, (int)sizeof(hostrcb->hcam)) / sizeof(__be32));

	if (!rc) {
		ipr_handle_log_data(ioa_cfg, hostrcb);
		ioasc = be32_to_cpu(hostrcb->hcam.u.error.fd_ioasc);
		/* A reset-required error while gathering a dump means the
		 * dump must wait for the reset to complete first. */
		if (ioasc == IPR_IOASC_NR_IOA_RESET_REQUIRED &&
		    ioa_cfg->sdt_state == GET_DUMP)
			ioa_cfg->sdt_state = WAIT_FOR_DUMP;
	} else
		ipr_unit_check_no_data(ioa_cfg);

	list_add_tail(&hostrcb->queue, &ioa_cfg->hostrcb_free_q);
}
/**
 * ipr_reset_get_unit_check_job - Call to get the unit check buffer.
 * @ipr_cmd:	ipr command struct
 *
 * Description: This function will call to get the unit check buffer,
 * then schedule the reset alert as the next job step.
 *
 * Return value:
 *	IPR_RC_JOB_RETURN
 **/
static int ipr_reset_get_unit_check_job(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;

	ENTER;
	ioa_cfg->ioa_unit_checked = 0;
	ipr_get_unit_check_buffer(ioa_cfg);
	ipr_cmd->job_step = ipr_reset_alert;
	/* Zero timeout: re-enter the job from timer context immediately */
	ipr_reset_start_timer(ipr_cmd, 0);

	LEAVE;
	return IPR_RC_JOB_RETURN;
}
/**
 * ipr_dump_mailbox_wait - Wait for the mailbox register to stabilize.
 * @ipr_cmd:	ipr command struct
 *
 * On sis64, polls until the mailbox register is stable (or the wait
 * budget in ipr_cmd->u.time_left is exhausted) before kicking off the
 * dump worker; 32-bit adapters proceed immediately.
 *
 * Return value:
 *	IPR_RC_JOB_RETURN
 **/
static int ipr_dump_mailbox_wait(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;

	ENTER;

	/* Dump may have been cancelled while we were waiting */
	if (ioa_cfg->sdt_state != GET_DUMP)
		return IPR_RC_JOB_RETURN;

	if (!ioa_cfg->sis64 || !ipr_cmd->u.time_left ||
	    (readl(ioa_cfg->regs.sense_interrupt_reg) &
	     IPR_PCII_MAILBOX_STABLE)) {

		if (!ipr_cmd->u.time_left)
			dev_err(&ioa_cfg->pdev->dev,
				"Timed out waiting for Mailbox register.\n");

		ioa_cfg->sdt_state = READ_DUMP;
		ioa_cfg->dump_timeout = 0;
		if (ioa_cfg->sis64)
			ipr_reset_start_timer(ipr_cmd, IPR_SIS64_DUMP_TIMEOUT);
		else
			ipr_reset_start_timer(ipr_cmd, IPR_SIS32_DUMP_TIMEOUT);
		ipr_cmd->job_step = ipr_reset_wait_for_dump;
		/* The actual dump is taken by the workqueue handler */
		schedule_work(&ioa_cfg->work_q);

	} else {
		/* Not stable yet: burn one poll interval and re-arm */
		ipr_cmd->u.time_left -= IPR_CHECK_FOR_RESET_TIMEOUT;
		ipr_reset_start_timer(ipr_cmd,
				      IPR_CHECK_FOR_RESET_TIMEOUT);
	}

	LEAVE;
	return IPR_RC_JOB_RETURN;
}
/**
 * ipr_reset_restore_cfg_space - Restore PCI config space.
 * @ipr_cmd:	ipr command struct
 *
 * Description: This function restores the saved PCI config space of
 * the adapter, fails all outstanding ops back to the callers, and
 * fetches the dump/unit check if applicable to this reset.
 *
 * Return value:
 * 	IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
 **/
static int ipr_reset_restore_cfg_space(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
	u32 int_reg;

	ENTER;
	ioa_cfg->pdev->state_saved = true;
	pci_restore_state(ioa_cfg->pdev);

	if (ipr_set_pcix_cmd_reg(ioa_cfg)) {
		ipr_cmd->s.ioasa.hdr.ioasc = cpu_to_be32(IPR_IOASC_PCI_ACCESS_ERROR);
		return IPR_RC_JOB_CONTINUE;
	}

	/* Complete all commands that were outstanding across the reset */
	ipr_fail_all_ops(ioa_cfg);

	if (ioa_cfg->sis64) {
		/* Set the adapter to the correct endian mode. */
		writel(IPR_ENDIAN_SWAP_KEY, ioa_cfg->regs.endian_swap_reg);
		/* readback flushes the posted write */
		int_reg = readl(ioa_cfg->regs.endian_swap_reg);
	}

	if (ioa_cfg->ioa_unit_checked) {
		if (ioa_cfg->sis64) {
			/* sis64: give the adapter time before fetching UC data */
			ipr_cmd->job_step = ipr_reset_get_unit_check_job;
			ipr_reset_start_timer(ipr_cmd, IPR_DUMP_DELAY_TIMEOUT);
			return IPR_RC_JOB_RETURN;
		} else {
			ioa_cfg->ioa_unit_checked = 0;
			ipr_get_unit_check_buffer(ioa_cfg);
			ipr_cmd->job_step = ipr_reset_alert;
			ipr_reset_start_timer(ipr_cmd, 0);
			return IPR_RC_JOB_RETURN;
		}
	}

	/* Choose the next step based on why we are resetting */
	if (ioa_cfg->in_ioa_bringdown) {
		ipr_cmd->job_step = ipr_ioa_bringdown_done;
	} else if (ioa_cfg->sdt_state == GET_DUMP) {
		ipr_cmd->job_step = ipr_dump_mailbox_wait;
		ipr_cmd->u.time_left = IPR_WAIT_FOR_MAILBOX;
	} else {
		ipr_cmd->job_step = ipr_reset_enable_ioa;
	}

	LEAVE;
	return IPR_RC_JOB_CONTINUE;
}
/**
 * ipr_reset_bist_done - BIST has completed on the adapter.
 * @ipr_cmd:	ipr command struct
 *
 * Description: Unblock config space and resume the reset process.
 *
 * Return value:
 * 	IPR_RC_JOB_CONTINUE
 **/
static int ipr_reset_bist_done(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;

	ENTER;
	if (ioa_cfg->cfg_locked)
		pci_cfg_access_unlock(ioa_cfg->pdev);
	ioa_cfg->cfg_locked = 0;
	ipr_cmd->job_step = ipr_reset_restore_cfg_space;
	LEAVE;
	return IPR_RC_JOB_CONTINUE;
}
/**
 * ipr_reset_start_bist - Run BIST on the adapter.
 * @ipr_cmd:	ipr command struct
 *
 * Description: This function runs BIST on the adapter, then delays 2 seconds.
 * BIST is started either via an MMIO doorbell (sis64-style chips) or by
 * writing PCI_BIST in config space; on failure the PCI access error is
 * recorded and config access is unlocked.
 *
 * Return value:
 * 	IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
 **/
static int ipr_reset_start_bist(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
	int rc = PCIBIOS_SUCCESSFUL;

	ENTER;
	if (ioa_cfg->ipr_chip->bist_method == IPR_MMIO)
		writel(IPR_UPROCI_SIS64_START_BIST,
		       ioa_cfg->regs.set_uproc_interrupt_reg32);
	else
		rc = pci_write_config_byte(ioa_cfg->pdev, PCI_BIST, PCI_BIST_START);

	if (rc == PCIBIOS_SUCCESSFUL) {
		ipr_cmd->job_step = ipr_reset_bist_done;
		ipr_reset_start_timer(ipr_cmd, IPR_WAIT_FOR_BIST_TIMEOUT);
		rc = IPR_RC_JOB_RETURN;
	} else {
		/* Config write failed: release config access and fail the step */
		if (ioa_cfg->cfg_locked)
			pci_cfg_access_unlock(ipr_cmd->ioa_cfg->pdev);
		ioa_cfg->cfg_locked = 0;
		ipr_cmd->s.ioasa.hdr.ioasc = cpu_to_be32(IPR_IOASC_PCI_ACCESS_ERROR);
		rc = IPR_RC_JOB_CONTINUE;
	}

	LEAVE;
	return rc;
}
/**
 * ipr_reset_slot_reset_done - Clear PCI reset to the adapter
 * @ipr_cmd:	ipr command struct
 *
 * Description: This clears PCI reset to the adapter and delays two seconds
 * (the BIST wait) before continuing with config-space restore.
 *
 * Return value:
 * 	IPR_RC_JOB_RETURN
 **/
static int ipr_reset_slot_reset_done(struct ipr_cmnd *ipr_cmd)
{
	ENTER;
	ipr_cmd->job_step = ipr_reset_bist_done;
	ipr_reset_start_timer(ipr_cmd, IPR_WAIT_FOR_BIST_TIMEOUT);
	LEAVE;
	return IPR_RC_JOB_RETURN;
}
/**
 * ipr_reset_reset_work - Pulse a PCIe fundamental reset
 * @work:	work struct
 *
 * Description: This pulses warm reset to a slot.  Runs from process
 * context (workqueue) because the reset pulse requires sleeping.
 *
 * Return value:
 * 	none
 **/
static void ipr_reset_reset_work(struct work_struct *work)
{
	struct ipr_cmnd *ipr_cmd = container_of(work, struct ipr_cmnd, work);
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
	struct pci_dev *pdev = ioa_cfg->pdev;
	unsigned long lock_flags = 0;

	ENTER;
	pci_set_pcie_reset_state(pdev, pcie_warm_reset);
	/* Hold reset asserted for the required interval */
	msleep(jiffies_to_msecs(IPR_PCI_RESET_TIMEOUT));
	pci_set_pcie_reset_state(pdev, pcie_deassert_reset);

	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	/* Resume the job only if this is still the active reset command */
	if (ioa_cfg->reset_cmd == ipr_cmd)
		ipr_reset_ioa_job(ipr_cmd);
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
	LEAVE;
}
/**
 * ipr_reset_slot_reset - Reset the PCI slot of the adapter.
 * @ipr_cmd:	ipr command struct
 *
 * Description: This asserts PCI reset to the adapter by queueing the
 * actual (sleeping) reset pulse onto the dedicated reset workqueue.
 *
 * Return value:
 * 	IPR_RC_JOB_RETURN
 **/
static int ipr_reset_slot_reset(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;

	ENTER;
	INIT_WORK(&ipr_cmd->work, ipr_reset_reset_work);
	queue_work(ioa_cfg->reset_work_q, &ipr_cmd->work);
	ipr_cmd->job_step = ipr_reset_slot_reset_done;
	LEAVE;
	return IPR_RC_JOB_RETURN;
}
/**
 * ipr_reset_block_config_access_wait - Wait for permission to block config access
 * @ipr_cmd:	ipr command struct
 *
 * Description: This attempts to block config access to the IOA.
 * If the trylock fails it re-arms a timer and retries until the
 * wait budget in ipr_cmd->u.time_left is exhausted, after which
 * the reset proceeds without the lock.
 *
 * Return value:
 * 	IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
 **/
static int ipr_reset_block_config_access_wait(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
	int rc = IPR_RC_JOB_CONTINUE;

	if (pci_cfg_access_trylock(ioa_cfg->pdev)) {
		ioa_cfg->cfg_locked = 1;
		/* ioa_cfg->reset is the configured reset method (BIST/slot) */
		ipr_cmd->job_step = ioa_cfg->reset;
	} else {
		if (ipr_cmd->u.time_left) {
			rc = IPR_RC_JOB_RETURN;
			ipr_cmd->u.time_left -= IPR_CHECK_FOR_RESET_TIMEOUT;
			ipr_reset_start_timer(ipr_cmd,
					      IPR_CHECK_FOR_RESET_TIMEOUT);
		} else {
			ipr_cmd->job_step = ioa_cfg->reset;
			dev_err(&ioa_cfg->pdev->dev,
				"Timed out waiting to lock config access. Resetting anyway.\n");
		}
	}

	return rc;
}
8835 * ipr_reset_block_config_access - Block config access to the IOA
8836 * @ipr_cmd: ipr command struct
8838 * Description: This attempts to block config access to the IOA
8841 * IPR_RC_JOB_CONTINUE
8843 static int ipr_reset_block_config_access(struct ipr_cmnd
*ipr_cmd
)
8845 ipr_cmd
->ioa_cfg
->cfg_locked
= 0;
8846 ipr_cmd
->job_step
= ipr_reset_block_config_access_wait
;
8847 ipr_cmd
->u
.time_left
= IPR_WAIT_FOR_RESET_TIMEOUT
;
8848 return IPR_RC_JOB_CONTINUE
;
8852 * ipr_reset_allowed - Query whether or not IOA can be reset
8853 * @ioa_cfg: ioa config struct
8856 * 0 if reset not allowed / non-zero if reset is allowed
8858 static int ipr_reset_allowed(struct ipr_ioa_cfg
*ioa_cfg
)
8860 volatile u32 temp_reg
;
8862 temp_reg
= readl(ioa_cfg
->regs
.sense_interrupt_reg
);
8863 return ((temp_reg
& IPR_PCII_CRITICAL_OPERATION
) == 0);
/**
 * ipr_reset_wait_to_start_bist - Wait for permission to reset IOA.
 * @ipr_cmd:	ipr command struct
 *
 * Description: This function waits for adapter permission to run BIST,
 * then runs BIST. If the adapter does not give permission after a
 * reasonable time, we will reset the adapter anyway. The impact of
 * resetting the adapter without warning the adapter is the risk of
 * losing the persistent error log on the adapter. If the adapter is
 * reset while it is writing to the flash on the adapter, the flash
 * segment will have bad ECC and be zeroed.
 *
 * Return value:
 * 	IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
 **/
static int ipr_reset_wait_to_start_bist(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
	int rc = IPR_RC_JOB_RETURN;

	if (!ipr_reset_allowed(ioa_cfg) && ipr_cmd->u.time_left) {
		/* Not yet allowed: consume one poll interval and retry */
		ipr_cmd->u.time_left -= IPR_CHECK_FOR_RESET_TIMEOUT;
		ipr_reset_start_timer(ipr_cmd, IPR_CHECK_FOR_RESET_TIMEOUT);
	} else {
		ipr_cmd->job_step = ipr_reset_block_config_access;
		rc = IPR_RC_JOB_CONTINUE;
	}

	return rc;
}
/**
 * ipr_reset_alert - Alert the adapter of a pending reset
 * @ipr_cmd:	ipr command struct
 *
 * Description: This function alerts the adapter that it will be reset.
 * If memory space is not currently enabled, proceed directly
 * to running BIST on the adapter. The timer must always be started
 * so we guarantee we do not run BIST from ipr_isr.
 *
 * Return value:
 * 	IPR_RC_JOB_RETURN
 **/
static int ipr_reset_alert(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
	u16 cmd_reg;
	int rc;

	ENTER;
	rc = pci_read_config_word(ioa_cfg->pdev, PCI_COMMAND, &cmd_reg);

	if ((rc == PCIBIOS_SUCCESSFUL) && (cmd_reg & PCI_COMMAND_MEMORY)) {
		/* MMIO is usable: raise the reset-alert doorbell so the
		 * adapter can quiesce before we run BIST */
		ipr_mask_and_clear_interrupts(ioa_cfg, ~0);
		writel(IPR_UPROCI_RESET_ALERT, ioa_cfg->regs.set_uproc_interrupt_reg32);
		ipr_cmd->job_step = ipr_reset_wait_to_start_bist;
	} else {
		ipr_cmd->job_step = ipr_reset_block_config_access;
	}

	ipr_cmd->u.time_left = IPR_WAIT_FOR_RESET_TIMEOUT;
	ipr_reset_start_timer(ipr_cmd, IPR_CHECK_FOR_RESET_TIMEOUT);

	LEAVE;
	return IPR_RC_JOB_RETURN;
}
/**
 * ipr_reset_quiesce_done - Complete IOA disconnect
 * @ipr_cmd:	ipr command struct
 *
 * Description: Freeze the adapter to complete quiesce processing
 * (all interrupts except the trans-to-oper indication are masked).
 *
 * Return value:
 * 	IPR_RC_JOB_CONTINUE
 **/
static int ipr_reset_quiesce_done(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;

	ENTER;
	ipr_cmd->job_step = ipr_ioa_bringdown_done;
	ipr_mask_and_clear_interrupts(ioa_cfg, ~IPR_PCII_IOA_TRANS_TO_OPER);
	LEAVE;
	return IPR_RC_JOB_CONTINUE;
}
/**
 * ipr_reset_cancel_hcam_done - Check for outstanding commands
 * @ipr_cmd:	ipr command struct
 *
 * Description: Ensure nothing is outstanding to the IOA and
 * proceed with IOA disconnect. Otherwise reset the IOA.
 *
 * Return value:
 * 	IPR_RC_JOB_RETURN / IPR_RC_JOB_CONTINUE
 **/
static int ipr_reset_cancel_hcam_done(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
	struct ipr_cmnd *loop_cmd;
	struct ipr_hrr_queue *hrrq;
	int rc = IPR_RC_JOB_CONTINUE;
	int count = 0;

	ENTER;
	ipr_cmd->job_step = ipr_reset_quiesce_done;

	for_each_hrrq(hrrq, ioa_cfg) {
		spin_lock(&hrrq->_lock);
		list_for_each_entry(loop_cmd, &hrrq->hrrq_pending_q, queue) {
			/* Anything still pending forces a full IOA reset */
			count++;
			ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
			list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
			rc = IPR_RC_JOB_RETURN;
			break;
		}
		spin_unlock(&hrrq->_lock);

		if (count)
			break;
	}

	LEAVE;
	return rc;
}
/**
 * ipr_reset_cancel_hcam - Cancel outstanding HCAMs
 * @ipr_cmd:	ipr command struct
 *
 * Description: Cancel any oustanding HCAMs to the IOA.  One cancel
 * request is issued per call; the job step re-enters this function
 * until no HCAM commands remain pending.
 *
 * Return value:
 * 	IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
 **/
static int ipr_reset_cancel_hcam(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
	int rc = IPR_RC_JOB_CONTINUE;
	struct ipr_cmd_pkt *cmd_pkt;
	struct ipr_cmnd *hcam_cmd;
	struct ipr_hrr_queue *hrrq = &ioa_cfg->hrrq[IPR_INIT_HRRQ];

	ENTER;
	ipr_cmd->job_step = ipr_reset_cancel_hcam_done;

	if (!hrrq->ioa_is_dead) {
		if (!list_empty(&ioa_cfg->hostrcb_pending_q)) {
			list_for_each_entry(hcam_cmd, &hrrq->hrrq_pending_q, queue) {
				if (hcam_cmd->ioarcb.cmd_pkt.cdb[0] != IPR_HOST_CONTROLLED_ASYNC)
					continue;

				ipr_cmd->ioarcb.res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
				/* NOTE(review): request_type is set twice (here and
				 * via cmd_pkt below); the second assignment is
				 * redundant but harmless. */
				ipr_cmd->ioarcb.cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
				cmd_pkt = &ipr_cmd->ioarcb.cmd_pkt;
				cmd_pkt->request_type = IPR_RQTYPE_IOACMD;
				cmd_pkt->cdb[0] = IPR_CANCEL_REQUEST;
				cmd_pkt->cdb[1] = IPR_CANCEL_64BIT_IOARCB;
				/* 64-bit IOARCB address of the HCAM being
				 * cancelled, spread across the cdb: bytes
				 * 10-13 carry the high word, 2-5 the low. */
				cmd_pkt->cdb[10] = ((u64) hcam_cmd->dma_addr >> 56) & 0xff;
				cmd_pkt->cdb[11] = ((u64) hcam_cmd->dma_addr >> 48) & 0xff;
				cmd_pkt->cdb[12] = ((u64) hcam_cmd->dma_addr >> 40) & 0xff;
				cmd_pkt->cdb[13] = ((u64) hcam_cmd->dma_addr >> 32) & 0xff;
				cmd_pkt->cdb[2] = ((u64) hcam_cmd->dma_addr >> 24) & 0xff;
				cmd_pkt->cdb[3] = ((u64) hcam_cmd->dma_addr >> 16) & 0xff;
				cmd_pkt->cdb[4] = ((u64) hcam_cmd->dma_addr >> 8) & 0xff;
				cmd_pkt->cdb[5] = ((u64) hcam_cmd->dma_addr) & 0xff;

				ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout,
					   IPR_CANCEL_TIMEOUT);

				/* Loop back here in case more HCAMs remain */
				rc = IPR_RC_JOB_RETURN;
				ipr_cmd->job_step = ipr_reset_cancel_hcam;
				break;
			}
		}
	} else
		ipr_cmd->job_step = ipr_reset_alert;

	LEAVE;
	return rc;
}
/**
 * ipr_reset_ucode_download_done - Microcode download completion
 * @ipr_cmd:	ipr command struct
 *
 * Description: This function unmaps the microcode download buffer
 * and chains to the reset alert step.
 *
 * Return value:
 * 	IPR_RC_JOB_CONTINUE
 **/
static int ipr_reset_ucode_download_done(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
	struct ipr_sglist *sglist = ioa_cfg->ucode_sglist;

	dma_unmap_sg(&ioa_cfg->pdev->dev, sglist->scatterlist,
		     sglist->num_sg, DMA_TO_DEVICE);

	ipr_cmd->job_step = ipr_reset_alert;
	return IPR_RC_JOB_CONTINUE;
}
/**
 * ipr_reset_ucode_download - Download microcode to the adapter
 * @ipr_cmd:	ipr command struct
 *
 * Description: This function checks to see if it there is microcode
 * to download to the adapter. If there is, a download is performed
 * with a SCSI WRITE BUFFER (download-and-save) command.
 *
 * Return value:
 * 	IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
 **/
static int ipr_reset_ucode_download(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
	struct ipr_sglist *sglist = ioa_cfg->ucode_sglist;

	ENTER;
	ipr_cmd->job_step = ipr_reset_alert;

	/* No pending microcode image: nothing to do */
	if (!sglist)
		return IPR_RC_JOB_CONTINUE;

	ipr_cmd->ioarcb.res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
	ipr_cmd->ioarcb.cmd_pkt.request_type = IPR_RQTYPE_SCSICDB;
	ipr_cmd->ioarcb.cmd_pkt.cdb[0] = WRITE_BUFFER;
	ipr_cmd->ioarcb.cmd_pkt.cdb[1] = IPR_WR_BUF_DOWNLOAD_AND_SAVE;
	/* 24-bit parameter list length, MSB first */
	ipr_cmd->ioarcb.cmd_pkt.cdb[6] = (sglist->buffer_len & 0xff0000) >> 16;
	ipr_cmd->ioarcb.cmd_pkt.cdb[7] = (sglist->buffer_len & 0x00ff00) >> 8;
	ipr_cmd->ioarcb.cmd_pkt.cdb[8] = sglist->buffer_len & 0x0000ff;

	if (ioa_cfg->sis64)
		ipr_build_ucode_ioadl64(ipr_cmd, sglist);
	else
		ipr_build_ucode_ioadl(ipr_cmd, sglist);
	ipr_cmd->job_step = ipr_reset_ucode_download_done;

	ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout,
		   IPR_WRITE_BUFFER_TIMEOUT);

	LEAVE;
	return IPR_RC_JOB_RETURN;
}
/**
 * ipr_reset_shutdown_ioa - Shutdown the adapter
 * @ipr_cmd:	ipr command struct
 *
 * Description: This function issues an adapter shutdown of the
 * specified type to the specified adapter as part of the
 * adapter reset job.  The shutdown timeout scales with the
 * requested shutdown type and dual-IOA RAID configuration.
 *
 * Return value:
 * 	IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
 **/
static int ipr_reset_shutdown_ioa(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
	enum ipr_shutdown_type shutdown_type = ipr_cmd->u.shutdown_type;
	unsigned long timeout;
	int rc = IPR_RC_JOB_CONTINUE;

	ENTER;
	if (shutdown_type == IPR_SHUTDOWN_QUIESCE)
		/* Quiesce path: cancel HCAMs instead of issuing a shutdown */
		ipr_cmd->job_step = ipr_reset_cancel_hcam;
	else if (shutdown_type != IPR_SHUTDOWN_NONE &&
			!ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead) {
		ipr_cmd->ioarcb.res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
		ipr_cmd->ioarcb.cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
		ipr_cmd->ioarcb.cmd_pkt.cdb[0] = IPR_IOA_SHUTDOWN;
		ipr_cmd->ioarcb.cmd_pkt.cdb[1] = shutdown_type;

		if (shutdown_type == IPR_SHUTDOWN_NORMAL)
			timeout = IPR_SHUTDOWN_TIMEOUT;
		else if (shutdown_type == IPR_SHUTDOWN_PREPARE_FOR_NORMAL)
			timeout = IPR_INTERNAL_TIMEOUT;
		else if (ioa_cfg->dual_raid && ipr_dual_ioa_raid)
			timeout = IPR_DUAL_IOA_ABBR_SHUTDOWN_TO;
		else
			timeout = IPR_ABBREV_SHUTDOWN_TIMEOUT;

		ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, timeout);

		rc = IPR_RC_JOB_RETURN;
		ipr_cmd->job_step = ipr_reset_ucode_download;
	} else
		/* No shutdown needed (or adapter dead): go straight to alert */
		ipr_cmd->job_step = ipr_reset_alert;

	LEAVE;
	return rc;
}
/**
 * ipr_reset_ioa_job - Adapter reset job
 * @ipr_cmd:	ipr command struct
 *
 * Description: This function is the job router for the adapter reset job.
 * It repeatedly invokes the current job step until a step returns
 * IPR_RC_JOB_RETURN (meaning it issued an async request and will be
 * re-entered as that request's completion callback).
 *
 * Return value:
 * 	none
 **/
static void ipr_reset_ioa_job(struct ipr_cmnd *ipr_cmd)
{
	u32 rc, ioasc;
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;

	do {
		ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);

		if (ioa_cfg->reset_cmd != ipr_cmd) {
			/*
			 * We are doing nested adapter resets and this is
			 * not the current reset job.
			 */
			list_add_tail(&ipr_cmd->queue,
					&ipr_cmd->hrrq->hrrq_free_q);
			return;
		}

		if (IPR_IOASC_SENSE_KEY(ioasc)) {
			/* Previous step failed - give its failure handler a chance */
			rc = ipr_cmd->job_step_failed(ipr_cmd);
			if (rc == IPR_RC_JOB_RETURN)
				return;
		}

		/* Recycle the command block and run the next step; the default
		 * failure handler is reinstalled each pass. */
		ipr_reinit_ipr_cmnd(ipr_cmd);
		ipr_cmd->job_step_failed = ipr_reset_cmd_failed;
		rc = ipr_cmd->job_step(ipr_cmd);
	} while (rc == IPR_RC_JOB_CONTINUE);
}
/**
 * _ipr_initiate_ioa_reset - Initiate an adapter reset
 * @ioa_cfg:		ioa config struct
 * @job_step:		first job step of reset job
 * @shutdown_type:	shutdown type
 *
 * Description: This function will initiate the reset of the given adapter
 * starting at the selected job step.
 * If the caller needs to wait on the completion of the reset,
 * the caller must sleep on the reset_wait_q.
 *
 * Return value:
 * 	none
 **/
static void _ipr_initiate_ioa_reset(struct ipr_ioa_cfg *ioa_cfg,
				    int (*job_step) (struct ipr_cmnd *),
				    enum ipr_shutdown_type shutdown_type)
{
	struct ipr_cmnd *ipr_cmd;
	int i;

	/* Stop accepting new commands on every HRRQ before resetting */
	ioa_cfg->in_reset_reload = 1;
	for (i = 0; i < ioa_cfg->hrrq_num; i++) {
		spin_lock(&ioa_cfg->hrrq[i]._lock);
		ioa_cfg->hrrq[i].allow_cmds = 0;
		spin_unlock(&ioa_cfg->hrrq[i]._lock);
	}

	/* Block the SCSI midlayer unless the adapter is being removed */
	if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].removing_ioa) {
		ioa_cfg->scsi_unblock = 0;
		ioa_cfg->scsi_blocked = 1;
		scsi_block_requests(ioa_cfg->host);
	}

	ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
	ioa_cfg->reset_cmd = ipr_cmd;
	ipr_cmd->job_step = job_step;
	ipr_cmd->u.shutdown_type = shutdown_type;

	/* Kick off the job state machine */
	ipr_reset_ioa_job(ipr_cmd);
}
/**
 * ipr_initiate_ioa_reset - Initiate an adapter reset
 * @ioa_cfg:		ioa config struct
 * @shutdown_type:	shutdown type
 *
 * Description: This function will initiate the reset of the given adapter.
 * If the caller needs to wait on the completion of the reset,
 * the caller must sleep on the reset_wait_q.
 *
 * Return value:
 * 	none
 **/
static void ipr_initiate_ioa_reset(struct ipr_ioa_cfg *ioa_cfg,
				   enum ipr_shutdown_type shutdown_type)
{
	int i;

	if (ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead)
		return;

	/* A reset is already in flight: demote/abort any in-progress dump */
	if (ioa_cfg->in_reset_reload) {
		if (ioa_cfg->sdt_state == GET_DUMP)
			ioa_cfg->sdt_state = WAIT_FOR_DUMP;
		else if (ioa_cfg->sdt_state == READ_DUMP)
			ioa_cfg->sdt_state = ABORT_DUMP;
	}

	if (ioa_cfg->reset_retries++ >= IPR_NUM_RESET_RELOAD_RETRIES) {
		/* Too many failed recovery attempts - declare the IOA dead */
		dev_err(&ioa_cfg->pdev->dev,
			"IOA taken offline - error recovery failed\n");

		ioa_cfg->reset_retries = 0;
		for (i = 0; i < ioa_cfg->hrrq_num; i++) {
			spin_lock(&ioa_cfg->hrrq[i]._lock);
			ioa_cfg->hrrq[i].ioa_is_dead = 1;
			spin_unlock(&ioa_cfg->hrrq[i]._lock);
		}

		if (ioa_cfg->in_ioa_bringdown) {
			/* Already shutting down: fail everything and wake waiters */
			ioa_cfg->reset_cmd = NULL;
			ioa_cfg->in_reset_reload = 0;
			ipr_fail_all_ops(ioa_cfg);
			wake_up_all(&ioa_cfg->reset_wait_q);

			if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].removing_ioa) {
				ioa_cfg->scsi_unblock = 1;
				schedule_work(&ioa_cfg->work_q);
			}
			return;
		} else {
			/* Convert this reset into a bringdown of the adapter */
			ioa_cfg->in_ioa_bringdown = 1;
			shutdown_type = IPR_SHUTDOWN_NONE;
		}
	}

	_ipr_initiate_ioa_reset(ioa_cfg, ipr_reset_shutdown_ioa,
				shutdown_type);
}
/**
 * ipr_reset_freeze - Hold off all I/O activity
 * @ipr_cmd:	ipr command struct
 *
 * Description: If the PCI slot is frozen, hold off all I/O
 * activity; then, as soon as the slot is available again,
 * initiate an adapter reset.
 *
 * Return value:
 * 	IPR_RC_JOB_RETURN
 **/
static int ipr_reset_freeze(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
	int i;

	/* Disallow new interrupts, avoid loop */
	for (i = 0; i < ioa_cfg->hrrq_num; i++) {
		spin_lock(&ioa_cfg->hrrq[i]._lock);
		ioa_cfg->hrrq[i].allow_interrupts = 0;
		spin_unlock(&ioa_cfg->hrrq[i]._lock);
	}

	/* Park this command on the pending queue; it resumes the reset job
	 * when it is completed after the slot recovers. */
	list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_pending_q);
	ipr_cmd->done = ipr_reset_ioa_job;
	return IPR_RC_JOB_RETURN;
}
/**
 * ipr_pci_mmio_enabled - Called when MMIO has been re-enabled
 * @pdev:	PCI device struct
 *
 * Description: This routine is called to tell us that the MMIO
 * access to the IOA has been restored
 *
 * Return value:
 * 	PCI_ERS_RESULT_NEED_RESET
 **/
static pci_ers_result_t ipr_pci_mmio_enabled(struct pci_dev *pdev)
{
	unsigned long flags = 0;
	struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);

	spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
	/* If probe hasn't finished, snapshot config space for the later reset */
	if (!ioa_cfg->probe_done)
		pci_save_state(pdev);
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
	return PCI_ERS_RESULT_NEED_RESET;
}
/**
 * ipr_pci_frozen - Called when slot has experienced a PCI bus error.
 * @pdev:	PCI device struct
 *
 * Description: This routine is called to tell us that the PCI bus
 * is down. Can't do anything here, except put the device driver
 * into a holding pattern, waiting for the PCI bus to come back.
 *
 * Return value:
 * 	none
 **/
static void ipr_pci_frozen(struct pci_dev *pdev)
{
	unsigned long flags = 0;
	struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);

	spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
	/* Only freeze if the adapter finished probing; ipr_reset_freeze
	 * quiesces interrupts and parks the reset job until recovery. */
	if (ioa_cfg->probe_done)
		_ipr_initiate_ioa_reset(ioa_cfg, ipr_reset_freeze, IPR_SHUTDOWN_NONE);
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
}
/**
 * ipr_pci_slot_reset - Called when PCI slot has been reset.
 * @pdev:	PCI device struct
 *
 * Description: This routine is called by the pci error recovery
 * code after the PCI slot has been reset, just before we
 * should resume normal operations.
 *
 * Return value:
 * 	PCI_ERS_RESULT_RECOVERED
 **/
static pci_ers_result_t ipr_pci_slot_reset(struct pci_dev *pdev)
{
	unsigned long flags = 0;
	struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);

	spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
	if (ioa_cfg->probe_done) {
		/* Chips that need a warm reset get the full reset path;
		 * otherwise just restore config space and resume. */
		if (ioa_cfg->needs_warm_reset)
			ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
		else
			_ipr_initiate_ioa_reset(ioa_cfg, ipr_reset_restore_cfg_space,
						IPR_SHUTDOWN_NONE);
	} else
		wake_up_all(&ioa_cfg->eeh_wait_q);
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
	return PCI_ERS_RESULT_RECOVERED;
}
/**
 * ipr_pci_perm_failure - Called when PCI slot is dead for good.
 * @pdev:	PCI device struct
 *
 * Description: This routine is called when the PCI bus has
 * permanently failed.
 *
 * Return value:
 * 	none
 **/
static void ipr_pci_perm_failure(struct pci_dev *pdev)
{
	unsigned long flags = 0;
	struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
	int i;

	spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
	if (ioa_cfg->probe_done) {
		if (ioa_cfg->sdt_state == WAIT_FOR_DUMP)
			ioa_cfg->sdt_state = ABORT_DUMP;
		/* Force the next reset attempt to be the last one so the
		 * adapter is taken offline immediately. */
		ioa_cfg->reset_retries = IPR_NUM_RESET_RELOAD_RETRIES - 1;
		ioa_cfg->in_ioa_bringdown = 1;
		for (i = 0; i < ioa_cfg->hrrq_num; i++) {
			spin_lock(&ioa_cfg->hrrq[i]._lock);
			ioa_cfg->hrrq[i].allow_cmds = 0;
			spin_unlock(&ioa_cfg->hrrq[i]._lock);
		}
		ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
	} else
		wake_up_all(&ioa_cfg->eeh_wait_q);
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
}
/**
 * ipr_pci_error_detected - Called when a PCI error is detected.
 * @pdev:	PCI device struct
 * @state:	PCI channel state
 *
 * Description: Called when a PCI error is detected.
 *
 * Return value:
 * 	PCI_ERS_RESULT_CAN_RECOVER / PCI_ERS_RESULT_DISCONNECT /
 * 	PCI_ERS_RESULT_NEED_RESET
 **/
static pci_ers_result_t ipr_pci_error_detected(struct pci_dev *pdev,
					       pci_channel_state_t state)
{
	switch (state) {
	case pci_channel_io_frozen:
		/* Bus is down; freeze I/O and wait for recovery */
		ipr_pci_frozen(pdev);
		return PCI_ERS_RESULT_CAN_RECOVER;
	case pci_channel_io_perm_failure:
		/* Slot is gone for good; tear the adapter down */
		ipr_pci_perm_failure(pdev);
		return PCI_ERS_RESULT_DISCONNECT;
	default:
		break;
	}
	return PCI_ERS_RESULT_NEED_RESET;
}
/**
 * ipr_probe_ioa_part2 - Initializes IOAs found in ipr_probe_ioa(..)
 * @ioa_cfg:	ioa cfg struct
 *
 * Description: This is the second phase of adapter initialization
 * This function takes care of initilizing the adapter to the point
 * where it can accept new commands.
 *
 * Return value:
 * 	0 on success / -EIO on failure
 **/
static int ipr_probe_ioa_part2(struct ipr_ioa_cfg *ioa_cfg)
{
	int rc = 0;
	unsigned long host_lock_flags = 0;

	ENTER;
	spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);
	dev_dbg(&ioa_cfg->pdev->dev, "ioa_cfg adx: 0x%p\n", ioa_cfg);
	ioa_cfg->probe_done = 1;
	if (ioa_cfg->needs_hard_reset) {
		/* Card was found in an unknown state during probe */
		ioa_cfg->needs_hard_reset = 0;
		ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
	} else
		_ipr_initiate_ioa_reset(ioa_cfg, ipr_reset_enable_ioa,
					IPR_SHUTDOWN_NONE);
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);

	LEAVE;
	return rc;
}
9481 * ipr_free_cmd_blks - Frees command blocks allocated for an adapter
9482 * @ioa_cfg: ioa config struct
9487 static void ipr_free_cmd_blks(struct ipr_ioa_cfg
*ioa_cfg
)
9491 if (ioa_cfg
->ipr_cmnd_list
) {
9492 for (i
= 0; i
< IPR_NUM_CMD_BLKS
; i
++) {
9493 if (ioa_cfg
->ipr_cmnd_list
[i
])
9494 dma_pool_free(ioa_cfg
->ipr_cmd_pool
,
9495 ioa_cfg
->ipr_cmnd_list
[i
],
9496 ioa_cfg
->ipr_cmnd_list_dma
[i
]);
9498 ioa_cfg
->ipr_cmnd_list
[i
] = NULL
;
9502 if (ioa_cfg
->ipr_cmd_pool
)
9503 dma_pool_destroy(ioa_cfg
->ipr_cmd_pool
);
9505 kfree(ioa_cfg
->ipr_cmnd_list
);
9506 kfree(ioa_cfg
->ipr_cmnd_list_dma
);
9507 ioa_cfg
->ipr_cmnd_list
= NULL
;
9508 ioa_cfg
->ipr_cmnd_list_dma
= NULL
;
9509 ioa_cfg
->ipr_cmd_pool
= NULL
;
/**
 * ipr_free_mem - Frees memory allocated for an adapter
 * @ioa_cfg:	ioa cfg struct
 *
 * Description: Frees everything ipr_alloc_mem() allocated: resource
 * entries, VPD buffers, command blocks, host RRQs, the config table,
 * HCAM buffers, dump area, and the trace buffer.
 *
 * Return value:
 * 	nothing
 **/
static void ipr_free_mem(struct ipr_ioa_cfg *ioa_cfg)
{
	int i;

	kfree(ioa_cfg->res_entries);
	dma_free_coherent(&ioa_cfg->pdev->dev, sizeof(struct ipr_misc_cbs),
			  ioa_cfg->vpd_cbs, ioa_cfg->vpd_cbs_dma);
	ipr_free_cmd_blks(ioa_cfg);

	for (i = 0; i < ioa_cfg->hrrq_num; i++)
		dma_free_coherent(&ioa_cfg->pdev->dev,
				  sizeof(u32) * ioa_cfg->hrrq[i].size,
				  ioa_cfg->hrrq[i].host_rrq,
				  ioa_cfg->hrrq[i].host_rrq_dma);

	dma_free_coherent(&ioa_cfg->pdev->dev, ioa_cfg->cfg_table_size,
			  ioa_cfg->u.cfg_table, ioa_cfg->cfg_table_dma);

	for (i = 0; i < IPR_MAX_HCAMS; i++) {
		dma_free_coherent(&ioa_cfg->pdev->dev,
				  sizeof(struct ipr_hostrcb),
				  ioa_cfg->hostrcb[i],
				  ioa_cfg->hostrcb_dma[i]);
	}

	ipr_free_dump(ioa_cfg);
	kfree(ioa_cfg->trace);
}
/**
 * ipr_free_irqs - Free all allocated IRQs for the adapter.
 * @ioa_cfg:	ipr cfg struct
 *
 * This function frees all allocated IRQs for the
 * specified adapter.
 *
 * Return value:
 * 	none
 **/
static void ipr_free_irqs(struct ipr_ioa_cfg *ioa_cfg)
{
	struct pci_dev *pdev = ioa_cfg->pdev;
	int i;

	/* One IRQ per HRRQ was requested; release them all, then the vectors */
	for (i = 0; i < ioa_cfg->nvectors; i++)
		free_irq(pci_irq_vector(pdev, i), &ioa_cfg->hrrq[i]);
	pci_free_irq_vectors(pdev);
}
/**
 * ipr_free_all_resources - Free all allocated resources for an adapter.
 * @ioa_cfg:	ioa config struct
 *
 * This function frees all allocated resources for the
 * specified adapter.
 *
 * Return value:
 * 	none
 **/
static void ipr_free_all_resources(struct ipr_ioa_cfg *ioa_cfg)
{
	struct pci_dev *pdev = ioa_cfg->pdev;

	ENTER;
	ipr_free_irqs(ioa_cfg);
	if (ioa_cfg->reset_work_q)
		destroy_workqueue(ioa_cfg->reset_work_q);
	iounmap(ioa_cfg->hdw_dma_regs);
	pci_release_regions(pdev);
	ipr_free_mem(ioa_cfg);
	/* scsi_host_put drops the last reference taken in probe */
	scsi_host_put(ioa_cfg->host);
	pci_disable_device(pdev);
	LEAVE;
}
/**
 * ipr_alloc_cmd_blks - Allocate command blocks for an adapter
 * @ioa_cfg:	ioa config struct
 *
 * Description: Creates the DMA pool of command blocks, partitions the
 * command-id space across the HRRQs, then allocates and wires up each
 * command block (IOARCB DMA addresses, response handle, sense buffer,
 * HRRQ assignment).
 *
 * Return value:
 * 	0 on success / -ENOMEM on allocation failure
 **/
static int ipr_alloc_cmd_blks(struct ipr_ioa_cfg *ioa_cfg)
{
	struct ipr_cmnd *ipr_cmd;
	struct ipr_ioarcb *ioarcb;
	dma_addr_t dma_addr;
	int i, entries_each_hrrq, hrrq_id = 0;

	/* 512-byte alignment for the hardware-visible command blocks */
	ioa_cfg->ipr_cmd_pool = dma_pool_create(IPR_NAME, &ioa_cfg->pdev->dev,
						sizeof(struct ipr_cmnd), 512, 0);

	if (!ioa_cfg->ipr_cmd_pool)
		return -ENOMEM;

	ioa_cfg->ipr_cmnd_list = kcalloc(IPR_NUM_CMD_BLKS, sizeof(struct ipr_cmnd *), GFP_KERNEL);
	ioa_cfg->ipr_cmnd_list_dma = kcalloc(IPR_NUM_CMD_BLKS, sizeof(dma_addr_t), GFP_KERNEL);

	if (!ioa_cfg->ipr_cmnd_list || !ioa_cfg->ipr_cmnd_list_dma) {
		ipr_free_cmd_blks(ioa_cfg);
		return -ENOMEM;
	}

	/* Partition the command-id space: with multiple HRRQs, queue 0 is
	 * reserved for internal commands and the rest split the remainder
	 * evenly; with a single HRRQ it owns every command id. */
	for (i = 0; i < ioa_cfg->hrrq_num; i++) {
		if (ioa_cfg->hrrq_num > 1) {
			if (i == 0) {
				entries_each_hrrq = IPR_NUM_INTERNAL_CMD_BLKS;
				ioa_cfg->hrrq[i].min_cmd_id = 0;
				ioa_cfg->hrrq[i].max_cmd_id =
					(entries_each_hrrq - 1);
			} else {
				entries_each_hrrq =
					IPR_NUM_BASE_CMD_BLKS /
					(ioa_cfg->hrrq_num - 1);
				ioa_cfg->hrrq[i].min_cmd_id =
					IPR_NUM_INTERNAL_CMD_BLKS +
					(i - 1) * entries_each_hrrq;
				ioa_cfg->hrrq[i].max_cmd_id =
					(IPR_NUM_INTERNAL_CMD_BLKS +
					i * entries_each_hrrq - 1);
			}
		} else {
			entries_each_hrrq = IPR_NUM_CMD_BLKS;
			ioa_cfg->hrrq[i].min_cmd_id = 0;
			ioa_cfg->hrrq[i].max_cmd_id = (entries_each_hrrq - 1);
		}
		ioa_cfg->hrrq[i].size = entries_each_hrrq;
	}

	BUG_ON(ioa_cfg->hrrq_num == 0);

	/* Give any ids left over from the integer division to the last HRRQ */
	i = IPR_NUM_CMD_BLKS -
		ioa_cfg->hrrq[ioa_cfg->hrrq_num - 1].max_cmd_id - 1;
	if (i > 0) {
		ioa_cfg->hrrq[ioa_cfg->hrrq_num - 1].size += i;
		ioa_cfg->hrrq[ioa_cfg->hrrq_num - 1].max_cmd_id += i;
	}

	for (i = 0; i < IPR_NUM_CMD_BLKS; i++) {
		ipr_cmd = dma_pool_zalloc(ioa_cfg->ipr_cmd_pool,
				GFP_KERNEL, &dma_addr);

		if (!ipr_cmd) {
			ipr_free_cmd_blks(ioa_cfg);
			return -ENOMEM;
		}

		ioa_cfg->ipr_cmnd_list[i] = ipr_cmd;
		ioa_cfg->ipr_cmnd_list_dma[i] = dma_addr;

		ioarcb = &ipr_cmd->ioarcb;
		ipr_cmd->dma_addr = dma_addr;
		/* Tell the adapter where this command block lives in host
		 * memory, using the address width the chip generation expects */
		if (ioa_cfg->sis64)
			ioarcb->a.ioarcb_host_pci_addr64 = cpu_to_be64(dma_addr);
		else
			ioarcb->a.ioarcb_host_pci_addr = cpu_to_be32(dma_addr);

		ioarcb->host_response_handle = cpu_to_be32(i << 2);
		if (ioa_cfg->sis64) {
			ioarcb->u.sis64_addr_data.data_ioadl_addr =
				cpu_to_be64(dma_addr + offsetof(struct ipr_cmnd, i.ioadl64));
			ioarcb->u.sis64_addr_data.ioasa_host_pci_addr =
				cpu_to_be64(dma_addr + offsetof(struct ipr_cmnd, s.ioasa64));
		} else {
			ioarcb->write_ioadl_addr =
				cpu_to_be32(dma_addr + offsetof(struct ipr_cmnd, i.ioadl));
			ioarcb->read_ioadl_addr = ioarcb->write_ioadl_addr;
			ioarcb->ioasa_host_pci_addr =
				cpu_to_be32(dma_addr + offsetof(struct ipr_cmnd, s.ioasa));
		}
		ioarcb->ioasa_len = cpu_to_be16(sizeof(struct ipr_ioasa));
		ipr_cmd->cmd_index = i;
		ipr_cmd->ioa_cfg = ioa_cfg;
		ipr_cmd->sense_buffer_dma = dma_addr +
			offsetof(struct ipr_cmnd, sense_buffer);

		/* Hand the block to its HRRQ's free list; advance to the next
		 * HRRQ once this one's id range is exhausted */
		ipr_cmd->ioarcb.cmd_pkt.hrrq_id = hrrq_id;
		ipr_cmd->hrrq = &ioa_cfg->hrrq[hrrq_id];
		list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
		if (i >= ioa_cfg->hrrq[hrrq_id].max_cmd_id)
			hrrq_id++;
	}

	return 0;
}
/**
 * ipr_alloc_mem - Allocate memory for an adapter
 * @ioa_cfg:	ioa config struct
 *
 * Description: Allocates all per-adapter memory: resource table entries,
 * VPD buffers, command blocks, per-HRRQ host response queues, the device
 * config table, HCAM buffers, and the trace buffer. On any failure,
 * everything already allocated is unwound via goto cleanup labels.
 *
 * Return value:
 * 	0 on success / non-zero for error
 **/
static int ipr_alloc_mem(struct ipr_ioa_cfg *ioa_cfg)
{
	struct pci_dev *pdev = ioa_cfg->pdev;
	int i, rc = -ENOMEM;

	ENTER;
	ioa_cfg->res_entries = kcalloc(ioa_cfg->max_devs_supported,
				       sizeof(struct ipr_resource_entry),
				       GFP_KERNEL);

	if (!ioa_cfg->res_entries)
		goto out;

	for (i = 0; i < ioa_cfg->max_devs_supported; i++) {
		list_add_tail(&ioa_cfg->res_entries[i].queue, &ioa_cfg->free_res_q);
		ioa_cfg->res_entries[i].ioa_cfg = ioa_cfg;
	}

	ioa_cfg->vpd_cbs = dma_alloc_coherent(&pdev->dev,
					      sizeof(struct ipr_misc_cbs),
					      &ioa_cfg->vpd_cbs_dma,
					      GFP_KERNEL);

	if (!ioa_cfg->vpd_cbs)
		goto out_free_res_entries;

	if (ipr_alloc_cmd_blks(ioa_cfg))
		goto out_free_vpd_cbs;

	for (i = 0; i < ioa_cfg->hrrq_num; i++) {
		ioa_cfg->hrrq[i].host_rrq = dma_alloc_coherent(&pdev->dev,
					sizeof(u32) * ioa_cfg->hrrq[i].size,
					&ioa_cfg->hrrq[i].host_rrq_dma,
					GFP_KERNEL);

		if (!ioa_cfg->hrrq[i].host_rrq)  {
			/* Unwind the RRQs already allocated in this loop */
			while (--i >= 0)
				dma_free_coherent(&pdev->dev,
					sizeof(u32) * ioa_cfg->hrrq[i].size,
					ioa_cfg->hrrq[i].host_rrq,
					ioa_cfg->hrrq[i].host_rrq_dma);
			goto out_ipr_free_cmd_blocks;
		}
		ioa_cfg->hrrq[i].ioa_cfg = ioa_cfg;
	}

	ioa_cfg->u.cfg_table = dma_alloc_coherent(&pdev->dev,
						  ioa_cfg->cfg_table_size,
						  &ioa_cfg->cfg_table_dma,
						  GFP_KERNEL);

	if (!ioa_cfg->u.cfg_table)
		goto out_free_host_rrq;

	for (i = 0; i < IPR_MAX_HCAMS; i++) {
		ioa_cfg->hostrcb[i] = dma_alloc_coherent(&pdev->dev,
							 sizeof(struct ipr_hostrcb),
							 &ioa_cfg->hostrcb_dma[i],
							 GFP_KERNEL);

		if (!ioa_cfg->hostrcb[i])
			goto out_free_hostrcb_dma;

		/* The adapter only needs the address of the hcam payload */
		ioa_cfg->hostrcb[i]->hostrcb_dma =
			ioa_cfg->hostrcb_dma[i] + offsetof(struct ipr_hostrcb, hcam);
		ioa_cfg->hostrcb[i]->ioa_cfg = ioa_cfg;
		list_add_tail(&ioa_cfg->hostrcb[i]->queue, &ioa_cfg->hostrcb_free_q);
	}

	ioa_cfg->trace = kcalloc(IPR_NUM_TRACE_ENTRIES,
				 sizeof(struct ipr_trace_entry),
				 GFP_KERNEL);

	if (!ioa_cfg->trace)
		goto out_free_hostrcb_dma;

	rc = 0;
out:
	LEAVE;
	return rc;

out_free_hostrcb_dma:
	/* i holds the count of HCAMs successfully allocated so far */
	while (i-- > 0) {
		dma_free_coherent(&pdev->dev, sizeof(struct ipr_hostrcb),
				  ioa_cfg->hostrcb[i],
				  ioa_cfg->hostrcb_dma[i]);
	}
	dma_free_coherent(&pdev->dev, ioa_cfg->cfg_table_size,
			  ioa_cfg->u.cfg_table, ioa_cfg->cfg_table_dma);
out_free_host_rrq:
	for (i = 0; i < ioa_cfg->hrrq_num; i++) {
		dma_free_coherent(&pdev->dev,
				  sizeof(u32) * ioa_cfg->hrrq[i].size,
				  ioa_cfg->hrrq[i].host_rrq,
				  ioa_cfg->hrrq[i].host_rrq_dma);
	}
out_ipr_free_cmd_blocks:
	ipr_free_cmd_blks(ioa_cfg);
out_free_vpd_cbs:
	dma_free_coherent(&pdev->dev, sizeof(struct ipr_misc_cbs),
			  ioa_cfg->vpd_cbs, ioa_cfg->vpd_cbs_dma);
out_free_res_entries:
	kfree(ioa_cfg->res_entries);
	goto out;
}
/**
 * ipr_initialize_bus_attr - Initialize SCSI bus attributes to default values
 * @ioa_cfg:	ioa config struct
 *
 * Return value:
 * 	none
 **/
static void ipr_initialize_bus_attr(struct ipr_ioa_cfg *ioa_cfg)
{
	int i;

	for (i = 0; i < IPR_MAX_NUM_BUSES; i++) {
		ioa_cfg->bus_attr[i].bus = i;
		ioa_cfg->bus_attr[i].qas_enabled = 0;
		ioa_cfg->bus_attr[i].bus_width = IPR_DEFAULT_BUS_WIDTH;
		/* ipr_max_speed is a module parameter indexing the speed table;
		 * out-of-range values fall back to U160 */
		if (ipr_max_speed < ARRAY_SIZE(ipr_max_bus_speeds))
			ioa_cfg->bus_attr[i].max_xfer_rate = ipr_max_bus_speeds[ipr_max_speed];
		else
			ioa_cfg->bus_attr[i].max_xfer_rate = IPR_U160_SCSI_RATE;
	}
}
/**
 * ipr_init_regs - Initialize IOA registers
 * @ioa_cfg:	ioa config struct
 *
 * Description: Translates the chip-specific register offset table into
 * absolute ioremapped addresses cached in ioa_cfg->regs.
 *
 * Return value:
 * 	none
 **/
static void ipr_init_regs(struct ipr_ioa_cfg *ioa_cfg)
{
	const struct ipr_interrupt_offsets *p;
	struct ipr_interrupts *t;
	void __iomem *base;

	p = &ioa_cfg->chip_cfg->regs;
	t = &ioa_cfg->regs;
	base = ioa_cfg->hdw_dma_regs;

	t->set_interrupt_mask_reg = base + p->set_interrupt_mask_reg;
	t->clr_interrupt_mask_reg = base + p->clr_interrupt_mask_reg;
	t->clr_interrupt_mask_reg32 = base + p->clr_interrupt_mask_reg32;
	t->sense_interrupt_mask_reg = base + p->sense_interrupt_mask_reg;
	t->sense_interrupt_mask_reg32 = base + p->sense_interrupt_mask_reg32;
	t->clr_interrupt_reg = base + p->clr_interrupt_reg;
	t->clr_interrupt_reg32 = base + p->clr_interrupt_reg32;
	t->sense_interrupt_reg = base + p->sense_interrupt_reg;
	t->sense_interrupt_reg32 = base + p->sense_interrupt_reg32;
	t->ioarrin_reg = base + p->ioarrin_reg;
	t->sense_uproc_interrupt_reg = base + p->sense_uproc_interrupt_reg;
	t->sense_uproc_interrupt_reg32 = base + p->sense_uproc_interrupt_reg32;
	t->set_uproc_interrupt_reg = base + p->set_uproc_interrupt_reg;
	t->set_uproc_interrupt_reg32 = base + p->set_uproc_interrupt_reg32;
	t->clr_uproc_interrupt_reg = base + p->clr_uproc_interrupt_reg;
	t->clr_uproc_interrupt_reg32 = base + p->clr_uproc_interrupt_reg32;

	/* Additional registers only present on SIS-64 generation chips */
	if (ioa_cfg->sis64) {
		t->init_feedback_reg = base + p->init_feedback_reg;
		t->dump_addr_reg = base + p->dump_addr_reg;
		t->dump_data_reg = base + p->dump_data_reg;
		t->endian_swap_reg = base + p->endian_swap_reg;
	}
}
/**
 * ipr_init_ioa_cfg - Initialize IOA config struct
 * @ioa_cfg:	ioa config struct
 * @host:		scsi host struct
 * @pdev:		PCI dev struct
 *
 * Description: Populates the freshly zeroed ioa_cfg: debug labels, list
 * heads, wait queues, SCSI host limits sized per chip generation, and
 * per-HRRQ locks.
 *
 * Return value:
 * 	none
 **/
static void ipr_init_ioa_cfg(struct ipr_ioa_cfg *ioa_cfg,
			     struct Scsi_Host *host, struct pci_dev *pdev)
{
	int i;

	ioa_cfg->host = host;
	ioa_cfg->pdev = pdev;
	ioa_cfg->log_level = ipr_log_level;
	ioa_cfg->doorbell = IPR_DOORBELL;
	/* Eye-catcher strings make the structures findable in a dump */
	sprintf(ioa_cfg->eye_catcher, IPR_EYECATCHER);
	sprintf(ioa_cfg->trace_start, IPR_TRACE_START_LABEL);
	sprintf(ioa_cfg->cfg_table_start, IPR_CFG_TBL_START);
	sprintf(ioa_cfg->resource_table_label, IPR_RES_TABLE_LABEL);
	sprintf(ioa_cfg->ipr_hcam_label, IPR_HCAM_LABEL);
	sprintf(ioa_cfg->ipr_cmd_label, IPR_CMD_LABEL);

	INIT_LIST_HEAD(&ioa_cfg->hostrcb_free_q);
	INIT_LIST_HEAD(&ioa_cfg->hostrcb_pending_q);
	INIT_LIST_HEAD(&ioa_cfg->hostrcb_report_q);
	INIT_LIST_HEAD(&ioa_cfg->free_res_q);
	INIT_LIST_HEAD(&ioa_cfg->used_res_q);
	INIT_WORK(&ioa_cfg->work_q, ipr_worker_thread);
	init_waitqueue_head(&ioa_cfg->reset_wait_q);
	init_waitqueue_head(&ioa_cfg->msi_wait_q);
	init_waitqueue_head(&ioa_cfg->eeh_wait_q);
	ioa_cfg->sdt_state = INACTIVE;

	ipr_initialize_bus_attr(ioa_cfg);
	ioa_cfg->max_devs_supported = ipr_max_devs;

	/* SIS-64 chips support more targets/LUNs and a larger config table */
	if (ioa_cfg->sis64) {
		host->max_id = IPR_MAX_SIS64_TARGETS_PER_BUS;
		host->max_lun = IPR_MAX_SIS64_LUNS_PER_TARGET;
		if (ipr_max_devs > IPR_MAX_SIS64_DEVS)
			ioa_cfg->max_devs_supported = IPR_MAX_SIS64_DEVS;
		ioa_cfg->cfg_table_size = (sizeof(struct ipr_config_table_hdr64)
					   + ((sizeof(struct ipr_config_table_entry64)
					       * ioa_cfg->max_devs_supported)));
	} else {
		host->max_id = IPR_MAX_NUM_TARGETS_PER_BUS;
		host->max_lun = IPR_MAX_NUM_LUNS_PER_TARGET;
		if (ipr_max_devs > IPR_MAX_PHYSICAL_DEVS)
			ioa_cfg->max_devs_supported = IPR_MAX_PHYSICAL_DEVS;
		ioa_cfg->cfg_table_size = (sizeof(struct ipr_config_table_hdr)
					   + ((sizeof(struct ipr_config_table_entry)
					       * ioa_cfg->max_devs_supported)));
	}

	host->max_channel = IPR_VSET_BUS;
	host->unique_id = host->host_no;
	host->max_cmd_len = IPR_MAX_CDB_LEN;
	host->can_queue = ioa_cfg->max_cmds;
	pci_set_drvdata(pdev, ioa_cfg);

	for (i = 0; i < ARRAY_SIZE(ioa_cfg->hrrq); i++) {
		INIT_LIST_HEAD(&ioa_cfg->hrrq[i].hrrq_free_q);
		INIT_LIST_HEAD(&ioa_cfg->hrrq[i].hrrq_pending_q);
		spin_lock_init(&ioa_cfg->hrrq[i]._lock);
		/* HRRQ 0 shares the host lock; the others use their own lock */
		if (i == 0)
			ioa_cfg->hrrq[i].lock = ioa_cfg->host->host_lock;
		else
			ioa_cfg->hrrq[i].lock = &ioa_cfg->hrrq[i]._lock;
	}
}
/**
 * ipr_get_chip_info - Find adapter chip information
 * @dev_id:		PCI device id struct
 *
 * Return value:
 * 	ptr to chip information on success / NULL on failure
 **/
static const struct ipr_chip_t *
ipr_get_chip_info(const struct pci_device_id *dev_id)
{
	int i;

	/* Linear scan of the static chip table by PCI vendor/device id */
	for (i = 0; i < ARRAY_SIZE(ipr_chip); i++)
		if (ipr_chip[i].vendor == dev_id->vendor &&
		    ipr_chip[i].device == dev_id->device)
			return &ipr_chip[i];
	return NULL;
}
/**
 * ipr_wait_for_pci_err_recovery - Wait for any PCI error recovery to complete
 * @ioa_cfg:	ioa config struct
 *
 * Return value:
 * 	none
 **/
static void ipr_wait_for_pci_err_recovery(struct ipr_ioa_cfg *ioa_cfg)
{
	struct pci_dev *pdev = ioa_cfg->pdev;

	if (pci_channel_offline(pdev)) {
		/* eeh_wait_q is woken by the PCI error handlers once the
		 * channel comes back; bounded by a recovery timeout */
		wait_event_timeout(ioa_cfg->eeh_wait_q,
				   !pci_channel_offline(pdev),
				   IPR_PCI_ERROR_RECOVERY_TIMEOUT);
		pci_restore_state(pdev);
	}
}
/**
 * name_msi_vectors - Build per-vector IRQ description strings
 * @ioa_cfg:	ioa config struct
 *
 * Description: Fills vectors_info[].desc with "host<no>-<vector>" names
 * used when requesting the extra MSI/MSI-X IRQs.
 *
 * Return value:
 * 	none
 **/
static void name_msi_vectors(struct ipr_ioa_cfg *ioa_cfg)
{
	/* n reserves one byte so the buffer always stays NUL-terminated */
	int vec_idx, n = sizeof(ioa_cfg->vectors_info[0].desc) - 1;

	for (vec_idx = 0; vec_idx < ioa_cfg->nvectors; vec_idx++) {
		snprintf(ioa_cfg->vectors_info[vec_idx].desc, n,
			 "host%d-%d", ioa_cfg->host->host_no, vec_idx);
		ioa_cfg->vectors_info[vec_idx].
			desc[strlen(ioa_cfg->vectors_info[vec_idx].desc)] = 0;
	}
}
/**
 * ipr_request_other_msi_irqs - Request the secondary (per-HRRQ) IRQs
 * @ioa_cfg:	ioa config struct
 * @pdev:	PCI device struct
 *
 * Description: Vector 0 is requested separately; this requests IRQs for
 * vectors 1..nvectors-1, one per HRRQ. On failure, the IRQs already
 * requested here are released before returning the error.
 *
 * Return value:
 * 	0 on success / non-zero request_irq error
 **/
static int ipr_request_other_msi_irqs(struct ipr_ioa_cfg *ioa_cfg,
				      struct pci_dev *pdev)
{
	int i, rc;

	for (i = 1; i < ioa_cfg->nvectors; i++) {
		rc = request_irq(pci_irq_vector(pdev, i),
			/* NOTE(review): handler name not visible in this
			 * extract; upstream uses ipr_isr_mhrrq - confirm */
			ipr_isr_mhrrq,
			0,
			ioa_cfg->vectors_info[i].desc,
			&ioa_cfg->hrrq[i]);
		if (rc) {
			while (--i > 0)
				free_irq(pci_irq_vector(pdev, i),
					&ioa_cfg->hrrq[i]);
			return rc;
		}
	}
	return 0;
}
/**
 * ipr_test_intr - Handle the interrupt generated in ipr_test_msi().
 * @irq:	interrupt number
 * @devp:	pointer to ioa config struct
 *
 * Description: Simply set the msi_received flag to 1 indicating that
 * Message Signaled Interrupts are supported.
 *
 * Return value:
 * 	IRQ_HANDLED
 **/
static irqreturn_t ipr_test_intr(int irq, void *devp)
{
	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)devp;
	unsigned long lock_flags = 0;
	irqreturn_t rc = IRQ_HANDLED;

	dev_info(&ioa_cfg->pdev->dev, "Received IRQ : %d\n", irq);
	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);

	/* Wake ipr_test_msi(), which is sleeping on msi_wait_q */
	ioa_cfg->msi_received = 1;
	wake_up(&ioa_cfg->msi_wait_q);

	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
	return rc;
}
/**
 * ipr_test_msi - Test for Message Signaled Interrupt (MSI) support.
 * @ioa_cfg:	ioa config struct
 * @pdev:	PCI device struct
 *
 * Description: This routine sets up and initiates a test interrupt to determine
 * if the interrupt is received via the ipr_test_intr() service routine.
 * If the tests fails, the driver will fall back to LSI.
 *
 * Return value:
 * 	0 on success / non-zero on failure
 **/
static int ipr_test_msi(struct ipr_ioa_cfg *ioa_cfg, struct pci_dev *pdev)
{
	int rc;
	volatile u32 int_reg;
	unsigned long lock_flags = 0;
	int irq = pci_irq_vector(pdev, 0);

	ENTER;

	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	init_waitqueue_head(&ioa_cfg->msi_wait_q);
	ioa_cfg->msi_received = 0;
	ipr_mask_and_clear_interrupts(ioa_cfg, ~IPR_PCII_IOA_TRANS_TO_OPER);
	writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE, ioa_cfg->regs.clr_interrupt_mask_reg32);
	/* Read back to flush the posted write before unlocking */
	int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);

	rc = request_irq(irq, ipr_test_intr, 0, IPR_NAME, ioa_cfg);
	if (rc) {
		dev_err(&pdev->dev, "Can not assign irq %d\n", irq);
		return rc;
	} else if (ipr_debug)
		dev_info(&pdev->dev, "IRQ assigned: %d\n", irq);

	/* Fire the debug-acknowledge interrupt and wait up to 1s for
	 * ipr_test_intr() to observe it */
	writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE, ioa_cfg->regs.sense_interrupt_reg32);
	int_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
	wait_event_timeout(ioa_cfg->msi_wait_q, ioa_cfg->msi_received, HZ);
	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	ipr_mask_and_clear_interrupts(ioa_cfg, ~IPR_PCII_IOA_TRANS_TO_OPER);

	if (!ioa_cfg->msi_received) {
		/* MSI test failed */
		dev_info(&pdev->dev, "MSI test failed. Falling back to LSI.\n");
		rc = -EOPNOTSUPP;
	} else if (ipr_debug)
		dev_info(&pdev->dev, "MSI test succeeded.\n");

	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);

	free_irq(irq, ioa_cfg);

	LEAVE;

	return rc;
}
10111 /* ipr_probe_ioa - Allocates memory and does first stage of initialization
10112 * @pdev: PCI device struct
10113 * @dev_id: PCI device id struct
10116 * 0 on success / non-zero on failure
10118 static int ipr_probe_ioa(struct pci_dev
*pdev
,
10119 const struct pci_device_id
*dev_id
)
10121 struct ipr_ioa_cfg
*ioa_cfg
;
10122 struct Scsi_Host
*host
;
10123 unsigned long ipr_regs_pci
;
10124 void __iomem
*ipr_regs
;
10125 int rc
= PCIBIOS_SUCCESSFUL
;
10126 volatile u32 mask
, uproc
, interrupts
;
10127 unsigned long lock_flags
, driver_lock_flags
;
10128 unsigned int irq_flag
;
10132 dev_info(&pdev
->dev
, "Found IOA with IRQ: %d\n", pdev
->irq
);
10133 host
= scsi_host_alloc(&driver_template
, sizeof(*ioa_cfg
));
10136 dev_err(&pdev
->dev
, "call to scsi_host_alloc failed!\n");
10141 ioa_cfg
= (struct ipr_ioa_cfg
*)host
->hostdata
;
10142 memset(ioa_cfg
, 0, sizeof(struct ipr_ioa_cfg
));
10143 ata_host_init(&ioa_cfg
->ata_host
, &pdev
->dev
, &ipr_sata_ops
);
10145 ioa_cfg
->ipr_chip
= ipr_get_chip_info(dev_id
);
10147 if (!ioa_cfg
->ipr_chip
) {
10148 dev_err(&pdev
->dev
, "Unknown adapter chipset 0x%04X 0x%04X\n",
10149 dev_id
->vendor
, dev_id
->device
);
10150 goto out_scsi_host_put
;
10153 /* set SIS 32 or SIS 64 */
10154 ioa_cfg
->sis64
= ioa_cfg
->ipr_chip
->sis_type
== IPR_SIS64
? 1 : 0;
10155 ioa_cfg
->chip_cfg
= ioa_cfg
->ipr_chip
->cfg
;
10156 ioa_cfg
->clear_isr
= ioa_cfg
->chip_cfg
->clear_isr
;
10157 ioa_cfg
->max_cmds
= ioa_cfg
->chip_cfg
->max_cmds
;
10159 if (ipr_transop_timeout
)
10160 ioa_cfg
->transop_timeout
= ipr_transop_timeout
;
10161 else if (dev_id
->driver_data
& IPR_USE_LONG_TRANSOP_TIMEOUT
)
10162 ioa_cfg
->transop_timeout
= IPR_LONG_OPERATIONAL_TIMEOUT
;
10164 ioa_cfg
->transop_timeout
= IPR_OPERATIONAL_TIMEOUT
;
10166 ioa_cfg
->revid
= pdev
->revision
;
10168 ipr_init_ioa_cfg(ioa_cfg
, host
, pdev
);
10170 ipr_regs_pci
= pci_resource_start(pdev
, 0);
10172 rc
= pci_request_regions(pdev
, IPR_NAME
);
10174 dev_err(&pdev
->dev
,
10175 "Couldn't register memory range of registers\n");
10176 goto out_scsi_host_put
;
10179 rc
= pci_enable_device(pdev
);
10181 if (rc
|| pci_channel_offline(pdev
)) {
10182 if (pci_channel_offline(pdev
)) {
10183 ipr_wait_for_pci_err_recovery(ioa_cfg
);
10184 rc
= pci_enable_device(pdev
);
10188 dev_err(&pdev
->dev
, "Cannot enable adapter\n");
10189 ipr_wait_for_pci_err_recovery(ioa_cfg
);
10190 goto out_release_regions
;
10194 ipr_regs
= pci_ioremap_bar(pdev
, 0);
10197 dev_err(&pdev
->dev
,
10198 "Couldn't map memory range of registers\n");
10203 ioa_cfg
->hdw_dma_regs
= ipr_regs
;
10204 ioa_cfg
->hdw_dma_regs_pci
= ipr_regs_pci
;
10205 ioa_cfg
->ioa_mailbox
= ioa_cfg
->chip_cfg
->mailbox
+ ipr_regs
;
10207 ipr_init_regs(ioa_cfg
);
10209 if (ioa_cfg
->sis64
) {
10210 rc
= dma_set_mask_and_coherent(&pdev
->dev
, DMA_BIT_MASK(64));
10212 dev_dbg(&pdev
->dev
, "Failed to set 64 bit DMA mask\n");
10213 rc
= dma_set_mask_and_coherent(&pdev
->dev
,
10217 rc
= dma_set_mask_and_coherent(&pdev
->dev
, DMA_BIT_MASK(32));
10220 dev_err(&pdev
->dev
, "Failed to set DMA mask\n");
10221 goto cleanup_nomem
;
10224 rc
= pci_write_config_byte(pdev
, PCI_CACHE_LINE_SIZE
,
10225 ioa_cfg
->chip_cfg
->cache_line_size
);
10227 if (rc
!= PCIBIOS_SUCCESSFUL
) {
10228 dev_err(&pdev
->dev
, "Write of cache line size failed\n");
10229 ipr_wait_for_pci_err_recovery(ioa_cfg
);
10231 goto cleanup_nomem
;
10234 /* Issue MMIO read to ensure card is not in EEH */
10235 interrupts
= readl(ioa_cfg
->regs
.sense_interrupt_reg
);
10236 ipr_wait_for_pci_err_recovery(ioa_cfg
);
10238 if (ipr_number_of_msix
> IPR_MAX_MSIX_VECTORS
) {
10239 dev_err(&pdev
->dev
, "The max number of MSIX is %d\n",
10240 IPR_MAX_MSIX_VECTORS
);
10241 ipr_number_of_msix
= IPR_MAX_MSIX_VECTORS
;
10244 irq_flag
= PCI_IRQ_LEGACY
;
10245 if (ioa_cfg
->ipr_chip
->has_msi
)
10246 irq_flag
|= PCI_IRQ_MSI
| PCI_IRQ_MSIX
;
10247 rc
= pci_alloc_irq_vectors(pdev
, 1, ipr_number_of_msix
, irq_flag
);
10249 ipr_wait_for_pci_err_recovery(ioa_cfg
);
10250 goto cleanup_nomem
;
10252 ioa_cfg
->nvectors
= rc
;
10254 if (!pdev
->msi_enabled
&& !pdev
->msix_enabled
)
10255 ioa_cfg
->clear_isr
= 1;
10257 pci_set_master(pdev
);
10259 if (pci_channel_offline(pdev
)) {
10260 ipr_wait_for_pci_err_recovery(ioa_cfg
);
10261 pci_set_master(pdev
);
10262 if (pci_channel_offline(pdev
)) {
10264 goto out_msi_disable
;
10268 if (pdev
->msi_enabled
|| pdev
->msix_enabled
) {
10269 rc
= ipr_test_msi(ioa_cfg
, pdev
);
10272 dev_info(&pdev
->dev
,
10273 "Request for %d MSI%ss succeeded.", ioa_cfg
->nvectors
,
10274 pdev
->msix_enabled
? "-X" : "");
10277 ipr_wait_for_pci_err_recovery(ioa_cfg
);
10278 pci_free_irq_vectors(pdev
);
10280 ioa_cfg
->nvectors
= 1;
10281 ioa_cfg
->clear_isr
= 1;
10284 goto out_msi_disable
;
10288 ioa_cfg
->hrrq_num
= min3(ioa_cfg
->nvectors
,
10289 (unsigned int)num_online_cpus(),
10290 (unsigned int)IPR_MAX_HRRQ_NUM
);
10292 if ((rc
= ipr_save_pcix_cmd_reg(ioa_cfg
)))
10293 goto out_msi_disable
;
10295 if ((rc
= ipr_set_pcix_cmd_reg(ioa_cfg
)))
10296 goto out_msi_disable
;
10298 rc
= ipr_alloc_mem(ioa_cfg
);
10300 dev_err(&pdev
->dev
,
10301 "Couldn't allocate enough memory for device driver!\n");
10302 goto out_msi_disable
;
10305 /* Save away PCI config space for use following IOA reset */
10306 rc
= pci_save_state(pdev
);
10308 if (rc
!= PCIBIOS_SUCCESSFUL
) {
10309 dev_err(&pdev
->dev
, "Failed to save PCI config space\n");
10311 goto cleanup_nolog
;
10315 * If HRRQ updated interrupt is not masked, or reset alert is set,
10316 * the card is in an unknown state and needs a hard reset
10318 mask
= readl(ioa_cfg
->regs
.sense_interrupt_mask_reg32
);
10319 interrupts
= readl(ioa_cfg
->regs
.sense_interrupt_reg32
);
10320 uproc
= readl(ioa_cfg
->regs
.sense_uproc_interrupt_reg32
);
10321 if ((mask
& IPR_PCII_HRRQ_UPDATED
) == 0 || (uproc
& IPR_UPROCI_RESET_ALERT
))
10322 ioa_cfg
->needs_hard_reset
= 1;
10323 if ((interrupts
& IPR_PCII_ERROR_INTERRUPTS
) || reset_devices
)
10324 ioa_cfg
->needs_hard_reset
= 1;
10325 if (interrupts
& IPR_PCII_IOA_UNIT_CHECKED
)
10326 ioa_cfg
->ioa_unit_checked
= 1;
10328 spin_lock_irqsave(ioa_cfg
->host
->host_lock
, lock_flags
);
10329 ipr_mask_and_clear_interrupts(ioa_cfg
, ~IPR_PCII_IOA_TRANS_TO_OPER
);
10330 spin_unlock_irqrestore(ioa_cfg
->host
->host_lock
, lock_flags
);
10332 if (pdev
->msi_enabled
|| pdev
->msix_enabled
) {
10333 name_msi_vectors(ioa_cfg
);
10334 rc
= request_irq(pci_irq_vector(pdev
, 0), ipr_isr
, 0,
10335 ioa_cfg
->vectors_info
[0].desc
,
10336 &ioa_cfg
->hrrq
[0]);
10338 rc
= ipr_request_other_msi_irqs(ioa_cfg
, pdev
);
10340 rc
= request_irq(pdev
->irq
, ipr_isr
,
10342 IPR_NAME
, &ioa_cfg
->hrrq
[0]);
10345 dev_err(&pdev
->dev
, "Couldn't register IRQ %d! rc=%d\n",
10347 goto cleanup_nolog
;
10350 if ((dev_id
->driver_data
& IPR_USE_PCI_WARM_RESET
) ||
10351 (dev_id
->device
== PCI_DEVICE_ID_IBM_OBSIDIAN_E
&& !ioa_cfg
->revid
)) {
10352 ioa_cfg
->needs_warm_reset
= 1;
10353 ioa_cfg
->reset
= ipr_reset_slot_reset
;
10355 ioa_cfg
->reset_work_q
= alloc_ordered_workqueue("ipr_reset_%d",
10356 WQ_MEM_RECLAIM
, host
->host_no
);
10358 if (!ioa_cfg
->reset_work_q
) {
10359 dev_err(&pdev
->dev
, "Couldn't register reset workqueue\n");
10364 ioa_cfg
->reset
= ipr_reset_start_bist
;
10366 spin_lock_irqsave(&ipr_driver_lock
, driver_lock_flags
);
10367 list_add_tail(&ioa_cfg
->queue
, &ipr_ioa_head
);
10368 spin_unlock_irqrestore(&ipr_driver_lock
, driver_lock_flags
);
10375 ipr_free_irqs(ioa_cfg
);
10377 ipr_free_mem(ioa_cfg
);
10379 ipr_wait_for_pci_err_recovery(ioa_cfg
);
10380 pci_free_irq_vectors(pdev
);
10384 pci_disable_device(pdev
);
10385 out_release_regions
:
10386 pci_release_regions(pdev
);
10388 scsi_host_put(host
);
/**
 * ipr_initiate_ioa_bringdown - Bring down an adapter
 * @ioa_cfg:		ioa config struct
 * @shutdown_type:	shutdown type
 *
 * Description: This function will initiate bringing down the adapter.
 * This consists of issuing an IOA shutdown to the adapter
 * to flush the cache, and running BIST.
 * If the caller needs to wait on the completion of the reset,
 * the caller must sleep on the reset_wait_q.
 *
 * Return value:
 *	none
 **/
static void ipr_initiate_ioa_bringdown(struct ipr_ioa_cfg *ioa_cfg,
				       enum ipr_shutdown_type shutdown_type)
{
	/* A dump still waiting to run cannot survive the reset; mark it
	 * aborted so the dump worker bails out. */
	if (ioa_cfg->sdt_state == WAIT_FOR_DUMP)
		ioa_cfg->sdt_state = ABORT_DUMP;
	ioa_cfg->reset_retries = 0;
	/* Flag this reset as a bringdown so the reset state machine does
	 * not try to bring the adapter back to operational afterwards. */
	ioa_cfg->in_ioa_bringdown = 1;
	ipr_initiate_ioa_reset(ioa_cfg, shutdown_type);
}
/**
 * __ipr_remove - Remove a single adapter
 * @pdev:	pci device struct
 *
 * Adapter hot plug remove entry point.
 *
 * Return value:
 *	none
 **/
static void __ipr_remove(struct pci_dev *pdev)
{
	unsigned long host_lock_flags = 0;
	struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
	int i;	/* HRRQ loop index; declaration reconstructed (used below) */
	unsigned long driver_lock_flags;

	/* Wait for any in-flight reset/reload to finish before starting
	 * the bringdown; the host lock is dropped while sleeping. */
	spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);
	while (ioa_cfg->in_reset_reload) {
		spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);
		wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
		spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);
	}

	/* Mark every HRRQ as removing so no new commands are issued
	 * while the adapter is being torn down. */
	for (i = 0; i < ioa_cfg->hrrq_num; i++) {
		spin_lock(&ioa_cfg->hrrq[i]._lock);
		ioa_cfg->hrrq[i].removing_ioa = 1;
		spin_unlock(&ioa_cfg->hrrq[i]._lock);
	}

	ipr_initiate_ioa_bringdown(ioa_cfg, IPR_SHUTDOWN_NORMAL);

	/* Sleep until the bringdown reset completes, then flush any
	 * outstanding work items before freeing resources. */
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);
	wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
	flush_work(&ioa_cfg->work_q);
	if (ioa_cfg->reset_work_q)
		flush_workqueue(ioa_cfg->reset_work_q);
	INIT_LIST_HEAD(&ioa_cfg->used_res_q);
	spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);

	/* Unlink this adapter from the global list of managed IOAs. */
	spin_lock_irqsave(&ipr_driver_lock, driver_lock_flags);
	list_del(&ioa_cfg->queue);
	spin_unlock_irqrestore(&ipr_driver_lock, driver_lock_flags);

	/* An aborted dump goes back to WAIT_FOR_DUMP so the state is
	 * consistent if anything still inspects it. */
	if (ioa_cfg->sdt_state == ABORT_DUMP)
		ioa_cfg->sdt_state = WAIT_FOR_DUMP;
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);

	ipr_free_all_resources(ioa_cfg);
}
/**
 * ipr_remove - IOA hot plug remove entry point
 * @pdev:	pci device struct
 *
 * Adapter hot plug remove entry point.
 *
 * Return value:
 *	none
 **/
static void ipr_remove(struct pci_dev *pdev)
{
	struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);

	/* Tear down the sysfs entries created at probe time before the
	 * SCSI host goes away.
	 * NOTE(review): the second arguments of the two ipr_remove_*
	 * calls were lost in this copy; &ipr_trace_attr / &ipr_dump_attr
	 * reconstructed to mirror the probe path -- confirm. */
	ipr_remove_trace_file(&ioa_cfg->host->shost_dev.kobj,
			      &ipr_trace_attr);
	ipr_remove_dump_file(&ioa_cfg->host->shost_dev.kobj,
			     &ipr_dump_attr);
	sysfs_remove_bin_file(&ioa_cfg->host->shost_dev.kobj,
			      &ipr_ioa_async_err_log);
	scsi_remove_host(ioa_cfg->host);

	__ipr_remove(pdev);
}
/**
 * ipr_probe - Adapter hot plug add entry point
 * @pdev:	pci device struct
 * @dev_id:	pci device id struct
 *
 * Return value:
 *	0 on success / non-zero on failure
 **/
static int ipr_probe(struct pci_dev *pdev, const struct pci_device_id *dev_id)
{
	struct ipr_ioa_cfg *ioa_cfg;
	unsigned long flags;
	int rc, i;	/* declarations reconstructed (both used below) */

	rc = ipr_probe_ioa(pdev, dev_id);

	/* NOTE(review): the "if (rc)" guards in this function were lost
	 * in this copy; they are reconstructed from the visible cleanup
	 * sequences, which only make sense on failure -- confirm. */
	if (rc)
		return rc;

	ioa_cfg = pci_get_drvdata(pdev);
	rc = ipr_probe_ioa_part2(ioa_cfg);

	if (rc) {
		__ipr_remove(pdev);
		return rc;
	}

	rc = scsi_add_host(ioa_cfg->host, &pdev->dev);

	if (rc) {
		__ipr_remove(pdev);
		return rc;
	}

	rc = ipr_create_trace_file(&ioa_cfg->host->shost_dev.kobj,
				   &ipr_trace_attr);

	if (rc) {
		scsi_remove_host(ioa_cfg->host);
		__ipr_remove(pdev);
		return rc;
	}

	rc = sysfs_create_bin_file(&ioa_cfg->host->shost_dev.kobj,
				   &ipr_ioa_async_err_log);

	if (rc) {
		/* NOTE(review): this error path removes the dump file
		 * even though it has not been created yet (creation
		 * happens below); looks like a latent quirk -- verify
		 * against upstream before changing. */
		ipr_remove_dump_file(&ioa_cfg->host->shost_dev.kobj,
				     &ipr_dump_attr);
		ipr_remove_trace_file(&ioa_cfg->host->shost_dev.kobj,
				      &ipr_trace_attr);
		scsi_remove_host(ioa_cfg->host);
		__ipr_remove(pdev);
		return rc;
	}

	rc = ipr_create_dump_file(&ioa_cfg->host->shost_dev.kobj,
				  &ipr_dump_attr);

	if (rc) {
		sysfs_remove_bin_file(&ioa_cfg->host->shost_dev.kobj,
				      &ipr_ioa_async_err_log);
		ipr_remove_trace_file(&ioa_cfg->host->shost_dev.kobj,
				      &ipr_trace_attr);
		scsi_remove_host(ioa_cfg->host);
		__ipr_remove(pdev);
		return rc;
	}

	/* Everything is in place: enable scanning and kick the worker
	 * that discovers attached devices. */
	spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
	ioa_cfg->scan_enabled = 1;
	schedule_work(&ioa_cfg->work_q);
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);

	ioa_cfg->iopoll_weight = ioa_cfg->chip_cfg->iopoll_weight;

	/* irq_poll is only used on SIS-64 adapters with multiple
	 * interrupt vectors; HRRQ 0 stays in interrupt mode. */
	if (ioa_cfg->iopoll_weight && ioa_cfg->sis64 && ioa_cfg->nvectors > 1) {
		for (i = 1; i < ioa_cfg->hrrq_num; i++) {
			irq_poll_init(&ioa_cfg->hrrq[i].iopoll,
				      ioa_cfg->iopoll_weight, ipr_iopoll);
		}
	}

	scsi_scan_host(ioa_cfg->host);

	return 0;
}
/**
 * ipr_shutdown - Shutdown handler.
 * @pdev:	pci device struct
 *
 * This function is invoked upon system shutdown/reboot. It will issue
 * an adapter shutdown to the adapter to flush the write cache.
 *
 * Return value:
 *	none
 **/
static void ipr_shutdown(struct pci_dev *pdev)
{
	struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
	unsigned long lock_flags = 0;
	enum ipr_shutdown_type shutdown_type = IPR_SHUTDOWN_NORMAL;
	int i;	/* HRRQ loop index; declaration reconstructed */

	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	/* Disable iopoll on the secondary HRRQs so no polled completion
	 * work runs while the adapter is being shut down. */
	if (ioa_cfg->iopoll_weight && ioa_cfg->sis64 && ioa_cfg->nvectors > 1) {
		ioa_cfg->iopoll_weight = 0;
		for (i = 1; i < ioa_cfg->hrrq_num; i++)
			irq_poll_disable(&ioa_cfg->hrrq[i].iopoll);
	}

	/* Wait out any in-progress reset/reload before issuing the
	 * shutdown; drop the host lock while sleeping. */
	while (ioa_cfg->in_reset_reload) {
		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
		wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
		spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	}

	/* Fast reboot on SIS-64 hardware uses a quiesce instead of a
	 * full normal shutdown. */
	if (ipr_fast_reboot && system_state == SYSTEM_RESTART && ioa_cfg->sis64)
		shutdown_type = IPR_SHUTDOWN_QUIESCE;

	ipr_initiate_ioa_bringdown(ioa_cfg, shutdown_type);
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
	wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
	if (ipr_fast_reboot && system_state == SYSTEM_RESTART && ioa_cfg->sis64) {
		ipr_free_irqs(ioa_cfg);
		pci_disable_device(ioa_cfg->pdev);
	}
}
/*
 * PCI IDs this driver binds to.  Every entry matches on vendor/device
 * plus the IBM subsystem vendor and a specific subsystem device ID;
 * the last field carries per-board driver_data flags
 * (IPR_USE_LONG_TRANSOP_TIMEOUT / IPR_USE_PCI_WARM_RESET).
 */
static struct pci_device_id ipr_pci_table[] = {
	{ PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_5702, 0, 0, 0 },
	{ PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_5703, 0, 0, 0 },
	{ PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_573D, 0, 0, 0 },
	{ PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_573E, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_571B, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572E, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_571A, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_575B, 0, 0,
		IPR_USE_LONG_TRANSOP_TIMEOUT },
	{ PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_OBSIDIAN,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572A, 0, 0, 0 },
	{ PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_OBSIDIAN,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572B, 0, 0,
		IPR_USE_LONG_TRANSOP_TIMEOUT },
	{ PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_OBSIDIAN,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_575C, 0, 0,
		IPR_USE_LONG_TRANSOP_TIMEOUT },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572A, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572B, 0, 0,
		IPR_USE_LONG_TRANSOP_TIMEOUT },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_575C, 0, 0,
		IPR_USE_LONG_TRANSOP_TIMEOUT },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_574E, 0, 0,
		IPR_USE_LONG_TRANSOP_TIMEOUT },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B3, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57CC, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B7, 0, 0,
		IPR_USE_LONG_TRANSOP_TIMEOUT | IPR_USE_PCI_WARM_RESET },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_SNIPE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_2780, 0, 0, 0 },
	{ PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_SCAMP,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_571E, 0, 0, 0 },
	{ PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_SCAMP,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_571F, 0, 0,
		IPR_USE_LONG_TRANSOP_TIMEOUT },
	{ PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_SCAMP,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572F, 0, 0,
		IPR_USE_LONG_TRANSOP_TIMEOUT },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B5, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_574D, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B2, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57C0, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57C3, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57C4, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B4, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B1, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57C6, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57C8, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57CE, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57D5, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57D6, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57D7, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57D8, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57D9, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57DA, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57EB, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57EC, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57ED, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57EE, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57EF, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57F0, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_2CCA, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_2CD2, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_2CCD, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_RATTLESNAKE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_580A, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_RATTLESNAKE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_580B, 0, 0, 0 },
	{ }	/* terminating all-zero sentinel; reconstructed -- required
		 * for MODULE_DEVICE_TABLE / pci_match_id to stop */
};
MODULE_DEVICE_TABLE(pci, ipr_pci_table);
/* PCI AER/EEH error recovery callbacks (defined earlier in this file). */
static const struct pci_error_handlers ipr_err_handler = {
	.error_detected = ipr_pci_error_detected,
	.mmio_enabled = ipr_pci_mmio_enabled,
	.slot_reset = ipr_pci_slot_reset,
};
/* PCI driver registration for all adapters in ipr_pci_table. */
static struct pci_driver ipr_driver = {
	/* NOTE(review): the .name member was lost in this copy;
	 * IPR_NAME reconstructed from its use elsewhere -- confirm. */
	.name = IPR_NAME,
	.id_table = ipr_pci_table,
	.probe = ipr_probe,
	.remove = ipr_remove,
	.shutdown = ipr_shutdown,
	.err_handler = &ipr_err_handler,
};
/**
 * ipr_halt_done - Shutdown prepare completion
 * @ipr_cmd:	ipr command struct
 *
 * Return value:
 *	none
 **/
static void ipr_halt_done(struct ipr_cmnd *ipr_cmd)
{
	/* Return the command block to its HRRQ's free list; nobody
	 * waits on the shutdown-prepare completion. */
	list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
}
/**
 * ipr_halt - Issue shutdown prepare to all adapters
 * @nb:		notifier block
 * @event:	reboot notifier event (SYS_RESTART / SYS_HALT / SYS_POWER_OFF)
 * @buf:	unused notifier payload
 *
 * Return value:
 *	NOTIFY_OK on success / NOTIFY_DONE on failure
 **/
static int ipr_halt(struct notifier_block *nb, ulong event, void *buf)
{
	struct ipr_cmnd *ipr_cmd;
	struct ipr_ioa_cfg *ioa_cfg;
	unsigned long flags = 0, driver_lock_flags;

	/* Only act on events that actually take the machine down. */
	if (event != SYS_RESTART && event != SYS_HALT && event != SYS_POWER_OFF)
		return NOTIFY_DONE;

	spin_lock_irqsave(&ipr_driver_lock, driver_lock_flags);

	list_for_each_entry(ioa_cfg, &ipr_ioa_head, queue) {
		spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
		/* Skip adapters that cannot accept commands, and skip the
		 * shutdown prepare entirely on a SIS-64 fast reboot (the
		 * shutdown path quiesces instead). */
		if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].allow_cmds ||
		    (ipr_fast_reboot && event == SYS_RESTART && ioa_cfg->sis64)) {
			spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
			continue;	/* reconstructed: early unlock implies skip */
		}

		/* Build and fire an IOA "shutdown prepare for normal"
		 * command; completion just frees the command block. */
		ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
		ipr_cmd->ioarcb.res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
		ipr_cmd->ioarcb.cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
		ipr_cmd->ioarcb.cmd_pkt.cdb[0] = IPR_IOA_SHUTDOWN;
		ipr_cmd->ioarcb.cmd_pkt.cdb[1] = IPR_SHUTDOWN_PREPARE_FOR_NORMAL;

		ipr_do_req(ipr_cmd, ipr_halt_done, ipr_timeout, IPR_DEVICE_RESET_TIMEOUT);
		spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
	}
	spin_unlock_irqrestore(&ipr_driver_lock, driver_lock_flags);

	return NOTIFY_OK;	/* reconstructed per documented return contract */
}
/* Reboot notifier registered in ipr_init().
 * NOTE(review): the initializer body was lost in this copy;
 * .notifier_call = ipr_halt reconstructed from register_reboot_notifier
 * usage below and ipr_halt's signature -- confirm. */
static struct notifier_block ipr_notifier = {
	.notifier_call = ipr_halt,
};
/**
 * ipr_init - Module entry point
 *
 * Return value:
 *	0 on success / negative value on failure
 **/
static int __init ipr_init(void)
{
	ipr_info("IBM Power RAID SCSI Device Driver version: %s %s\n",
		 IPR_DRIVER_VERSION, IPR_DRIVER_DATE);

	/* Register for reboot notifications first so every adapter gets
	 * a shutdown prepare (ipr_halt) on restart/halt/power-off. */
	register_reboot_notifier(&ipr_notifier);
	return pci_register_driver(&ipr_driver);
}
/**
 * ipr_exit - Module unload
 *
 * Module unload entry point.
 *
 * Return value:
 *	none
 **/
static void __exit ipr_exit(void)
{
	/* Mirror ipr_init(): drop the reboot notifier, then unbind the
	 * PCI driver (which removes every managed adapter). */
	unregister_reboot_notifier(&ipr_notifier);
	pci_unregister_driver(&ipr_driver);
}

module_init(ipr_init);
module_exit(ipr_exit);