2 * ipr.c -- driver for IBM Power Linux RAID adapters
4 * Written By: Brian King <brking@us.ibm.com>, IBM Corporation
6 * Copyright (C) 2003, 2004 IBM Corporation
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License as published by
10 * the Free Software Foundation; either version 2 of the License, or
11 * (at your option) any later version.
13 * This program is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 * GNU General Public License for more details.
18 * You should have received a copy of the GNU General Public License
19 * along with this program; if not, write to the Free Software
20 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
27 * This driver is used to control the following SCSI adapters:
29 * IBM iSeries: 5702, 5703, 2780, 5709, 570A, 570B
31 * IBM pSeries: PCI-X Dual Channel Ultra 320 SCSI RAID Adapter
32 * PCI-X Dual Channel Ultra 320 SCSI Adapter
33 * PCI-X Dual Channel Ultra 320 SCSI RAID Enablement Card
34 * Embedded SCSI adapter on p615 and p655 systems
36 * Supported Hardware Features:
37 * - Ultra 320 SCSI controller
38 * - PCI-X host interface
39 * - Embedded PowerPC RISC Processor and Hardware XOR DMA Engine
40 * - Non-Volatile Write Cache
41 * - Supports attachment of non-RAID disks, tape, and optical devices
42 * - RAID Levels 0, 5, 10
44 * - Background Parity Checking
45 * - Background Data Scrubbing
46 * - Ability to increase the capacity of an existing RAID 5 disk array
50 * - Tagged command queuing
51 * - Adapter microcode download
53 * - SCSI device hot plug
58 #include <linux/init.h>
59 #include <linux/types.h>
60 #include <linux/errno.h>
61 #include <linux/kernel.h>
62 #include <linux/slab.h>
63 #include <linux/vmalloc.h>
64 #include <linux/ioport.h>
65 #include <linux/delay.h>
66 #include <linux/pci.h>
67 #include <linux/wait.h>
68 #include <linux/spinlock.h>
69 #include <linux/sched.h>
70 #include <linux/interrupt.h>
71 #include <linux/blkdev.h>
72 #include <linux/firmware.h>
73 #include <linux/module.h>
74 #include <linux/moduleparam.h>
75 #include <linux/libata.h>
76 #include <linux/hdreg.h>
77 #include <linux/reboot.h>
78 #include <linux/stringify.h>
81 #include <asm/processor.h>
82 #include <scsi/scsi.h>
83 #include <scsi/scsi_host.h>
84 #include <scsi/scsi_tcq.h>
85 #include <scsi/scsi_eh.h>
86 #include <scsi/scsi_cmnd.h>
92 static LIST_HEAD(ipr_ioa_head
);
93 static unsigned int ipr_log_level
= IPR_DEFAULT_LOG_LEVEL
;
94 static unsigned int ipr_max_speed
= 1;
95 static int ipr_testmode
= 0;
96 static unsigned int ipr_fastfail
= 0;
97 static unsigned int ipr_transop_timeout
= 0;
98 static unsigned int ipr_debug
= 0;
99 static unsigned int ipr_max_devs
= IPR_DEFAULT_SIS64_DEVS
;
100 static unsigned int ipr_dual_ioa_raid
= 1;
101 static unsigned int ipr_number_of_msix
= 2;
102 static DEFINE_SPINLOCK(ipr_driver_lock
);
104 /* This table describes the differences between DMA controller chips */
105 static const struct ipr_chip_cfg_t ipr_chip_cfg
[] = {
106 { /* Gemstone, Citrine, Obsidian, and Obsidian-E */
109 .cache_line_size
= 0x20,
112 .set_interrupt_mask_reg
= 0x0022C,
113 .clr_interrupt_mask_reg
= 0x00230,
114 .clr_interrupt_mask_reg32
= 0x00230,
115 .sense_interrupt_mask_reg
= 0x0022C,
116 .sense_interrupt_mask_reg32
= 0x0022C,
117 .clr_interrupt_reg
= 0x00228,
118 .clr_interrupt_reg32
= 0x00228,
119 .sense_interrupt_reg
= 0x00224,
120 .sense_interrupt_reg32
= 0x00224,
121 .ioarrin_reg
= 0x00404,
122 .sense_uproc_interrupt_reg
= 0x00214,
123 .sense_uproc_interrupt_reg32
= 0x00214,
124 .set_uproc_interrupt_reg
= 0x00214,
125 .set_uproc_interrupt_reg32
= 0x00214,
126 .clr_uproc_interrupt_reg
= 0x00218,
127 .clr_uproc_interrupt_reg32
= 0x00218
130 { /* Snipe and Scamp */
133 .cache_line_size
= 0x20,
136 .set_interrupt_mask_reg
= 0x00288,
137 .clr_interrupt_mask_reg
= 0x0028C,
138 .clr_interrupt_mask_reg32
= 0x0028C,
139 .sense_interrupt_mask_reg
= 0x00288,
140 .sense_interrupt_mask_reg32
= 0x00288,
141 .clr_interrupt_reg
= 0x00284,
142 .clr_interrupt_reg32
= 0x00284,
143 .sense_interrupt_reg
= 0x00280,
144 .sense_interrupt_reg32
= 0x00280,
145 .ioarrin_reg
= 0x00504,
146 .sense_uproc_interrupt_reg
= 0x00290,
147 .sense_uproc_interrupt_reg32
= 0x00290,
148 .set_uproc_interrupt_reg
= 0x00290,
149 .set_uproc_interrupt_reg32
= 0x00290,
150 .clr_uproc_interrupt_reg
= 0x00294,
151 .clr_uproc_interrupt_reg32
= 0x00294
157 .cache_line_size
= 0x20,
160 .set_interrupt_mask_reg
= 0x00010,
161 .clr_interrupt_mask_reg
= 0x00018,
162 .clr_interrupt_mask_reg32
= 0x0001C,
163 .sense_interrupt_mask_reg
= 0x00010,
164 .sense_interrupt_mask_reg32
= 0x00014,
165 .clr_interrupt_reg
= 0x00008,
166 .clr_interrupt_reg32
= 0x0000C,
167 .sense_interrupt_reg
= 0x00000,
168 .sense_interrupt_reg32
= 0x00004,
169 .ioarrin_reg
= 0x00070,
170 .sense_uproc_interrupt_reg
= 0x00020,
171 .sense_uproc_interrupt_reg32
= 0x00024,
172 .set_uproc_interrupt_reg
= 0x00020,
173 .set_uproc_interrupt_reg32
= 0x00024,
174 .clr_uproc_interrupt_reg
= 0x00028,
175 .clr_uproc_interrupt_reg32
= 0x0002C,
176 .init_feedback_reg
= 0x0005C,
177 .dump_addr_reg
= 0x00064,
178 .dump_data_reg
= 0x00068,
179 .endian_swap_reg
= 0x00084
184 static const struct ipr_chip_t ipr_chip
[] = {
185 { PCI_VENDOR_ID_MYLEX
, PCI_DEVICE_ID_IBM_GEMSTONE
, IPR_USE_LSI
, IPR_SIS32
, IPR_PCI_CFG
, &ipr_chip_cfg
[0] },
186 { PCI_VENDOR_ID_IBM
, PCI_DEVICE_ID_IBM_CITRINE
, IPR_USE_LSI
, IPR_SIS32
, IPR_PCI_CFG
, &ipr_chip_cfg
[0] },
187 { PCI_VENDOR_ID_ADAPTEC2
, PCI_DEVICE_ID_ADAPTEC2_OBSIDIAN
, IPR_USE_LSI
, IPR_SIS32
, IPR_PCI_CFG
, &ipr_chip_cfg
[0] },
188 { PCI_VENDOR_ID_IBM
, PCI_DEVICE_ID_IBM_OBSIDIAN
, IPR_USE_LSI
, IPR_SIS32
, IPR_PCI_CFG
, &ipr_chip_cfg
[0] },
189 { PCI_VENDOR_ID_IBM
, PCI_DEVICE_ID_IBM_OBSIDIAN_E
, IPR_USE_MSI
, IPR_SIS32
, IPR_PCI_CFG
, &ipr_chip_cfg
[0] },
190 { PCI_VENDOR_ID_IBM
, PCI_DEVICE_ID_IBM_SNIPE
, IPR_USE_LSI
, IPR_SIS32
, IPR_PCI_CFG
, &ipr_chip_cfg
[1] },
191 { PCI_VENDOR_ID_ADAPTEC2
, PCI_DEVICE_ID_ADAPTEC2_SCAMP
, IPR_USE_LSI
, IPR_SIS32
, IPR_PCI_CFG
, &ipr_chip_cfg
[1] },
192 { PCI_VENDOR_ID_IBM
, PCI_DEVICE_ID_IBM_CROC_FPGA_E2
, IPR_USE_MSI
, IPR_SIS64
, IPR_MMIO
, &ipr_chip_cfg
[2] },
193 { PCI_VENDOR_ID_IBM
, PCI_DEVICE_ID_IBM_CROCODILE
, IPR_USE_MSI
, IPR_SIS64
, IPR_MMIO
, &ipr_chip_cfg
[2] }
196 static int ipr_max_bus_speeds
[] = {
197 IPR_80MBs_SCSI_RATE
, IPR_U160_SCSI_RATE
, IPR_U320_SCSI_RATE
200 MODULE_AUTHOR("Brian King <brking@us.ibm.com>");
201 MODULE_DESCRIPTION("IBM Power RAID SCSI Adapter Driver");
202 module_param_named(max_speed
, ipr_max_speed
, uint
, 0);
203 MODULE_PARM_DESC(max_speed
, "Maximum bus speed (0-2). Default: 1=U160. Speeds: 0=80 MB/s, 1=U160, 2=U320");
204 module_param_named(log_level
, ipr_log_level
, uint
, 0);
205 MODULE_PARM_DESC(log_level
, "Set to 0 - 4 for increasing verbosity of device driver");
206 module_param_named(testmode
, ipr_testmode
, int, 0);
207 MODULE_PARM_DESC(testmode
, "DANGEROUS!!! Allows unsupported configurations");
208 module_param_named(fastfail
, ipr_fastfail
, int, S_IRUGO
| S_IWUSR
);
209 MODULE_PARM_DESC(fastfail
, "Reduce timeouts and retries");
210 module_param_named(transop_timeout
, ipr_transop_timeout
, int, 0);
211 MODULE_PARM_DESC(transop_timeout
, "Time in seconds to wait for adapter to come operational (default: 300)");
212 module_param_named(debug
, ipr_debug
, int, S_IRUGO
| S_IWUSR
);
213 MODULE_PARM_DESC(debug
, "Enable device driver debugging logging. Set to 1 to enable. (default: 0)");
214 module_param_named(dual_ioa_raid
, ipr_dual_ioa_raid
, int, 0);
215 MODULE_PARM_DESC(dual_ioa_raid
, "Enable dual adapter RAID support. Set to 1 to enable. (default: 1)");
216 module_param_named(max_devs
, ipr_max_devs
, int, 0);
217 MODULE_PARM_DESC(max_devs
, "Specify the maximum number of physical devices. "
218 "[Default=" __stringify(IPR_DEFAULT_SIS64_DEVS
) "]");
219 module_param_named(number_of_msix
, ipr_number_of_msix
, int, 0);
220 MODULE_PARM_DESC(number_of_msix
, "Specify the number of MSIX interrupts to use on capable adapters (1 - 5). (default:2)");
221 MODULE_LICENSE("GPL");
222 MODULE_VERSION(IPR_DRIVER_VERSION
);
224 /* A constant array of IOASCs/URCs/Error Messages */
226 struct ipr_error_table_t ipr_error_table
[] = {
227 {0x00000000, 1, IPR_DEFAULT_LOG_LEVEL
,
228 "8155: An unknown error was received"},
230 "Soft underlength error"},
232 "Command to be cancelled not found"},
234 "Qualified success"},
235 {0x01080000, 1, IPR_DEFAULT_LOG_LEVEL
,
236 "FFFE: Soft device bus error recovered by the IOA"},
237 {0x01088100, 0, IPR_DEFAULT_LOG_LEVEL
,
238 "4101: Soft device bus fabric error"},
239 {0x01100100, 0, IPR_DEFAULT_LOG_LEVEL
,
240 "FFFC: Logical block guard error recovered by the device"},
241 {0x01100300, 0, IPR_DEFAULT_LOG_LEVEL
,
242 "FFFC: Logical block reference tag error recovered by the device"},
243 {0x01108300, 0, IPR_DEFAULT_LOG_LEVEL
,
244 "4171: Recovered scatter list tag / sequence number error"},
245 {0x01109000, 0, IPR_DEFAULT_LOG_LEVEL
,
246 "FF3D: Recovered logical block CRC error on IOA to Host transfer"},
247 {0x01109200, 0, IPR_DEFAULT_LOG_LEVEL
,
248 "4171: Recovered logical block sequence number error on IOA to Host transfer"},
249 {0x0110A000, 0, IPR_DEFAULT_LOG_LEVEL
,
250 "FFFD: Recovered logical block reference tag error detected by the IOA"},
251 {0x0110A100, 0, IPR_DEFAULT_LOG_LEVEL
,
252 "FFFD: Logical block guard error recovered by the IOA"},
253 {0x01170600, 0, IPR_DEFAULT_LOG_LEVEL
,
254 "FFF9: Device sector reassign successful"},
255 {0x01170900, 0, IPR_DEFAULT_LOG_LEVEL
,
256 "FFF7: Media error recovered by device rewrite procedures"},
257 {0x01180200, 0, IPR_DEFAULT_LOG_LEVEL
,
258 "7001: IOA sector reassignment successful"},
259 {0x01180500, 0, IPR_DEFAULT_LOG_LEVEL
,
260 "FFF9: Soft media error. Sector reassignment recommended"},
261 {0x01180600, 0, IPR_DEFAULT_LOG_LEVEL
,
262 "FFF7: Media error recovered by IOA rewrite procedures"},
263 {0x01418000, 0, IPR_DEFAULT_LOG_LEVEL
,
264 "FF3D: Soft PCI bus error recovered by the IOA"},
265 {0x01440000, 1, IPR_DEFAULT_LOG_LEVEL
,
266 "FFF6: Device hardware error recovered by the IOA"},
267 {0x01448100, 0, IPR_DEFAULT_LOG_LEVEL
,
268 "FFF6: Device hardware error recovered by the device"},
269 {0x01448200, 1, IPR_DEFAULT_LOG_LEVEL
,
270 "FF3D: Soft IOA error recovered by the IOA"},
271 {0x01448300, 0, IPR_DEFAULT_LOG_LEVEL
,
272 "FFFA: Undefined device response recovered by the IOA"},
273 {0x014A0000, 1, IPR_DEFAULT_LOG_LEVEL
,
274 "FFF6: Device bus error, message or command phase"},
275 {0x014A8000, 0, IPR_DEFAULT_LOG_LEVEL
,
276 "FFFE: Task Management Function failed"},
277 {0x015D0000, 0, IPR_DEFAULT_LOG_LEVEL
,
278 "FFF6: Failure prediction threshold exceeded"},
279 {0x015D9200, 0, IPR_DEFAULT_LOG_LEVEL
,
280 "8009: Impending cache battery pack failure"},
282 "34FF: Disk device format in progress"},
283 {0x02048000, 0, IPR_DEFAULT_LOG_LEVEL
,
284 "9070: IOA requested reset"},
286 "Synchronization required"},
288 "No ready, IOA shutdown"},
290 "Not ready, IOA has been shutdown"},
291 {0x02670100, 0, IPR_DEFAULT_LOG_LEVEL
,
292 "3020: Storage subsystem configuration error"},
294 "FFF5: Medium error, data unreadable, recommend reassign"},
296 "7000: Medium error, data unreadable, do not reassign"},
297 {0x03310000, 0, IPR_DEFAULT_LOG_LEVEL
,
298 "FFF3: Disk media format bad"},
299 {0x04050000, 0, IPR_DEFAULT_LOG_LEVEL
,
300 "3002: Addressed device failed to respond to selection"},
301 {0x04080000, 1, IPR_DEFAULT_LOG_LEVEL
,
302 "3100: Device bus error"},
303 {0x04080100, 0, IPR_DEFAULT_LOG_LEVEL
,
304 "3109: IOA timed out a device command"},
306 "3120: SCSI bus is not operational"},
307 {0x04088100, 0, IPR_DEFAULT_LOG_LEVEL
,
308 "4100: Hard device bus fabric error"},
309 {0x04100100, 0, IPR_DEFAULT_LOG_LEVEL
,
310 "310C: Logical block guard error detected by the device"},
311 {0x04100300, 0, IPR_DEFAULT_LOG_LEVEL
,
312 "310C: Logical block reference tag error detected by the device"},
313 {0x04108300, 1, IPR_DEFAULT_LOG_LEVEL
,
314 "4170: Scatter list tag / sequence number error"},
315 {0x04109000, 1, IPR_DEFAULT_LOG_LEVEL
,
316 "8150: Logical block CRC error on IOA to Host transfer"},
317 {0x04109200, 1, IPR_DEFAULT_LOG_LEVEL
,
318 "4170: Logical block sequence number error on IOA to Host transfer"},
319 {0x0410A000, 0, IPR_DEFAULT_LOG_LEVEL
,
320 "310D: Logical block reference tag error detected by the IOA"},
321 {0x0410A100, 0, IPR_DEFAULT_LOG_LEVEL
,
322 "310D: Logical block guard error detected by the IOA"},
323 {0x04118000, 0, IPR_DEFAULT_LOG_LEVEL
,
324 "9000: IOA reserved area data check"},
325 {0x04118100, 0, IPR_DEFAULT_LOG_LEVEL
,
326 "9001: IOA reserved area invalid data pattern"},
327 {0x04118200, 0, IPR_DEFAULT_LOG_LEVEL
,
328 "9002: IOA reserved area LRC error"},
329 {0x04118300, 1, IPR_DEFAULT_LOG_LEVEL
,
330 "Hardware Error, IOA metadata access error"},
331 {0x04320000, 0, IPR_DEFAULT_LOG_LEVEL
,
332 "102E: Out of alternate sectors for disk storage"},
333 {0x04330000, 1, IPR_DEFAULT_LOG_LEVEL
,
334 "FFF4: Data transfer underlength error"},
335 {0x04338000, 1, IPR_DEFAULT_LOG_LEVEL
,
336 "FFF4: Data transfer overlength error"},
337 {0x043E0100, 0, IPR_DEFAULT_LOG_LEVEL
,
338 "3400: Logical unit failure"},
339 {0x04408500, 0, IPR_DEFAULT_LOG_LEVEL
,
340 "FFF4: Device microcode is corrupt"},
341 {0x04418000, 1, IPR_DEFAULT_LOG_LEVEL
,
342 "8150: PCI bus error"},
344 "Unsupported device bus message received"},
345 {0x04440000, 1, IPR_DEFAULT_LOG_LEVEL
,
346 "FFF4: Disk device problem"},
347 {0x04448200, 1, IPR_DEFAULT_LOG_LEVEL
,
348 "8150: Permanent IOA failure"},
349 {0x04448300, 0, IPR_DEFAULT_LOG_LEVEL
,
350 "3010: Disk device returned wrong response to IOA"},
351 {0x04448400, 0, IPR_DEFAULT_LOG_LEVEL
,
352 "8151: IOA microcode error"},
354 "Device bus status error"},
355 {0x04448600, 0, IPR_DEFAULT_LOG_LEVEL
,
356 "8157: IOA error requiring IOA reset to recover"},
358 "ATA device status error"},
360 "Message reject received from the device"},
361 {0x04449200, 0, IPR_DEFAULT_LOG_LEVEL
,
362 "8008: A permanent cache battery pack failure occurred"},
363 {0x0444A000, 0, IPR_DEFAULT_LOG_LEVEL
,
364 "9090: Disk unit has been modified after the last known status"},
365 {0x0444A200, 0, IPR_DEFAULT_LOG_LEVEL
,
366 "9081: IOA detected device error"},
367 {0x0444A300, 0, IPR_DEFAULT_LOG_LEVEL
,
368 "9082: IOA detected device error"},
369 {0x044A0000, 1, IPR_DEFAULT_LOG_LEVEL
,
370 "3110: Device bus error, message or command phase"},
371 {0x044A8000, 1, IPR_DEFAULT_LOG_LEVEL
,
372 "3110: SAS Command / Task Management Function failed"},
373 {0x04670400, 0, IPR_DEFAULT_LOG_LEVEL
,
374 "9091: Incorrect hardware configuration change has been detected"},
375 {0x04678000, 0, IPR_DEFAULT_LOG_LEVEL
,
376 "9073: Invalid multi-adapter configuration"},
377 {0x04678100, 0, IPR_DEFAULT_LOG_LEVEL
,
378 "4010: Incorrect connection between cascaded expanders"},
379 {0x04678200, 0, IPR_DEFAULT_LOG_LEVEL
,
380 "4020: Connections exceed IOA design limits"},
381 {0x04678300, 0, IPR_DEFAULT_LOG_LEVEL
,
382 "4030: Incorrect multipath connection"},
383 {0x04679000, 0, IPR_DEFAULT_LOG_LEVEL
,
384 "4110: Unsupported enclosure function"},
385 {0x046E0000, 0, IPR_DEFAULT_LOG_LEVEL
,
386 "FFF4: Command to logical unit failed"},
388 "Illegal request, invalid request type or request packet"},
390 "Illegal request, invalid resource handle"},
392 "Illegal request, commands not allowed to this device"},
394 "Illegal request, command not allowed to a secondary adapter"},
396 "Illegal request, command not allowed to a non-optimized resource"},
398 "Illegal request, invalid field in parameter list"},
400 "Illegal request, parameter not supported"},
402 "Illegal request, parameter value invalid"},
404 "Illegal request, command sequence error"},
406 "Illegal request, dual adapter support not enabled"},
407 {0x06040500, 0, IPR_DEFAULT_LOG_LEVEL
,
408 "9031: Array protection temporarily suspended, protection resuming"},
409 {0x06040600, 0, IPR_DEFAULT_LOG_LEVEL
,
410 "9040: Array protection temporarily suspended, protection resuming"},
411 {0x06288000, 0, IPR_DEFAULT_LOG_LEVEL
,
412 "3140: Device bus not ready to ready transition"},
413 {0x06290000, 0, IPR_DEFAULT_LOG_LEVEL
,
414 "FFFB: SCSI bus was reset"},
416 "FFFE: SCSI bus transition to single ended"},
418 "FFFE: SCSI bus transition to LVD"},
419 {0x06298000, 0, IPR_DEFAULT_LOG_LEVEL
,
420 "FFFB: SCSI bus was reset by another initiator"},
421 {0x063F0300, 0, IPR_DEFAULT_LOG_LEVEL
,
422 "3029: A device replacement has occurred"},
423 {0x064C8000, 0, IPR_DEFAULT_LOG_LEVEL
,
424 "9051: IOA cache data exists for a missing or failed device"},
425 {0x064C8100, 0, IPR_DEFAULT_LOG_LEVEL
,
426 "9055: Auxiliary cache IOA contains cache data needed by the primary IOA"},
427 {0x06670100, 0, IPR_DEFAULT_LOG_LEVEL
,
428 "9025: Disk unit is not supported at its physical location"},
429 {0x06670600, 0, IPR_DEFAULT_LOG_LEVEL
,
430 "3020: IOA detected a SCSI bus configuration error"},
431 {0x06678000, 0, IPR_DEFAULT_LOG_LEVEL
,
432 "3150: SCSI bus configuration error"},
433 {0x06678100, 0, IPR_DEFAULT_LOG_LEVEL
,
434 "9074: Asymmetric advanced function disk configuration"},
435 {0x06678300, 0, IPR_DEFAULT_LOG_LEVEL
,
436 "4040: Incomplete multipath connection between IOA and enclosure"},
437 {0x06678400, 0, IPR_DEFAULT_LOG_LEVEL
,
438 "4041: Incomplete multipath connection between enclosure and device"},
439 {0x06678500, 0, IPR_DEFAULT_LOG_LEVEL
,
440 "9075: Incomplete multipath connection between IOA and remote IOA"},
441 {0x06678600, 0, IPR_DEFAULT_LOG_LEVEL
,
442 "9076: Configuration error, missing remote IOA"},
443 {0x06679100, 0, IPR_DEFAULT_LOG_LEVEL
,
444 "4050: Enclosure does not support a required multipath function"},
445 {0x06690000, 0, IPR_DEFAULT_LOG_LEVEL
,
446 "4070: Logically bad block written on device"},
447 {0x06690200, 0, IPR_DEFAULT_LOG_LEVEL
,
448 "9041: Array protection temporarily suspended"},
449 {0x06698200, 0, IPR_DEFAULT_LOG_LEVEL
,
450 "9042: Corrupt array parity detected on specified device"},
451 {0x066B0200, 0, IPR_DEFAULT_LOG_LEVEL
,
452 "9030: Array no longer protected due to missing or failed disk unit"},
453 {0x066B8000, 0, IPR_DEFAULT_LOG_LEVEL
,
454 "9071: Link operational transition"},
455 {0x066B8100, 0, IPR_DEFAULT_LOG_LEVEL
,
456 "9072: Link not operational transition"},
457 {0x066B8200, 0, IPR_DEFAULT_LOG_LEVEL
,
458 "9032: Array exposed but still protected"},
459 {0x066B8300, 0, IPR_DEFAULT_LOG_LEVEL
+ 1,
460 "70DD: Device forced failed by disrupt device command"},
461 {0x066B9100, 0, IPR_DEFAULT_LOG_LEVEL
,
462 "4061: Multipath redundancy level got better"},
463 {0x066B9200, 0, IPR_DEFAULT_LOG_LEVEL
,
464 "4060: Multipath redundancy level got worse"},
466 "Failure due to other device"},
467 {0x07278000, 0, IPR_DEFAULT_LOG_LEVEL
,
468 "9008: IOA does not support functions expected by devices"},
469 {0x07278100, 0, IPR_DEFAULT_LOG_LEVEL
,
470 "9010: Cache data associated with attached devices cannot be found"},
471 {0x07278200, 0, IPR_DEFAULT_LOG_LEVEL
,
472 "9011: Cache data belongs to devices other than those attached"},
473 {0x07278400, 0, IPR_DEFAULT_LOG_LEVEL
,
474 "9020: Array missing 2 or more devices with only 1 device present"},
475 {0x07278500, 0, IPR_DEFAULT_LOG_LEVEL
,
476 "9021: Array missing 2 or more devices with 2 or more devices present"},
477 {0x07278600, 0, IPR_DEFAULT_LOG_LEVEL
,
478 "9022: Exposed array is missing a required device"},
479 {0x07278700, 0, IPR_DEFAULT_LOG_LEVEL
,
480 "9023: Array member(s) not at required physical locations"},
481 {0x07278800, 0, IPR_DEFAULT_LOG_LEVEL
,
482 "9024: Array not functional due to present hardware configuration"},
483 {0x07278900, 0, IPR_DEFAULT_LOG_LEVEL
,
484 "9026: Array not functional due to present hardware configuration"},
485 {0x07278A00, 0, IPR_DEFAULT_LOG_LEVEL
,
486 "9027: Array is missing a device and parity is out of sync"},
487 {0x07278B00, 0, IPR_DEFAULT_LOG_LEVEL
,
488 "9028: Maximum number of arrays already exist"},
489 {0x07278C00, 0, IPR_DEFAULT_LOG_LEVEL
,
490 "9050: Required cache data cannot be located for a disk unit"},
491 {0x07278D00, 0, IPR_DEFAULT_LOG_LEVEL
,
492 "9052: Cache data exists for a device that has been modified"},
493 {0x07278F00, 0, IPR_DEFAULT_LOG_LEVEL
,
494 "9054: IOA resources not available due to previous problems"},
495 {0x07279100, 0, IPR_DEFAULT_LOG_LEVEL
,
496 "9092: Disk unit requires initialization before use"},
497 {0x07279200, 0, IPR_DEFAULT_LOG_LEVEL
,
498 "9029: Incorrect hardware configuration change has been detected"},
499 {0x07279600, 0, IPR_DEFAULT_LOG_LEVEL
,
500 "9060: One or more disk pairs are missing from an array"},
501 {0x07279700, 0, IPR_DEFAULT_LOG_LEVEL
,
502 "9061: One or more disks are missing from an array"},
503 {0x07279800, 0, IPR_DEFAULT_LOG_LEVEL
,
504 "9062: One or more disks are missing from an array"},
505 {0x07279900, 0, IPR_DEFAULT_LOG_LEVEL
,
506 "9063: Maximum number of functional arrays has been exceeded"},
508 "Aborted command, invalid descriptor"},
510 "Command terminated by host"}
513 static const struct ipr_ses_table_entry ipr_ses_table
[] = {
514 { "2104-DL1 ", "XXXXXXXXXXXXXXXX", 80 },
515 { "2104-TL1 ", "XXXXXXXXXXXXXXXX", 80 },
516 { "HSBP07M P U2SCSI", "XXXXXXXXXXXXXXXX", 80 }, /* Hidive 7 slot */
517 { "HSBP05M P U2SCSI", "XXXXXXXXXXXXXXXX", 80 }, /* Hidive 5 slot */
518 { "HSBP05M S U2SCSI", "XXXXXXXXXXXXXXXX", 80 }, /* Bowtie */
519 { "HSBP06E ASU2SCSI", "XXXXXXXXXXXXXXXX", 80 }, /* MartinFenning */
520 { "2104-DU3 ", "XXXXXXXXXXXXXXXX", 160 },
521 { "2104-TU3 ", "XXXXXXXXXXXXXXXX", 160 },
522 { "HSBP04C RSU2SCSI", "XXXXXXX*XXXXXXXX", 160 },
523 { "HSBP06E RSU2SCSI", "XXXXXXX*XXXXXXXX", 160 },
524 { "St V1S2 ", "XXXXXXXXXXXXXXXX", 160 },
525 { "HSBPD4M PU3SCSI", "XXXXXXX*XXXXXXXX", 160 },
526 { "VSBPD1H U3SCSI", "XXXXXXX*XXXXXXXX", 160 }
/*
 *  Function Prototypes
 */
static int ipr_reset_alert(struct ipr_cmnd *);
static void ipr_process_ccn(struct ipr_cmnd *);
static void ipr_process_error(struct ipr_cmnd *);
static void ipr_reset_ioa_job(struct ipr_cmnd *);
static void ipr_initiate_ioa_reset(struct ipr_ioa_cfg *,
				   enum ipr_shutdown_type);
#ifdef CONFIG_SCSI_IPR_TRACE
/**
 * ipr_trc_hook - Add a trace entry to the driver trace
 * @ipr_cmd:	ipr command struct
 * @type:	trace type
 * @add_data:	additional data
 *
 * Records op-code, type, command index, resource handle and caller data
 * into the next slot of the adapter's circular trace buffer.  The slot
 * index is claimed with atomic_add_return so concurrent callers never
 * collide.
 *
 * Return value:
 * 	none
 **/
static void ipr_trc_hook(struct ipr_cmnd *ipr_cmd,
			 u8 type, u32 add_data)
{
	struct ipr_trace_entry *trace_entry;
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;

	/* Claim a slot atomically; modulo wraps the circular buffer */
	trace_entry = &ioa_cfg->trace[atomic_add_return
			(1, &ioa_cfg->trace_index)%IPR_NUM_TRACE_ENTRIES];
	trace_entry->time = jiffies;
	trace_entry->op_code = ipr_cmd->ioarcb.cmd_pkt.cdb[0];
	trace_entry->type = type;
	/* SIS64 keeps the ATA task file in a different location than SIS32 */
	if (ipr_cmd->ioa_cfg->sis64)
		trace_entry->ata_op_code = ipr_cmd->i.ata_ioadl.regs.command;
	else
		trace_entry->ata_op_code = ipr_cmd->ioarcb.u.add_data.u.regs.command;
	trace_entry->cmd_index = ipr_cmd->cmd_index & 0xff;
	trace_entry->res_handle = ipr_cmd->ioarcb.res_handle;
	trace_entry->u.add_data = add_data;
	wmb();
}
#else
#define ipr_trc_hook(ipr_cmd, type, add_data) do { } while (0)
#endif
574 * ipr_lock_and_done - Acquire lock and complete command
575 * @ipr_cmd: ipr command struct
580 static void ipr_lock_and_done(struct ipr_cmnd
*ipr_cmd
)
582 unsigned long lock_flags
;
583 struct ipr_ioa_cfg
*ioa_cfg
= ipr_cmd
->ioa_cfg
;
585 spin_lock_irqsave(ioa_cfg
->host
->host_lock
, lock_flags
);
586 ipr_cmd
->done(ipr_cmd
);
587 spin_unlock_irqrestore(ioa_cfg
->host
->host_lock
, lock_flags
);
591 * ipr_reinit_ipr_cmnd - Re-initialize an IPR Cmnd block for reuse
592 * @ipr_cmd: ipr command struct
597 static void ipr_reinit_ipr_cmnd(struct ipr_cmnd
*ipr_cmd
)
599 struct ipr_ioarcb
*ioarcb
= &ipr_cmd
->ioarcb
;
600 struct ipr_ioasa
*ioasa
= &ipr_cmd
->s
.ioasa
;
601 struct ipr_ioasa64
*ioasa64
= &ipr_cmd
->s
.ioasa64
;
602 dma_addr_t dma_addr
= ipr_cmd
->dma_addr
;
605 hrrq_id
= ioarcb
->cmd_pkt
.hrrq_id
;
606 memset(&ioarcb
->cmd_pkt
, 0, sizeof(struct ipr_cmd_pkt
));
607 ioarcb
->cmd_pkt
.hrrq_id
= hrrq_id
;
608 ioarcb
->data_transfer_length
= 0;
609 ioarcb
->read_data_transfer_length
= 0;
610 ioarcb
->ioadl_len
= 0;
611 ioarcb
->read_ioadl_len
= 0;
613 if (ipr_cmd
->ioa_cfg
->sis64
) {
614 ioarcb
->u
.sis64_addr_data
.data_ioadl_addr
=
615 cpu_to_be64(dma_addr
+ offsetof(struct ipr_cmnd
, i
.ioadl64
));
616 ioasa64
->u
.gata
.status
= 0;
618 ioarcb
->write_ioadl_addr
=
619 cpu_to_be32(dma_addr
+ offsetof(struct ipr_cmnd
, i
.ioadl
));
620 ioarcb
->read_ioadl_addr
= ioarcb
->write_ioadl_addr
;
621 ioasa
->u
.gata
.status
= 0;
624 ioasa
->hdr
.ioasc
= 0;
625 ioasa
->hdr
.residual_data_len
= 0;
626 ipr_cmd
->scsi_cmd
= NULL
;
628 ipr_cmd
->sense_buffer
[0] = 0;
629 ipr_cmd
->dma_use_sg
= 0;
633 * ipr_init_ipr_cmnd - Initialize an IPR Cmnd block
634 * @ipr_cmd: ipr command struct
639 static void ipr_init_ipr_cmnd(struct ipr_cmnd
*ipr_cmd
,
640 void (*fast_done
) (struct ipr_cmnd
*))
642 ipr_reinit_ipr_cmnd(ipr_cmd
);
643 ipr_cmd
->u
.scratch
= 0;
644 ipr_cmd
->sibling
= NULL
;
645 ipr_cmd
->fast_done
= fast_done
;
646 init_timer(&ipr_cmd
->timer
);
650 * __ipr_get_free_ipr_cmnd - Get a free IPR Cmnd block
651 * @ioa_cfg: ioa config struct
654 * pointer to ipr command struct
657 struct ipr_cmnd
*__ipr_get_free_ipr_cmnd(struct ipr_hrr_queue
*hrrq
)
659 struct ipr_cmnd
*ipr_cmd
= NULL
;
661 if (likely(!list_empty(&hrrq
->hrrq_free_q
))) {
662 ipr_cmd
= list_entry(hrrq
->hrrq_free_q
.next
,
663 struct ipr_cmnd
, queue
);
664 list_del(&ipr_cmd
->queue
);
672 * ipr_get_free_ipr_cmnd - Get a free IPR Cmnd block and initialize it
673 * @ioa_cfg: ioa config struct
676 * pointer to ipr command struct
679 struct ipr_cmnd
*ipr_get_free_ipr_cmnd(struct ipr_ioa_cfg
*ioa_cfg
)
681 struct ipr_cmnd
*ipr_cmd
=
682 __ipr_get_free_ipr_cmnd(&ioa_cfg
->hrrq
[IPR_INIT_HRRQ
]);
683 ipr_init_ipr_cmnd(ipr_cmd
, ipr_lock_and_done
);
688 * ipr_mask_and_clear_interrupts - Mask all and clear specified interrupts
689 * @ioa_cfg: ioa config struct
690 * @clr_ints: interrupts to clear
692 * This function masks all interrupts on the adapter, then clears the
693 * interrupts specified in the mask
698 static void ipr_mask_and_clear_interrupts(struct ipr_ioa_cfg
*ioa_cfg
,
701 volatile u32 int_reg
;
704 /* Stop new interrupts */
705 for (i
= 0; i
< ioa_cfg
->hrrq_num
; i
++) {
706 spin_lock(&ioa_cfg
->hrrq
[i
]._lock
);
707 ioa_cfg
->hrrq
[i
].allow_interrupts
= 0;
708 spin_unlock(&ioa_cfg
->hrrq
[i
]._lock
);
712 /* Set interrupt mask to stop all new interrupts */
714 writeq(~0, ioa_cfg
->regs
.set_interrupt_mask_reg
);
716 writel(~0, ioa_cfg
->regs
.set_interrupt_mask_reg
);
718 /* Clear any pending interrupts */
720 writel(~0, ioa_cfg
->regs
.clr_interrupt_reg
);
721 writel(clr_ints
, ioa_cfg
->regs
.clr_interrupt_reg32
);
722 int_reg
= readl(ioa_cfg
->regs
.sense_interrupt_reg
);
726 * ipr_save_pcix_cmd_reg - Save PCI-X command register
727 * @ioa_cfg: ioa config struct
730 * 0 on success / -EIO on failure
732 static int ipr_save_pcix_cmd_reg(struct ipr_ioa_cfg
*ioa_cfg
)
734 int pcix_cmd_reg
= pci_find_capability(ioa_cfg
->pdev
, PCI_CAP_ID_PCIX
);
736 if (pcix_cmd_reg
== 0)
739 if (pci_read_config_word(ioa_cfg
->pdev
, pcix_cmd_reg
+ PCI_X_CMD
,
740 &ioa_cfg
->saved_pcix_cmd_reg
) != PCIBIOS_SUCCESSFUL
) {
741 dev_err(&ioa_cfg
->pdev
->dev
, "Failed to save PCI-X command register\n");
745 ioa_cfg
->saved_pcix_cmd_reg
|= PCI_X_CMD_DPERR_E
| PCI_X_CMD_ERO
;
750 * ipr_set_pcix_cmd_reg - Setup PCI-X command register
751 * @ioa_cfg: ioa config struct
754 * 0 on success / -EIO on failure
756 static int ipr_set_pcix_cmd_reg(struct ipr_ioa_cfg
*ioa_cfg
)
758 int pcix_cmd_reg
= pci_find_capability(ioa_cfg
->pdev
, PCI_CAP_ID_PCIX
);
761 if (pci_write_config_word(ioa_cfg
->pdev
, pcix_cmd_reg
+ PCI_X_CMD
,
762 ioa_cfg
->saved_pcix_cmd_reg
) != PCIBIOS_SUCCESSFUL
) {
763 dev_err(&ioa_cfg
->pdev
->dev
, "Failed to setup PCI-X command register\n");
772 * ipr_sata_eh_done - done function for aborted SATA commands
773 * @ipr_cmd: ipr command struct
775 * This function is invoked for ops generated to SATA
776 * devices which are being aborted.
781 static void ipr_sata_eh_done(struct ipr_cmnd
*ipr_cmd
)
783 struct ata_queued_cmd
*qc
= ipr_cmd
->qc
;
784 struct ipr_sata_port
*sata_port
= qc
->ap
->private_data
;
786 qc
->err_mask
|= AC_ERR_OTHER
;
787 sata_port
->ioasa
.status
|= ATA_BUSY
;
788 list_add_tail(&ipr_cmd
->queue
, &ipr_cmd
->hrrq
->hrrq_free_q
);
793 * ipr_scsi_eh_done - mid-layer done function for aborted ops
794 * @ipr_cmd: ipr command struct
796 * This function is invoked by the interrupt handler for
797 * ops generated by the SCSI mid-layer which are being aborted.
802 static void ipr_scsi_eh_done(struct ipr_cmnd
*ipr_cmd
)
804 struct scsi_cmnd
*scsi_cmd
= ipr_cmd
->scsi_cmd
;
806 scsi_cmd
->result
|= (DID_ERROR
<< 16);
808 scsi_dma_unmap(ipr_cmd
->scsi_cmd
);
809 scsi_cmd
->scsi_done(scsi_cmd
);
810 list_add_tail(&ipr_cmd
->queue
, &ipr_cmd
->hrrq
->hrrq_free_q
);
814 * ipr_fail_all_ops - Fails all outstanding ops.
815 * @ioa_cfg: ioa config struct
817 * This function fails all outstanding ops.
822 static void ipr_fail_all_ops(struct ipr_ioa_cfg
*ioa_cfg
)
824 struct ipr_cmnd
*ipr_cmd
, *temp
;
825 struct ipr_hrr_queue
*hrrq
;
828 for_each_hrrq(hrrq
, ioa_cfg
) {
829 spin_lock(&hrrq
->_lock
);
830 list_for_each_entry_safe(ipr_cmd
,
831 temp
, &hrrq
->hrrq_pending_q
, queue
) {
832 list_del(&ipr_cmd
->queue
);
834 ipr_cmd
->s
.ioasa
.hdr
.ioasc
=
835 cpu_to_be32(IPR_IOASC_IOA_WAS_RESET
);
836 ipr_cmd
->s
.ioasa
.hdr
.ilid
=
837 cpu_to_be32(IPR_DRIVER_ILID
);
839 if (ipr_cmd
->scsi_cmd
)
840 ipr_cmd
->done
= ipr_scsi_eh_done
;
841 else if (ipr_cmd
->qc
)
842 ipr_cmd
->done
= ipr_sata_eh_done
;
844 ipr_trc_hook(ipr_cmd
, IPR_TRACE_FINISH
,
845 IPR_IOASC_IOA_WAS_RESET
);
846 del_timer(&ipr_cmd
->timer
);
847 ipr_cmd
->done(ipr_cmd
);
849 spin_unlock(&hrrq
->_lock
);
855 * ipr_send_command - Send driver initiated requests.
856 * @ipr_cmd: ipr command struct
858 * This function sends a command to the adapter using the correct write call.
859 * In the case of sis64, calculate the ioarcb size required. Then or in the
865 static void ipr_send_command(struct ipr_cmnd
*ipr_cmd
)
867 struct ipr_ioa_cfg
*ioa_cfg
= ipr_cmd
->ioa_cfg
;
868 dma_addr_t send_dma_addr
= ipr_cmd
->dma_addr
;
870 if (ioa_cfg
->sis64
) {
871 /* The default size is 256 bytes */
872 send_dma_addr
|= 0x1;
874 /* If the number of ioadls * size of ioadl > 128 bytes,
875 then use a 512 byte ioarcb */
876 if (ipr_cmd
->dma_use_sg
* sizeof(struct ipr_ioadl64_desc
) > 128 )
877 send_dma_addr
|= 0x4;
878 writeq(send_dma_addr
, ioa_cfg
->regs
.ioarrin_reg
);
880 writel(send_dma_addr
, ioa_cfg
->regs
.ioarrin_reg
);
884 * ipr_do_req - Send driver initiated requests.
885 * @ipr_cmd: ipr command struct
886 * @done: done function
887 * @timeout_func: timeout function
888 * @timeout: timeout value
890 * This function sends the specified command to the adapter with the
891 * timeout given. The done function is invoked on command completion.
896 static void ipr_do_req(struct ipr_cmnd
*ipr_cmd
,
897 void (*done
) (struct ipr_cmnd
*),
898 void (*timeout_func
) (struct ipr_cmnd
*), u32 timeout
)
900 list_add_tail(&ipr_cmd
->queue
, &ipr_cmd
->hrrq
->hrrq_pending_q
);
902 ipr_cmd
->done
= done
;
904 ipr_cmd
->timer
.data
= (unsigned long) ipr_cmd
;
905 ipr_cmd
->timer
.expires
= jiffies
+ timeout
;
906 ipr_cmd
->timer
.function
= (void (*)(unsigned long))timeout_func
;
908 add_timer(&ipr_cmd
->timer
);
910 ipr_trc_hook(ipr_cmd
, IPR_TRACE_START
, 0);
912 ipr_send_command(ipr_cmd
);
916 * ipr_internal_cmd_done - Op done function for an internally generated op.
917 * @ipr_cmd: ipr command struct
919 * This function is the op done function for an internally generated,
920 * blocking op. It simply wakes the sleeping thread.
925 static void ipr_internal_cmd_done(struct ipr_cmnd
*ipr_cmd
)
927 if (ipr_cmd
->sibling
)
928 ipr_cmd
->sibling
= NULL
;
930 complete(&ipr_cmd
->completion
);
934 * ipr_init_ioadl - initialize the ioadl for the correct SIS type
935 * @ipr_cmd: ipr command struct
936 * @dma_addr: dma address
937 * @len: transfer length
938 * @flags: ioadl flag value
940 * This function initializes an ioadl in the case where there is only a single
946 static void ipr_init_ioadl(struct ipr_cmnd
*ipr_cmd
, dma_addr_t dma_addr
,
949 struct ipr_ioadl_desc
*ioadl
= ipr_cmd
->i
.ioadl
;
950 struct ipr_ioadl64_desc
*ioadl64
= ipr_cmd
->i
.ioadl64
;
952 ipr_cmd
->dma_use_sg
= 1;
954 if (ipr_cmd
->ioa_cfg
->sis64
) {
955 ioadl64
->flags
= cpu_to_be32(flags
);
956 ioadl64
->data_len
= cpu_to_be32(len
);
957 ioadl64
->address
= cpu_to_be64(dma_addr
);
959 ipr_cmd
->ioarcb
.ioadl_len
=
960 cpu_to_be32(sizeof(struct ipr_ioadl64_desc
));
961 ipr_cmd
->ioarcb
.data_transfer_length
= cpu_to_be32(len
);
963 ioadl
->flags_and_data_len
= cpu_to_be32(flags
| len
);
964 ioadl
->address
= cpu_to_be32(dma_addr
);
966 if (flags
== IPR_IOADL_FLAGS_READ_LAST
) {
967 ipr_cmd
->ioarcb
.read_ioadl_len
=
968 cpu_to_be32(sizeof(struct ipr_ioadl_desc
));
969 ipr_cmd
->ioarcb
.read_data_transfer_length
= cpu_to_be32(len
);
971 ipr_cmd
->ioarcb
.ioadl_len
=
972 cpu_to_be32(sizeof(struct ipr_ioadl_desc
));
973 ipr_cmd
->ioarcb
.data_transfer_length
= cpu_to_be32(len
);
979 * ipr_send_blocking_cmd - Send command and sleep on its completion.
980 * @ipr_cmd: ipr command struct
981 * @timeout_func: function to invoke if command times out
987 static void ipr_send_blocking_cmd(struct ipr_cmnd
*ipr_cmd
,
988 void (*timeout_func
) (struct ipr_cmnd
*ipr_cmd
),
991 struct ipr_ioa_cfg
*ioa_cfg
= ipr_cmd
->ioa_cfg
;
993 init_completion(&ipr_cmd
->completion
);
994 ipr_do_req(ipr_cmd
, ipr_internal_cmd_done
, timeout_func
, timeout
);
996 spin_unlock_irq(ioa_cfg
->host
->host_lock
);
997 wait_for_completion(&ipr_cmd
->completion
);
998 spin_lock_irq(ioa_cfg
->host
->host_lock
);
1001 static int ipr_get_hrrq_index(struct ipr_ioa_cfg
*ioa_cfg
)
1003 if (ioa_cfg
->hrrq_num
== 1)
1006 return (atomic_add_return(1, &ioa_cfg
->hrrq_index
) % (ioa_cfg
->hrrq_num
- 1)) + 1;
1010 * ipr_send_hcam - Send an HCAM to the adapter.
1011 * @ioa_cfg: ioa config struct
1013 * @hostrcb: hostrcb struct
1015 * This function will send a Host Controlled Async command to the adapter.
1016 * If HCAMs are currently not allowed to be issued to the adapter, it will
1017 * place the hostrcb on the free queue.
1022 static void ipr_send_hcam(struct ipr_ioa_cfg
*ioa_cfg
, u8 type
,
1023 struct ipr_hostrcb
*hostrcb
)
1025 struct ipr_cmnd
*ipr_cmd
;
1026 struct ipr_ioarcb
*ioarcb
;
1028 if (ioa_cfg
->hrrq
[IPR_INIT_HRRQ
].allow_cmds
) {
1029 ipr_cmd
= ipr_get_free_ipr_cmnd(ioa_cfg
);
1030 list_add_tail(&ipr_cmd
->queue
, &ipr_cmd
->hrrq
->hrrq_pending_q
);
1031 list_add_tail(&hostrcb
->queue
, &ioa_cfg
->hostrcb_pending_q
);
1033 ipr_cmd
->u
.hostrcb
= hostrcb
;
1034 ioarcb
= &ipr_cmd
->ioarcb
;
1036 ioarcb
->res_handle
= cpu_to_be32(IPR_IOA_RES_HANDLE
);
1037 ioarcb
->cmd_pkt
.request_type
= IPR_RQTYPE_HCAM
;
1038 ioarcb
->cmd_pkt
.cdb
[0] = IPR_HOST_CONTROLLED_ASYNC
;
1039 ioarcb
->cmd_pkt
.cdb
[1] = type
;
1040 ioarcb
->cmd_pkt
.cdb
[7] = (sizeof(hostrcb
->hcam
) >> 8) & 0xff;
1041 ioarcb
->cmd_pkt
.cdb
[8] = sizeof(hostrcb
->hcam
) & 0xff;
1043 ipr_init_ioadl(ipr_cmd
, hostrcb
->hostrcb_dma
,
1044 sizeof(hostrcb
->hcam
), IPR_IOADL_FLAGS_READ_LAST
);
1046 if (type
== IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE
)
1047 ipr_cmd
->done
= ipr_process_ccn
;
1049 ipr_cmd
->done
= ipr_process_error
;
1051 ipr_trc_hook(ipr_cmd
, IPR_TRACE_START
, IPR_IOA_RES_ADDR
);
1053 ipr_send_command(ipr_cmd
);
1055 list_add_tail(&hostrcb
->queue
, &ioa_cfg
->hostrcb_free_q
);
1060 * ipr_update_ata_class - Update the ata class in the resource entry
1061 * @res: resource entry struct
1062 * @proto: cfgte device bus protocol value
1067 static void ipr_update_ata_class(struct ipr_resource_entry
*res
, unsigned int proto
)
1070 case IPR_PROTO_SATA
:
1071 case IPR_PROTO_SAS_STP
:
1072 res
->ata_class
= ATA_DEV_ATA
;
1074 case IPR_PROTO_SATA_ATAPI
:
1075 case IPR_PROTO_SAS_STP_ATAPI
:
1076 res
->ata_class
= ATA_DEV_ATAPI
;
1079 res
->ata_class
= ATA_DEV_UNKNOWN
;
1085 * ipr_init_res_entry - Initialize a resource entry struct.
1086 * @res: resource entry struct
1087 * @cfgtew: config table entry wrapper struct
1092 static void ipr_init_res_entry(struct ipr_resource_entry
*res
,
1093 struct ipr_config_table_entry_wrapper
*cfgtew
)
1097 struct ipr_ioa_cfg
*ioa_cfg
= res
->ioa_cfg
;
1098 struct ipr_resource_entry
*gscsi_res
= NULL
;
1100 res
->needs_sync_complete
= 0;
1103 res
->del_from_ml
= 0;
1104 res
->resetting_device
= 0;
1106 res
->sata_port
= NULL
;
1108 if (ioa_cfg
->sis64
) {
1109 proto
= cfgtew
->u
.cfgte64
->proto
;
1110 res
->res_flags
= cfgtew
->u
.cfgte64
->res_flags
;
1111 res
->qmodel
= IPR_QUEUEING_MODEL64(res
);
1112 res
->type
= cfgtew
->u
.cfgte64
->res_type
;
1114 memcpy(res
->res_path
, &cfgtew
->u
.cfgte64
->res_path
,
1115 sizeof(res
->res_path
));
1118 memcpy(&res
->dev_lun
.scsi_lun
, &cfgtew
->u
.cfgte64
->lun
,
1119 sizeof(res
->dev_lun
.scsi_lun
));
1120 res
->lun
= scsilun_to_int(&res
->dev_lun
);
1122 if (res
->type
== IPR_RES_TYPE_GENERIC_SCSI
) {
1123 list_for_each_entry(gscsi_res
, &ioa_cfg
->used_res_q
, queue
) {
1124 if (gscsi_res
->dev_id
== cfgtew
->u
.cfgte64
->dev_id
) {
1126 res
->target
= gscsi_res
->target
;
1131 res
->target
= find_first_zero_bit(ioa_cfg
->target_ids
,
1132 ioa_cfg
->max_devs_supported
);
1133 set_bit(res
->target
, ioa_cfg
->target_ids
);
1135 } else if (res
->type
== IPR_RES_TYPE_IOAFP
) {
1136 res
->bus
= IPR_IOAFP_VIRTUAL_BUS
;
1138 } else if (res
->type
== IPR_RES_TYPE_ARRAY
) {
1139 res
->bus
= IPR_ARRAY_VIRTUAL_BUS
;
1140 res
->target
= find_first_zero_bit(ioa_cfg
->array_ids
,
1141 ioa_cfg
->max_devs_supported
);
1142 set_bit(res
->target
, ioa_cfg
->array_ids
);
1143 } else if (res
->type
== IPR_RES_TYPE_VOLUME_SET
) {
1144 res
->bus
= IPR_VSET_VIRTUAL_BUS
;
1145 res
->target
= find_first_zero_bit(ioa_cfg
->vset_ids
,
1146 ioa_cfg
->max_devs_supported
);
1147 set_bit(res
->target
, ioa_cfg
->vset_ids
);
1149 res
->target
= find_first_zero_bit(ioa_cfg
->target_ids
,
1150 ioa_cfg
->max_devs_supported
);
1151 set_bit(res
->target
, ioa_cfg
->target_ids
);
1154 proto
= cfgtew
->u
.cfgte
->proto
;
1155 res
->qmodel
= IPR_QUEUEING_MODEL(res
);
1156 res
->flags
= cfgtew
->u
.cfgte
->flags
;
1157 if (res
->flags
& IPR_IS_IOA_RESOURCE
)
1158 res
->type
= IPR_RES_TYPE_IOAFP
;
1160 res
->type
= cfgtew
->u
.cfgte
->rsvd_subtype
& 0x0f;
1162 res
->bus
= cfgtew
->u
.cfgte
->res_addr
.bus
;
1163 res
->target
= cfgtew
->u
.cfgte
->res_addr
.target
;
1164 res
->lun
= cfgtew
->u
.cfgte
->res_addr
.lun
;
1165 res
->lun_wwn
= get_unaligned_be64(cfgtew
->u
.cfgte
->lun_wwn
);
1168 ipr_update_ata_class(res
, proto
);
1172 * ipr_is_same_device - Determine if two devices are the same.
1173 * @res: resource entry struct
1174 * @cfgtew: config table entry wrapper struct
1177 * 1 if the devices are the same / 0 otherwise
1179 static int ipr_is_same_device(struct ipr_resource_entry
*res
,
1180 struct ipr_config_table_entry_wrapper
*cfgtew
)
1182 if (res
->ioa_cfg
->sis64
) {
1183 if (!memcmp(&res
->dev_id
, &cfgtew
->u
.cfgte64
->dev_id
,
1184 sizeof(cfgtew
->u
.cfgte64
->dev_id
)) &&
1185 !memcmp(&res
->dev_lun
.scsi_lun
, &cfgtew
->u
.cfgte64
->lun
,
1186 sizeof(cfgtew
->u
.cfgte64
->lun
))) {
1190 if (res
->bus
== cfgtew
->u
.cfgte
->res_addr
.bus
&&
1191 res
->target
== cfgtew
->u
.cfgte
->res_addr
.target
&&
1192 res
->lun
== cfgtew
->u
.cfgte
->res_addr
.lun
)
1200 * __ipr_format_res_path - Format the resource path for printing.
1201 * @res_path: resource path
1203 * @len: length of buffer provided
1208 static char *__ipr_format_res_path(u8
*res_path
, char *buffer
, int len
)
1214 p
+= snprintf(p
, buffer
+ len
- p
, "%02X", res_path
[0]);
1215 for (i
= 1; res_path
[i
] != 0xff && ((i
* 3) < len
); i
++)
1216 p
+= snprintf(p
, buffer
+ len
- p
, "-%02X", res_path
[i
]);
1222 * ipr_format_res_path - Format the resource path for printing.
1223 * @ioa_cfg: ioa config struct
1224 * @res_path: resource path
1226 * @len: length of buffer provided
1231 static char *ipr_format_res_path(struct ipr_ioa_cfg
*ioa_cfg
,
1232 u8
*res_path
, char *buffer
, int len
)
1237 p
+= snprintf(p
, buffer
+ len
- p
, "%d/", ioa_cfg
->host
->host_no
);
1238 __ipr_format_res_path(res_path
, p
, len
- (buffer
- p
));
1243 * ipr_update_res_entry - Update the resource entry.
1244 * @res: resource entry struct
1245 * @cfgtew: config table entry wrapper struct
1250 static void ipr_update_res_entry(struct ipr_resource_entry
*res
,
1251 struct ipr_config_table_entry_wrapper
*cfgtew
)
1253 char buffer
[IPR_MAX_RES_PATH_LENGTH
];
1257 if (res
->ioa_cfg
->sis64
) {
1258 res
->flags
= cfgtew
->u
.cfgte64
->flags
;
1259 res
->res_flags
= cfgtew
->u
.cfgte64
->res_flags
;
1260 res
->type
= cfgtew
->u
.cfgte64
->res_type
;
1262 memcpy(&res
->std_inq_data
, &cfgtew
->u
.cfgte64
->std_inq_data
,
1263 sizeof(struct ipr_std_inq_data
));
1265 res
->qmodel
= IPR_QUEUEING_MODEL64(res
);
1266 proto
= cfgtew
->u
.cfgte64
->proto
;
1267 res
->res_handle
= cfgtew
->u
.cfgte64
->res_handle
;
1268 res
->dev_id
= cfgtew
->u
.cfgte64
->dev_id
;
1270 memcpy(&res
->dev_lun
.scsi_lun
, &cfgtew
->u
.cfgte64
->lun
,
1271 sizeof(res
->dev_lun
.scsi_lun
));
1273 if (memcmp(res
->res_path
, &cfgtew
->u
.cfgte64
->res_path
,
1274 sizeof(res
->res_path
))) {
1275 memcpy(res
->res_path
, &cfgtew
->u
.cfgte64
->res_path
,
1276 sizeof(res
->res_path
));
1280 if (res
->sdev
&& new_path
)
1281 sdev_printk(KERN_INFO
, res
->sdev
, "Resource path: %s\n",
1282 ipr_format_res_path(res
->ioa_cfg
,
1283 res
->res_path
, buffer
, sizeof(buffer
)));
1285 res
->flags
= cfgtew
->u
.cfgte
->flags
;
1286 if (res
->flags
& IPR_IS_IOA_RESOURCE
)
1287 res
->type
= IPR_RES_TYPE_IOAFP
;
1289 res
->type
= cfgtew
->u
.cfgte
->rsvd_subtype
& 0x0f;
1291 memcpy(&res
->std_inq_data
, &cfgtew
->u
.cfgte
->std_inq_data
,
1292 sizeof(struct ipr_std_inq_data
));
1294 res
->qmodel
= IPR_QUEUEING_MODEL(res
);
1295 proto
= cfgtew
->u
.cfgte
->proto
;
1296 res
->res_handle
= cfgtew
->u
.cfgte
->res_handle
;
1299 ipr_update_ata_class(res
, proto
);
1303 * ipr_clear_res_target - Clear the bit in the bit map representing the target
1305 * @res: resource entry struct
1306 * @cfgtew: config table entry wrapper struct
1311 static void ipr_clear_res_target(struct ipr_resource_entry
*res
)
1313 struct ipr_resource_entry
*gscsi_res
= NULL
;
1314 struct ipr_ioa_cfg
*ioa_cfg
= res
->ioa_cfg
;
1316 if (!ioa_cfg
->sis64
)
1319 if (res
->bus
== IPR_ARRAY_VIRTUAL_BUS
)
1320 clear_bit(res
->target
, ioa_cfg
->array_ids
);
1321 else if (res
->bus
== IPR_VSET_VIRTUAL_BUS
)
1322 clear_bit(res
->target
, ioa_cfg
->vset_ids
);
1323 else if (res
->bus
== 0 && res
->type
== IPR_RES_TYPE_GENERIC_SCSI
) {
1324 list_for_each_entry(gscsi_res
, &ioa_cfg
->used_res_q
, queue
)
1325 if (gscsi_res
->dev_id
== res
->dev_id
&& gscsi_res
!= res
)
1327 clear_bit(res
->target
, ioa_cfg
->target_ids
);
1329 } else if (res
->bus
== 0)
1330 clear_bit(res
->target
, ioa_cfg
->target_ids
);
1334 * ipr_handle_config_change - Handle a config change from the adapter
1335 * @ioa_cfg: ioa config struct
1341 static void ipr_handle_config_change(struct ipr_ioa_cfg
*ioa_cfg
,
1342 struct ipr_hostrcb
*hostrcb
)
1344 struct ipr_resource_entry
*res
= NULL
;
1345 struct ipr_config_table_entry_wrapper cfgtew
;
1346 __be32 cc_res_handle
;
1350 if (ioa_cfg
->sis64
) {
1351 cfgtew
.u
.cfgte64
= &hostrcb
->hcam
.u
.ccn
.u
.cfgte64
;
1352 cc_res_handle
= cfgtew
.u
.cfgte64
->res_handle
;
1354 cfgtew
.u
.cfgte
= &hostrcb
->hcam
.u
.ccn
.u
.cfgte
;
1355 cc_res_handle
= cfgtew
.u
.cfgte
->res_handle
;
1358 list_for_each_entry(res
, &ioa_cfg
->used_res_q
, queue
) {
1359 if (res
->res_handle
== cc_res_handle
) {
1366 if (list_empty(&ioa_cfg
->free_res_q
)) {
1367 ipr_send_hcam(ioa_cfg
,
1368 IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE
,
1373 res
= list_entry(ioa_cfg
->free_res_q
.next
,
1374 struct ipr_resource_entry
, queue
);
1376 list_del(&res
->queue
);
1377 ipr_init_res_entry(res
, &cfgtew
);
1378 list_add_tail(&res
->queue
, &ioa_cfg
->used_res_q
);
1381 ipr_update_res_entry(res
, &cfgtew
);
1383 if (hostrcb
->hcam
.notify_type
== IPR_HOST_RCB_NOTIF_TYPE_REM_ENTRY
) {
1385 res
->del_from_ml
= 1;
1386 res
->res_handle
= IPR_INVALID_RES_HANDLE
;
1387 if (ioa_cfg
->allow_ml_add_del
)
1388 schedule_work(&ioa_cfg
->work_q
);
1390 ipr_clear_res_target(res
);
1391 list_move_tail(&res
->queue
, &ioa_cfg
->free_res_q
);
1393 } else if (!res
->sdev
|| res
->del_from_ml
) {
1395 if (ioa_cfg
->allow_ml_add_del
)
1396 schedule_work(&ioa_cfg
->work_q
);
1399 ipr_send_hcam(ioa_cfg
, IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE
, hostrcb
);
1403 * ipr_process_ccn - Op done function for a CCN.
1404 * @ipr_cmd: ipr command struct
1406 * This function is the op done function for a configuration
1407 * change notification host controlled async from the adapter.
1412 static void ipr_process_ccn(struct ipr_cmnd
*ipr_cmd
)
1414 struct ipr_ioa_cfg
*ioa_cfg
= ipr_cmd
->ioa_cfg
;
1415 struct ipr_hostrcb
*hostrcb
= ipr_cmd
->u
.hostrcb
;
1416 u32 ioasc
= be32_to_cpu(ipr_cmd
->s
.ioasa
.hdr
.ioasc
);
1418 list_del(&hostrcb
->queue
);
1419 list_add_tail(&ipr_cmd
->queue
, &ipr_cmd
->hrrq
->hrrq_free_q
);
1422 if (ioasc
!= IPR_IOASC_IOA_WAS_RESET
)
1423 dev_err(&ioa_cfg
->pdev
->dev
,
1424 "Host RCB failed with IOASC: 0x%08X\n", ioasc
);
1426 ipr_send_hcam(ioa_cfg
, IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE
, hostrcb
);
1428 ipr_handle_config_change(ioa_cfg
, hostrcb
);
/**
 * strip_and_pad_whitespace - Strip and pad trailing whitespace.
 * @i:		index into buffer (last byte of the field)
 * @buf:	string to modify
 *
 * This function will strip all trailing whitespace, pad the end
 * of the string with a single space, and NULL terminate the string.
 *
 * Return value:
 * 	new length of string
 **/
static int strip_and_pad_whitespace(int i, char *buf)
{
	/* NOTE(review): body statements after the while reconstructed —
	   dropped during extraction. */
	while (i && buf[i] == ' ')
		i--;
	buf[i+1] = ' ';
	buf[i+2] = '\0';
	return i + 2;
}
1453 * ipr_log_vpd_compact - Log the passed extended VPD compactly.
1454 * @prefix: string to print at start of printk
1455 * @hostrcb: hostrcb pointer
1456 * @vpd: vendor/product id/sn struct
1461 static void ipr_log_vpd_compact(char *prefix
, struct ipr_hostrcb
*hostrcb
,
1462 struct ipr_vpd
*vpd
)
1464 char buffer
[IPR_VENDOR_ID_LEN
+ IPR_PROD_ID_LEN
+ IPR_SERIAL_NUM_LEN
+ 3];
1467 memcpy(buffer
, vpd
->vpids
.vendor_id
, IPR_VENDOR_ID_LEN
);
1468 i
= strip_and_pad_whitespace(IPR_VENDOR_ID_LEN
- 1, buffer
);
1470 memcpy(&buffer
[i
], vpd
->vpids
.product_id
, IPR_PROD_ID_LEN
);
1471 i
= strip_and_pad_whitespace(i
+ IPR_PROD_ID_LEN
- 1, buffer
);
1473 memcpy(&buffer
[i
], vpd
->sn
, IPR_SERIAL_NUM_LEN
);
1474 buffer
[IPR_SERIAL_NUM_LEN
+ i
] = '\0';
1476 ipr_hcam_err(hostrcb
, "%s VPID/SN: %s\n", prefix
, buffer
);
1480 * ipr_log_vpd - Log the passed VPD to the error log.
1481 * @vpd: vendor/product id/sn struct
1486 static void ipr_log_vpd(struct ipr_vpd
*vpd
)
1488 char buffer
[IPR_VENDOR_ID_LEN
+ IPR_PROD_ID_LEN
1489 + IPR_SERIAL_NUM_LEN
];
1491 memcpy(buffer
, vpd
->vpids
.vendor_id
, IPR_VENDOR_ID_LEN
);
1492 memcpy(buffer
+ IPR_VENDOR_ID_LEN
, vpd
->vpids
.product_id
,
1494 buffer
[IPR_VENDOR_ID_LEN
+ IPR_PROD_ID_LEN
] = '\0';
1495 ipr_err("Vendor/Product ID: %s\n", buffer
);
1497 memcpy(buffer
, vpd
->sn
, IPR_SERIAL_NUM_LEN
);
1498 buffer
[IPR_SERIAL_NUM_LEN
] = '\0';
1499 ipr_err(" Serial Number: %s\n", buffer
);
1503 * ipr_log_ext_vpd_compact - Log the passed extended VPD compactly.
1504 * @prefix: string to print at start of printk
1505 * @hostrcb: hostrcb pointer
1506 * @vpd: vendor/product id/sn/wwn struct
1511 static void ipr_log_ext_vpd_compact(char *prefix
, struct ipr_hostrcb
*hostrcb
,
1512 struct ipr_ext_vpd
*vpd
)
1514 ipr_log_vpd_compact(prefix
, hostrcb
, &vpd
->vpd
);
1515 ipr_hcam_err(hostrcb
, "%s WWN: %08X%08X\n", prefix
,
1516 be32_to_cpu(vpd
->wwid
[0]), be32_to_cpu(vpd
->wwid
[1]));
1520 * ipr_log_ext_vpd - Log the passed extended VPD to the error log.
1521 * @vpd: vendor/product id/sn/wwn struct
1526 static void ipr_log_ext_vpd(struct ipr_ext_vpd
*vpd
)
1528 ipr_log_vpd(&vpd
->vpd
);
1529 ipr_err(" WWN: %08X%08X\n", be32_to_cpu(vpd
->wwid
[0]),
1530 be32_to_cpu(vpd
->wwid
[1]));
1534 * ipr_log_enhanced_cache_error - Log a cache error.
1535 * @ioa_cfg: ioa config struct
1536 * @hostrcb: hostrcb struct
1541 static void ipr_log_enhanced_cache_error(struct ipr_ioa_cfg
*ioa_cfg
,
1542 struct ipr_hostrcb
*hostrcb
)
1544 struct ipr_hostrcb_type_12_error
*error
;
1547 error
= &hostrcb
->hcam
.u
.error64
.u
.type_12_error
;
1549 error
= &hostrcb
->hcam
.u
.error
.u
.type_12_error
;
1551 ipr_err("-----Current Configuration-----\n");
1552 ipr_err("Cache Directory Card Information:\n");
1553 ipr_log_ext_vpd(&error
->ioa_vpd
);
1554 ipr_err("Adapter Card Information:\n");
1555 ipr_log_ext_vpd(&error
->cfc_vpd
);
1557 ipr_err("-----Expected Configuration-----\n");
1558 ipr_err("Cache Directory Card Information:\n");
1559 ipr_log_ext_vpd(&error
->ioa_last_attached_to_cfc_vpd
);
1560 ipr_err("Adapter Card Information:\n");
1561 ipr_log_ext_vpd(&error
->cfc_last_attached_to_ioa_vpd
);
1563 ipr_err("Additional IOA Data: %08X %08X %08X\n",
1564 be32_to_cpu(error
->ioa_data
[0]),
1565 be32_to_cpu(error
->ioa_data
[1]),
1566 be32_to_cpu(error
->ioa_data
[2]));
1570 * ipr_log_cache_error - Log a cache error.
1571 * @ioa_cfg: ioa config struct
1572 * @hostrcb: hostrcb struct
1577 static void ipr_log_cache_error(struct ipr_ioa_cfg
*ioa_cfg
,
1578 struct ipr_hostrcb
*hostrcb
)
1580 struct ipr_hostrcb_type_02_error
*error
=
1581 &hostrcb
->hcam
.u
.error
.u
.type_02_error
;
1583 ipr_err("-----Current Configuration-----\n");
1584 ipr_err("Cache Directory Card Information:\n");
1585 ipr_log_vpd(&error
->ioa_vpd
);
1586 ipr_err("Adapter Card Information:\n");
1587 ipr_log_vpd(&error
->cfc_vpd
);
1589 ipr_err("-----Expected Configuration-----\n");
1590 ipr_err("Cache Directory Card Information:\n");
1591 ipr_log_vpd(&error
->ioa_last_attached_to_cfc_vpd
);
1592 ipr_err("Adapter Card Information:\n");
1593 ipr_log_vpd(&error
->cfc_last_attached_to_ioa_vpd
);
1595 ipr_err("Additional IOA Data: %08X %08X %08X\n",
1596 be32_to_cpu(error
->ioa_data
[0]),
1597 be32_to_cpu(error
->ioa_data
[1]),
1598 be32_to_cpu(error
->ioa_data
[2]));
1602 * ipr_log_enhanced_config_error - Log a configuration error.
1603 * @ioa_cfg: ioa config struct
1604 * @hostrcb: hostrcb struct
1609 static void ipr_log_enhanced_config_error(struct ipr_ioa_cfg
*ioa_cfg
,
1610 struct ipr_hostrcb
*hostrcb
)
1612 int errors_logged
, i
;
1613 struct ipr_hostrcb_device_data_entry_enhanced
*dev_entry
;
1614 struct ipr_hostrcb_type_13_error
*error
;
1616 error
= &hostrcb
->hcam
.u
.error
.u
.type_13_error
;
1617 errors_logged
= be32_to_cpu(error
->errors_logged
);
1619 ipr_err("Device Errors Detected/Logged: %d/%d\n",
1620 be32_to_cpu(error
->errors_detected
), errors_logged
);
1622 dev_entry
= error
->dev
;
1624 for (i
= 0; i
< errors_logged
; i
++, dev_entry
++) {
1627 ipr_phys_res_err(ioa_cfg
, dev_entry
->dev_res_addr
, "Device %d", i
+ 1);
1628 ipr_log_ext_vpd(&dev_entry
->vpd
);
1630 ipr_err("-----New Device Information-----\n");
1631 ipr_log_ext_vpd(&dev_entry
->new_vpd
);
1633 ipr_err("Cache Directory Card Information:\n");
1634 ipr_log_ext_vpd(&dev_entry
->ioa_last_with_dev_vpd
);
1636 ipr_err("Adapter Card Information:\n");
1637 ipr_log_ext_vpd(&dev_entry
->cfc_last_with_dev_vpd
);
1642 * ipr_log_sis64_config_error - Log a device error.
1643 * @ioa_cfg: ioa config struct
1644 * @hostrcb: hostrcb struct
1649 static void ipr_log_sis64_config_error(struct ipr_ioa_cfg
*ioa_cfg
,
1650 struct ipr_hostrcb
*hostrcb
)
1652 int errors_logged
, i
;
1653 struct ipr_hostrcb64_device_data_entry_enhanced
*dev_entry
;
1654 struct ipr_hostrcb_type_23_error
*error
;
1655 char buffer
[IPR_MAX_RES_PATH_LENGTH
];
1657 error
= &hostrcb
->hcam
.u
.error64
.u
.type_23_error
;
1658 errors_logged
= be32_to_cpu(error
->errors_logged
);
1660 ipr_err("Device Errors Detected/Logged: %d/%d\n",
1661 be32_to_cpu(error
->errors_detected
), errors_logged
);
1663 dev_entry
= error
->dev
;
1665 for (i
= 0; i
< errors_logged
; i
++, dev_entry
++) {
1668 ipr_err("Device %d : %s", i
+ 1,
1669 __ipr_format_res_path(dev_entry
->res_path
,
1670 buffer
, sizeof(buffer
)));
1671 ipr_log_ext_vpd(&dev_entry
->vpd
);
1673 ipr_err("-----New Device Information-----\n");
1674 ipr_log_ext_vpd(&dev_entry
->new_vpd
);
1676 ipr_err("Cache Directory Card Information:\n");
1677 ipr_log_ext_vpd(&dev_entry
->ioa_last_with_dev_vpd
);
1679 ipr_err("Adapter Card Information:\n");
1680 ipr_log_ext_vpd(&dev_entry
->cfc_last_with_dev_vpd
);
1685 * ipr_log_config_error - Log a configuration error.
1686 * @ioa_cfg: ioa config struct
1687 * @hostrcb: hostrcb struct
1692 static void ipr_log_config_error(struct ipr_ioa_cfg
*ioa_cfg
,
1693 struct ipr_hostrcb
*hostrcb
)
1695 int errors_logged
, i
;
1696 struct ipr_hostrcb_device_data_entry
*dev_entry
;
1697 struct ipr_hostrcb_type_03_error
*error
;
1699 error
= &hostrcb
->hcam
.u
.error
.u
.type_03_error
;
1700 errors_logged
= be32_to_cpu(error
->errors_logged
);
1702 ipr_err("Device Errors Detected/Logged: %d/%d\n",
1703 be32_to_cpu(error
->errors_detected
), errors_logged
);
1705 dev_entry
= error
->dev
;
1707 for (i
= 0; i
< errors_logged
; i
++, dev_entry
++) {
1710 ipr_phys_res_err(ioa_cfg
, dev_entry
->dev_res_addr
, "Device %d", i
+ 1);
1711 ipr_log_vpd(&dev_entry
->vpd
);
1713 ipr_err("-----New Device Information-----\n");
1714 ipr_log_vpd(&dev_entry
->new_vpd
);
1716 ipr_err("Cache Directory Card Information:\n");
1717 ipr_log_vpd(&dev_entry
->ioa_last_with_dev_vpd
);
1719 ipr_err("Adapter Card Information:\n");
1720 ipr_log_vpd(&dev_entry
->cfc_last_with_dev_vpd
);
1722 ipr_err("Additional IOA Data: %08X %08X %08X %08X %08X\n",
1723 be32_to_cpu(dev_entry
->ioa_data
[0]),
1724 be32_to_cpu(dev_entry
->ioa_data
[1]),
1725 be32_to_cpu(dev_entry
->ioa_data
[2]),
1726 be32_to_cpu(dev_entry
->ioa_data
[3]),
1727 be32_to_cpu(dev_entry
->ioa_data
[4]));
1732 * ipr_log_enhanced_array_error - Log an array configuration error.
1733 * @ioa_cfg: ioa config struct
1734 * @hostrcb: hostrcb struct
1739 static void ipr_log_enhanced_array_error(struct ipr_ioa_cfg
*ioa_cfg
,
1740 struct ipr_hostrcb
*hostrcb
)
1743 struct ipr_hostrcb_type_14_error
*error
;
1744 struct ipr_hostrcb_array_data_entry_enhanced
*array_entry
;
1745 const u8 zero_sn
[IPR_SERIAL_NUM_LEN
] = { [0 ... IPR_SERIAL_NUM_LEN
-1] = '0' };
1747 error
= &hostrcb
->hcam
.u
.error
.u
.type_14_error
;
1751 ipr_err("RAID %s Array Configuration: %d:%d:%d:%d\n",
1752 error
->protection_level
,
1753 ioa_cfg
->host
->host_no
,
1754 error
->last_func_vset_res_addr
.bus
,
1755 error
->last_func_vset_res_addr
.target
,
1756 error
->last_func_vset_res_addr
.lun
);
1760 array_entry
= error
->array_member
;
1761 num_entries
= min_t(u32
, be32_to_cpu(error
->num_entries
),
1762 ARRAY_SIZE(error
->array_member
));
1764 for (i
= 0; i
< num_entries
; i
++, array_entry
++) {
1765 if (!memcmp(array_entry
->vpd
.vpd
.sn
, zero_sn
, IPR_SERIAL_NUM_LEN
))
1768 if (be32_to_cpu(error
->exposed_mode_adn
) == i
)
1769 ipr_err("Exposed Array Member %d:\n", i
);
1771 ipr_err("Array Member %d:\n", i
);
1773 ipr_log_ext_vpd(&array_entry
->vpd
);
1774 ipr_phys_res_err(ioa_cfg
, array_entry
->dev_res_addr
, "Current Location");
1775 ipr_phys_res_err(ioa_cfg
, array_entry
->expected_dev_res_addr
,
1776 "Expected Location");
1783 * ipr_log_array_error - Log an array configuration error.
1784 * @ioa_cfg: ioa config struct
1785 * @hostrcb: hostrcb struct
1790 static void ipr_log_array_error(struct ipr_ioa_cfg
*ioa_cfg
,
1791 struct ipr_hostrcb
*hostrcb
)
1794 struct ipr_hostrcb_type_04_error
*error
;
1795 struct ipr_hostrcb_array_data_entry
*array_entry
;
1796 const u8 zero_sn
[IPR_SERIAL_NUM_LEN
] = { [0 ... IPR_SERIAL_NUM_LEN
-1] = '0' };
1798 error
= &hostrcb
->hcam
.u
.error
.u
.type_04_error
;
1802 ipr_err("RAID %s Array Configuration: %d:%d:%d:%d\n",
1803 error
->protection_level
,
1804 ioa_cfg
->host
->host_no
,
1805 error
->last_func_vset_res_addr
.bus
,
1806 error
->last_func_vset_res_addr
.target
,
1807 error
->last_func_vset_res_addr
.lun
);
1811 array_entry
= error
->array_member
;
1813 for (i
= 0; i
< 18; i
++) {
1814 if (!memcmp(array_entry
->vpd
.sn
, zero_sn
, IPR_SERIAL_NUM_LEN
))
1817 if (be32_to_cpu(error
->exposed_mode_adn
) == i
)
1818 ipr_err("Exposed Array Member %d:\n", i
);
1820 ipr_err("Array Member %d:\n", i
);
1822 ipr_log_vpd(&array_entry
->vpd
);
1824 ipr_phys_res_err(ioa_cfg
, array_entry
->dev_res_addr
, "Current Location");
1825 ipr_phys_res_err(ioa_cfg
, array_entry
->expected_dev_res_addr
,
1826 "Expected Location");
1831 array_entry
= error
->array_member2
;
1838 * ipr_log_hex_data - Log additional hex IOA error data.
1839 * @ioa_cfg: ioa config struct
1840 * @data: IOA error data
1846 static void ipr_log_hex_data(struct ipr_ioa_cfg
*ioa_cfg
, u32
*data
, int len
)
1853 if (ioa_cfg
->log_level
<= IPR_DEFAULT_LOG_LEVEL
)
1854 len
= min_t(int, len
, IPR_DEFAULT_MAX_ERROR_DUMP
);
1856 for (i
= 0; i
< len
/ 4; i
+= 4) {
1857 ipr_err("%08X: %08X %08X %08X %08X\n", i
*4,
1858 be32_to_cpu(data
[i
]),
1859 be32_to_cpu(data
[i
+1]),
1860 be32_to_cpu(data
[i
+2]),
1861 be32_to_cpu(data
[i
+3]));
1866 * ipr_log_enhanced_dual_ioa_error - Log an enhanced dual adapter error.
1867 * @ioa_cfg: ioa config struct
1868 * @hostrcb: hostrcb struct
1873 static void ipr_log_enhanced_dual_ioa_error(struct ipr_ioa_cfg
*ioa_cfg
,
1874 struct ipr_hostrcb
*hostrcb
)
1876 struct ipr_hostrcb_type_17_error
*error
;
1879 error
= &hostrcb
->hcam
.u
.error64
.u
.type_17_error
;
1881 error
= &hostrcb
->hcam
.u
.error
.u
.type_17_error
;
1883 error
->failure_reason
[sizeof(error
->failure_reason
) - 1] = '\0';
1884 strim(error
->failure_reason
);
1886 ipr_hcam_err(hostrcb
, "%s [PRC: %08X]\n", error
->failure_reason
,
1887 be32_to_cpu(hostrcb
->hcam
.u
.error
.prc
));
1888 ipr_log_ext_vpd_compact("Remote IOA", hostrcb
, &error
->vpd
);
1889 ipr_log_hex_data(ioa_cfg
, error
->data
,
1890 be32_to_cpu(hostrcb
->hcam
.length
) -
1891 (offsetof(struct ipr_hostrcb_error
, u
) +
1892 offsetof(struct ipr_hostrcb_type_17_error
, data
)));
1896 * ipr_log_dual_ioa_error - Log a dual adapter error.
1897 * @ioa_cfg: ioa config struct
1898 * @hostrcb: hostrcb struct
1903 static void ipr_log_dual_ioa_error(struct ipr_ioa_cfg
*ioa_cfg
,
1904 struct ipr_hostrcb
*hostrcb
)
1906 struct ipr_hostrcb_type_07_error
*error
;
1908 error
= &hostrcb
->hcam
.u
.error
.u
.type_07_error
;
1909 error
->failure_reason
[sizeof(error
->failure_reason
) - 1] = '\0';
1910 strim(error
->failure_reason
);
1912 ipr_hcam_err(hostrcb
, "%s [PRC: %08X]\n", error
->failure_reason
,
1913 be32_to_cpu(hostrcb
->hcam
.u
.error
.prc
));
1914 ipr_log_vpd_compact("Remote IOA", hostrcb
, &error
->vpd
);
1915 ipr_log_hex_data(ioa_cfg
, error
->data
,
1916 be32_to_cpu(hostrcb
->hcam
.length
) -
1917 (offsetof(struct ipr_hostrcb_error
, u
) +
1918 offsetof(struct ipr_hostrcb_type_07_error
, data
)));
1921 static const struct {
1924 } path_active_desc
[] = {
1925 { IPR_PATH_NO_INFO
, "Path" },
1926 { IPR_PATH_ACTIVE
, "Active path" },
1927 { IPR_PATH_NOT_ACTIVE
, "Inactive path" }
1930 static const struct {
1933 } path_state_desc
[] = {
1934 { IPR_PATH_STATE_NO_INFO
, "has no path state information available" },
1935 { IPR_PATH_HEALTHY
, "is healthy" },
1936 { IPR_PATH_DEGRADED
, "is degraded" },
1937 { IPR_PATH_FAILED
, "is failed" }
1941 * ipr_log_fabric_path - Log a fabric path error
1942 * @hostrcb: hostrcb struct
1943 * @fabric: fabric descriptor
1948 static void ipr_log_fabric_path(struct ipr_hostrcb
*hostrcb
,
1949 struct ipr_hostrcb_fabric_desc
*fabric
)
1952 u8 path_state
= fabric
->path_state
;
1953 u8 active
= path_state
& IPR_PATH_ACTIVE_MASK
;
1954 u8 state
= path_state
& IPR_PATH_STATE_MASK
;
1956 for (i
= 0; i
< ARRAY_SIZE(path_active_desc
); i
++) {
1957 if (path_active_desc
[i
].active
!= active
)
1960 for (j
= 0; j
< ARRAY_SIZE(path_state_desc
); j
++) {
1961 if (path_state_desc
[j
].state
!= state
)
1964 if (fabric
->cascaded_expander
== 0xff && fabric
->phy
== 0xff) {
1965 ipr_hcam_err(hostrcb
, "%s %s: IOA Port=%d\n",
1966 path_active_desc
[i
].desc
, path_state_desc
[j
].desc
,
1968 } else if (fabric
->cascaded_expander
== 0xff) {
1969 ipr_hcam_err(hostrcb
, "%s %s: IOA Port=%d, Phy=%d\n",
1970 path_active_desc
[i
].desc
, path_state_desc
[j
].desc
,
1971 fabric
->ioa_port
, fabric
->phy
);
1972 } else if (fabric
->phy
== 0xff) {
1973 ipr_hcam_err(hostrcb
, "%s %s: IOA Port=%d, Cascade=%d\n",
1974 path_active_desc
[i
].desc
, path_state_desc
[j
].desc
,
1975 fabric
->ioa_port
, fabric
->cascaded_expander
);
1977 ipr_hcam_err(hostrcb
, "%s %s: IOA Port=%d, Cascade=%d, Phy=%d\n",
1978 path_active_desc
[i
].desc
, path_state_desc
[j
].desc
,
1979 fabric
->ioa_port
, fabric
->cascaded_expander
, fabric
->phy
);
1985 ipr_err("Path state=%02X IOA Port=%d Cascade=%d Phy=%d\n", path_state
,
1986 fabric
->ioa_port
, fabric
->cascaded_expander
, fabric
->phy
);
1990 * ipr_log64_fabric_path - Log a fabric path error
1991 * @hostrcb: hostrcb struct
1992 * @fabric: fabric descriptor
1997 static void ipr_log64_fabric_path(struct ipr_hostrcb
*hostrcb
,
1998 struct ipr_hostrcb64_fabric_desc
*fabric
)
2001 u8 path_state
= fabric
->path_state
;
2002 u8 active
= path_state
& IPR_PATH_ACTIVE_MASK
;
2003 u8 state
= path_state
& IPR_PATH_STATE_MASK
;
2004 char buffer
[IPR_MAX_RES_PATH_LENGTH
];
2006 for (i
= 0; i
< ARRAY_SIZE(path_active_desc
); i
++) {
2007 if (path_active_desc
[i
].active
!= active
)
2010 for (j
= 0; j
< ARRAY_SIZE(path_state_desc
); j
++) {
2011 if (path_state_desc
[j
].state
!= state
)
2014 ipr_hcam_err(hostrcb
, "%s %s: Resource Path=%s\n",
2015 path_active_desc
[i
].desc
, path_state_desc
[j
].desc
,
2016 ipr_format_res_path(hostrcb
->ioa_cfg
,
2018 buffer
, sizeof(buffer
)));
2023 ipr_err("Path state=%02X Resource Path=%s\n", path_state
,
2024 ipr_format_res_path(hostrcb
->ioa_cfg
, fabric
->res_path
,
2025 buffer
, sizeof(buffer
)));
2028 static const struct {
2031 } path_type_desc
[] = {
2032 { IPR_PATH_CFG_IOA_PORT
, "IOA port" },
2033 { IPR_PATH_CFG_EXP_PORT
, "Expander port" },
2034 { IPR_PATH_CFG_DEVICE_PORT
, "Device port" },
2035 { IPR_PATH_CFG_DEVICE_LUN
, "Device LUN" }
2038 static const struct {
2041 } path_status_desc
[] = {
2042 { IPR_PATH_CFG_NO_PROB
, "Functional" },
2043 { IPR_PATH_CFG_DEGRADED
, "Degraded" },
2044 { IPR_PATH_CFG_FAILED
, "Failed" },
2045 { IPR_PATH_CFG_SUSPECT
, "Suspect" },
2046 { IPR_PATH_NOT_DETECTED
, "Missing" },
2047 { IPR_PATH_INCORRECT_CONN
, "Incorrectly connected" }
/* SAS link-rate names indexed by (link_rate & IPR_PHY_LINK_RATE_MASK).
 * NOTE(review): only the declaration and "phy reset problem" survived the
 * extraction; the remaining entries are reconstructed from the standard SAS
 * NEGOTIATED LOGICAL LINK RATE encoding — verify every string against the
 * upstream driver before merging. */
static const char *link_rate[] = {
	"unknown",
	"disabled",
	"phy reset problem",
	"spinup hold",
	"port selector",
	"resetting",
	"unknown",
	"unknown",
	"1.5Gbps",
	"3.0Gbps",
	"unknown",
	"unknown",
	"unknown",
	"unknown",
	"unknown",
	"unknown"
};
2070 * ipr_log_path_elem - Log a fabric path element.
2071 * @hostrcb: hostrcb struct
2072 * @cfg: fabric path element struct
2077 static void ipr_log_path_elem(struct ipr_hostrcb
*hostrcb
,
2078 struct ipr_hostrcb_config_element
*cfg
)
2081 u8 type
= cfg
->type_status
& IPR_PATH_CFG_TYPE_MASK
;
2082 u8 status
= cfg
->type_status
& IPR_PATH_CFG_STATUS_MASK
;
2084 if (type
== IPR_PATH_CFG_NOT_EXIST
)
2087 for (i
= 0; i
< ARRAY_SIZE(path_type_desc
); i
++) {
2088 if (path_type_desc
[i
].type
!= type
)
2091 for (j
= 0; j
< ARRAY_SIZE(path_status_desc
); j
++) {
2092 if (path_status_desc
[j
].status
!= status
)
2095 if (type
== IPR_PATH_CFG_IOA_PORT
) {
2096 ipr_hcam_err(hostrcb
, "%s %s: Phy=%d, Link rate=%s, WWN=%08X%08X\n",
2097 path_status_desc
[j
].desc
, path_type_desc
[i
].desc
,
2098 cfg
->phy
, link_rate
[cfg
->link_rate
& IPR_PHY_LINK_RATE_MASK
],
2099 be32_to_cpu(cfg
->wwid
[0]), be32_to_cpu(cfg
->wwid
[1]));
2101 if (cfg
->cascaded_expander
== 0xff && cfg
->phy
== 0xff) {
2102 ipr_hcam_err(hostrcb
, "%s %s: Link rate=%s, WWN=%08X%08X\n",
2103 path_status_desc
[j
].desc
, path_type_desc
[i
].desc
,
2104 link_rate
[cfg
->link_rate
& IPR_PHY_LINK_RATE_MASK
],
2105 be32_to_cpu(cfg
->wwid
[0]), be32_to_cpu(cfg
->wwid
[1]));
2106 } else if (cfg
->cascaded_expander
== 0xff) {
2107 ipr_hcam_err(hostrcb
, "%s %s: Phy=%d, Link rate=%s, "
2108 "WWN=%08X%08X\n", path_status_desc
[j
].desc
,
2109 path_type_desc
[i
].desc
, cfg
->phy
,
2110 link_rate
[cfg
->link_rate
& IPR_PHY_LINK_RATE_MASK
],
2111 be32_to_cpu(cfg
->wwid
[0]), be32_to_cpu(cfg
->wwid
[1]));
2112 } else if (cfg
->phy
== 0xff) {
2113 ipr_hcam_err(hostrcb
, "%s %s: Cascade=%d, Link rate=%s, "
2114 "WWN=%08X%08X\n", path_status_desc
[j
].desc
,
2115 path_type_desc
[i
].desc
, cfg
->cascaded_expander
,
2116 link_rate
[cfg
->link_rate
& IPR_PHY_LINK_RATE_MASK
],
2117 be32_to_cpu(cfg
->wwid
[0]), be32_to_cpu(cfg
->wwid
[1]));
2119 ipr_hcam_err(hostrcb
, "%s %s: Cascade=%d, Phy=%d, Link rate=%s "
2120 "WWN=%08X%08X\n", path_status_desc
[j
].desc
,
2121 path_type_desc
[i
].desc
, cfg
->cascaded_expander
, cfg
->phy
,
2122 link_rate
[cfg
->link_rate
& IPR_PHY_LINK_RATE_MASK
],
2123 be32_to_cpu(cfg
->wwid
[0]), be32_to_cpu(cfg
->wwid
[1]));
2130 ipr_hcam_err(hostrcb
, "Path element=%02X: Cascade=%d Phy=%d Link rate=%s "
2131 "WWN=%08X%08X\n", cfg
->type_status
, cfg
->cascaded_expander
, cfg
->phy
,
2132 link_rate
[cfg
->link_rate
& IPR_PHY_LINK_RATE_MASK
],
2133 be32_to_cpu(cfg
->wwid
[0]), be32_to_cpu(cfg
->wwid
[1]));
2137 * ipr_log64_path_elem - Log a fabric path element.
2138 * @hostrcb: hostrcb struct
2139 * @cfg: fabric path element struct
2144 static void ipr_log64_path_elem(struct ipr_hostrcb
*hostrcb
,
2145 struct ipr_hostrcb64_config_element
*cfg
)
2148 u8 desc_id
= cfg
->descriptor_id
& IPR_DESCRIPTOR_MASK
;
2149 u8 type
= cfg
->type_status
& IPR_PATH_CFG_TYPE_MASK
;
2150 u8 status
= cfg
->type_status
& IPR_PATH_CFG_STATUS_MASK
;
2151 char buffer
[IPR_MAX_RES_PATH_LENGTH
];
2153 if (type
== IPR_PATH_CFG_NOT_EXIST
|| desc_id
!= IPR_DESCRIPTOR_SIS64
)
2156 for (i
= 0; i
< ARRAY_SIZE(path_type_desc
); i
++) {
2157 if (path_type_desc
[i
].type
!= type
)
2160 for (j
= 0; j
< ARRAY_SIZE(path_status_desc
); j
++) {
2161 if (path_status_desc
[j
].status
!= status
)
2164 ipr_hcam_err(hostrcb
, "%s %s: Resource Path=%s, Link rate=%s, WWN=%08X%08X\n",
2165 path_status_desc
[j
].desc
, path_type_desc
[i
].desc
,
2166 ipr_format_res_path(hostrcb
->ioa_cfg
,
2167 cfg
->res_path
, buffer
, sizeof(buffer
)),
2168 link_rate
[cfg
->link_rate
& IPR_PHY_LINK_RATE_MASK
],
2169 be32_to_cpu(cfg
->wwid
[0]),
2170 be32_to_cpu(cfg
->wwid
[1]));
2174 ipr_hcam_err(hostrcb
, "Path element=%02X: Resource Path=%s, Link rate=%s "
2175 "WWN=%08X%08X\n", cfg
->type_status
,
2176 ipr_format_res_path(hostrcb
->ioa_cfg
,
2177 cfg
->res_path
, buffer
, sizeof(buffer
)),
2178 link_rate
[cfg
->link_rate
& IPR_PHY_LINK_RATE_MASK
],
2179 be32_to_cpu(cfg
->wwid
[0]), be32_to_cpu(cfg
->wwid
[1]));
2183 * ipr_log_fabric_error - Log a fabric error.
2184 * @ioa_cfg: ioa config struct
2185 * @hostrcb: hostrcb struct
2190 static void ipr_log_fabric_error(struct ipr_ioa_cfg
*ioa_cfg
,
2191 struct ipr_hostrcb
*hostrcb
)
2193 struct ipr_hostrcb_type_20_error
*error
;
2194 struct ipr_hostrcb_fabric_desc
*fabric
;
2195 struct ipr_hostrcb_config_element
*cfg
;
2198 error
= &hostrcb
->hcam
.u
.error
.u
.type_20_error
;
2199 error
->failure_reason
[sizeof(error
->failure_reason
) - 1] = '\0';
2200 ipr_hcam_err(hostrcb
, "%s\n", error
->failure_reason
);
2202 add_len
= be32_to_cpu(hostrcb
->hcam
.length
) -
2203 (offsetof(struct ipr_hostrcb_error
, u
) +
2204 offsetof(struct ipr_hostrcb_type_20_error
, desc
));
2206 for (i
= 0, fabric
= error
->desc
; i
< error
->num_entries
; i
++) {
2207 ipr_log_fabric_path(hostrcb
, fabric
);
2208 for_each_fabric_cfg(fabric
, cfg
)
2209 ipr_log_path_elem(hostrcb
, cfg
);
2211 add_len
-= be16_to_cpu(fabric
->length
);
2212 fabric
= (struct ipr_hostrcb_fabric_desc
*)
2213 ((unsigned long)fabric
+ be16_to_cpu(fabric
->length
));
2216 ipr_log_hex_data(ioa_cfg
, (u32
*)fabric
, add_len
);
2220 * ipr_log_sis64_array_error - Log a sis64 array error.
2221 * @ioa_cfg: ioa config struct
2222 * @hostrcb: hostrcb struct
2227 static void ipr_log_sis64_array_error(struct ipr_ioa_cfg
*ioa_cfg
,
2228 struct ipr_hostrcb
*hostrcb
)
2231 struct ipr_hostrcb_type_24_error
*error
;
2232 struct ipr_hostrcb64_array_data_entry
*array_entry
;
2233 char buffer
[IPR_MAX_RES_PATH_LENGTH
];
2234 const u8 zero_sn
[IPR_SERIAL_NUM_LEN
] = { [0 ... IPR_SERIAL_NUM_LEN
-1] = '0' };
2236 error
= &hostrcb
->hcam
.u
.error64
.u
.type_24_error
;
2240 ipr_err("RAID %s Array Configuration: %s\n",
2241 error
->protection_level
,
2242 ipr_format_res_path(ioa_cfg
, error
->last_res_path
,
2243 buffer
, sizeof(buffer
)));
2247 array_entry
= error
->array_member
;
2248 num_entries
= min_t(u32
, error
->num_entries
,
2249 ARRAY_SIZE(error
->array_member
));
2251 for (i
= 0; i
< num_entries
; i
++, array_entry
++) {
2253 if (!memcmp(array_entry
->vpd
.vpd
.sn
, zero_sn
, IPR_SERIAL_NUM_LEN
))
2256 if (error
->exposed_mode_adn
== i
)
2257 ipr_err("Exposed Array Member %d:\n", i
);
2259 ipr_err("Array Member %d:\n", i
);
2261 ipr_err("Array Member %d:\n", i
);
2262 ipr_log_ext_vpd(&array_entry
->vpd
);
2263 ipr_err("Current Location: %s\n",
2264 ipr_format_res_path(ioa_cfg
, array_entry
->res_path
,
2265 buffer
, sizeof(buffer
)));
2266 ipr_err("Expected Location: %s\n",
2267 ipr_format_res_path(ioa_cfg
,
2268 array_entry
->expected_res_path
,
2269 buffer
, sizeof(buffer
)));
2276 * ipr_log_sis64_fabric_error - Log a sis64 fabric error.
2277 * @ioa_cfg: ioa config struct
2278 * @hostrcb: hostrcb struct
2283 static void ipr_log_sis64_fabric_error(struct ipr_ioa_cfg
*ioa_cfg
,
2284 struct ipr_hostrcb
*hostrcb
)
2286 struct ipr_hostrcb_type_30_error
*error
;
2287 struct ipr_hostrcb64_fabric_desc
*fabric
;
2288 struct ipr_hostrcb64_config_element
*cfg
;
2291 error
= &hostrcb
->hcam
.u
.error64
.u
.type_30_error
;
2293 error
->failure_reason
[sizeof(error
->failure_reason
) - 1] = '\0';
2294 ipr_hcam_err(hostrcb
, "%s\n", error
->failure_reason
);
2296 add_len
= be32_to_cpu(hostrcb
->hcam
.length
) -
2297 (offsetof(struct ipr_hostrcb64_error
, u
) +
2298 offsetof(struct ipr_hostrcb_type_30_error
, desc
));
2300 for (i
= 0, fabric
= error
->desc
; i
< error
->num_entries
; i
++) {
2301 ipr_log64_fabric_path(hostrcb
, fabric
);
2302 for_each_fabric_cfg(fabric
, cfg
)
2303 ipr_log64_path_elem(hostrcb
, cfg
);
2305 add_len
-= be16_to_cpu(fabric
->length
);
2306 fabric
= (struct ipr_hostrcb64_fabric_desc
*)
2307 ((unsigned long)fabric
+ be16_to_cpu(fabric
->length
));
2310 ipr_log_hex_data(ioa_cfg
, (u32
*)fabric
, add_len
);
2314 * ipr_log_generic_error - Log an adapter error.
2315 * @ioa_cfg: ioa config struct
2316 * @hostrcb: hostrcb struct
2321 static void ipr_log_generic_error(struct ipr_ioa_cfg
*ioa_cfg
,
2322 struct ipr_hostrcb
*hostrcb
)
2324 ipr_log_hex_data(ioa_cfg
, hostrcb
->hcam
.u
.raw
.data
,
2325 be32_to_cpu(hostrcb
->hcam
.length
));
2329 * ipr_get_error - Find the specfied IOASC in the ipr_error_table.
2332 * This function will return the index of into the ipr_error_table
2333 * for the specified IOASC. If the IOASC is not in the table,
2334 * 0 will be returned, which points to the entry used for unknown errors.
2337 * index into the ipr_error_table
2339 static u32
ipr_get_error(u32 ioasc
)
2343 for (i
= 0; i
< ARRAY_SIZE(ipr_error_table
); i
++)
2344 if (ipr_error_table
[i
].ioasc
== (ioasc
& IPR_IOASC_IOASC_MASK
))
2351 * ipr_handle_log_data - Log an adapter error.
2352 * @ioa_cfg: ioa config struct
2353 * @hostrcb: hostrcb struct
2355 * This function logs an adapter error to the system.
2360 static void ipr_handle_log_data(struct ipr_ioa_cfg
*ioa_cfg
,
2361 struct ipr_hostrcb
*hostrcb
)
2366 if (hostrcb
->hcam
.notify_type
!= IPR_HOST_RCB_NOTIF_TYPE_ERROR_LOG_ENTRY
)
2369 if (hostrcb
->hcam
.notifications_lost
== IPR_HOST_RCB_NOTIFICATIONS_LOST
)
2370 dev_err(&ioa_cfg
->pdev
->dev
, "Error notifications lost\n");
2373 ioasc
= be32_to_cpu(hostrcb
->hcam
.u
.error64
.fd_ioasc
);
2375 ioasc
= be32_to_cpu(hostrcb
->hcam
.u
.error
.fd_ioasc
);
2377 if (!ioa_cfg
->sis64
&& (ioasc
== IPR_IOASC_BUS_WAS_RESET
||
2378 ioasc
== IPR_IOASC_BUS_WAS_RESET_BY_OTHER
)) {
2379 /* Tell the midlayer we had a bus reset so it will handle the UA properly */
2380 scsi_report_bus_reset(ioa_cfg
->host
,
2381 hostrcb
->hcam
.u
.error
.fd_res_addr
.bus
);
2384 error_index
= ipr_get_error(ioasc
);
2386 if (!ipr_error_table
[error_index
].log_hcam
)
2389 ipr_hcam_err(hostrcb
, "%s\n", ipr_error_table
[error_index
].error
);
2391 /* Set indication we have logged an error */
2392 ioa_cfg
->errors_logged
++;
2394 if (ioa_cfg
->log_level
< ipr_error_table
[error_index
].log_hcam
)
2396 if (be32_to_cpu(hostrcb
->hcam
.length
) > sizeof(hostrcb
->hcam
.u
.raw
))
2397 hostrcb
->hcam
.length
= cpu_to_be32(sizeof(hostrcb
->hcam
.u
.raw
));
2399 switch (hostrcb
->hcam
.overlay_id
) {
2400 case IPR_HOST_RCB_OVERLAY_ID_2
:
2401 ipr_log_cache_error(ioa_cfg
, hostrcb
);
2403 case IPR_HOST_RCB_OVERLAY_ID_3
:
2404 ipr_log_config_error(ioa_cfg
, hostrcb
);
2406 case IPR_HOST_RCB_OVERLAY_ID_4
:
2407 case IPR_HOST_RCB_OVERLAY_ID_6
:
2408 ipr_log_array_error(ioa_cfg
, hostrcb
);
2410 case IPR_HOST_RCB_OVERLAY_ID_7
:
2411 ipr_log_dual_ioa_error(ioa_cfg
, hostrcb
);
2413 case IPR_HOST_RCB_OVERLAY_ID_12
:
2414 ipr_log_enhanced_cache_error(ioa_cfg
, hostrcb
);
2416 case IPR_HOST_RCB_OVERLAY_ID_13
:
2417 ipr_log_enhanced_config_error(ioa_cfg
, hostrcb
);
2419 case IPR_HOST_RCB_OVERLAY_ID_14
:
2420 case IPR_HOST_RCB_OVERLAY_ID_16
:
2421 ipr_log_enhanced_array_error(ioa_cfg
, hostrcb
);
2423 case IPR_HOST_RCB_OVERLAY_ID_17
:
2424 ipr_log_enhanced_dual_ioa_error(ioa_cfg
, hostrcb
);
2426 case IPR_HOST_RCB_OVERLAY_ID_20
:
2427 ipr_log_fabric_error(ioa_cfg
, hostrcb
);
2429 case IPR_HOST_RCB_OVERLAY_ID_23
:
2430 ipr_log_sis64_config_error(ioa_cfg
, hostrcb
);
2432 case IPR_HOST_RCB_OVERLAY_ID_24
:
2433 case IPR_HOST_RCB_OVERLAY_ID_26
:
2434 ipr_log_sis64_array_error(ioa_cfg
, hostrcb
);
2436 case IPR_HOST_RCB_OVERLAY_ID_30
:
2437 ipr_log_sis64_fabric_error(ioa_cfg
, hostrcb
);
2439 case IPR_HOST_RCB_OVERLAY_ID_1
:
2440 case IPR_HOST_RCB_OVERLAY_ID_DEFAULT
:
2442 ipr_log_generic_error(ioa_cfg
, hostrcb
);
2448 * ipr_process_error - Op done function for an adapter error log.
2449 * @ipr_cmd: ipr command struct
2451 * This function is the op done function for an error log host
2452 * controlled async from the adapter. It will log the error and
2453 * send the HCAM back to the adapter.
2458 static void ipr_process_error(struct ipr_cmnd
*ipr_cmd
)
2460 struct ipr_ioa_cfg
*ioa_cfg
= ipr_cmd
->ioa_cfg
;
2461 struct ipr_hostrcb
*hostrcb
= ipr_cmd
->u
.hostrcb
;
2462 u32 ioasc
= be32_to_cpu(ipr_cmd
->s
.ioasa
.hdr
.ioasc
);
2466 fd_ioasc
= be32_to_cpu(hostrcb
->hcam
.u
.error64
.fd_ioasc
);
2468 fd_ioasc
= be32_to_cpu(hostrcb
->hcam
.u
.error
.fd_ioasc
);
2470 list_del(&hostrcb
->queue
);
2471 list_add_tail(&ipr_cmd
->queue
, &ipr_cmd
->hrrq
->hrrq_free_q
);
2474 ipr_handle_log_data(ioa_cfg
, hostrcb
);
2475 if (fd_ioasc
== IPR_IOASC_NR_IOA_RESET_REQUIRED
)
2476 ipr_initiate_ioa_reset(ioa_cfg
, IPR_SHUTDOWN_ABBREV
);
2477 } else if (ioasc
!= IPR_IOASC_IOA_WAS_RESET
) {
2478 dev_err(&ioa_cfg
->pdev
->dev
,
2479 "Host RCB failed with IOASC: 0x%08X\n", ioasc
);
2482 ipr_send_hcam(ioa_cfg
, IPR_HCAM_CDB_OP_CODE_LOG_DATA
, hostrcb
);
2486 * ipr_timeout - An internally generated op has timed out.
2487 * @ipr_cmd: ipr command struct
2489 * This function blocks host requests and initiates an
2495 static void ipr_timeout(struct ipr_cmnd
*ipr_cmd
)
2497 unsigned long lock_flags
= 0;
2498 struct ipr_ioa_cfg
*ioa_cfg
= ipr_cmd
->ioa_cfg
;
2501 spin_lock_irqsave(ioa_cfg
->host
->host_lock
, lock_flags
);
2503 ioa_cfg
->errors_logged
++;
2504 dev_err(&ioa_cfg
->pdev
->dev
,
2505 "Adapter being reset due to command timeout.\n");
2507 if (WAIT_FOR_DUMP
== ioa_cfg
->sdt_state
)
2508 ioa_cfg
->sdt_state
= GET_DUMP
;
2510 if (!ioa_cfg
->in_reset_reload
|| ioa_cfg
->reset_cmd
== ipr_cmd
)
2511 ipr_initiate_ioa_reset(ioa_cfg
, IPR_SHUTDOWN_NONE
);
2513 spin_unlock_irqrestore(ioa_cfg
->host
->host_lock
, lock_flags
);
2518 * ipr_oper_timeout - Adapter timed out transitioning to operational
2519 * @ipr_cmd: ipr command struct
2521 * This function blocks host requests and initiates an
2527 static void ipr_oper_timeout(struct ipr_cmnd
*ipr_cmd
)
2529 unsigned long lock_flags
= 0;
2530 struct ipr_ioa_cfg
*ioa_cfg
= ipr_cmd
->ioa_cfg
;
2533 spin_lock_irqsave(ioa_cfg
->host
->host_lock
, lock_flags
);
2535 ioa_cfg
->errors_logged
++;
2536 dev_err(&ioa_cfg
->pdev
->dev
,
2537 "Adapter timed out transitioning to operational.\n");
2539 if (WAIT_FOR_DUMP
== ioa_cfg
->sdt_state
)
2540 ioa_cfg
->sdt_state
= GET_DUMP
;
2542 if (!ioa_cfg
->in_reset_reload
|| ioa_cfg
->reset_cmd
== ipr_cmd
) {
2544 ioa_cfg
->reset_retries
+= IPR_NUM_RESET_RELOAD_RETRIES
;
2545 ipr_initiate_ioa_reset(ioa_cfg
, IPR_SHUTDOWN_NONE
);
2548 spin_unlock_irqrestore(ioa_cfg
->host
->host_lock
, lock_flags
);
2553 * ipr_reset_reload - Reset/Reload the IOA
2554 * @ioa_cfg: ioa config struct
2555 * @shutdown_type: shutdown type
2557 * This function resets the adapter and re-initializes it.
2558 * This function assumes that all new host commands have been stopped.
2562 static int ipr_reset_reload(struct ipr_ioa_cfg
*ioa_cfg
,
2563 enum ipr_shutdown_type shutdown_type
)
2565 if (!ioa_cfg
->in_reset_reload
)
2566 ipr_initiate_ioa_reset(ioa_cfg
, shutdown_type
);
2568 spin_unlock_irq(ioa_cfg
->host
->host_lock
);
2569 wait_event(ioa_cfg
->reset_wait_q
, !ioa_cfg
->in_reset_reload
);
2570 spin_lock_irq(ioa_cfg
->host
->host_lock
);
2572 /* If we got hit with a host reset while we were already resetting
2573 the adapter for some reason, and the reset failed. */
2574 if (ioa_cfg
->hrrq
[IPR_INIT_HRRQ
].ioa_is_dead
) {
2583 * ipr_find_ses_entry - Find matching SES in SES table
2584 * @res: resource entry struct of SES
2587 * pointer to SES table entry / NULL on failure
2589 static const struct ipr_ses_table_entry
*
2590 ipr_find_ses_entry(struct ipr_resource_entry
*res
)
2593 struct ipr_std_inq_vpids
*vpids
;
2594 const struct ipr_ses_table_entry
*ste
= ipr_ses_table
;
2596 for (i
= 0; i
< ARRAY_SIZE(ipr_ses_table
); i
++, ste
++) {
2597 for (j
= 0, matches
= 0; j
< IPR_PROD_ID_LEN
; j
++) {
2598 if (ste
->compare_product_id_byte
[j
] == 'X') {
2599 vpids
= &res
->std_inq_data
.vpids
;
2600 if (vpids
->product_id
[j
] == ste
->product_id
[j
])
2608 if (matches
== IPR_PROD_ID_LEN
)
2616 * ipr_get_max_scsi_speed - Determine max SCSI speed for a given bus
2617 * @ioa_cfg: ioa config struct
2619 * @bus_width: bus width
2622 * SCSI bus speed in units of 100KHz, 1600 is 160 MHz
2623 * For a 2-byte wide SCSI bus, the maximum transfer speed is
2624 * twice the maximum transfer rate (e.g. for a wide enabled bus,
2625 * max 160MHz = max 320MB/sec).
2627 static u32
ipr_get_max_scsi_speed(struct ipr_ioa_cfg
*ioa_cfg
, u8 bus
, u8 bus_width
)
2629 struct ipr_resource_entry
*res
;
2630 const struct ipr_ses_table_entry
*ste
;
2631 u32 max_xfer_rate
= IPR_MAX_SCSI_RATE(bus_width
);
2633 /* Loop through each config table entry in the config table buffer */
2634 list_for_each_entry(res
, &ioa_cfg
->used_res_q
, queue
) {
2635 if (!(IPR_IS_SES_DEVICE(res
->std_inq_data
)))
2638 if (bus
!= res
->bus
)
2641 if (!(ste
= ipr_find_ses_entry(res
)))
2644 max_xfer_rate
= (ste
->max_bus_speed_limit
* 10) / (bus_width
/ 8);
2647 return max_xfer_rate
;
2651 * ipr_wait_iodbg_ack - Wait for an IODEBUG ACK from the IOA
2652 * @ioa_cfg: ioa config struct
2653 * @max_delay: max delay in micro-seconds to wait
2655 * Waits for an IODEBUG ACK from the IOA, doing busy looping.
2658 * 0 on success / other on failure
2660 static int ipr_wait_iodbg_ack(struct ipr_ioa_cfg
*ioa_cfg
, int max_delay
)
2662 volatile u32 pcii_reg
;
2665 /* Read interrupt reg until IOA signals IO Debug Acknowledge */
2666 while (delay
< max_delay
) {
2667 pcii_reg
= readl(ioa_cfg
->regs
.sense_interrupt_reg
);
2669 if (pcii_reg
& IPR_PCII_IO_DEBUG_ACKNOWLEDGE
)
2672 /* udelay cannot be used if delay is more than a few milliseconds */
2673 if ((delay
/ 1000) > MAX_UDELAY_MS
)
2674 mdelay(delay
/ 1000);
2684 * ipr_get_sis64_dump_data_section - Dump IOA memory
2685 * @ioa_cfg: ioa config struct
2686 * @start_addr: adapter address to dump
2687 * @dest: destination kernel buffer
2688 * @length_in_words: length to dump in 4 byte words
2693 static int ipr_get_sis64_dump_data_section(struct ipr_ioa_cfg
*ioa_cfg
,
2695 __be32
*dest
, u32 length_in_words
)
2699 for (i
= 0; i
< length_in_words
; i
++) {
2700 writel(start_addr
+(i
*4), ioa_cfg
->regs
.dump_addr_reg
);
2701 *dest
= cpu_to_be32(readl(ioa_cfg
->regs
.dump_data_reg
));
2709 * ipr_get_ldump_data_section - Dump IOA memory
2710 * @ioa_cfg: ioa config struct
2711 * @start_addr: adapter address to dump
2712 * @dest: destination kernel buffer
2713 * @length_in_words: length to dump in 4 byte words
2716 * 0 on success / -EIO on failure
2718 static int ipr_get_ldump_data_section(struct ipr_ioa_cfg
*ioa_cfg
,
2720 __be32
*dest
, u32 length_in_words
)
2722 volatile u32 temp_pcii_reg
;
2726 return ipr_get_sis64_dump_data_section(ioa_cfg
, start_addr
,
2727 dest
, length_in_words
);
2729 /* Write IOA interrupt reg starting LDUMP state */
2730 writel((IPR_UPROCI_RESET_ALERT
| IPR_UPROCI_IO_DEBUG_ALERT
),
2731 ioa_cfg
->regs
.set_uproc_interrupt_reg32
);
2733 /* Wait for IO debug acknowledge */
2734 if (ipr_wait_iodbg_ack(ioa_cfg
,
2735 IPR_LDUMP_MAX_LONG_ACK_DELAY_IN_USEC
)) {
2736 dev_err(&ioa_cfg
->pdev
->dev
,
2737 "IOA dump long data transfer timeout\n");
2741 /* Signal LDUMP interlocked - clear IO debug ack */
2742 writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE
,
2743 ioa_cfg
->regs
.clr_interrupt_reg
);
2745 /* Write Mailbox with starting address */
2746 writel(start_addr
, ioa_cfg
->ioa_mailbox
);
2748 /* Signal address valid - clear IOA Reset alert */
2749 writel(IPR_UPROCI_RESET_ALERT
,
2750 ioa_cfg
->regs
.clr_uproc_interrupt_reg32
);
2752 for (i
= 0; i
< length_in_words
; i
++) {
2753 /* Wait for IO debug acknowledge */
2754 if (ipr_wait_iodbg_ack(ioa_cfg
,
2755 IPR_LDUMP_MAX_SHORT_ACK_DELAY_IN_USEC
)) {
2756 dev_err(&ioa_cfg
->pdev
->dev
,
2757 "IOA dump short data transfer timeout\n");
2761 /* Read data from mailbox and increment destination pointer */
2762 *dest
= cpu_to_be32(readl(ioa_cfg
->ioa_mailbox
));
2765 /* For all but the last word of data, signal data received */
2766 if (i
< (length_in_words
- 1)) {
2767 /* Signal dump data received - Clear IO debug Ack */
2768 writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE
,
2769 ioa_cfg
->regs
.clr_interrupt_reg
);
2773 /* Signal end of block transfer. Set reset alert then clear IO debug ack */
2774 writel(IPR_UPROCI_RESET_ALERT
,
2775 ioa_cfg
->regs
.set_uproc_interrupt_reg32
);
2777 writel(IPR_UPROCI_IO_DEBUG_ALERT
,
2778 ioa_cfg
->regs
.clr_uproc_interrupt_reg32
);
2780 /* Signal dump data received - Clear IO debug Ack */
2781 writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE
,
2782 ioa_cfg
->regs
.clr_interrupt_reg
);
2784 /* Wait for IOA to signal LDUMP exit - IOA reset alert will be cleared */
2785 while (delay
< IPR_LDUMP_MAX_SHORT_ACK_DELAY_IN_USEC
) {
2787 readl(ioa_cfg
->regs
.sense_uproc_interrupt_reg32
);
2789 if (!(temp_pcii_reg
& IPR_UPROCI_RESET_ALERT
))
2799 #ifdef CONFIG_SCSI_IPR_DUMP
2801 * ipr_sdt_copy - Copy Smart Dump Table to kernel buffer
2802 * @ioa_cfg: ioa config struct
2803 * @pci_address: adapter address
2804 * @length: length of data to copy
2806 * Copy data from PCI adapter to kernel buffer.
2807 * Note: length MUST be a 4 byte multiple
2809 * 0 on success / other on failure
2811 static int ipr_sdt_copy(struct ipr_ioa_cfg
*ioa_cfg
,
2812 unsigned long pci_address
, u32 length
)
2814 int bytes_copied
= 0;
2815 int cur_len
, rc
, rem_len
, rem_page_len
, max_dump_size
;
2817 unsigned long lock_flags
= 0;
2818 struct ipr_ioa_dump
*ioa_dump
= &ioa_cfg
->dump
->ioa_dump
;
2821 max_dump_size
= IPR_FMT3_MAX_IOA_DUMP_SIZE
;
2823 max_dump_size
= IPR_FMT2_MAX_IOA_DUMP_SIZE
;
2825 while (bytes_copied
< length
&&
2826 (ioa_dump
->hdr
.len
+ bytes_copied
) < max_dump_size
) {
2827 if (ioa_dump
->page_offset
>= PAGE_SIZE
||
2828 ioa_dump
->page_offset
== 0) {
2829 page
= (__be32
*)__get_free_page(GFP_ATOMIC
);
2833 return bytes_copied
;
2836 ioa_dump
->page_offset
= 0;
2837 ioa_dump
->ioa_data
[ioa_dump
->next_page_index
] = page
;
2838 ioa_dump
->next_page_index
++;
2840 page
= ioa_dump
->ioa_data
[ioa_dump
->next_page_index
- 1];
2842 rem_len
= length
- bytes_copied
;
2843 rem_page_len
= PAGE_SIZE
- ioa_dump
->page_offset
;
2844 cur_len
= min(rem_len
, rem_page_len
);
2846 spin_lock_irqsave(ioa_cfg
->host
->host_lock
, lock_flags
);
2847 if (ioa_cfg
->sdt_state
== ABORT_DUMP
) {
2850 rc
= ipr_get_ldump_data_section(ioa_cfg
,
2851 pci_address
+ bytes_copied
,
2852 &page
[ioa_dump
->page_offset
/ 4],
2853 (cur_len
/ sizeof(u32
)));
2855 spin_unlock_irqrestore(ioa_cfg
->host
->host_lock
, lock_flags
);
2858 ioa_dump
->page_offset
+= cur_len
;
2859 bytes_copied
+= cur_len
;
2867 return bytes_copied
;
2871 * ipr_init_dump_entry_hdr - Initialize a dump entry header.
2872 * @hdr: dump entry header struct
2877 static void ipr_init_dump_entry_hdr(struct ipr_dump_entry_header
*hdr
)
2879 hdr
->eye_catcher
= IPR_DUMP_EYE_CATCHER
;
2881 hdr
->offset
= sizeof(*hdr
);
2882 hdr
->status
= IPR_DUMP_STATUS_SUCCESS
;
2886 * ipr_dump_ioa_type_data - Fill in the adapter type in the dump.
2887 * @ioa_cfg: ioa config struct
2888 * @driver_dump: driver dump struct
2893 static void ipr_dump_ioa_type_data(struct ipr_ioa_cfg
*ioa_cfg
,
2894 struct ipr_driver_dump
*driver_dump
)
2896 struct ipr_inquiry_page3
*ucode_vpd
= &ioa_cfg
->vpd_cbs
->page3_data
;
2898 ipr_init_dump_entry_hdr(&driver_dump
->ioa_type_entry
.hdr
);
2899 driver_dump
->ioa_type_entry
.hdr
.len
=
2900 sizeof(struct ipr_dump_ioa_type_entry
) -
2901 sizeof(struct ipr_dump_entry_header
);
2902 driver_dump
->ioa_type_entry
.hdr
.data_type
= IPR_DUMP_DATA_TYPE_BINARY
;
2903 driver_dump
->ioa_type_entry
.hdr
.id
= IPR_DUMP_DRIVER_TYPE_ID
;
2904 driver_dump
->ioa_type_entry
.type
= ioa_cfg
->type
;
2905 driver_dump
->ioa_type_entry
.fw_version
= (ucode_vpd
->major_release
<< 24) |
2906 (ucode_vpd
->card_type
<< 16) | (ucode_vpd
->minor_release
[0] << 8) |
2907 ucode_vpd
->minor_release
[1];
2908 driver_dump
->hdr
.num_entries
++;
2912 * ipr_dump_version_data - Fill in the driver version in the dump.
2913 * @ioa_cfg: ioa config struct
2914 * @driver_dump: driver dump struct
2919 static void ipr_dump_version_data(struct ipr_ioa_cfg
*ioa_cfg
,
2920 struct ipr_driver_dump
*driver_dump
)
2922 ipr_init_dump_entry_hdr(&driver_dump
->version_entry
.hdr
);
2923 driver_dump
->version_entry
.hdr
.len
=
2924 sizeof(struct ipr_dump_version_entry
) -
2925 sizeof(struct ipr_dump_entry_header
);
2926 driver_dump
->version_entry
.hdr
.data_type
= IPR_DUMP_DATA_TYPE_ASCII
;
2927 driver_dump
->version_entry
.hdr
.id
= IPR_DUMP_DRIVER_VERSION_ID
;
2928 strcpy(driver_dump
->version_entry
.version
, IPR_DRIVER_VERSION
);
2929 driver_dump
->hdr
.num_entries
++;
2933 * ipr_dump_trace_data - Fill in the IOA trace in the dump.
2934 * @ioa_cfg: ioa config struct
2935 * @driver_dump: driver dump struct
2940 static void ipr_dump_trace_data(struct ipr_ioa_cfg
*ioa_cfg
,
2941 struct ipr_driver_dump
*driver_dump
)
2943 ipr_init_dump_entry_hdr(&driver_dump
->trace_entry
.hdr
);
2944 driver_dump
->trace_entry
.hdr
.len
=
2945 sizeof(struct ipr_dump_trace_entry
) -
2946 sizeof(struct ipr_dump_entry_header
);
2947 driver_dump
->trace_entry
.hdr
.data_type
= IPR_DUMP_DATA_TYPE_BINARY
;
2948 driver_dump
->trace_entry
.hdr
.id
= IPR_DUMP_TRACE_ID
;
2949 memcpy(driver_dump
->trace_entry
.trace
, ioa_cfg
->trace
, IPR_TRACE_SIZE
);
2950 driver_dump
->hdr
.num_entries
++;
2954 * ipr_dump_location_data - Fill in the IOA location in the dump.
2955 * @ioa_cfg: ioa config struct
2956 * @driver_dump: driver dump struct
2961 static void ipr_dump_location_data(struct ipr_ioa_cfg
*ioa_cfg
,
2962 struct ipr_driver_dump
*driver_dump
)
2964 ipr_init_dump_entry_hdr(&driver_dump
->location_entry
.hdr
);
2965 driver_dump
->location_entry
.hdr
.len
=
2966 sizeof(struct ipr_dump_location_entry
) -
2967 sizeof(struct ipr_dump_entry_header
);
2968 driver_dump
->location_entry
.hdr
.data_type
= IPR_DUMP_DATA_TYPE_ASCII
;
2969 driver_dump
->location_entry
.hdr
.id
= IPR_DUMP_LOCATION_ID
;
2970 strcpy(driver_dump
->location_entry
.location
, dev_name(&ioa_cfg
->pdev
->dev
));
2971 driver_dump
->hdr
.num_entries
++;
2975 * ipr_get_ioa_dump - Perform a dump of the driver and adapter.
2976 * @ioa_cfg: ioa config struct
2977 * @dump: dump struct
2982 static void ipr_get_ioa_dump(struct ipr_ioa_cfg
*ioa_cfg
, struct ipr_dump
*dump
)
2984 unsigned long start_addr
, sdt_word
;
2985 unsigned long lock_flags
= 0;
2986 struct ipr_driver_dump
*driver_dump
= &dump
->driver_dump
;
2987 struct ipr_ioa_dump
*ioa_dump
= &dump
->ioa_dump
;
2988 u32 num_entries
, max_num_entries
, start_off
, end_off
;
2989 u32 max_dump_size
, bytes_to_copy
, bytes_copied
, rc
;
2990 struct ipr_sdt
*sdt
;
2996 spin_lock_irqsave(ioa_cfg
->host
->host_lock
, lock_flags
);
2998 if (ioa_cfg
->sdt_state
!= READ_DUMP
) {
2999 spin_unlock_irqrestore(ioa_cfg
->host
->host_lock
, lock_flags
);
3003 if (ioa_cfg
->sis64
) {
3004 spin_unlock_irqrestore(ioa_cfg
->host
->host_lock
, lock_flags
);
3005 ssleep(IPR_DUMP_DELAY_SECONDS
);
3006 spin_lock_irqsave(ioa_cfg
->host
->host_lock
, lock_flags
);
3009 start_addr
= readl(ioa_cfg
->ioa_mailbox
);
3011 if (!ioa_cfg
->sis64
&& !ipr_sdt_is_fmt2(start_addr
)) {
3012 dev_err(&ioa_cfg
->pdev
->dev
,
3013 "Invalid dump table format: %lx\n", start_addr
);
3014 spin_unlock_irqrestore(ioa_cfg
->host
->host_lock
, lock_flags
);
3018 dev_err(&ioa_cfg
->pdev
->dev
, "Dump of IOA initiated\n");
3020 driver_dump
->hdr
.eye_catcher
= IPR_DUMP_EYE_CATCHER
;
3022 /* Initialize the overall dump header */
3023 driver_dump
->hdr
.len
= sizeof(struct ipr_driver_dump
);
3024 driver_dump
->hdr
.num_entries
= 1;
3025 driver_dump
->hdr
.first_entry_offset
= sizeof(struct ipr_dump_header
);
3026 driver_dump
->hdr
.status
= IPR_DUMP_STATUS_SUCCESS
;
3027 driver_dump
->hdr
.os
= IPR_DUMP_OS_LINUX
;
3028 driver_dump
->hdr
.driver_name
= IPR_DUMP_DRIVER_NAME
;
3030 ipr_dump_version_data(ioa_cfg
, driver_dump
);
3031 ipr_dump_location_data(ioa_cfg
, driver_dump
);
3032 ipr_dump_ioa_type_data(ioa_cfg
, driver_dump
);
3033 ipr_dump_trace_data(ioa_cfg
, driver_dump
);
3035 /* Update dump_header */
3036 driver_dump
->hdr
.len
+= sizeof(struct ipr_dump_entry_header
);
3038 /* IOA Dump entry */
3039 ipr_init_dump_entry_hdr(&ioa_dump
->hdr
);
3040 ioa_dump
->hdr
.len
= 0;
3041 ioa_dump
->hdr
.data_type
= IPR_DUMP_DATA_TYPE_BINARY
;
3042 ioa_dump
->hdr
.id
= IPR_DUMP_IOA_DUMP_ID
;
3044 /* First entries in sdt are actually a list of dump addresses and
3045 lengths to gather the real dump data. sdt represents the pointer
3046 to the ioa generated dump table. Dump data will be extracted based
3047 on entries in this table */
3048 sdt
= &ioa_dump
->sdt
;
3050 if (ioa_cfg
->sis64
) {
3051 max_num_entries
= IPR_FMT3_NUM_SDT_ENTRIES
;
3052 max_dump_size
= IPR_FMT3_MAX_IOA_DUMP_SIZE
;
3054 max_num_entries
= IPR_FMT2_NUM_SDT_ENTRIES
;
3055 max_dump_size
= IPR_FMT2_MAX_IOA_DUMP_SIZE
;
3058 bytes_to_copy
= offsetof(struct ipr_sdt
, entry
) +
3059 (max_num_entries
* sizeof(struct ipr_sdt_entry
));
3060 rc
= ipr_get_ldump_data_section(ioa_cfg
, start_addr
, (__be32
*)sdt
,
3061 bytes_to_copy
/ sizeof(__be32
));
3063 /* Smart Dump table is ready to use and the first entry is valid */
3064 if (rc
|| ((be32_to_cpu(sdt
->hdr
.state
) != IPR_FMT3_SDT_READY_TO_USE
) &&
3065 (be32_to_cpu(sdt
->hdr
.state
) != IPR_FMT2_SDT_READY_TO_USE
))) {
3066 dev_err(&ioa_cfg
->pdev
->dev
,
3067 "Dump of IOA failed. Dump table not valid: %d, %X.\n",
3068 rc
, be32_to_cpu(sdt
->hdr
.state
));
3069 driver_dump
->hdr
.status
= IPR_DUMP_STATUS_FAILED
;
3070 ioa_cfg
->sdt_state
= DUMP_OBTAINED
;
3071 spin_unlock_irqrestore(ioa_cfg
->host
->host_lock
, lock_flags
);
3075 num_entries
= be32_to_cpu(sdt
->hdr
.num_entries_used
);
3077 if (num_entries
> max_num_entries
)
3078 num_entries
= max_num_entries
;
3080 /* Update dump length to the actual data to be copied */
3081 dump
->driver_dump
.hdr
.len
+= sizeof(struct ipr_sdt_header
);
3083 dump
->driver_dump
.hdr
.len
+= num_entries
* sizeof(struct ipr_sdt_entry
);
3085 dump
->driver_dump
.hdr
.len
+= max_num_entries
* sizeof(struct ipr_sdt_entry
);
3087 spin_unlock_irqrestore(ioa_cfg
->host
->host_lock
, lock_flags
);
3089 for (i
= 0; i
< num_entries
; i
++) {
3090 if (ioa_dump
->hdr
.len
> max_dump_size
) {
3091 driver_dump
->hdr
.status
= IPR_DUMP_STATUS_QUAL_SUCCESS
;
3095 if (sdt
->entry
[i
].flags
& IPR_SDT_VALID_ENTRY
) {
3096 sdt_word
= be32_to_cpu(sdt
->entry
[i
].start_token
);
3098 bytes_to_copy
= be32_to_cpu(sdt
->entry
[i
].end_token
);
3100 start_off
= sdt_word
& IPR_FMT2_MBX_ADDR_MASK
;
3101 end_off
= be32_to_cpu(sdt
->entry
[i
].end_token
);
3103 if (ipr_sdt_is_fmt2(sdt_word
) && sdt_word
)
3104 bytes_to_copy
= end_off
- start_off
;
3109 if (bytes_to_copy
> max_dump_size
) {
3110 sdt
->entry
[i
].flags
&= ~IPR_SDT_VALID_ENTRY
;
3114 /* Copy data from adapter to driver buffers */
3115 bytes_copied
= ipr_sdt_copy(ioa_cfg
, sdt_word
,
3118 ioa_dump
->hdr
.len
+= bytes_copied
;
3120 if (bytes_copied
!= bytes_to_copy
) {
3121 driver_dump
->hdr
.status
= IPR_DUMP_STATUS_QUAL_SUCCESS
;
3128 dev_err(&ioa_cfg
->pdev
->dev
, "Dump of IOA completed.\n");
3130 /* Update dump_header */
3131 driver_dump
->hdr
.len
+= ioa_dump
->hdr
.len
;
3133 ioa_cfg
->sdt_state
= DUMP_OBTAINED
;
3138 #define ipr_get_ioa_dump(ioa_cfg, dump) do { } while (0)
3142 * ipr_release_dump - Free adapter dump memory
3143 * @kref: kref struct
3148 static void ipr_release_dump(struct kref
*kref
)
3150 struct ipr_dump
*dump
= container_of(kref
, struct ipr_dump
, kref
);
3151 struct ipr_ioa_cfg
*ioa_cfg
= dump
->ioa_cfg
;
3152 unsigned long lock_flags
= 0;
3156 spin_lock_irqsave(ioa_cfg
->host
->host_lock
, lock_flags
);
3157 ioa_cfg
->dump
= NULL
;
3158 ioa_cfg
->sdt_state
= INACTIVE
;
3159 spin_unlock_irqrestore(ioa_cfg
->host
->host_lock
, lock_flags
);
3161 for (i
= 0; i
< dump
->ioa_dump
.next_page_index
; i
++)
3162 free_page((unsigned long) dump
->ioa_dump
.ioa_data
[i
]);
3164 vfree(dump
->ioa_dump
.ioa_data
);
3170 * ipr_worker_thread - Worker thread
3171 * @work: ioa config struct
3173 * Called at task level from a work thread. This function takes care
3174 * of adding and removing device from the mid-layer as configuration
3175 * changes are detected by the adapter.
3180 static void ipr_worker_thread(struct work_struct
*work
)
3182 unsigned long lock_flags
;
3183 struct ipr_resource_entry
*res
;
3184 struct scsi_device
*sdev
;
3185 struct ipr_dump
*dump
;
3186 struct ipr_ioa_cfg
*ioa_cfg
=
3187 container_of(work
, struct ipr_ioa_cfg
, work_q
);
3188 u8 bus
, target
, lun
;
3192 spin_lock_irqsave(ioa_cfg
->host
->host_lock
, lock_flags
);
3194 if (ioa_cfg
->sdt_state
== READ_DUMP
) {
3195 dump
= ioa_cfg
->dump
;
3197 spin_unlock_irqrestore(ioa_cfg
->host
->host_lock
, lock_flags
);
3200 kref_get(&dump
->kref
);
3201 spin_unlock_irqrestore(ioa_cfg
->host
->host_lock
, lock_flags
);
3202 ipr_get_ioa_dump(ioa_cfg
, dump
);
3203 kref_put(&dump
->kref
, ipr_release_dump
);
3205 spin_lock_irqsave(ioa_cfg
->host
->host_lock
, lock_flags
);
3206 if (ioa_cfg
->sdt_state
== DUMP_OBTAINED
&& !ioa_cfg
->dump_timeout
)
3207 ipr_initiate_ioa_reset(ioa_cfg
, IPR_SHUTDOWN_NONE
);
3208 spin_unlock_irqrestore(ioa_cfg
->host
->host_lock
, lock_flags
);
3215 if (!ioa_cfg
->hrrq
[IPR_INIT_HRRQ
].allow_cmds
||
3216 !ioa_cfg
->allow_ml_add_del
) {
3217 spin_unlock_irqrestore(ioa_cfg
->host
->host_lock
, lock_flags
);
3221 list_for_each_entry(res
, &ioa_cfg
->used_res_q
, queue
) {
3222 if (res
->del_from_ml
&& res
->sdev
) {
3225 if (!scsi_device_get(sdev
)) {
3226 if (!res
->add_to_ml
)
3227 list_move_tail(&res
->queue
, &ioa_cfg
->free_res_q
);
3229 res
->del_from_ml
= 0;
3230 spin_unlock_irqrestore(ioa_cfg
->host
->host_lock
, lock_flags
);
3231 scsi_remove_device(sdev
);
3232 scsi_device_put(sdev
);
3233 spin_lock_irqsave(ioa_cfg
->host
->host_lock
, lock_flags
);
3240 list_for_each_entry(res
, &ioa_cfg
->used_res_q
, queue
) {
3241 if (res
->add_to_ml
) {
3243 target
= res
->target
;
3246 spin_unlock_irqrestore(ioa_cfg
->host
->host_lock
, lock_flags
);
3247 scsi_add_device(ioa_cfg
->host
, bus
, target
, lun
);
3248 spin_lock_irqsave(ioa_cfg
->host
->host_lock
, lock_flags
);
3253 spin_unlock_irqrestore(ioa_cfg
->host
->host_lock
, lock_flags
);
3254 kobject_uevent(&ioa_cfg
->host
->shost_dev
.kobj
, KOBJ_CHANGE
);
3258 #ifdef CONFIG_SCSI_IPR_TRACE
3260 * ipr_read_trace - Dump the adapter trace
3261 * @filp: open sysfs file
3262 * @kobj: kobject struct
3263 * @bin_attr: bin_attribute struct
3266 * @count: buffer size
3269 * number of bytes printed to buffer
3271 static ssize_t
ipr_read_trace(struct file
*filp
, struct kobject
*kobj
,
3272 struct bin_attribute
*bin_attr
,
3273 char *buf
, loff_t off
, size_t count
)
3275 struct device
*dev
= container_of(kobj
, struct device
, kobj
);
3276 struct Scsi_Host
*shost
= class_to_shost(dev
);
3277 struct ipr_ioa_cfg
*ioa_cfg
= (struct ipr_ioa_cfg
*)shost
->hostdata
;
3278 unsigned long lock_flags
= 0;
3281 spin_lock_irqsave(ioa_cfg
->host
->host_lock
, lock_flags
);
3282 ret
= memory_read_from_buffer(buf
, count
, &off
, ioa_cfg
->trace
,
3284 spin_unlock_irqrestore(ioa_cfg
->host
->host_lock
, lock_flags
);
3289 static struct bin_attribute ipr_trace_attr
= {
3295 .read
= ipr_read_trace
,
3300 * ipr_show_fw_version - Show the firmware version
3301 * @dev: class device struct
3305 * number of bytes printed to buffer
3307 static ssize_t
ipr_show_fw_version(struct device
*dev
,
3308 struct device_attribute
*attr
, char *buf
)
3310 struct Scsi_Host
*shost
= class_to_shost(dev
);
3311 struct ipr_ioa_cfg
*ioa_cfg
= (struct ipr_ioa_cfg
*)shost
->hostdata
;
3312 struct ipr_inquiry_page3
*ucode_vpd
= &ioa_cfg
->vpd_cbs
->page3_data
;
3313 unsigned long lock_flags
= 0;
3316 spin_lock_irqsave(ioa_cfg
->host
->host_lock
, lock_flags
);
3317 len
= snprintf(buf
, PAGE_SIZE
, "%02X%02X%02X%02X\n",
3318 ucode_vpd
->major_release
, ucode_vpd
->card_type
,
3319 ucode_vpd
->minor_release
[0],
3320 ucode_vpd
->minor_release
[1]);
3321 spin_unlock_irqrestore(ioa_cfg
->host
->host_lock
, lock_flags
);
3325 static struct device_attribute ipr_fw_version_attr
= {
3327 .name
= "fw_version",
3330 .show
= ipr_show_fw_version
,
3334 * ipr_show_log_level - Show the adapter's error logging level
3335 * @dev: class device struct
3339 * number of bytes printed to buffer
3341 static ssize_t
ipr_show_log_level(struct device
*dev
,
3342 struct device_attribute
*attr
, char *buf
)
3344 struct Scsi_Host
*shost
= class_to_shost(dev
);
3345 struct ipr_ioa_cfg
*ioa_cfg
= (struct ipr_ioa_cfg
*)shost
->hostdata
;
3346 unsigned long lock_flags
= 0;
3349 spin_lock_irqsave(ioa_cfg
->host
->host_lock
, lock_flags
);
3350 len
= snprintf(buf
, PAGE_SIZE
, "%d\n", ioa_cfg
->log_level
);
3351 spin_unlock_irqrestore(ioa_cfg
->host
->host_lock
, lock_flags
);
3356 * ipr_store_log_level - Change the adapter's error logging level
3357 * @dev: class device struct
3361 * number of bytes printed to buffer
3363 static ssize_t
ipr_store_log_level(struct device
*dev
,
3364 struct device_attribute
*attr
,
3365 const char *buf
, size_t count
)
3367 struct Scsi_Host
*shost
= class_to_shost(dev
);
3368 struct ipr_ioa_cfg
*ioa_cfg
= (struct ipr_ioa_cfg
*)shost
->hostdata
;
3369 unsigned long lock_flags
= 0;
3371 spin_lock_irqsave(ioa_cfg
->host
->host_lock
, lock_flags
);
3372 ioa_cfg
->log_level
= simple_strtoul(buf
, NULL
, 10);
3373 spin_unlock_irqrestore(ioa_cfg
->host
->host_lock
, lock_flags
);
3377 static struct device_attribute ipr_log_level_attr
= {
3379 .name
= "log_level",
3380 .mode
= S_IRUGO
| S_IWUSR
,
3382 .show
= ipr_show_log_level
,
3383 .store
= ipr_store_log_level
3387 * ipr_store_diagnostics - IOA Diagnostics interface
3388 * @dev: device struct
3390 * @count: buffer size
3392 * This function will reset the adapter and wait a reasonable
3393 * amount of time for any errors that the adapter might log.
3396 * count on success / other on failure
3398 static ssize_t
ipr_store_diagnostics(struct device
*dev
,
3399 struct device_attribute
*attr
,
3400 const char *buf
, size_t count
)
3402 struct Scsi_Host
*shost
= class_to_shost(dev
);
3403 struct ipr_ioa_cfg
*ioa_cfg
= (struct ipr_ioa_cfg
*)shost
->hostdata
;
3404 unsigned long lock_flags
= 0;
3407 if (!capable(CAP_SYS_ADMIN
))
3410 spin_lock_irqsave(ioa_cfg
->host
->host_lock
, lock_flags
);
3411 while (ioa_cfg
->in_reset_reload
) {
3412 spin_unlock_irqrestore(ioa_cfg
->host
->host_lock
, lock_flags
);
3413 wait_event(ioa_cfg
->reset_wait_q
, !ioa_cfg
->in_reset_reload
);
3414 spin_lock_irqsave(ioa_cfg
->host
->host_lock
, lock_flags
);
3417 ioa_cfg
->errors_logged
= 0;
3418 ipr_initiate_ioa_reset(ioa_cfg
, IPR_SHUTDOWN_NORMAL
);
3420 if (ioa_cfg
->in_reset_reload
) {
3421 spin_unlock_irqrestore(ioa_cfg
->host
->host_lock
, lock_flags
);
3422 wait_event(ioa_cfg
->reset_wait_q
, !ioa_cfg
->in_reset_reload
);
3424 /* Wait for a second for any errors to be logged */
3427 spin_unlock_irqrestore(ioa_cfg
->host
->host_lock
, lock_flags
);
3431 spin_lock_irqsave(ioa_cfg
->host
->host_lock
, lock_flags
);
3432 if (ioa_cfg
->in_reset_reload
|| ioa_cfg
->errors_logged
)
3434 spin_unlock_irqrestore(ioa_cfg
->host
->host_lock
, lock_flags
);
3439 static struct device_attribute ipr_diagnostics_attr
= {
3441 .name
= "run_diagnostics",
3444 .store
= ipr_store_diagnostics
3448 * ipr_show_adapter_state - Show the adapter's state
3449 * @class_dev: device struct
3453 * number of bytes printed to buffer
3455 static ssize_t
ipr_show_adapter_state(struct device
*dev
,
3456 struct device_attribute
*attr
, char *buf
)
3458 struct Scsi_Host
*shost
= class_to_shost(dev
);
3459 struct ipr_ioa_cfg
*ioa_cfg
= (struct ipr_ioa_cfg
*)shost
->hostdata
;
3460 unsigned long lock_flags
= 0;
3463 spin_lock_irqsave(ioa_cfg
->host
->host_lock
, lock_flags
);
3464 if (ioa_cfg
->hrrq
[IPR_INIT_HRRQ
].ioa_is_dead
)
3465 len
= snprintf(buf
, PAGE_SIZE
, "offline\n");
3467 len
= snprintf(buf
, PAGE_SIZE
, "online\n");
3468 spin_unlock_irqrestore(ioa_cfg
->host
->host_lock
, lock_flags
);
3473 * ipr_store_adapter_state - Change adapter state
3474 * @dev: device struct
3476 * @count: buffer size
3478 * This function will change the adapter's state.
3481 * count on success / other on failure
3483 static ssize_t
ipr_store_adapter_state(struct device
*dev
,
3484 struct device_attribute
*attr
,
3485 const char *buf
, size_t count
)
3487 struct Scsi_Host
*shost
= class_to_shost(dev
);
3488 struct ipr_ioa_cfg
*ioa_cfg
= (struct ipr_ioa_cfg
*)shost
->hostdata
;
3489 unsigned long lock_flags
;
3490 int result
= count
, i
;
3492 if (!capable(CAP_SYS_ADMIN
))
3495 spin_lock_irqsave(ioa_cfg
->host
->host_lock
, lock_flags
);
3496 if (ioa_cfg
->hrrq
[IPR_INIT_HRRQ
].ioa_is_dead
&&
3497 !strncmp(buf
, "online", 6)) {
3498 for (i
= 0; i
< ioa_cfg
->hrrq_num
; i
++) {
3499 spin_lock(&ioa_cfg
->hrrq
[i
]._lock
);
3500 ioa_cfg
->hrrq
[i
].ioa_is_dead
= 0;
3501 spin_unlock(&ioa_cfg
->hrrq
[i
]._lock
);
3504 ioa_cfg
->reset_retries
= 0;
3505 ioa_cfg
->in_ioa_bringdown
= 0;
3506 ipr_initiate_ioa_reset(ioa_cfg
, IPR_SHUTDOWN_NONE
);
3508 spin_unlock_irqrestore(ioa_cfg
->host
->host_lock
, lock_flags
);
3509 wait_event(ioa_cfg
->reset_wait_q
, !ioa_cfg
->in_reset_reload
);
3514 static struct device_attribute ipr_ioa_state_attr
= {
3516 .name
= "online_state",
3517 .mode
= S_IRUGO
| S_IWUSR
,
3519 .show
= ipr_show_adapter_state
,
3520 .store
= ipr_store_adapter_state
3524 * ipr_store_reset_adapter - Reset the adapter
3525 * @dev: device struct
3527 * @count: buffer size
3529 * This function will reset the adapter.
3532 * count on success / other on failure
3534 static ssize_t
ipr_store_reset_adapter(struct device
*dev
,
3535 struct device_attribute
*attr
,
3536 const char *buf
, size_t count
)
3538 struct Scsi_Host
*shost
= class_to_shost(dev
);
3539 struct ipr_ioa_cfg
*ioa_cfg
= (struct ipr_ioa_cfg
*)shost
->hostdata
;
3540 unsigned long lock_flags
;
3543 if (!capable(CAP_SYS_ADMIN
))
3546 spin_lock_irqsave(ioa_cfg
->host
->host_lock
, lock_flags
);
3547 if (!ioa_cfg
->in_reset_reload
)
3548 ipr_initiate_ioa_reset(ioa_cfg
, IPR_SHUTDOWN_NORMAL
);
3549 spin_unlock_irqrestore(ioa_cfg
->host
->host_lock
, lock_flags
);
3550 wait_event(ioa_cfg
->reset_wait_q
, !ioa_cfg
->in_reset_reload
);
3555 static struct device_attribute ipr_ioa_reset_attr
= {
3557 .name
= "reset_host",
3560 .store
= ipr_store_reset_adapter
3564 * ipr_alloc_ucode_buffer - Allocates a microcode download buffer
3565 * @buf_len: buffer length
3567 * Allocates a DMA'able buffer in chunks and assembles a scatter/gather
3568 * list to use for microcode download
3571 * pointer to sglist / NULL on failure
3573 static struct ipr_sglist
*ipr_alloc_ucode_buffer(int buf_len
)
3575 int sg_size
, order
, bsize_elem
, num_elem
, i
, j
;
3576 struct ipr_sglist
*sglist
;
3577 struct scatterlist
*scatterlist
;
3580 /* Get the minimum size per scatter/gather element */
3581 sg_size
= buf_len
/ (IPR_MAX_SGLIST
- 1);
3583 /* Get the actual size per element */
3584 order
= get_order(sg_size
);
3586 /* Determine the actual number of bytes per element */
3587 bsize_elem
= PAGE_SIZE
* (1 << order
);
3589 /* Determine the actual number of sg entries needed */
3590 if (buf_len
% bsize_elem
)
3591 num_elem
= (buf_len
/ bsize_elem
) + 1;
3593 num_elem
= buf_len
/ bsize_elem
;
3595 /* Allocate a scatter/gather list for the DMA */
3596 sglist
= kzalloc(sizeof(struct ipr_sglist
) +
3597 (sizeof(struct scatterlist
) * (num_elem
- 1)),
3600 if (sglist
== NULL
) {
3605 scatterlist
= sglist
->scatterlist
;
3606 sg_init_table(scatterlist
, num_elem
);
3608 sglist
->order
= order
;
3609 sglist
->num_sg
= num_elem
;
3611 /* Allocate a bunch of sg elements */
3612 for (i
= 0; i
< num_elem
; i
++) {
3613 page
= alloc_pages(GFP_KERNEL
, order
);
3617 /* Free up what we already allocated */
3618 for (j
= i
- 1; j
>= 0; j
--)
3619 __free_pages(sg_page(&scatterlist
[j
]), order
);
3624 sg_set_page(&scatterlist
[i
], page
, 0, 0);
3631 * ipr_free_ucode_buffer - Frees a microcode download buffer
3632 * @p_dnld: scatter/gather list pointer
3634 * Free a DMA'able ucode download buffer previously allocated with
3635 * ipr_alloc_ucode_buffer
3640 static void ipr_free_ucode_buffer(struct ipr_sglist
*sglist
)
3644 for (i
= 0; i
< sglist
->num_sg
; i
++)
3645 __free_pages(sg_page(&sglist
->scatterlist
[i
]), sglist
->order
);
3651 * ipr_copy_ucode_buffer - Copy user buffer to kernel buffer
3652 * @sglist: scatter/gather list pointer
3653 * @buffer: buffer pointer
3654 * @len: buffer length
3656 * Copy a microcode image from a user buffer into a buffer allocated by
3657 * ipr_alloc_ucode_buffer
3660 * 0 on success / other on failure
3662 static int ipr_copy_ucode_buffer(struct ipr_sglist
*sglist
,
3663 u8
*buffer
, u32 len
)
3665 int bsize_elem
, i
, result
= 0;
3666 struct scatterlist
*scatterlist
;
3669 /* Determine the actual number of bytes per element */
3670 bsize_elem
= PAGE_SIZE
* (1 << sglist
->order
);
3672 scatterlist
= sglist
->scatterlist
;
3674 for (i
= 0; i
< (len
/ bsize_elem
); i
++, buffer
+= bsize_elem
) {
3675 struct page
*page
= sg_page(&scatterlist
[i
]);
3678 memcpy(kaddr
, buffer
, bsize_elem
);
3681 scatterlist
[i
].length
= bsize_elem
;
3689 if (len
% bsize_elem
) {
3690 struct page
*page
= sg_page(&scatterlist
[i
]);
3693 memcpy(kaddr
, buffer
, len
% bsize_elem
);
3696 scatterlist
[i
].length
= len
% bsize_elem
;
3699 sglist
->buffer_len
= len
;
3704 * ipr_build_ucode_ioadl64 - Build a microcode download IOADL
3705 * @ipr_cmd: ipr command struct
3706 * @sglist: scatter/gather list
3708 * Builds a microcode download IOA data list (IOADL).
3711 static void ipr_build_ucode_ioadl64(struct ipr_cmnd
*ipr_cmd
,
3712 struct ipr_sglist
*sglist
)
3714 struct ipr_ioarcb
*ioarcb
= &ipr_cmd
->ioarcb
;
3715 struct ipr_ioadl64_desc
*ioadl64
= ipr_cmd
->i
.ioadl64
;
3716 struct scatterlist
*scatterlist
= sglist
->scatterlist
;
3719 ipr_cmd
->dma_use_sg
= sglist
->num_dma_sg
;
3720 ioarcb
->cmd_pkt
.flags_hi
|= IPR_FLAGS_HI_WRITE_NOT_READ
;
3721 ioarcb
->data_transfer_length
= cpu_to_be32(sglist
->buffer_len
);
3724 cpu_to_be32(sizeof(struct ipr_ioadl64_desc
) * ipr_cmd
->dma_use_sg
);
3725 for (i
= 0; i
< ipr_cmd
->dma_use_sg
; i
++) {
3726 ioadl64
[i
].flags
= cpu_to_be32(IPR_IOADL_FLAGS_WRITE
);
3727 ioadl64
[i
].data_len
= cpu_to_be32(sg_dma_len(&scatterlist
[i
]));
3728 ioadl64
[i
].address
= cpu_to_be64(sg_dma_address(&scatterlist
[i
]));
3731 ioadl64
[i
-1].flags
|= cpu_to_be32(IPR_IOADL_FLAGS_LAST
);
3735 * ipr_build_ucode_ioadl - Build a microcode download IOADL
3736 * @ipr_cmd: ipr command struct
3737 * @sglist: scatter/gather list
3739 * Builds a microcode download IOA data list (IOADL).
3742 static void ipr_build_ucode_ioadl(struct ipr_cmnd
*ipr_cmd
,
3743 struct ipr_sglist
*sglist
)
3745 struct ipr_ioarcb
*ioarcb
= &ipr_cmd
->ioarcb
;
3746 struct ipr_ioadl_desc
*ioadl
= ipr_cmd
->i
.ioadl
;
3747 struct scatterlist
*scatterlist
= sglist
->scatterlist
;
3750 ipr_cmd
->dma_use_sg
= sglist
->num_dma_sg
;
3751 ioarcb
->cmd_pkt
.flags_hi
|= IPR_FLAGS_HI_WRITE_NOT_READ
;
3752 ioarcb
->data_transfer_length
= cpu_to_be32(sglist
->buffer_len
);
3755 cpu_to_be32(sizeof(struct ipr_ioadl_desc
) * ipr_cmd
->dma_use_sg
);
3757 for (i
= 0; i
< ipr_cmd
->dma_use_sg
; i
++) {
3758 ioadl
[i
].flags_and_data_len
=
3759 cpu_to_be32(IPR_IOADL_FLAGS_WRITE
| sg_dma_len(&scatterlist
[i
]));
3761 cpu_to_be32(sg_dma_address(&scatterlist
[i
]));
3764 ioadl
[i
-1].flags_and_data_len
|=
3765 cpu_to_be32(IPR_IOADL_FLAGS_LAST
);
3769 * ipr_update_ioa_ucode - Update IOA's microcode
3770 * @ioa_cfg: ioa config struct
3771 * @sglist: scatter/gather list
3773 * Initiate an adapter reset to update the IOA's microcode
3776 * 0 on success / -EIO on failure
3778 static int ipr_update_ioa_ucode(struct ipr_ioa_cfg
*ioa_cfg
,
3779 struct ipr_sglist
*sglist
)
3781 unsigned long lock_flags
;
3783 spin_lock_irqsave(ioa_cfg
->host
->host_lock
, lock_flags
);
3784 while (ioa_cfg
->in_reset_reload
) {
3785 spin_unlock_irqrestore(ioa_cfg
->host
->host_lock
, lock_flags
);
3786 wait_event(ioa_cfg
->reset_wait_q
, !ioa_cfg
->in_reset_reload
);
3787 spin_lock_irqsave(ioa_cfg
->host
->host_lock
, lock_flags
);
3790 if (ioa_cfg
->ucode_sglist
) {
3791 spin_unlock_irqrestore(ioa_cfg
->host
->host_lock
, lock_flags
);
3792 dev_err(&ioa_cfg
->pdev
->dev
,
3793 "Microcode download already in progress\n");
3797 sglist
->num_dma_sg
= pci_map_sg(ioa_cfg
->pdev
, sglist
->scatterlist
,
3798 sglist
->num_sg
, DMA_TO_DEVICE
);
3800 if (!sglist
->num_dma_sg
) {
3801 spin_unlock_irqrestore(ioa_cfg
->host
->host_lock
, lock_flags
);
3802 dev_err(&ioa_cfg
->pdev
->dev
,
3803 "Failed to map microcode download buffer!\n");
3807 ioa_cfg
->ucode_sglist
= sglist
;
3808 ipr_initiate_ioa_reset(ioa_cfg
, IPR_SHUTDOWN_NORMAL
);
3809 spin_unlock_irqrestore(ioa_cfg
->host
->host_lock
, lock_flags
);
3810 wait_event(ioa_cfg
->reset_wait_q
, !ioa_cfg
->in_reset_reload
);
3812 spin_lock_irqsave(ioa_cfg
->host
->host_lock
, lock_flags
);
3813 ioa_cfg
->ucode_sglist
= NULL
;
3814 spin_unlock_irqrestore(ioa_cfg
->host
->host_lock
, lock_flags
);
3819 * ipr_store_update_fw - Update the firmware on the adapter
3820 * @class_dev: device struct
3822 * @count: buffer size
3824 * This function will update the firmware on the adapter.
3827 * count on success / other on failure
3829 static ssize_t
ipr_store_update_fw(struct device
*dev
,
3830 struct device_attribute
*attr
,
3831 const char *buf
, size_t count
)
3833 struct Scsi_Host
*shost
= class_to_shost(dev
);
3834 struct ipr_ioa_cfg
*ioa_cfg
= (struct ipr_ioa_cfg
*)shost
->hostdata
;
3835 struct ipr_ucode_image_header
*image_hdr
;
3836 const struct firmware
*fw_entry
;
3837 struct ipr_sglist
*sglist
;
3840 int len
, result
, dnld_size
;
3842 if (!capable(CAP_SYS_ADMIN
))
3845 len
= snprintf(fname
, 99, "%s", buf
);
3846 fname
[len
-1] = '\0';
3848 if (request_firmware(&fw_entry
, fname
, &ioa_cfg
->pdev
->dev
)) {
3849 dev_err(&ioa_cfg
->pdev
->dev
, "Firmware file %s not found\n", fname
);
3853 image_hdr
= (struct ipr_ucode_image_header
*)fw_entry
->data
;
3855 src
= (u8
*)image_hdr
+ be32_to_cpu(image_hdr
->header_length
);
3856 dnld_size
= fw_entry
->size
- be32_to_cpu(image_hdr
->header_length
);
3857 sglist
= ipr_alloc_ucode_buffer(dnld_size
);
3860 dev_err(&ioa_cfg
->pdev
->dev
, "Microcode buffer allocation failed\n");
3861 release_firmware(fw_entry
);
3865 result
= ipr_copy_ucode_buffer(sglist
, src
, dnld_size
);
3868 dev_err(&ioa_cfg
->pdev
->dev
,
3869 "Microcode buffer copy to DMA buffer failed\n");
3873 ipr_info("Updating microcode, please be patient. This may take up to 30 minutes.\n");
3875 result
= ipr_update_ioa_ucode(ioa_cfg
, sglist
);
3880 ipr_free_ucode_buffer(sglist
);
3881 release_firmware(fw_entry
);
3885 static struct device_attribute ipr_update_fw_attr
= {
3887 .name
= "update_fw",
3890 .store
= ipr_store_update_fw
3894 * ipr_show_fw_type - Show the adapter's firmware type.
3895 * @dev: class device struct
3899 * number of bytes printed to buffer
3901 static ssize_t
ipr_show_fw_type(struct device
*dev
,
3902 struct device_attribute
*attr
, char *buf
)
3904 struct Scsi_Host
*shost
= class_to_shost(dev
);
3905 struct ipr_ioa_cfg
*ioa_cfg
= (struct ipr_ioa_cfg
*)shost
->hostdata
;
3906 unsigned long lock_flags
= 0;
3909 spin_lock_irqsave(ioa_cfg
->host
->host_lock
, lock_flags
);
3910 len
= snprintf(buf
, PAGE_SIZE
, "%d\n", ioa_cfg
->sis64
);
3911 spin_unlock_irqrestore(ioa_cfg
->host
->host_lock
, lock_flags
);
3915 static struct device_attribute ipr_ioa_fw_type_attr
= {
3920 .show
= ipr_show_fw_type
3923 static struct device_attribute
*ipr_ioa_attrs
[] = {
3924 &ipr_fw_version_attr
,
3925 &ipr_log_level_attr
,
3926 &ipr_diagnostics_attr
,
3927 &ipr_ioa_state_attr
,
3928 &ipr_ioa_reset_attr
,
3929 &ipr_update_fw_attr
,
3930 &ipr_ioa_fw_type_attr
,
3934 #ifdef CONFIG_SCSI_IPR_DUMP
3936 * ipr_read_dump - Dump the adapter
3937 * @filp: open sysfs file
3938 * @kobj: kobject struct
3939 * @bin_attr: bin_attribute struct
3942 * @count: buffer size
3945 * number of bytes printed to buffer
3947 static ssize_t
ipr_read_dump(struct file
*filp
, struct kobject
*kobj
,
3948 struct bin_attribute
*bin_attr
,
3949 char *buf
, loff_t off
, size_t count
)
3951 struct device
*cdev
= container_of(kobj
, struct device
, kobj
);
3952 struct Scsi_Host
*shost
= class_to_shost(cdev
);
3953 struct ipr_ioa_cfg
*ioa_cfg
= (struct ipr_ioa_cfg
*)shost
->hostdata
;
3954 struct ipr_dump
*dump
;
3955 unsigned long lock_flags
= 0;
3960 if (!capable(CAP_SYS_ADMIN
))
3963 spin_lock_irqsave(ioa_cfg
->host
->host_lock
, lock_flags
);
3964 dump
= ioa_cfg
->dump
;
3966 if (ioa_cfg
->sdt_state
!= DUMP_OBTAINED
|| !dump
) {
3967 spin_unlock_irqrestore(ioa_cfg
->host
->host_lock
, lock_flags
);
3970 kref_get(&dump
->kref
);
3971 spin_unlock_irqrestore(ioa_cfg
->host
->host_lock
, lock_flags
);
3973 if (off
> dump
->driver_dump
.hdr
.len
) {
3974 kref_put(&dump
->kref
, ipr_release_dump
);
3978 if (off
+ count
> dump
->driver_dump
.hdr
.len
) {
3979 count
= dump
->driver_dump
.hdr
.len
- off
;
3983 if (count
&& off
< sizeof(dump
->driver_dump
)) {
3984 if (off
+ count
> sizeof(dump
->driver_dump
))
3985 len
= sizeof(dump
->driver_dump
) - off
;
3988 src
= (u8
*)&dump
->driver_dump
+ off
;
3989 memcpy(buf
, src
, len
);
3995 off
-= sizeof(dump
->driver_dump
);
3998 sdt_end
= offsetof(struct ipr_ioa_dump
, sdt
.entry
) +
3999 (be32_to_cpu(dump
->ioa_dump
.sdt
.hdr
.num_entries_used
) *
4000 sizeof(struct ipr_sdt_entry
));
4002 sdt_end
= offsetof(struct ipr_ioa_dump
, sdt
.entry
) +
4003 (IPR_FMT2_NUM_SDT_ENTRIES
* sizeof(struct ipr_sdt_entry
));
4005 if (count
&& off
< sdt_end
) {
4006 if (off
+ count
> sdt_end
)
4007 len
= sdt_end
- off
;
4010 src
= (u8
*)&dump
->ioa_dump
+ off
;
4011 memcpy(buf
, src
, len
);
4020 if ((off
& PAGE_MASK
) != ((off
+ count
) & PAGE_MASK
))
4021 len
= PAGE_ALIGN(off
) - off
;
4024 src
= (u8
*)dump
->ioa_dump
.ioa_data
[(off
& PAGE_MASK
) >> PAGE_SHIFT
];
4025 src
+= off
& ~PAGE_MASK
;
4026 memcpy(buf
, src
, len
);
4032 kref_put(&dump
->kref
, ipr_release_dump
);
4037 * ipr_alloc_dump - Prepare for adapter dump
4038 * @ioa_cfg: ioa config struct
4041 * 0 on success / other on failure
4043 static int ipr_alloc_dump(struct ipr_ioa_cfg
*ioa_cfg
)
4045 struct ipr_dump
*dump
;
4047 unsigned long lock_flags
= 0;
4049 dump
= kzalloc(sizeof(struct ipr_dump
), GFP_KERNEL
);
4052 ipr_err("Dump memory allocation failed\n");
4057 ioa_data
= vmalloc(IPR_FMT3_MAX_NUM_DUMP_PAGES
* sizeof(__be32
*));
4059 ioa_data
= vmalloc(IPR_FMT2_MAX_NUM_DUMP_PAGES
* sizeof(__be32
*));
4062 ipr_err("Dump memory allocation failed\n");
4067 dump
->ioa_dump
.ioa_data
= ioa_data
;
4069 kref_init(&dump
->kref
);
4070 dump
->ioa_cfg
= ioa_cfg
;
4072 spin_lock_irqsave(ioa_cfg
->host
->host_lock
, lock_flags
);
4074 if (INACTIVE
!= ioa_cfg
->sdt_state
) {
4075 spin_unlock_irqrestore(ioa_cfg
->host
->host_lock
, lock_flags
);
4076 vfree(dump
->ioa_dump
.ioa_data
);
4081 ioa_cfg
->dump
= dump
;
4082 ioa_cfg
->sdt_state
= WAIT_FOR_DUMP
;
4083 if (ioa_cfg
->hrrq
[IPR_INIT_HRRQ
].ioa_is_dead
&& !ioa_cfg
->dump_taken
) {
4084 ioa_cfg
->dump_taken
= 1;
4085 schedule_work(&ioa_cfg
->work_q
);
4087 spin_unlock_irqrestore(ioa_cfg
->host
->host_lock
, lock_flags
);
4093 * ipr_free_dump - Free adapter dump memory
4094 * @ioa_cfg: ioa config struct
4097 * 0 on success / other on failure
4099 static int ipr_free_dump(struct ipr_ioa_cfg
*ioa_cfg
)
4101 struct ipr_dump
*dump
;
4102 unsigned long lock_flags
= 0;
4106 spin_lock_irqsave(ioa_cfg
->host
->host_lock
, lock_flags
);
4107 dump
= ioa_cfg
->dump
;
4109 spin_unlock_irqrestore(ioa_cfg
->host
->host_lock
, lock_flags
);
4113 ioa_cfg
->dump
= NULL
;
4114 spin_unlock_irqrestore(ioa_cfg
->host
->host_lock
, lock_flags
);
4116 kref_put(&dump
->kref
, ipr_release_dump
);
4123 * ipr_write_dump - Setup dump state of adapter
4124 * @filp: open sysfs file
4125 * @kobj: kobject struct
4126 * @bin_attr: bin_attribute struct
4129 * @count: buffer size
4132 * number of bytes printed to buffer
4134 static ssize_t
ipr_write_dump(struct file
*filp
, struct kobject
*kobj
,
4135 struct bin_attribute
*bin_attr
,
4136 char *buf
, loff_t off
, size_t count
)
4138 struct device
*cdev
= container_of(kobj
, struct device
, kobj
);
4139 struct Scsi_Host
*shost
= class_to_shost(cdev
);
4140 struct ipr_ioa_cfg
*ioa_cfg
= (struct ipr_ioa_cfg
*)shost
->hostdata
;
4143 if (!capable(CAP_SYS_ADMIN
))
4147 rc
= ipr_alloc_dump(ioa_cfg
);
4148 else if (buf
[0] == '0')
4149 rc
= ipr_free_dump(ioa_cfg
);
4159 static struct bin_attribute ipr_dump_attr
= {
4162 .mode
= S_IRUSR
| S_IWUSR
,
4165 .read
= ipr_read_dump
,
4166 .write
= ipr_write_dump
/* CONFIG_SCSI_IPR_DUMP disabled: dump support compiles out to a no-op.
 * Note: dropped the stray trailing ';' after the function body, which
 * was an extraneous empty declaration (rejected by -Wpedantic). */
static int ipr_free_dump(struct ipr_ioa_cfg *ioa_cfg) { return 0; }
4173 * ipr_change_queue_depth - Change the device's queue depth
4174 * @sdev: scsi device struct
4175 * @qdepth: depth to set
4176 * @reason: calling context
4181 static int ipr_change_queue_depth(struct scsi_device
*sdev
, int qdepth
,
4184 struct ipr_ioa_cfg
*ioa_cfg
= (struct ipr_ioa_cfg
*)sdev
->host
->hostdata
;
4185 struct ipr_resource_entry
*res
;
4186 unsigned long lock_flags
= 0;
4188 if (reason
!= SCSI_QDEPTH_DEFAULT
)
4191 spin_lock_irqsave(ioa_cfg
->host
->host_lock
, lock_flags
);
4192 res
= (struct ipr_resource_entry
*)sdev
->hostdata
;
4194 if (res
&& ipr_is_gata(res
) && qdepth
> IPR_MAX_CMD_PER_ATA_LUN
)
4195 qdepth
= IPR_MAX_CMD_PER_ATA_LUN
;
4196 spin_unlock_irqrestore(ioa_cfg
->host
->host_lock
, lock_flags
);
4198 scsi_adjust_queue_depth(sdev
, scsi_get_tag_type(sdev
), qdepth
);
4199 return sdev
->queue_depth
;
4203 * ipr_change_queue_type - Change the device's queue type
4204 * @dsev: scsi device struct
4205 * @tag_type: type of tags to use
4208 * actual queue type set
4210 static int ipr_change_queue_type(struct scsi_device
*sdev
, int tag_type
)
4212 struct ipr_ioa_cfg
*ioa_cfg
= (struct ipr_ioa_cfg
*)sdev
->host
->hostdata
;
4213 struct ipr_resource_entry
*res
;
4214 unsigned long lock_flags
= 0;
4216 spin_lock_irqsave(ioa_cfg
->host
->host_lock
, lock_flags
);
4217 res
= (struct ipr_resource_entry
*)sdev
->hostdata
;
4220 if (ipr_is_gscsi(res
) && sdev
->tagged_supported
) {
4222 * We don't bother quiescing the device here since the
4223 * adapter firmware does it for us.
4225 scsi_set_tag_type(sdev
, tag_type
);
4228 scsi_activate_tcq(sdev
, sdev
->queue_depth
);
4230 scsi_deactivate_tcq(sdev
, sdev
->queue_depth
);
4236 spin_unlock_irqrestore(ioa_cfg
->host
->host_lock
, lock_flags
);
4241 * ipr_show_adapter_handle - Show the adapter's resource handle for this device
4242 * @dev: device struct
4243 * @attr: device attribute structure
4247 * number of bytes printed to buffer
4249 static ssize_t
ipr_show_adapter_handle(struct device
*dev
, struct device_attribute
*attr
, char *buf
)
4251 struct scsi_device
*sdev
= to_scsi_device(dev
);
4252 struct ipr_ioa_cfg
*ioa_cfg
= (struct ipr_ioa_cfg
*)sdev
->host
->hostdata
;
4253 struct ipr_resource_entry
*res
;
4254 unsigned long lock_flags
= 0;
4255 ssize_t len
= -ENXIO
;
4257 spin_lock_irqsave(ioa_cfg
->host
->host_lock
, lock_flags
);
4258 res
= (struct ipr_resource_entry
*)sdev
->hostdata
;
4260 len
= snprintf(buf
, PAGE_SIZE
, "%08X\n", res
->res_handle
);
4261 spin_unlock_irqrestore(ioa_cfg
->host
->host_lock
, lock_flags
);
4265 static struct device_attribute ipr_adapter_handle_attr
= {
4267 .name
= "adapter_handle",
4270 .show
= ipr_show_adapter_handle
4274 * ipr_show_resource_path - Show the resource path or the resource address for
4276 * @dev: device struct
4277 * @attr: device attribute structure
4281 * number of bytes printed to buffer
4283 static ssize_t
ipr_show_resource_path(struct device
*dev
, struct device_attribute
*attr
, char *buf
)
4285 struct scsi_device
*sdev
= to_scsi_device(dev
);
4286 struct ipr_ioa_cfg
*ioa_cfg
= (struct ipr_ioa_cfg
*)sdev
->host
->hostdata
;
4287 struct ipr_resource_entry
*res
;
4288 unsigned long lock_flags
= 0;
4289 ssize_t len
= -ENXIO
;
4290 char buffer
[IPR_MAX_RES_PATH_LENGTH
];
4292 spin_lock_irqsave(ioa_cfg
->host
->host_lock
, lock_flags
);
4293 res
= (struct ipr_resource_entry
*)sdev
->hostdata
;
4294 if (res
&& ioa_cfg
->sis64
)
4295 len
= snprintf(buf
, PAGE_SIZE
, "%s\n",
4296 __ipr_format_res_path(res
->res_path
, buffer
,
4299 len
= snprintf(buf
, PAGE_SIZE
, "%d:%d:%d:%d\n", ioa_cfg
->host
->host_no
,
4300 res
->bus
, res
->target
, res
->lun
);
4302 spin_unlock_irqrestore(ioa_cfg
->host
->host_lock
, lock_flags
);
4306 static struct device_attribute ipr_resource_path_attr
= {
4308 .name
= "resource_path",
4311 .show
= ipr_show_resource_path
4315 * ipr_show_device_id - Show the device_id for this device.
4316 * @dev: device struct
4317 * @attr: device attribute structure
4321 * number of bytes printed to buffer
4323 static ssize_t
ipr_show_device_id(struct device
*dev
, struct device_attribute
*attr
, char *buf
)
4325 struct scsi_device
*sdev
= to_scsi_device(dev
);
4326 struct ipr_ioa_cfg
*ioa_cfg
= (struct ipr_ioa_cfg
*)sdev
->host
->hostdata
;
4327 struct ipr_resource_entry
*res
;
4328 unsigned long lock_flags
= 0;
4329 ssize_t len
= -ENXIO
;
4331 spin_lock_irqsave(ioa_cfg
->host
->host_lock
, lock_flags
);
4332 res
= (struct ipr_resource_entry
*)sdev
->hostdata
;
4333 if (res
&& ioa_cfg
->sis64
)
4334 len
= snprintf(buf
, PAGE_SIZE
, "0x%llx\n", res
->dev_id
);
4336 len
= snprintf(buf
, PAGE_SIZE
, "0x%llx\n", res
->lun_wwn
);
4338 spin_unlock_irqrestore(ioa_cfg
->host
->host_lock
, lock_flags
);
4342 static struct device_attribute ipr_device_id_attr
= {
4344 .name
= "device_id",
4347 .show
= ipr_show_device_id
4351 * ipr_show_resource_type - Show the resource type for this device.
4352 * @dev: device struct
4353 * @attr: device attribute structure
4357 * number of bytes printed to buffer
4359 static ssize_t
ipr_show_resource_type(struct device
*dev
, struct device_attribute
*attr
, char *buf
)
4361 struct scsi_device
*sdev
= to_scsi_device(dev
);
4362 struct ipr_ioa_cfg
*ioa_cfg
= (struct ipr_ioa_cfg
*)sdev
->host
->hostdata
;
4363 struct ipr_resource_entry
*res
;
4364 unsigned long lock_flags
= 0;
4365 ssize_t len
= -ENXIO
;
4367 spin_lock_irqsave(ioa_cfg
->host
->host_lock
, lock_flags
);
4368 res
= (struct ipr_resource_entry
*)sdev
->hostdata
;
4371 len
= snprintf(buf
, PAGE_SIZE
, "%x\n", res
->type
);
4373 spin_unlock_irqrestore(ioa_cfg
->host
->host_lock
, lock_flags
);
4377 static struct device_attribute ipr_resource_type_attr
= {
4379 .name
= "resource_type",
4382 .show
= ipr_show_resource_type
4385 static struct device_attribute
*ipr_dev_attrs
[] = {
4386 &ipr_adapter_handle_attr
,
4387 &ipr_resource_path_attr
,
4388 &ipr_device_id_attr
,
4389 &ipr_resource_type_attr
,
4394 * ipr_biosparam - Return the HSC mapping
4395 * @sdev: scsi device struct
4396 * @block_device: block device pointer
4397 * @capacity: capacity of the device
4398 * @parm: Array containing returned HSC values.
4400 * This function generates the HSC parms that fdisk uses.
4401 * We want to make sure we return something that places partitions
4402 * on 4k boundaries for best performance with the IOA.
4407 static int ipr_biosparam(struct scsi_device
*sdev
,
4408 struct block_device
*block_device
,
4409 sector_t capacity
, int *parm
)
4417 cylinders
= capacity
;
4418 sector_div(cylinders
, (128 * 32));
4423 parm
[2] = cylinders
;
4429 * ipr_find_starget - Find target based on bus/target.
4430 * @starget: scsi target struct
4433 * resource entry pointer if found / NULL if not found
4435 static struct ipr_resource_entry
*ipr_find_starget(struct scsi_target
*starget
)
4437 struct Scsi_Host
*shost
= dev_to_shost(&starget
->dev
);
4438 struct ipr_ioa_cfg
*ioa_cfg
= (struct ipr_ioa_cfg
*) shost
->hostdata
;
4439 struct ipr_resource_entry
*res
;
4441 list_for_each_entry(res
, &ioa_cfg
->used_res_q
, queue
) {
4442 if ((res
->bus
== starget
->channel
) &&
4443 (res
->target
== starget
->id
)) {
4451 static struct ata_port_info sata_port_info
;
4454 * ipr_target_alloc - Prepare for commands to a SCSI target
4455 * @starget: scsi target struct
4457 * If the device is a SATA device, this function allocates an
4458 * ATA port with libata, else it does nothing.
4461 * 0 on success / non-0 on failure
4463 static int ipr_target_alloc(struct scsi_target
*starget
)
4465 struct Scsi_Host
*shost
= dev_to_shost(&starget
->dev
);
4466 struct ipr_ioa_cfg
*ioa_cfg
= (struct ipr_ioa_cfg
*) shost
->hostdata
;
4467 struct ipr_sata_port
*sata_port
;
4468 struct ata_port
*ap
;
4469 struct ipr_resource_entry
*res
;
4470 unsigned long lock_flags
;
4472 spin_lock_irqsave(ioa_cfg
->host
->host_lock
, lock_flags
);
4473 res
= ipr_find_starget(starget
);
4474 starget
->hostdata
= NULL
;
4476 if (res
&& ipr_is_gata(res
)) {
4477 spin_unlock_irqrestore(ioa_cfg
->host
->host_lock
, lock_flags
);
4478 sata_port
= kzalloc(sizeof(*sata_port
), GFP_KERNEL
);
4482 ap
= ata_sas_port_alloc(&ioa_cfg
->ata_host
, &sata_port_info
, shost
);
4484 spin_lock_irqsave(ioa_cfg
->host
->host_lock
, lock_flags
);
4485 sata_port
->ioa_cfg
= ioa_cfg
;
4487 sata_port
->res
= res
;
4489 res
->sata_port
= sata_port
;
4490 ap
->private_data
= sata_port
;
4491 starget
->hostdata
= sata_port
;
4497 spin_unlock_irqrestore(ioa_cfg
->host
->host_lock
, lock_flags
);
4503 * ipr_target_destroy - Destroy a SCSI target
4504 * @starget: scsi target struct
4506 * If the device was a SATA device, this function frees the libata
4507 * ATA port, else it does nothing.
4510 static void ipr_target_destroy(struct scsi_target
*starget
)
4512 struct ipr_sata_port
*sata_port
= starget
->hostdata
;
4513 struct Scsi_Host
*shost
= dev_to_shost(&starget
->dev
);
4514 struct ipr_ioa_cfg
*ioa_cfg
= (struct ipr_ioa_cfg
*) shost
->hostdata
;
4516 if (ioa_cfg
->sis64
) {
4517 if (!ipr_find_starget(starget
)) {
4518 if (starget
->channel
== IPR_ARRAY_VIRTUAL_BUS
)
4519 clear_bit(starget
->id
, ioa_cfg
->array_ids
);
4520 else if (starget
->channel
== IPR_VSET_VIRTUAL_BUS
)
4521 clear_bit(starget
->id
, ioa_cfg
->vset_ids
);
4522 else if (starget
->channel
== 0)
4523 clear_bit(starget
->id
, ioa_cfg
->target_ids
);
4528 starget
->hostdata
= NULL
;
4529 ata_sas_port_destroy(sata_port
->ap
);
4535 * ipr_find_sdev - Find device based on bus/target/lun.
4536 * @sdev: scsi device struct
4539 * resource entry pointer if found / NULL if not found
4541 static struct ipr_resource_entry
*ipr_find_sdev(struct scsi_device
*sdev
)
4543 struct ipr_ioa_cfg
*ioa_cfg
= (struct ipr_ioa_cfg
*) sdev
->host
->hostdata
;
4544 struct ipr_resource_entry
*res
;
4546 list_for_each_entry(res
, &ioa_cfg
->used_res_q
, queue
) {
4547 if ((res
->bus
== sdev
->channel
) &&
4548 (res
->target
== sdev
->id
) &&
4549 (res
->lun
== sdev
->lun
))
4557 * ipr_slave_destroy - Unconfigure a SCSI device
4558 * @sdev: scsi device struct
4563 static void ipr_slave_destroy(struct scsi_device
*sdev
)
4565 struct ipr_resource_entry
*res
;
4566 struct ipr_ioa_cfg
*ioa_cfg
;
4567 unsigned long lock_flags
= 0;
4569 ioa_cfg
= (struct ipr_ioa_cfg
*) sdev
->host
->hostdata
;
4571 spin_lock_irqsave(ioa_cfg
->host
->host_lock
, lock_flags
);
4572 res
= (struct ipr_resource_entry
*) sdev
->hostdata
;
4575 res
->sata_port
->ap
->link
.device
[0].class = ATA_DEV_NONE
;
4576 sdev
->hostdata
= NULL
;
4578 res
->sata_port
= NULL
;
4580 spin_unlock_irqrestore(ioa_cfg
->host
->host_lock
, lock_flags
);
4584 * ipr_slave_configure - Configure a SCSI device
4585 * @sdev: scsi device struct
4587 * This function configures the specified scsi device.
4592 static int ipr_slave_configure(struct scsi_device
*sdev
)
4594 struct ipr_ioa_cfg
*ioa_cfg
= (struct ipr_ioa_cfg
*) sdev
->host
->hostdata
;
4595 struct ipr_resource_entry
*res
;
4596 struct ata_port
*ap
= NULL
;
4597 unsigned long lock_flags
= 0;
4598 char buffer
[IPR_MAX_RES_PATH_LENGTH
];
4600 spin_lock_irqsave(ioa_cfg
->host
->host_lock
, lock_flags
);
4601 res
= sdev
->hostdata
;
4603 if (ipr_is_af_dasd_device(res
))
4604 sdev
->type
= TYPE_RAID
;
4605 if (ipr_is_af_dasd_device(res
) || ipr_is_ioa_resource(res
)) {
4606 sdev
->scsi_level
= 4;
4607 sdev
->no_uld_attach
= 1;
4609 if (ipr_is_vset_device(res
)) {
4610 blk_queue_rq_timeout(sdev
->request_queue
,
4611 IPR_VSET_RW_TIMEOUT
);
4612 blk_queue_max_hw_sectors(sdev
->request_queue
, IPR_VSET_MAX_SECTORS
);
4614 if (ipr_is_gata(res
) && res
->sata_port
)
4615 ap
= res
->sata_port
->ap
;
4616 spin_unlock_irqrestore(ioa_cfg
->host
->host_lock
, lock_flags
);
4619 scsi_adjust_queue_depth(sdev
, 0, IPR_MAX_CMD_PER_ATA_LUN
);
4620 ata_sas_slave_configure(sdev
, ap
);
4622 scsi_adjust_queue_depth(sdev
, 0, sdev
->host
->cmd_per_lun
);
4624 sdev_printk(KERN_INFO
, sdev
, "Resource path: %s\n",
4625 ipr_format_res_path(ioa_cfg
,
4626 res
->res_path
, buffer
, sizeof(buffer
)));
4629 spin_unlock_irqrestore(ioa_cfg
->host
->host_lock
, lock_flags
);
4634 * ipr_ata_slave_alloc - Prepare for commands to a SATA device
4635 * @sdev: scsi device struct
4637 * This function initializes an ATA port so that future commands
4638 * sent through queuecommand will work.
4643 static int ipr_ata_slave_alloc(struct scsi_device
*sdev
)
4645 struct ipr_sata_port
*sata_port
= NULL
;
4649 if (sdev
->sdev_target
)
4650 sata_port
= sdev
->sdev_target
->hostdata
;
4652 rc
= ata_sas_port_init(sata_port
->ap
);
4654 rc
= ata_sas_sync_probe(sata_port
->ap
);
4658 ipr_slave_destroy(sdev
);
4665 * ipr_slave_alloc - Prepare for commands to a device.
4666 * @sdev: scsi device struct
4668 * This function saves a pointer to the resource entry
4669 * in the scsi device struct if the device exists. We
4670 * can then use this pointer in ipr_queuecommand when
4671 * handling new commands.
4674 * 0 on success / -ENXIO if device does not exist
4676 static int ipr_slave_alloc(struct scsi_device
*sdev
)
4678 struct ipr_ioa_cfg
*ioa_cfg
= (struct ipr_ioa_cfg
*) sdev
->host
->hostdata
;
4679 struct ipr_resource_entry
*res
;
4680 unsigned long lock_flags
;
4683 sdev
->hostdata
= NULL
;
4685 spin_lock_irqsave(ioa_cfg
->host
->host_lock
, lock_flags
);
4687 res
= ipr_find_sdev(sdev
);
4692 sdev
->hostdata
= res
;
4693 if (!ipr_is_naca_model(res
))
4694 res
->needs_sync_complete
= 1;
4696 if (ipr_is_gata(res
)) {
4697 spin_unlock_irqrestore(ioa_cfg
->host
->host_lock
, lock_flags
);
4698 return ipr_ata_slave_alloc(sdev
);
4702 spin_unlock_irqrestore(ioa_cfg
->host
->host_lock
, lock_flags
);
4708 * ipr_eh_host_reset - Reset the host adapter
4709 * @scsi_cmd: scsi command struct
4714 static int __ipr_eh_host_reset(struct scsi_cmnd
*scsi_cmd
)
4716 struct ipr_ioa_cfg
*ioa_cfg
;
4720 ioa_cfg
= (struct ipr_ioa_cfg
*) scsi_cmd
->device
->host
->hostdata
;
4722 if (!ioa_cfg
->in_reset_reload
) {
4723 dev_err(&ioa_cfg
->pdev
->dev
,
4724 "Adapter being reset as a result of error recovery.\n");
4726 if (WAIT_FOR_DUMP
== ioa_cfg
->sdt_state
)
4727 ioa_cfg
->sdt_state
= GET_DUMP
;
4730 rc
= ipr_reset_reload(ioa_cfg
, IPR_SHUTDOWN_ABBREV
);
4736 static int ipr_eh_host_reset(struct scsi_cmnd
*cmd
)
4740 spin_lock_irq(cmd
->device
->host
->host_lock
);
4741 rc
= __ipr_eh_host_reset(cmd
);
4742 spin_unlock_irq(cmd
->device
->host
->host_lock
);
4748 * ipr_device_reset - Reset the device
4749 * @ioa_cfg: ioa config struct
4750 * @res: resource entry struct
4752 * This function issues a device reset to the affected device.
4753 * If the device is a SCSI device, a LUN reset will be sent
4754 * to the device first. If that does not work, a target reset
4755 * will be sent. If the device is a SATA device, a PHY reset will
4759 * 0 on success / non-zero on failure
4761 static int ipr_device_reset(struct ipr_ioa_cfg
*ioa_cfg
,
4762 struct ipr_resource_entry
*res
)
4764 struct ipr_cmnd
*ipr_cmd
;
4765 struct ipr_ioarcb
*ioarcb
;
4766 struct ipr_cmd_pkt
*cmd_pkt
;
4767 struct ipr_ioarcb_ata_regs
*regs
;
4771 ipr_cmd
= ipr_get_free_ipr_cmnd(ioa_cfg
);
4772 ioarcb
= &ipr_cmd
->ioarcb
;
4773 cmd_pkt
= &ioarcb
->cmd_pkt
;
4775 if (ipr_cmd
->ioa_cfg
->sis64
) {
4776 regs
= &ipr_cmd
->i
.ata_ioadl
.regs
;
4777 ioarcb
->add_cmd_parms_offset
= cpu_to_be16(sizeof(*ioarcb
));
4779 regs
= &ioarcb
->u
.add_data
.u
.regs
;
4781 ioarcb
->res_handle
= res
->res_handle
;
4782 cmd_pkt
->request_type
= IPR_RQTYPE_IOACMD
;
4783 cmd_pkt
->cdb
[0] = IPR_RESET_DEVICE
;
4784 if (ipr_is_gata(res
)) {
4785 cmd_pkt
->cdb
[2] = IPR_ATA_PHY_RESET
;
4786 ioarcb
->add_cmd_parms_len
= cpu_to_be16(sizeof(regs
->flags
));
4787 regs
->flags
|= IPR_ATA_FLAG_STATUS_ON_GOOD_COMPLETION
;
4790 ipr_send_blocking_cmd(ipr_cmd
, ipr_timeout
, IPR_DEVICE_RESET_TIMEOUT
);
4791 ioasc
= be32_to_cpu(ipr_cmd
->s
.ioasa
.hdr
.ioasc
);
4792 list_add_tail(&ipr_cmd
->queue
, &ipr_cmd
->hrrq
->hrrq_free_q
);
4793 if (ipr_is_gata(res
) && res
->sata_port
&& ioasc
!= IPR_IOASC_IOA_WAS_RESET
) {
4794 if (ipr_cmd
->ioa_cfg
->sis64
)
4795 memcpy(&res
->sata_port
->ioasa
, &ipr_cmd
->s
.ioasa64
.u
.gata
,
4796 sizeof(struct ipr_ioasa_gata
));
4798 memcpy(&res
->sata_port
->ioasa
, &ipr_cmd
->s
.ioasa
.u
.gata
,
4799 sizeof(struct ipr_ioasa_gata
));
4803 return IPR_IOASC_SENSE_KEY(ioasc
) ? -EIO
: 0;
4807 * ipr_sata_reset - Reset the SATA port
4808 * @link: SATA link to reset
4809 * @classes: class of the attached device
4811 * This function issues a SATA phy reset to the affected ATA link.
4814 * 0 on success / non-zero on failure
4816 static int ipr_sata_reset(struct ata_link
*link
, unsigned int *classes
,
4817 unsigned long deadline
)
4819 struct ipr_sata_port
*sata_port
= link
->ap
->private_data
;
4820 struct ipr_ioa_cfg
*ioa_cfg
= sata_port
->ioa_cfg
;
4821 struct ipr_resource_entry
*res
;
4822 unsigned long lock_flags
= 0;
4826 spin_lock_irqsave(ioa_cfg
->host
->host_lock
, lock_flags
);
4827 while (ioa_cfg
->in_reset_reload
) {
4828 spin_unlock_irqrestore(ioa_cfg
->host
->host_lock
, lock_flags
);
4829 wait_event(ioa_cfg
->reset_wait_q
, !ioa_cfg
->in_reset_reload
);
4830 spin_lock_irqsave(ioa_cfg
->host
->host_lock
, lock_flags
);
4833 res
= sata_port
->res
;
4835 rc
= ipr_device_reset(ioa_cfg
, res
);
4836 *classes
= res
->ata_class
;
4839 spin_unlock_irqrestore(ioa_cfg
->host
->host_lock
, lock_flags
);
4845 * ipr_eh_dev_reset - Reset the device
4846 * @scsi_cmd: scsi command struct
4848 * This function issues a device reset to the affected device.
4849 * A LUN reset will be sent to the device first. If that does
4850 * not work, a target reset will be sent.
4855 static int __ipr_eh_dev_reset(struct scsi_cmnd
*scsi_cmd
)
4857 struct ipr_cmnd
*ipr_cmd
;
4858 struct ipr_ioa_cfg
*ioa_cfg
;
4859 struct ipr_resource_entry
*res
;
4860 struct ata_port
*ap
;
4862 struct ipr_hrr_queue
*hrrq
;
4865 ioa_cfg
= (struct ipr_ioa_cfg
*) scsi_cmd
->device
->host
->hostdata
;
4866 res
= scsi_cmd
->device
->hostdata
;
4872 * If we are currently going through reset/reload, return failed. This will force the
4873 * mid-layer to call ipr_eh_host_reset, which will then go to sleep and wait for the
4876 if (ioa_cfg
->in_reset_reload
)
4878 if (ioa_cfg
->hrrq
[IPR_INIT_HRRQ
].ioa_is_dead
)
4881 for_each_hrrq(hrrq
, ioa_cfg
) {
4882 spin_lock(&hrrq
->_lock
);
4883 list_for_each_entry(ipr_cmd
, &hrrq
->hrrq_pending_q
, queue
) {
4884 if (ipr_cmd
->ioarcb
.res_handle
== res
->res_handle
) {
4885 if (ipr_cmd
->scsi_cmd
)
4886 ipr_cmd
->done
= ipr_scsi_eh_done
;
4888 ipr_cmd
->done
= ipr_sata_eh_done
;
4890 !(ipr_cmd
->qc
->flags
& ATA_QCFLAG_FAILED
)) {
4891 ipr_cmd
->qc
->err_mask
|= AC_ERR_TIMEOUT
;
4892 ipr_cmd
->qc
->flags
|= ATA_QCFLAG_FAILED
;
4896 spin_unlock(&hrrq
->_lock
);
4898 res
->resetting_device
= 1;
4899 scmd_printk(KERN_ERR
, scsi_cmd
, "Resetting device\n");
4901 if (ipr_is_gata(res
) && res
->sata_port
) {
4902 ap
= res
->sata_port
->ap
;
4903 spin_unlock_irq(scsi_cmd
->device
->host
->host_lock
);
4904 ata_std_error_handler(ap
);
4905 spin_lock_irq(scsi_cmd
->device
->host
->host_lock
);
4907 for_each_hrrq(hrrq
, ioa_cfg
) {
4908 spin_lock(&hrrq
->_lock
);
4909 list_for_each_entry(ipr_cmd
,
4910 &hrrq
->hrrq_pending_q
, queue
) {
4911 if (ipr_cmd
->ioarcb
.res_handle
==
4917 spin_unlock(&hrrq
->_lock
);
4920 rc
= ipr_device_reset(ioa_cfg
, res
);
4921 res
->resetting_device
= 0;
4924 return rc
? FAILED
: SUCCESS
;
4927 static int ipr_eh_dev_reset(struct scsi_cmnd
*cmd
)
4931 spin_lock_irq(cmd
->device
->host
->host_lock
);
4932 rc
= __ipr_eh_dev_reset(cmd
);
4933 spin_unlock_irq(cmd
->device
->host
->host_lock
);
4939 * ipr_bus_reset_done - Op done function for bus reset.
4940 * @ipr_cmd: ipr command struct
4942 * This function is the op done function for a bus reset
4947 static void ipr_bus_reset_done(struct ipr_cmnd
*ipr_cmd
)
4949 struct ipr_ioa_cfg
*ioa_cfg
= ipr_cmd
->ioa_cfg
;
4950 struct ipr_resource_entry
*res
;
4953 if (!ioa_cfg
->sis64
)
4954 list_for_each_entry(res
, &ioa_cfg
->used_res_q
, queue
) {
4955 if (res
->res_handle
== ipr_cmd
->ioarcb
.res_handle
) {
4956 scsi_report_bus_reset(ioa_cfg
->host
, res
->bus
);
4962 * If abort has not completed, indicate the reset has, else call the
4963 * abort's done function to wake the sleeping eh thread
4965 if (ipr_cmd
->sibling
->sibling
)
4966 ipr_cmd
->sibling
->sibling
= NULL
;
4968 ipr_cmd
->sibling
->done(ipr_cmd
->sibling
);
4970 list_add_tail(&ipr_cmd
->queue
, &ipr_cmd
->hrrq
->hrrq_free_q
);
4975 * ipr_abort_timeout - An abort task has timed out
4976 * @ipr_cmd: ipr command struct
4978 * This function handles when an abort task times out. If this
4979 * happens we issue a bus reset since we have resources tied
4980 * up that must be freed before returning to the midlayer.
4985 static void ipr_abort_timeout(struct ipr_cmnd
*ipr_cmd
)
4987 struct ipr_cmnd
*reset_cmd
;
4988 struct ipr_ioa_cfg
*ioa_cfg
= ipr_cmd
->ioa_cfg
;
4989 struct ipr_cmd_pkt
*cmd_pkt
;
4990 unsigned long lock_flags
= 0;
4993 spin_lock_irqsave(ioa_cfg
->host
->host_lock
, lock_flags
);
4994 if (ipr_cmd
->completion
.done
|| ioa_cfg
->in_reset_reload
) {
4995 spin_unlock_irqrestore(ioa_cfg
->host
->host_lock
, lock_flags
);
4999 sdev_printk(KERN_ERR
, ipr_cmd
->u
.sdev
, "Abort timed out. Resetting bus.\n");
5000 reset_cmd
= ipr_get_free_ipr_cmnd(ioa_cfg
);
5001 ipr_cmd
->sibling
= reset_cmd
;
5002 reset_cmd
->sibling
= ipr_cmd
;
5003 reset_cmd
->ioarcb
.res_handle
= ipr_cmd
->ioarcb
.res_handle
;
5004 cmd_pkt
= &reset_cmd
->ioarcb
.cmd_pkt
;
5005 cmd_pkt
->request_type
= IPR_RQTYPE_IOACMD
;
5006 cmd_pkt
->cdb
[0] = IPR_RESET_DEVICE
;
5007 cmd_pkt
->cdb
[2] = IPR_RESET_TYPE_SELECT
| IPR_BUS_RESET
;
5009 ipr_do_req(reset_cmd
, ipr_bus_reset_done
, ipr_timeout
, IPR_DEVICE_RESET_TIMEOUT
);
5010 spin_unlock_irqrestore(ioa_cfg
->host
->host_lock
, lock_flags
);
5015 * ipr_cancel_op - Cancel specified op
5016 * @scsi_cmd: scsi command struct
5018 * This function cancels specified op.
5023 static int ipr_cancel_op(struct scsi_cmnd
*scsi_cmd
)
5025 struct ipr_cmnd
*ipr_cmd
;
5026 struct ipr_ioa_cfg
*ioa_cfg
;
5027 struct ipr_resource_entry
*res
;
5028 struct ipr_cmd_pkt
*cmd_pkt
;
5031 struct ipr_hrr_queue
*hrrq
;
5034 ioa_cfg
= (struct ipr_ioa_cfg
*)scsi_cmd
->device
->host
->hostdata
;
5035 res
= scsi_cmd
->device
->hostdata
;
5037 /* If we are currently going through reset/reload, return failed.
5038 * This will force the mid-layer to call ipr_eh_host_reset,
5039 * which will then go to sleep and wait for the reset to complete
5041 if (ioa_cfg
->in_reset_reload
||
5042 ioa_cfg
->hrrq
[IPR_INIT_HRRQ
].ioa_is_dead
)
5048 * If we are aborting a timed out op, chances are that the timeout was caused
5049 * by a still not detected EEH error. In such cases, reading a register will
5050 * trigger the EEH recovery infrastructure.
5052 int_reg
= readl(ioa_cfg
->regs
.sense_interrupt_reg
);
5054 if (!ipr_is_gscsi(res
))
5057 for_each_hrrq(hrrq
, ioa_cfg
) {
5058 spin_lock(&hrrq
->_lock
);
5059 list_for_each_entry(ipr_cmd
, &hrrq
->hrrq_pending_q
, queue
) {
5060 if (ipr_cmd
->scsi_cmd
== scsi_cmd
) {
5061 ipr_cmd
->done
= ipr_scsi_eh_done
;
5066 spin_unlock(&hrrq
->_lock
);
5072 ipr_cmd
= ipr_get_free_ipr_cmnd(ioa_cfg
);
5073 ipr_cmd
->ioarcb
.res_handle
= res
->res_handle
;
5074 cmd_pkt
= &ipr_cmd
->ioarcb
.cmd_pkt
;
5075 cmd_pkt
->request_type
= IPR_RQTYPE_IOACMD
;
5076 cmd_pkt
->cdb
[0] = IPR_CANCEL_ALL_REQUESTS
;
5077 ipr_cmd
->u
.sdev
= scsi_cmd
->device
;
5079 scmd_printk(KERN_ERR
, scsi_cmd
, "Aborting command: %02X\n",
5081 ipr_send_blocking_cmd(ipr_cmd
, ipr_abort_timeout
, IPR_CANCEL_ALL_TIMEOUT
);
5082 ioasc
= be32_to_cpu(ipr_cmd
->s
.ioasa
.hdr
.ioasc
);
5085 * If the abort task timed out and we sent a bus reset, we will get
5086 * one the following responses to the abort
5088 if (ioasc
== IPR_IOASC_BUS_WAS_RESET
|| ioasc
== IPR_IOASC_SYNC_REQUIRED
) {
5093 list_add_tail(&ipr_cmd
->queue
, &hrrq
->hrrq_free_q
);
5094 if (!ipr_is_naca_model(res
))
5095 res
->needs_sync_complete
= 1;
5098 return IPR_IOASC_SENSE_KEY(ioasc
) ? FAILED
: SUCCESS
;
5102 * ipr_eh_abort - Abort a single op
5103 * @scsi_cmd: scsi command struct
5108 static int ipr_eh_abort(struct scsi_cmnd
*scsi_cmd
)
5110 unsigned long flags
;
5115 spin_lock_irqsave(scsi_cmd
->device
->host
->host_lock
, flags
);
5116 rc
= ipr_cancel_op(scsi_cmd
);
5117 spin_unlock_irqrestore(scsi_cmd
->device
->host
->host_lock
, flags
);
5124 * ipr_handle_other_interrupt - Handle "other" interrupts
5125 * @ioa_cfg: ioa config struct
5126 * @int_reg: interrupt register
5129 * IRQ_NONE / IRQ_HANDLED
5131 static irqreturn_t
ipr_handle_other_interrupt(struct ipr_ioa_cfg
*ioa_cfg
,
5134 irqreturn_t rc
= IRQ_HANDLED
;
5137 int_mask_reg
= readl(ioa_cfg
->regs
.sense_interrupt_mask_reg32
);
5138 int_reg
&= ~int_mask_reg
;
5140 /* If an interrupt on the adapter did not occur, ignore it.
5141 * Or in the case of SIS 64, check for a stage change interrupt.
5143 if ((int_reg
& IPR_PCII_OPER_INTERRUPTS
) == 0) {
5144 if (ioa_cfg
->sis64
) {
5145 int_mask_reg
= readl(ioa_cfg
->regs
.sense_interrupt_mask_reg
);
5146 int_reg
= readl(ioa_cfg
->regs
.sense_interrupt_reg
) & ~int_mask_reg
;
5147 if (int_reg
& IPR_PCII_IPL_STAGE_CHANGE
) {
5149 /* clear stage change */
5150 writel(IPR_PCII_IPL_STAGE_CHANGE
, ioa_cfg
->regs
.clr_interrupt_reg
);
5151 int_reg
= readl(ioa_cfg
->regs
.sense_interrupt_reg
) & ~int_mask_reg
;
5152 list_del(&ioa_cfg
->reset_cmd
->queue
);
5153 del_timer(&ioa_cfg
->reset_cmd
->timer
);
5154 ipr_reset_ioa_job(ioa_cfg
->reset_cmd
);
5162 if (int_reg
& IPR_PCII_IOA_TRANS_TO_OPER
) {
5163 /* Mask the interrupt */
5164 writel(IPR_PCII_IOA_TRANS_TO_OPER
, ioa_cfg
->regs
.set_interrupt_mask_reg
);
5166 /* Clear the interrupt */
5167 writel(IPR_PCII_IOA_TRANS_TO_OPER
, ioa_cfg
->regs
.clr_interrupt_reg
);
5168 int_reg
= readl(ioa_cfg
->regs
.sense_interrupt_reg
);
5170 list_del(&ioa_cfg
->reset_cmd
->queue
);
5171 del_timer(&ioa_cfg
->reset_cmd
->timer
);
5172 ipr_reset_ioa_job(ioa_cfg
->reset_cmd
);
5173 } else if ((int_reg
& IPR_PCII_HRRQ_UPDATED
) == int_reg
) {
5174 if (ioa_cfg
->clear_isr
) {
5175 if (ipr_debug
&& printk_ratelimit())
5176 dev_err(&ioa_cfg
->pdev
->dev
,
5177 "Spurious interrupt detected. 0x%08X\n", int_reg
);
5178 writel(IPR_PCII_HRRQ_UPDATED
, ioa_cfg
->regs
.clr_interrupt_reg32
);
5179 int_reg
= readl(ioa_cfg
->regs
.sense_interrupt_reg32
);
5183 if (int_reg
& IPR_PCII_IOA_UNIT_CHECKED
)
5184 ioa_cfg
->ioa_unit_checked
= 1;
5185 else if (int_reg
& IPR_PCII_NO_HOST_RRQ
)
5186 dev_err(&ioa_cfg
->pdev
->dev
,
5187 "No Host RRQ. 0x%08X\n", int_reg
);
5189 dev_err(&ioa_cfg
->pdev
->dev
,
5190 "Permanent IOA failure. 0x%08X\n", int_reg
);
5192 if (WAIT_FOR_DUMP
== ioa_cfg
->sdt_state
)
5193 ioa_cfg
->sdt_state
= GET_DUMP
;
5195 ipr_mask_and_clear_interrupts(ioa_cfg
, ~0);
5196 ipr_initiate_ioa_reset(ioa_cfg
, IPR_SHUTDOWN_NONE
);
5203 * ipr_isr_eh - Interrupt service routine error handler
5204 * @ioa_cfg: ioa config struct
5205 * @msg: message to log
5210 static void ipr_isr_eh(struct ipr_ioa_cfg
*ioa_cfg
, char *msg
, u16 number
)
5212 ioa_cfg
->errors_logged
++;
5213 dev_err(&ioa_cfg
->pdev
->dev
, "%s %d\n", msg
, number
);
5215 if (WAIT_FOR_DUMP
== ioa_cfg
->sdt_state
)
5216 ioa_cfg
->sdt_state
= GET_DUMP
;
5218 ipr_initiate_ioa_reset(ioa_cfg
, IPR_SHUTDOWN_NONE
);
5221 static int __ipr_process_hrrq(struct ipr_hrr_queue
*hrr_queue
,
5222 struct list_head
*doneq
)
5226 struct ipr_cmnd
*ipr_cmd
;
5227 struct ipr_ioa_cfg
*ioa_cfg
= hrr_queue
->ioa_cfg
;
5230 /* If interrupts are disabled, ignore the interrupt */
5231 if (!hrr_queue
->allow_interrupts
)
5234 while ((be32_to_cpu(*hrr_queue
->hrrq_curr
) & IPR_HRRQ_TOGGLE_BIT
) ==
5235 hrr_queue
->toggle_bit
) {
5237 cmd_index
= (be32_to_cpu(*hrr_queue
->hrrq_curr
) &
5238 IPR_HRRQ_REQ_RESP_HANDLE_MASK
) >>
5239 IPR_HRRQ_REQ_RESP_HANDLE_SHIFT
;
5241 if (unlikely(cmd_index
> hrr_queue
->max_cmd_id
||
5242 cmd_index
< hrr_queue
->min_cmd_id
)) {
5244 "Invalid response handle from IOA: ",
5249 ipr_cmd
= ioa_cfg
->ipr_cmnd_list
[cmd_index
];
5250 ioasc
= be32_to_cpu(ipr_cmd
->s
.ioasa
.hdr
.ioasc
);
5252 ipr_trc_hook(ipr_cmd
, IPR_TRACE_FINISH
, ioasc
);
5254 list_move_tail(&ipr_cmd
->queue
, doneq
);
5256 if (hrr_queue
->hrrq_curr
< hrr_queue
->hrrq_end
) {
5257 hrr_queue
->hrrq_curr
++;
5259 hrr_queue
->hrrq_curr
= hrr_queue
->hrrq_start
;
5260 hrr_queue
->toggle_bit
^= 1u;
5267 * ipr_isr - Interrupt service routine
5269 * @devp: pointer to ioa config struct
5272 * IRQ_NONE / IRQ_HANDLED
5274 static irqreturn_t
ipr_isr(int irq
, void *devp
)
5276 struct ipr_hrr_queue
*hrrq
= (struct ipr_hrr_queue
*)devp
;
5277 struct ipr_ioa_cfg
*ioa_cfg
= hrrq
->ioa_cfg
;
5278 unsigned long hrrq_flags
= 0;
5284 struct ipr_cmnd
*ipr_cmd
, *temp
;
5285 irqreturn_t rc
= IRQ_NONE
;
5288 spin_lock_irqsave(hrrq
->lock
, hrrq_flags
);
5289 /* If interrupts are disabled, ignore the interrupt */
5290 if (!hrrq
->allow_interrupts
) {
5291 spin_unlock_irqrestore(hrrq
->lock
, hrrq_flags
);
5298 while ((be32_to_cpu(*hrrq
->hrrq_curr
) & IPR_HRRQ_TOGGLE_BIT
) ==
5301 cmd_index
= (be32_to_cpu(*hrrq
->hrrq_curr
) &
5302 IPR_HRRQ_REQ_RESP_HANDLE_MASK
) >> IPR_HRRQ_REQ_RESP_HANDLE_SHIFT
;
5304 if (unlikely(cmd_index
> hrrq
->max_cmd_id
||
5305 cmd_index
< hrrq
->min_cmd_id
)) {
5307 "Invalid response handle from IOA: ",
5313 ipr_cmd
= ioa_cfg
->ipr_cmnd_list
[cmd_index
];
5314 ioasc
= be32_to_cpu(ipr_cmd
->s
.ioasa
.hdr
.ioasc
);
5316 ipr_trc_hook(ipr_cmd
, IPR_TRACE_FINISH
, ioasc
);
5318 list_move_tail(&ipr_cmd
->queue
, &doneq
);
5322 if (hrrq
->hrrq_curr
< hrrq
->hrrq_end
) {
5325 hrrq
->hrrq_curr
= hrrq
->hrrq_start
;
5326 hrrq
->toggle_bit
^= 1u;
5330 if (ipr_cmd
&& !ioa_cfg
->clear_isr
)
5333 if (ipr_cmd
!= NULL
) {
5334 /* Clear the PCI interrupt */
5337 writel(IPR_PCII_HRRQ_UPDATED
, ioa_cfg
->regs
.clr_interrupt_reg32
);
5338 int_reg
= readl(ioa_cfg
->regs
.sense_interrupt_reg32
);
5339 } while (int_reg
& IPR_PCII_HRRQ_UPDATED
&&
5340 num_hrrq
++ < IPR_MAX_HRRQ_RETRIES
);
5342 } else if (rc
== IRQ_NONE
&& irq_none
== 0) {
5343 int_reg
= readl(ioa_cfg
->regs
.sense_interrupt_reg32
);
5345 } else if (num_hrrq
== IPR_MAX_HRRQ_RETRIES
&&
5346 int_reg
& IPR_PCII_HRRQ_UPDATED
) {
5347 ipr_isr_eh(ioa_cfg
, "Error clearing HRRQ: ", num_hrrq
);
5354 if (unlikely(rc
== IRQ_NONE
))
5355 rc
= ipr_handle_other_interrupt(ioa_cfg
, int_reg
);
5358 spin_unlock_irqrestore(hrrq
->lock
, hrrq_flags
);
5359 list_for_each_entry_safe(ipr_cmd
, temp
, &doneq
, queue
) {
5360 list_del(&ipr_cmd
->queue
);
5361 del_timer(&ipr_cmd
->timer
);
5362 ipr_cmd
->fast_done(ipr_cmd
);
5368 * ipr_isr_mhrrq - Interrupt service routine
5370 * @devp: pointer to ioa config struct
5373 * IRQ_NONE / IRQ_HANDLED
5375 static irqreturn_t
ipr_isr_mhrrq(int irq
, void *devp
)
5377 struct ipr_hrr_queue
*hrrq
= (struct ipr_hrr_queue
*)devp
;
5378 unsigned long hrrq_flags
= 0;
5379 struct ipr_cmnd
*ipr_cmd
, *temp
;
5380 irqreturn_t rc
= IRQ_NONE
;
5383 spin_lock_irqsave(hrrq
->lock
, hrrq_flags
);
5385 /* If interrupts are disabled, ignore the interrupt */
5386 if (!hrrq
->allow_interrupts
) {
5387 spin_unlock_irqrestore(hrrq
->lock
, hrrq_flags
);
5391 if ((be32_to_cpu(*hrrq
->hrrq_curr
) & IPR_HRRQ_TOGGLE_BIT
) ==
5394 if (__ipr_process_hrrq(hrrq
, &doneq
))
5397 spin_unlock_irqrestore(hrrq
->lock
, hrrq_flags
);
5399 list_for_each_entry_safe(ipr_cmd
, temp
, &doneq
, queue
) {
5400 list_del(&ipr_cmd
->queue
);
5401 del_timer(&ipr_cmd
->timer
);
5402 ipr_cmd
->fast_done(ipr_cmd
);
5408 * ipr_build_ioadl64 - Build a scatter/gather list and map the buffer
5409 * @ioa_cfg: ioa config struct
5410 * @ipr_cmd: ipr command struct
5413 * 0 on success / -1 on failure
5415 static int ipr_build_ioadl64(struct ipr_ioa_cfg
*ioa_cfg
,
5416 struct ipr_cmnd
*ipr_cmd
)
5419 struct scatterlist
*sg
;
5421 u32 ioadl_flags
= 0;
5422 struct scsi_cmnd
*scsi_cmd
= ipr_cmd
->scsi_cmd
;
5423 struct ipr_ioarcb
*ioarcb
= &ipr_cmd
->ioarcb
;
5424 struct ipr_ioadl64_desc
*ioadl64
= ipr_cmd
->i
.ioadl64
;
5426 length
= scsi_bufflen(scsi_cmd
);
5430 nseg
= scsi_dma_map(scsi_cmd
);
5432 if (printk_ratelimit())
5433 dev_err(&ioa_cfg
->pdev
->dev
, "pci_map_sg failed!\n");
5437 ipr_cmd
->dma_use_sg
= nseg
;
5439 ioarcb
->data_transfer_length
= cpu_to_be32(length
);
5441 cpu_to_be32(sizeof(struct ipr_ioadl64_desc
) * ipr_cmd
->dma_use_sg
);
5443 if (scsi_cmd
->sc_data_direction
== DMA_TO_DEVICE
) {
5444 ioadl_flags
= IPR_IOADL_FLAGS_WRITE
;
5445 ioarcb
->cmd_pkt
.flags_hi
|= IPR_FLAGS_HI_WRITE_NOT_READ
;
5446 } else if (scsi_cmd
->sc_data_direction
== DMA_FROM_DEVICE
)
5447 ioadl_flags
= IPR_IOADL_FLAGS_READ
;
5449 scsi_for_each_sg(scsi_cmd
, sg
, ipr_cmd
->dma_use_sg
, i
) {
5450 ioadl64
[i
].flags
= cpu_to_be32(ioadl_flags
);
5451 ioadl64
[i
].data_len
= cpu_to_be32(sg_dma_len(sg
));
5452 ioadl64
[i
].address
= cpu_to_be64(sg_dma_address(sg
));
5455 ioadl64
[i
-1].flags
|= cpu_to_be32(IPR_IOADL_FLAGS_LAST
);
5460 * ipr_build_ioadl - Build a scatter/gather list and map the buffer
5461 * @ioa_cfg: ioa config struct
5462 * @ipr_cmd: ipr command struct
5465 * 0 on success / -1 on failure
5467 static int ipr_build_ioadl(struct ipr_ioa_cfg
*ioa_cfg
,
5468 struct ipr_cmnd
*ipr_cmd
)
5471 struct scatterlist
*sg
;
5473 u32 ioadl_flags
= 0;
5474 struct scsi_cmnd
*scsi_cmd
= ipr_cmd
->scsi_cmd
;
5475 struct ipr_ioarcb
*ioarcb
= &ipr_cmd
->ioarcb
;
5476 struct ipr_ioadl_desc
*ioadl
= ipr_cmd
->i
.ioadl
;
5478 length
= scsi_bufflen(scsi_cmd
);
5482 nseg
= scsi_dma_map(scsi_cmd
);
5484 dev_err(&ioa_cfg
->pdev
->dev
, "pci_map_sg failed!\n");
5488 ipr_cmd
->dma_use_sg
= nseg
;
5490 if (scsi_cmd
->sc_data_direction
== DMA_TO_DEVICE
) {
5491 ioadl_flags
= IPR_IOADL_FLAGS_WRITE
;
5492 ioarcb
->cmd_pkt
.flags_hi
|= IPR_FLAGS_HI_WRITE_NOT_READ
;
5493 ioarcb
->data_transfer_length
= cpu_to_be32(length
);
5495 cpu_to_be32(sizeof(struct ipr_ioadl_desc
) * ipr_cmd
->dma_use_sg
);
5496 } else if (scsi_cmd
->sc_data_direction
== DMA_FROM_DEVICE
) {
5497 ioadl_flags
= IPR_IOADL_FLAGS_READ
;
5498 ioarcb
->read_data_transfer_length
= cpu_to_be32(length
);
5499 ioarcb
->read_ioadl_len
=
5500 cpu_to_be32(sizeof(struct ipr_ioadl_desc
) * ipr_cmd
->dma_use_sg
);
5503 if (ipr_cmd
->dma_use_sg
<= ARRAY_SIZE(ioarcb
->u
.add_data
.u
.ioadl
)) {
5504 ioadl
= ioarcb
->u
.add_data
.u
.ioadl
;
5505 ioarcb
->write_ioadl_addr
= cpu_to_be32((ipr_cmd
->dma_addr
) +
5506 offsetof(struct ipr_ioarcb
, u
.add_data
));
5507 ioarcb
->read_ioadl_addr
= ioarcb
->write_ioadl_addr
;
5510 scsi_for_each_sg(scsi_cmd
, sg
, ipr_cmd
->dma_use_sg
, i
) {
5511 ioadl
[i
].flags_and_data_len
=
5512 cpu_to_be32(ioadl_flags
| sg_dma_len(sg
));
5513 ioadl
[i
].address
= cpu_to_be32(sg_dma_address(sg
));
5516 ioadl
[i
-1].flags_and_data_len
|= cpu_to_be32(IPR_IOADL_FLAGS_LAST
);
5521 * ipr_get_task_attributes - Translate SPI Q-Tag to task attributes
5522 * @scsi_cmd: scsi command struct
5527 static u8
ipr_get_task_attributes(struct scsi_cmnd
*scsi_cmd
)
5530 u8 rc
= IPR_FLAGS_LO_UNTAGGED_TASK
;
5532 if (scsi_populate_tag_msg(scsi_cmd
, tag
)) {
5534 case MSG_SIMPLE_TAG
:
5535 rc
= IPR_FLAGS_LO_SIMPLE_TASK
;
5538 rc
= IPR_FLAGS_LO_HEAD_OF_Q_TASK
;
5540 case MSG_ORDERED_TAG
:
5541 rc
= IPR_FLAGS_LO_ORDERED_TASK
;
5550 * ipr_erp_done - Process completion of ERP for a device
5551 * @ipr_cmd: ipr command struct
5553 * This function copies the sense buffer into the scsi_cmd
5554 * struct and pushes the scsi_done function.
5559 static void ipr_erp_done(struct ipr_cmnd
*ipr_cmd
)
5561 struct scsi_cmnd
*scsi_cmd
= ipr_cmd
->scsi_cmd
;
5562 struct ipr_resource_entry
*res
= scsi_cmd
->device
->hostdata
;
5563 u32 ioasc
= be32_to_cpu(ipr_cmd
->s
.ioasa
.hdr
.ioasc
);
5565 if (IPR_IOASC_SENSE_KEY(ioasc
) > 0) {
5566 scsi_cmd
->result
|= (DID_ERROR
<< 16);
5567 scmd_printk(KERN_ERR
, scsi_cmd
,
5568 "Request Sense failed with IOASC: 0x%08X\n", ioasc
);
5570 memcpy(scsi_cmd
->sense_buffer
, ipr_cmd
->sense_buffer
,
5571 SCSI_SENSE_BUFFERSIZE
);
5575 if (!ipr_is_naca_model(res
))
5576 res
->needs_sync_complete
= 1;
5579 scsi_dma_unmap(ipr_cmd
->scsi_cmd
);
5580 list_add_tail(&ipr_cmd
->queue
, &ipr_cmd
->hrrq
->hrrq_free_q
);
5581 scsi_cmd
->scsi_done(scsi_cmd
);
5585 * ipr_reinit_ipr_cmnd_for_erp - Re-initialize a cmnd block to be used for ERP
5586 * @ipr_cmd: ipr command struct
5591 static void ipr_reinit_ipr_cmnd_for_erp(struct ipr_cmnd
*ipr_cmd
)
5593 struct ipr_ioarcb
*ioarcb
= &ipr_cmd
->ioarcb
;
5594 struct ipr_ioasa
*ioasa
= &ipr_cmd
->s
.ioasa
;
5595 dma_addr_t dma_addr
= ipr_cmd
->dma_addr
;
5597 memset(&ioarcb
->cmd_pkt
, 0, sizeof(struct ipr_cmd_pkt
));
5598 ioarcb
->data_transfer_length
= 0;
5599 ioarcb
->read_data_transfer_length
= 0;
5600 ioarcb
->ioadl_len
= 0;
5601 ioarcb
->read_ioadl_len
= 0;
5602 ioasa
->hdr
.ioasc
= 0;
5603 ioasa
->hdr
.residual_data_len
= 0;
5605 if (ipr_cmd
->ioa_cfg
->sis64
)
5606 ioarcb
->u
.sis64_addr_data
.data_ioadl_addr
=
5607 cpu_to_be64(dma_addr
+ offsetof(struct ipr_cmnd
, i
.ioadl64
));
5609 ioarcb
->write_ioadl_addr
=
5610 cpu_to_be32(dma_addr
+ offsetof(struct ipr_cmnd
, i
.ioadl
));
5611 ioarcb
->read_ioadl_addr
= ioarcb
->write_ioadl_addr
;
5616 * ipr_erp_request_sense - Send request sense to a device
5617 * @ipr_cmd: ipr command struct
5619 * This function sends a request sense to a device as a result
5620 * of a check condition.
5625 static void ipr_erp_request_sense(struct ipr_cmnd
*ipr_cmd
)
5627 struct ipr_cmd_pkt
*cmd_pkt
= &ipr_cmd
->ioarcb
.cmd_pkt
;
5628 u32 ioasc
= be32_to_cpu(ipr_cmd
->s
.ioasa
.hdr
.ioasc
);
5630 if (IPR_IOASC_SENSE_KEY(ioasc
) > 0) {
5631 ipr_erp_done(ipr_cmd
);
5635 ipr_reinit_ipr_cmnd_for_erp(ipr_cmd
);
5637 cmd_pkt
->request_type
= IPR_RQTYPE_SCSICDB
;
5638 cmd_pkt
->cdb
[0] = REQUEST_SENSE
;
5639 cmd_pkt
->cdb
[4] = SCSI_SENSE_BUFFERSIZE
;
5640 cmd_pkt
->flags_hi
|= IPR_FLAGS_HI_SYNC_OVERRIDE
;
5641 cmd_pkt
->flags_hi
|= IPR_FLAGS_HI_NO_ULEN_CHK
;
5642 cmd_pkt
->timeout
= cpu_to_be16(IPR_REQUEST_SENSE_TIMEOUT
/ HZ
);
5644 ipr_init_ioadl(ipr_cmd
, ipr_cmd
->sense_buffer_dma
,
5645 SCSI_SENSE_BUFFERSIZE
, IPR_IOADL_FLAGS_READ_LAST
);
5647 ipr_do_req(ipr_cmd
, ipr_erp_done
, ipr_timeout
,
5648 IPR_REQUEST_SENSE_TIMEOUT
* 2);
5652 * ipr_erp_cancel_all - Send cancel all to a device
5653 * @ipr_cmd: ipr command struct
5655 * This function sends a cancel all to a device to clear the
5656 * queue. If we are running TCQ on the device, QERR is set to 1,
5657 * which means all outstanding ops have been dropped on the floor.
5658 * Cancel all will return them to us.
5663 static void ipr_erp_cancel_all(struct ipr_cmnd
*ipr_cmd
)
5665 struct scsi_cmnd
*scsi_cmd
= ipr_cmd
->scsi_cmd
;
5666 struct ipr_resource_entry
*res
= scsi_cmd
->device
->hostdata
;
5667 struct ipr_cmd_pkt
*cmd_pkt
;
5671 ipr_reinit_ipr_cmnd_for_erp(ipr_cmd
);
5673 if (!scsi_get_tag_type(scsi_cmd
->device
)) {
5674 ipr_erp_request_sense(ipr_cmd
);
5678 cmd_pkt
= &ipr_cmd
->ioarcb
.cmd_pkt
;
5679 cmd_pkt
->request_type
= IPR_RQTYPE_IOACMD
;
5680 cmd_pkt
->cdb
[0] = IPR_CANCEL_ALL_REQUESTS
;
5682 ipr_do_req(ipr_cmd
, ipr_erp_request_sense
, ipr_timeout
,
5683 IPR_CANCEL_ALL_TIMEOUT
);
5687 * ipr_dump_ioasa - Dump contents of IOASA
5688 * @ioa_cfg: ioa config struct
5689 * @ipr_cmd: ipr command struct
5690 * @res: resource entry struct
5692 * This function is invoked by the interrupt handler when ops
5693 * fail. It will log the IOASA if appropriate. Only called
5699 static void ipr_dump_ioasa(struct ipr_ioa_cfg
*ioa_cfg
,
5700 struct ipr_cmnd
*ipr_cmd
, struct ipr_resource_entry
*res
)
5704 u32 ioasc
, fd_ioasc
;
5705 struct ipr_ioasa
*ioasa
= &ipr_cmd
->s
.ioasa
;
5706 __be32
*ioasa_data
= (__be32
*)ioasa
;
5709 ioasc
= be32_to_cpu(ioasa
->hdr
.ioasc
) & IPR_IOASC_IOASC_MASK
;
5710 fd_ioasc
= be32_to_cpu(ioasa
->hdr
.fd_ioasc
) & IPR_IOASC_IOASC_MASK
;
5715 if (ioa_cfg
->log_level
< IPR_DEFAULT_LOG_LEVEL
)
5718 if (ioasc
== IPR_IOASC_BUS_WAS_RESET
&& fd_ioasc
)
5719 error_index
= ipr_get_error(fd_ioasc
);
5721 error_index
= ipr_get_error(ioasc
);
5723 if (ioa_cfg
->log_level
< IPR_MAX_LOG_LEVEL
) {
5724 /* Don't log an error if the IOA already logged one */
5725 if (ioasa
->hdr
.ilid
!= 0)
5728 if (!ipr_is_gscsi(res
))
5731 if (ipr_error_table
[error_index
].log_ioasa
== 0)
5735 ipr_res_err(ioa_cfg
, res
, "%s\n", ipr_error_table
[error_index
].error
);
5737 data_len
= be16_to_cpu(ioasa
->hdr
.ret_stat_len
);
5738 if (ioa_cfg
->sis64
&& sizeof(struct ipr_ioasa64
) < data_len
)
5739 data_len
= sizeof(struct ipr_ioasa64
);
5740 else if (!ioa_cfg
->sis64
&& sizeof(struct ipr_ioasa
) < data_len
)
5741 data_len
= sizeof(struct ipr_ioasa
);
5743 ipr_err("IOASA Dump:\n");
5745 for (i
= 0; i
< data_len
/ 4; i
+= 4) {
5746 ipr_err("%08X: %08X %08X %08X %08X\n", i
*4,
5747 be32_to_cpu(ioasa_data
[i
]),
5748 be32_to_cpu(ioasa_data
[i
+1]),
5749 be32_to_cpu(ioasa_data
[i
+2]),
5750 be32_to_cpu(ioasa_data
[i
+3]));
5755 * ipr_gen_sense - Generate SCSI sense data from an IOASA
5757 * @sense_buf: sense data buffer
5762 static void ipr_gen_sense(struct ipr_cmnd
*ipr_cmd
)
5765 u8
*sense_buf
= ipr_cmd
->scsi_cmd
->sense_buffer
;
5766 struct ipr_resource_entry
*res
= ipr_cmd
->scsi_cmd
->device
->hostdata
;
5767 struct ipr_ioasa
*ioasa
= &ipr_cmd
->s
.ioasa
;
5768 u32 ioasc
= be32_to_cpu(ioasa
->hdr
.ioasc
);
5770 memset(sense_buf
, 0, SCSI_SENSE_BUFFERSIZE
);
5772 if (ioasc
>= IPR_FIRST_DRIVER_IOASC
)
5775 ipr_cmd
->scsi_cmd
->result
= SAM_STAT_CHECK_CONDITION
;
5777 if (ipr_is_vset_device(res
) &&
5778 ioasc
== IPR_IOASC_MED_DO_NOT_REALLOC
&&
5779 ioasa
->u
.vset
.failing_lba_hi
!= 0) {
5780 sense_buf
[0] = 0x72;
5781 sense_buf
[1] = IPR_IOASC_SENSE_KEY(ioasc
);
5782 sense_buf
[2] = IPR_IOASC_SENSE_CODE(ioasc
);
5783 sense_buf
[3] = IPR_IOASC_SENSE_QUAL(ioasc
);
5787 sense_buf
[9] = 0x0A;
5788 sense_buf
[10] = 0x80;
5790 failing_lba
= be32_to_cpu(ioasa
->u
.vset
.failing_lba_hi
);
5792 sense_buf
[12] = (failing_lba
& 0xff000000) >> 24;
5793 sense_buf
[13] = (failing_lba
& 0x00ff0000) >> 16;
5794 sense_buf
[14] = (failing_lba
& 0x0000ff00) >> 8;
5795 sense_buf
[15] = failing_lba
& 0x000000ff;
5797 failing_lba
= be32_to_cpu(ioasa
->u
.vset
.failing_lba_lo
);
5799 sense_buf
[16] = (failing_lba
& 0xff000000) >> 24;
5800 sense_buf
[17] = (failing_lba
& 0x00ff0000) >> 16;
5801 sense_buf
[18] = (failing_lba
& 0x0000ff00) >> 8;
5802 sense_buf
[19] = failing_lba
& 0x000000ff;
5804 sense_buf
[0] = 0x70;
5805 sense_buf
[2] = IPR_IOASC_SENSE_KEY(ioasc
);
5806 sense_buf
[12] = IPR_IOASC_SENSE_CODE(ioasc
);
5807 sense_buf
[13] = IPR_IOASC_SENSE_QUAL(ioasc
);
5809 /* Illegal request */
5810 if ((IPR_IOASC_SENSE_KEY(ioasc
) == 0x05) &&
5811 (be32_to_cpu(ioasa
->hdr
.ioasc_specific
) & IPR_FIELD_POINTER_VALID
)) {
5812 sense_buf
[7] = 10; /* additional length */
5814 /* IOARCB was in error */
5815 if (IPR_IOASC_SENSE_CODE(ioasc
) == 0x24)
5816 sense_buf
[15] = 0xC0;
5817 else /* Parameter data was invalid */
5818 sense_buf
[15] = 0x80;
5821 ((IPR_FIELD_POINTER_MASK
&
5822 be32_to_cpu(ioasa
->hdr
.ioasc_specific
)) >> 8) & 0xff;
5824 (IPR_FIELD_POINTER_MASK
&
5825 be32_to_cpu(ioasa
->hdr
.ioasc_specific
)) & 0xff;
5827 if (ioasc
== IPR_IOASC_MED_DO_NOT_REALLOC
) {
5828 if (ipr_is_vset_device(res
))
5829 failing_lba
= be32_to_cpu(ioasa
->u
.vset
.failing_lba_lo
);
5831 failing_lba
= be32_to_cpu(ioasa
->u
.dasd
.failing_lba
);
5833 sense_buf
[0] |= 0x80; /* Or in the Valid bit */
5834 sense_buf
[3] = (failing_lba
& 0xff000000) >> 24;
5835 sense_buf
[4] = (failing_lba
& 0x00ff0000) >> 16;
5836 sense_buf
[5] = (failing_lba
& 0x0000ff00) >> 8;
5837 sense_buf
[6] = failing_lba
& 0x000000ff;
5840 sense_buf
[7] = 6; /* additional length */
5846 * ipr_get_autosense - Copy autosense data to sense buffer
5847 * @ipr_cmd: ipr command struct
5849 * This function copies the autosense buffer to the buffer
5850 * in the scsi_cmd, if there is autosense available.
5853 * 1 if autosense was available / 0 if not
5855 static int ipr_get_autosense(struct ipr_cmnd
*ipr_cmd
)
5857 struct ipr_ioasa
*ioasa
= &ipr_cmd
->s
.ioasa
;
5858 struct ipr_ioasa64
*ioasa64
= &ipr_cmd
->s
.ioasa64
;
5860 if ((be32_to_cpu(ioasa
->hdr
.ioasc_specific
) & IPR_AUTOSENSE_VALID
) == 0)
5863 if (ipr_cmd
->ioa_cfg
->sis64
)
5864 memcpy(ipr_cmd
->scsi_cmd
->sense_buffer
, ioasa64
->auto_sense
.data
,
5865 min_t(u16
, be16_to_cpu(ioasa64
->auto_sense
.auto_sense_len
),
5866 SCSI_SENSE_BUFFERSIZE
));
5868 memcpy(ipr_cmd
->scsi_cmd
->sense_buffer
, ioasa
->auto_sense
.data
,
5869 min_t(u16
, be16_to_cpu(ioasa
->auto_sense
.auto_sense_len
),
5870 SCSI_SENSE_BUFFERSIZE
));
5875 * ipr_erp_start - Process an error response for a SCSI op
5876 * @ioa_cfg: ioa config struct
5877 * @ipr_cmd: ipr command struct
5879 * This function determines whether or not to initiate ERP
5880 * on the affected device.
5885 static void ipr_erp_start(struct ipr_ioa_cfg
*ioa_cfg
,
5886 struct ipr_cmnd
*ipr_cmd
)
5888 struct scsi_cmnd
*scsi_cmd
= ipr_cmd
->scsi_cmd
;
5889 struct ipr_resource_entry
*res
= scsi_cmd
->device
->hostdata
;
5890 u32 ioasc
= be32_to_cpu(ipr_cmd
->s
.ioasa
.hdr
.ioasc
);
5891 u32 masked_ioasc
= ioasc
& IPR_IOASC_IOASC_MASK
;
5894 ipr_scsi_eh_done(ipr_cmd
);
5898 if (!ipr_is_gscsi(res
) && masked_ioasc
!= IPR_IOASC_HW_DEV_BUS_STATUS
)
5899 ipr_gen_sense(ipr_cmd
);
5901 ipr_dump_ioasa(ioa_cfg
, ipr_cmd
, res
);
5903 switch (masked_ioasc
) {
5904 case IPR_IOASC_ABORTED_CMD_TERM_BY_HOST
:
5905 if (ipr_is_naca_model(res
))
5906 scsi_cmd
->result
|= (DID_ABORT
<< 16);
5908 scsi_cmd
->result
|= (DID_IMM_RETRY
<< 16);
5910 case IPR_IOASC_IR_RESOURCE_HANDLE
:
5911 case IPR_IOASC_IR_NO_CMDS_TO_2ND_IOA
:
5912 scsi_cmd
->result
|= (DID_NO_CONNECT
<< 16);
5914 case IPR_IOASC_HW_SEL_TIMEOUT
:
5915 scsi_cmd
->result
|= (DID_NO_CONNECT
<< 16);
5916 if (!ipr_is_naca_model(res
))
5917 res
->needs_sync_complete
= 1;
5919 case IPR_IOASC_SYNC_REQUIRED
:
5921 res
->needs_sync_complete
= 1;
5922 scsi_cmd
->result
|= (DID_IMM_RETRY
<< 16);
5924 case IPR_IOASC_MED_DO_NOT_REALLOC
: /* prevent retries */
5925 case IPR_IOASA_IR_DUAL_IOA_DISABLED
:
5926 scsi_cmd
->result
|= (DID_PASSTHROUGH
<< 16);
5928 case IPR_IOASC_BUS_WAS_RESET
:
5929 case IPR_IOASC_BUS_WAS_RESET_BY_OTHER
:
5931 * Report the bus reset and ask for a retry. The device
5932 * will give CC/UA the next command.
5934 if (!res
->resetting_device
)
5935 scsi_report_bus_reset(ioa_cfg
->host
, scsi_cmd
->device
->channel
);
5936 scsi_cmd
->result
|= (DID_ERROR
<< 16);
5937 if (!ipr_is_naca_model(res
))
5938 res
->needs_sync_complete
= 1;
5940 case IPR_IOASC_HW_DEV_BUS_STATUS
:
5941 scsi_cmd
->result
|= IPR_IOASC_SENSE_STATUS(ioasc
);
5942 if (IPR_IOASC_SENSE_STATUS(ioasc
) == SAM_STAT_CHECK_CONDITION
) {
5943 if (!ipr_get_autosense(ipr_cmd
)) {
5944 if (!ipr_is_naca_model(res
)) {
5945 ipr_erp_cancel_all(ipr_cmd
);
5950 if (!ipr_is_naca_model(res
))
5951 res
->needs_sync_complete
= 1;
5953 case IPR_IOASC_NR_INIT_CMD_REQUIRED
:
5956 if (IPR_IOASC_SENSE_KEY(ioasc
) > RECOVERED_ERROR
)
5957 scsi_cmd
->result
|= (DID_ERROR
<< 16);
5958 if (!ipr_is_vset_device(res
) && !ipr_is_naca_model(res
))
5959 res
->needs_sync_complete
= 1;
5963 scsi_dma_unmap(ipr_cmd
->scsi_cmd
);
5964 list_add_tail(&ipr_cmd
->queue
, &ipr_cmd
->hrrq
->hrrq_free_q
);
5965 scsi_cmd
->scsi_done(scsi_cmd
);
5969 * ipr_scsi_done - mid-layer done function
5970 * @ipr_cmd: ipr command struct
5972 * This function is invoked by the interrupt handler for
5973 * ops generated by the SCSI mid-layer
5978 static void ipr_scsi_done(struct ipr_cmnd
*ipr_cmd
)
5980 struct ipr_ioa_cfg
*ioa_cfg
= ipr_cmd
->ioa_cfg
;
5981 struct scsi_cmnd
*scsi_cmd
= ipr_cmd
->scsi_cmd
;
5982 u32 ioasc
= be32_to_cpu(ipr_cmd
->s
.ioasa
.hdr
.ioasc
);
5983 unsigned long hrrq_flags
;
5985 scsi_set_resid(scsi_cmd
, be32_to_cpu(ipr_cmd
->s
.ioasa
.hdr
.residual_data_len
));
5987 if (likely(IPR_IOASC_SENSE_KEY(ioasc
) == 0)) {
5988 scsi_dma_unmap(scsi_cmd
);
5990 spin_lock_irqsave(ipr_cmd
->hrrq
->lock
, hrrq_flags
);
5991 list_add_tail(&ipr_cmd
->queue
, &ipr_cmd
->hrrq
->hrrq_free_q
);
5992 scsi_cmd
->scsi_done(scsi_cmd
);
5993 spin_unlock_irqrestore(ipr_cmd
->hrrq
->lock
, hrrq_flags
);
5995 spin_lock_irqsave(ipr_cmd
->hrrq
->lock
, hrrq_flags
);
5996 ipr_erp_start(ioa_cfg
, ipr_cmd
);
5997 spin_unlock_irqrestore(ipr_cmd
->hrrq
->lock
, hrrq_flags
);
6002 * ipr_queuecommand - Queue a mid-layer request
6003 * @shost: scsi host struct
6004 * @scsi_cmd: scsi command struct
6006 * This function queues a request generated by the mid-layer.
6010 * SCSI_MLQUEUE_DEVICE_BUSY if device is busy
6011 * SCSI_MLQUEUE_HOST_BUSY if host is busy
6013 static int ipr_queuecommand(struct Scsi_Host
*shost
,
6014 struct scsi_cmnd
*scsi_cmd
)
6016 struct ipr_ioa_cfg
*ioa_cfg
;
6017 struct ipr_resource_entry
*res
;
6018 struct ipr_ioarcb
*ioarcb
;
6019 struct ipr_cmnd
*ipr_cmd
;
6020 unsigned long hrrq_flags
, lock_flags
;
6022 struct ipr_hrr_queue
*hrrq
;
6025 ioa_cfg
= (struct ipr_ioa_cfg
*)shost
->hostdata
;
6027 scsi_cmd
->result
= (DID_OK
<< 16);
6028 res
= scsi_cmd
->device
->hostdata
;
6030 if (ipr_is_gata(res
) && res
->sata_port
) {
6031 spin_lock_irqsave(ioa_cfg
->host
->host_lock
, lock_flags
);
6032 rc
= ata_sas_queuecmd(scsi_cmd
, res
->sata_port
->ap
);
6033 spin_unlock_irqrestore(ioa_cfg
->host
->host_lock
, lock_flags
);
6037 hrrq_id
= ipr_get_hrrq_index(ioa_cfg
);
6038 hrrq
= &ioa_cfg
->hrrq
[hrrq_id
];
6040 spin_lock_irqsave(hrrq
->lock
, hrrq_flags
);
6042 * We are currently blocking all devices due to a host reset
6043 * We have told the host to stop giving us new requests, but
6044 * ERP ops don't count. FIXME
6046 if (unlikely(!hrrq
->allow_cmds
&& !hrrq
->ioa_is_dead
)) {
6047 spin_unlock_irqrestore(hrrq
->lock
, hrrq_flags
);
6048 return SCSI_MLQUEUE_HOST_BUSY
;
6052 * FIXME - Create scsi_set_host_offline interface
6053 * and the ioa_is_dead check can be removed
6055 if (unlikely(hrrq
->ioa_is_dead
|| !res
)) {
6056 spin_unlock_irqrestore(hrrq
->lock
, hrrq_flags
);
6060 ipr_cmd
= __ipr_get_free_ipr_cmnd(hrrq
);
6061 if (ipr_cmd
== NULL
) {
6062 spin_unlock_irqrestore(hrrq
->lock
, hrrq_flags
);
6063 return SCSI_MLQUEUE_HOST_BUSY
;
6065 spin_unlock_irqrestore(hrrq
->lock
, hrrq_flags
);
6067 ipr_init_ipr_cmnd(ipr_cmd
, ipr_scsi_done
);
6068 ioarcb
= &ipr_cmd
->ioarcb
;
6070 memcpy(ioarcb
->cmd_pkt
.cdb
, scsi_cmd
->cmnd
, scsi_cmd
->cmd_len
);
6071 ipr_cmd
->scsi_cmd
= scsi_cmd
;
6072 ipr_cmd
->done
= ipr_scsi_eh_done
;
6074 if (ipr_is_gscsi(res
) || ipr_is_vset_device(res
)) {
6075 if (scsi_cmd
->underflow
== 0)
6076 ioarcb
->cmd_pkt
.flags_hi
|= IPR_FLAGS_HI_NO_ULEN_CHK
;
6078 ioarcb
->cmd_pkt
.flags_hi
|= IPR_FLAGS_HI_NO_LINK_DESC
;
6079 if (ipr_is_gscsi(res
))
6080 ioarcb
->cmd_pkt
.flags_lo
|= IPR_FLAGS_LO_DELAY_AFTER_RST
;
6081 ioarcb
->cmd_pkt
.flags_lo
|= IPR_FLAGS_LO_ALIGNED_BFR
;
6082 ioarcb
->cmd_pkt
.flags_lo
|= ipr_get_task_attributes(scsi_cmd
);
6085 if (scsi_cmd
->cmnd
[0] >= 0xC0 &&
6086 (!ipr_is_gscsi(res
) || scsi_cmd
->cmnd
[0] == IPR_QUERY_RSRC_STATE
)) {
6087 ioarcb
->cmd_pkt
.request_type
= IPR_RQTYPE_IOACMD
;
6091 rc
= ipr_build_ioadl64(ioa_cfg
, ipr_cmd
);
6093 rc
= ipr_build_ioadl(ioa_cfg
, ipr_cmd
);
6095 spin_lock_irqsave(hrrq
->lock
, hrrq_flags
);
6096 if (unlikely(rc
|| (!hrrq
->allow_cmds
&& !hrrq
->ioa_is_dead
))) {
6097 list_add_tail(&ipr_cmd
->queue
, &hrrq
->hrrq_free_q
);
6098 spin_unlock_irqrestore(hrrq
->lock
, hrrq_flags
);
6100 scsi_dma_unmap(scsi_cmd
);
6101 return SCSI_MLQUEUE_HOST_BUSY
;
6104 if (unlikely(hrrq
->ioa_is_dead
)) {
6105 list_add_tail(&ipr_cmd
->queue
, &hrrq
->hrrq_free_q
);
6106 spin_unlock_irqrestore(hrrq
->lock
, hrrq_flags
);
6107 scsi_dma_unmap(scsi_cmd
);
6111 ioarcb
->res_handle
= res
->res_handle
;
6112 if (res
->needs_sync_complete
) {
6113 ioarcb
->cmd_pkt
.flags_hi
|= IPR_FLAGS_HI_SYNC_COMPLETE
;
6114 res
->needs_sync_complete
= 0;
6116 list_add_tail(&ipr_cmd
->queue
, &hrrq
->hrrq_pending_q
);
6117 ipr_trc_hook(ipr_cmd
, IPR_TRACE_START
, IPR_GET_RES_PHYS_LOC(res
));
6118 ipr_send_command(ipr_cmd
);
6119 spin_unlock_irqrestore(hrrq
->lock
, hrrq_flags
);
6123 spin_lock_irqsave(hrrq
->lock
, hrrq_flags
);
6124 memset(scsi_cmd
->sense_buffer
, 0, SCSI_SENSE_BUFFERSIZE
);
6125 scsi_cmd
->result
= (DID_NO_CONNECT
<< 16);
6126 scsi_cmd
->scsi_done(scsi_cmd
);
6127 spin_unlock_irqrestore(hrrq
->lock
, hrrq_flags
);
6132 * ipr_ioctl - IOCTL handler
6133 * @sdev: scsi device struct
6138 * 0 on success / other on failure
6140 static int ipr_ioctl(struct scsi_device
*sdev
, int cmd
, void __user
*arg
)
6142 struct ipr_resource_entry
*res
;
6144 res
= (struct ipr_resource_entry
*)sdev
->hostdata
;
6145 if (res
&& ipr_is_gata(res
)) {
6146 if (cmd
== HDIO_GET_IDENTITY
)
6148 return ata_sas_scsi_ioctl(res
->sata_port
->ap
, sdev
, cmd
, arg
);
6155 * ipr_info - Get information about the card/driver
6156 * @scsi_host: scsi host struct
6159 * pointer to buffer with description string
6161 static const char *ipr_ioa_info(struct Scsi_Host
*host
)
6163 static char buffer
[512];
6164 struct ipr_ioa_cfg
*ioa_cfg
;
6165 unsigned long lock_flags
= 0;
6167 ioa_cfg
= (struct ipr_ioa_cfg
*) host
->hostdata
;
6169 spin_lock_irqsave(host
->host_lock
, lock_flags
);
6170 sprintf(buffer
, "IBM %X Storage Adapter", ioa_cfg
->type
);
6171 spin_unlock_irqrestore(host
->host_lock
, lock_flags
);
6176 static struct scsi_host_template driver_template
= {
6177 .module
= THIS_MODULE
,
6179 .info
= ipr_ioa_info
,
6181 .queuecommand
= ipr_queuecommand
,
6182 .eh_abort_handler
= ipr_eh_abort
,
6183 .eh_device_reset_handler
= ipr_eh_dev_reset
,
6184 .eh_host_reset_handler
= ipr_eh_host_reset
,
6185 .slave_alloc
= ipr_slave_alloc
,
6186 .slave_configure
= ipr_slave_configure
,
6187 .slave_destroy
= ipr_slave_destroy
,
6188 .target_alloc
= ipr_target_alloc
,
6189 .target_destroy
= ipr_target_destroy
,
6190 .change_queue_depth
= ipr_change_queue_depth
,
6191 .change_queue_type
= ipr_change_queue_type
,
6192 .bios_param
= ipr_biosparam
,
6193 .can_queue
= IPR_MAX_COMMANDS
,
6195 .sg_tablesize
= IPR_MAX_SGLIST
,
6196 .max_sectors
= IPR_IOA_MAX_SECTORS
,
6197 .cmd_per_lun
= IPR_MAX_CMD_PER_LUN
,
6198 .use_clustering
= ENABLE_CLUSTERING
,
6199 .shost_attrs
= ipr_ioa_attrs
,
6200 .sdev_attrs
= ipr_dev_attrs
,
6201 .proc_name
= IPR_NAME
6205 * ipr_ata_phy_reset - libata phy_reset handler
6206 * @ap: ata port to reset
6209 static void ipr_ata_phy_reset(struct ata_port
*ap
)
6211 unsigned long flags
;
6212 struct ipr_sata_port
*sata_port
= ap
->private_data
;
6213 struct ipr_resource_entry
*res
= sata_port
->res
;
6214 struct ipr_ioa_cfg
*ioa_cfg
= sata_port
->ioa_cfg
;
6218 spin_lock_irqsave(ioa_cfg
->host
->host_lock
, flags
);
6219 while (ioa_cfg
->in_reset_reload
) {
6220 spin_unlock_irqrestore(ioa_cfg
->host
->host_lock
, flags
);
6221 wait_event(ioa_cfg
->reset_wait_q
, !ioa_cfg
->in_reset_reload
);
6222 spin_lock_irqsave(ioa_cfg
->host
->host_lock
, flags
);
6225 if (!ioa_cfg
->hrrq
[IPR_INIT_HRRQ
].allow_cmds
)
6228 rc
= ipr_device_reset(ioa_cfg
, res
);
6231 ap
->link
.device
[0].class = ATA_DEV_NONE
;
6235 ap
->link
.device
[0].class = res
->ata_class
;
6236 if (ap
->link
.device
[0].class == ATA_DEV_UNKNOWN
)
6237 ap
->link
.device
[0].class = ATA_DEV_NONE
;
6240 spin_unlock_irqrestore(ioa_cfg
->host
->host_lock
, flags
);
6245 * ipr_ata_post_internal - Cleanup after an internal command
6246 * @qc: ATA queued command
6251 static void ipr_ata_post_internal(struct ata_queued_cmd
*qc
)
6253 struct ipr_sata_port
*sata_port
= qc
->ap
->private_data
;
6254 struct ipr_ioa_cfg
*ioa_cfg
= sata_port
->ioa_cfg
;
6255 struct ipr_cmnd
*ipr_cmd
;
6256 struct ipr_hrr_queue
*hrrq
;
6257 unsigned long flags
;
6259 spin_lock_irqsave(ioa_cfg
->host
->host_lock
, flags
);
6260 while (ioa_cfg
->in_reset_reload
) {
6261 spin_unlock_irqrestore(ioa_cfg
->host
->host_lock
, flags
);
6262 wait_event(ioa_cfg
->reset_wait_q
, !ioa_cfg
->in_reset_reload
);
6263 spin_lock_irqsave(ioa_cfg
->host
->host_lock
, flags
);
6266 for_each_hrrq(hrrq
, ioa_cfg
) {
6267 spin_lock(&hrrq
->_lock
);
6268 list_for_each_entry(ipr_cmd
, &hrrq
->hrrq_pending_q
, queue
) {
6269 if (ipr_cmd
->qc
== qc
) {
6270 ipr_device_reset(ioa_cfg
, sata_port
->res
);
6274 spin_unlock(&hrrq
->_lock
);
6276 spin_unlock_irqrestore(ioa_cfg
->host
->host_lock
, flags
);
6280 * ipr_copy_sata_tf - Copy a SATA taskfile to an IOA data structure
6281 * @regs: destination
6282 * @tf: source ATA taskfile
6287 static void ipr_copy_sata_tf(struct ipr_ioarcb_ata_regs
*regs
,
6288 struct ata_taskfile
*tf
)
6290 regs
->feature
= tf
->feature
;
6291 regs
->nsect
= tf
->nsect
;
6292 regs
->lbal
= tf
->lbal
;
6293 regs
->lbam
= tf
->lbam
;
6294 regs
->lbah
= tf
->lbah
;
6295 regs
->device
= tf
->device
;
6296 regs
->command
= tf
->command
;
6297 regs
->hob_feature
= tf
->hob_feature
;
6298 regs
->hob_nsect
= tf
->hob_nsect
;
6299 regs
->hob_lbal
= tf
->hob_lbal
;
6300 regs
->hob_lbam
= tf
->hob_lbam
;
6301 regs
->hob_lbah
= tf
->hob_lbah
;
6302 regs
->ctl
= tf
->ctl
;
6306 * ipr_sata_done - done function for SATA commands
6307 * @ipr_cmd: ipr command struct
6309 * This function is invoked by the interrupt handler for
6310 * ops generated by the SCSI mid-layer to SATA devices
6315 static void ipr_sata_done(struct ipr_cmnd
*ipr_cmd
)
6317 struct ipr_ioa_cfg
*ioa_cfg
= ipr_cmd
->ioa_cfg
;
6318 struct ata_queued_cmd
*qc
= ipr_cmd
->qc
;
6319 struct ipr_sata_port
*sata_port
= qc
->ap
->private_data
;
6320 struct ipr_resource_entry
*res
= sata_port
->res
;
6321 u32 ioasc
= be32_to_cpu(ipr_cmd
->s
.ioasa
.hdr
.ioasc
);
6323 spin_lock(&ipr_cmd
->hrrq
->_lock
);
6324 if (ipr_cmd
->ioa_cfg
->sis64
)
6325 memcpy(&sata_port
->ioasa
, &ipr_cmd
->s
.ioasa64
.u
.gata
,
6326 sizeof(struct ipr_ioasa_gata
));
6328 memcpy(&sata_port
->ioasa
, &ipr_cmd
->s
.ioasa
.u
.gata
,
6329 sizeof(struct ipr_ioasa_gata
));
6330 ipr_dump_ioasa(ioa_cfg
, ipr_cmd
, res
);
6332 if (be32_to_cpu(ipr_cmd
->s
.ioasa
.hdr
.ioasc_specific
) & IPR_ATA_DEVICE_WAS_RESET
)
6333 scsi_report_device_reset(ioa_cfg
->host
, res
->bus
, res
->target
);
6335 if (IPR_IOASC_SENSE_KEY(ioasc
) > RECOVERED_ERROR
)
6336 qc
->err_mask
|= __ac_err_mask(sata_port
->ioasa
.status
);
6338 qc
->err_mask
|= ac_err_mask(sata_port
->ioasa
.status
);
6339 list_add_tail(&ipr_cmd
->queue
, &ipr_cmd
->hrrq
->hrrq_free_q
);
6340 spin_unlock(&ipr_cmd
->hrrq
->_lock
);
6341 ata_qc_complete(qc
);
6345 * ipr_build_ata_ioadl64 - Build an ATA scatter/gather list
6346 * @ipr_cmd: ipr command struct
6347 * @qc: ATA queued command
6350 static void ipr_build_ata_ioadl64(struct ipr_cmnd
*ipr_cmd
,
6351 struct ata_queued_cmd
*qc
)
6353 u32 ioadl_flags
= 0;
6354 struct ipr_ioarcb
*ioarcb
= &ipr_cmd
->ioarcb
;
6355 struct ipr_ioadl64_desc
*ioadl64
= ipr_cmd
->i
.ioadl64
;
6356 struct ipr_ioadl64_desc
*last_ioadl64
= NULL
;
6357 int len
= qc
->nbytes
;
6358 struct scatterlist
*sg
;
6360 dma_addr_t dma_addr
= ipr_cmd
->dma_addr
;
6365 if (qc
->dma_dir
== DMA_TO_DEVICE
) {
6366 ioadl_flags
= IPR_IOADL_FLAGS_WRITE
;
6367 ioarcb
->cmd_pkt
.flags_hi
|= IPR_FLAGS_HI_WRITE_NOT_READ
;
6368 } else if (qc
->dma_dir
== DMA_FROM_DEVICE
)
6369 ioadl_flags
= IPR_IOADL_FLAGS_READ
;
6371 ioarcb
->data_transfer_length
= cpu_to_be32(len
);
6373 cpu_to_be32(sizeof(struct ipr_ioadl64_desc
) * ipr_cmd
->dma_use_sg
);
6374 ioarcb
->u
.sis64_addr_data
.data_ioadl_addr
=
6375 cpu_to_be64(dma_addr
+ offsetof(struct ipr_cmnd
, i
.ata_ioadl
));
6377 for_each_sg(qc
->sg
, sg
, qc
->n_elem
, si
) {
6378 ioadl64
->flags
= cpu_to_be32(ioadl_flags
);
6379 ioadl64
->data_len
= cpu_to_be32(sg_dma_len(sg
));
6380 ioadl64
->address
= cpu_to_be64(sg_dma_address(sg
));
6382 last_ioadl64
= ioadl64
;
6386 if (likely(last_ioadl64
))
6387 last_ioadl64
->flags
|= cpu_to_be32(IPR_IOADL_FLAGS_LAST
);
6391 * ipr_build_ata_ioadl - Build an ATA scatter/gather list
6392 * @ipr_cmd: ipr command struct
6393 * @qc: ATA queued command
6396 static void ipr_build_ata_ioadl(struct ipr_cmnd
*ipr_cmd
,
6397 struct ata_queued_cmd
*qc
)
6399 u32 ioadl_flags
= 0;
6400 struct ipr_ioarcb
*ioarcb
= &ipr_cmd
->ioarcb
;
6401 struct ipr_ioadl_desc
*ioadl
= ipr_cmd
->i
.ioadl
;
6402 struct ipr_ioadl_desc
*last_ioadl
= NULL
;
6403 int len
= qc
->nbytes
;
6404 struct scatterlist
*sg
;
6410 if (qc
->dma_dir
== DMA_TO_DEVICE
) {
6411 ioadl_flags
= IPR_IOADL_FLAGS_WRITE
;
6412 ioarcb
->cmd_pkt
.flags_hi
|= IPR_FLAGS_HI_WRITE_NOT_READ
;
6413 ioarcb
->data_transfer_length
= cpu_to_be32(len
);
6415 cpu_to_be32(sizeof(struct ipr_ioadl_desc
) * ipr_cmd
->dma_use_sg
);
6416 } else if (qc
->dma_dir
== DMA_FROM_DEVICE
) {
6417 ioadl_flags
= IPR_IOADL_FLAGS_READ
;
6418 ioarcb
->read_data_transfer_length
= cpu_to_be32(len
);
6419 ioarcb
->read_ioadl_len
=
6420 cpu_to_be32(sizeof(struct ipr_ioadl_desc
) * ipr_cmd
->dma_use_sg
);
6423 for_each_sg(qc
->sg
, sg
, qc
->n_elem
, si
) {
6424 ioadl
->flags_and_data_len
= cpu_to_be32(ioadl_flags
| sg_dma_len(sg
));
6425 ioadl
->address
= cpu_to_be32(sg_dma_address(sg
));
6431 if (likely(last_ioadl
))
6432 last_ioadl
->flags_and_data_len
|= cpu_to_be32(IPR_IOADL_FLAGS_LAST
);
6436 * ipr_qc_defer - Get a free ipr_cmd
6437 * @qc: queued command
6442 static int ipr_qc_defer(struct ata_queued_cmd
*qc
)
6444 struct ata_port
*ap
= qc
->ap
;
6445 struct ipr_sata_port
*sata_port
= ap
->private_data
;
6446 struct ipr_ioa_cfg
*ioa_cfg
= sata_port
->ioa_cfg
;
6447 struct ipr_cmnd
*ipr_cmd
;
6448 struct ipr_hrr_queue
*hrrq
;
6451 hrrq_id
= ipr_get_hrrq_index(ioa_cfg
);
6452 hrrq
= &ioa_cfg
->hrrq
[hrrq_id
];
6454 qc
->lldd_task
= NULL
;
6455 spin_lock(&hrrq
->_lock
);
6456 if (unlikely(hrrq
->ioa_is_dead
)) {
6457 spin_unlock(&hrrq
->_lock
);
6461 if (unlikely(!hrrq
->allow_cmds
)) {
6462 spin_unlock(&hrrq
->_lock
);
6463 return ATA_DEFER_LINK
;
6466 ipr_cmd
= __ipr_get_free_ipr_cmnd(hrrq
);
6467 if (ipr_cmd
== NULL
) {
6468 spin_unlock(&hrrq
->_lock
);
6469 return ATA_DEFER_LINK
;
6472 qc
->lldd_task
= ipr_cmd
;
6473 spin_unlock(&hrrq
->_lock
);
6478 * ipr_qc_issue - Issue a SATA qc to a device
6479 * @qc: queued command
6484 static unsigned int ipr_qc_issue(struct ata_queued_cmd
*qc
)
6486 struct ata_port
*ap
= qc
->ap
;
6487 struct ipr_sata_port
*sata_port
= ap
->private_data
;
6488 struct ipr_resource_entry
*res
= sata_port
->res
;
6489 struct ipr_ioa_cfg
*ioa_cfg
= sata_port
->ioa_cfg
;
6490 struct ipr_cmnd
*ipr_cmd
;
6491 struct ipr_ioarcb
*ioarcb
;
6492 struct ipr_ioarcb_ata_regs
*regs
;
6494 if (qc
->lldd_task
== NULL
)
6497 ipr_cmd
= qc
->lldd_task
;
6498 if (ipr_cmd
== NULL
)
6499 return AC_ERR_SYSTEM
;
6501 qc
->lldd_task
= NULL
;
6502 spin_lock(&ipr_cmd
->hrrq
->_lock
);
6503 if (unlikely(!ipr_cmd
->hrrq
->allow_cmds
||
6504 ipr_cmd
->hrrq
->ioa_is_dead
)) {
6505 list_add_tail(&ipr_cmd
->queue
, &ipr_cmd
->hrrq
->hrrq_free_q
);
6506 spin_unlock(&ipr_cmd
->hrrq
->_lock
);
6507 return AC_ERR_SYSTEM
;
6510 ipr_init_ipr_cmnd(ipr_cmd
, ipr_lock_and_done
);
6511 ioarcb
= &ipr_cmd
->ioarcb
;
6513 if (ioa_cfg
->sis64
) {
6514 regs
= &ipr_cmd
->i
.ata_ioadl
.regs
;
6515 ioarcb
->add_cmd_parms_offset
= cpu_to_be16(sizeof(*ioarcb
));
6517 regs
= &ioarcb
->u
.add_data
.u
.regs
;
6519 memset(regs
, 0, sizeof(*regs
));
6520 ioarcb
->add_cmd_parms_len
= cpu_to_be16(sizeof(*regs
));
6522 list_add_tail(&ipr_cmd
->queue
, &ipr_cmd
->hrrq
->hrrq_pending_q
);
6524 ipr_cmd
->done
= ipr_sata_done
;
6525 ipr_cmd
->ioarcb
.res_handle
= res
->res_handle
;
6526 ioarcb
->cmd_pkt
.request_type
= IPR_RQTYPE_ATA_PASSTHRU
;
6527 ioarcb
->cmd_pkt
.flags_hi
|= IPR_FLAGS_HI_NO_LINK_DESC
;
6528 ioarcb
->cmd_pkt
.flags_hi
|= IPR_FLAGS_HI_NO_ULEN_CHK
;
6529 ipr_cmd
->dma_use_sg
= qc
->n_elem
;
6532 ipr_build_ata_ioadl64(ipr_cmd
, qc
);
6534 ipr_build_ata_ioadl(ipr_cmd
, qc
);
6536 regs
->flags
|= IPR_ATA_FLAG_STATUS_ON_GOOD_COMPLETION
;
6537 ipr_copy_sata_tf(regs
, &qc
->tf
);
6538 memcpy(ioarcb
->cmd_pkt
.cdb
, qc
->cdb
, IPR_MAX_CDB_LEN
);
6539 ipr_trc_hook(ipr_cmd
, IPR_TRACE_START
, IPR_GET_RES_PHYS_LOC(res
));
6541 switch (qc
->tf
.protocol
) {
6542 case ATA_PROT_NODATA
:
6547 regs
->flags
|= IPR_ATA_FLAG_XFER_TYPE_DMA
;
6550 case ATAPI_PROT_PIO
:
6551 case ATAPI_PROT_NODATA
:
6552 regs
->flags
|= IPR_ATA_FLAG_PACKET_CMD
;
6555 case ATAPI_PROT_DMA
:
6556 regs
->flags
|= IPR_ATA_FLAG_PACKET_CMD
;
6557 regs
->flags
|= IPR_ATA_FLAG_XFER_TYPE_DMA
;
6562 spin_unlock(&ipr_cmd
->hrrq
->_lock
);
6563 return AC_ERR_INVALID
;
6566 ipr_send_command(ipr_cmd
);
6567 spin_unlock(&ipr_cmd
->hrrq
->_lock
);
6573 * ipr_qc_fill_rtf - Read result TF
6574 * @qc: ATA queued command
6579 static bool ipr_qc_fill_rtf(struct ata_queued_cmd
*qc
)
6581 struct ipr_sata_port
*sata_port
= qc
->ap
->private_data
;
6582 struct ipr_ioasa_gata
*g
= &sata_port
->ioasa
;
6583 struct ata_taskfile
*tf
= &qc
->result_tf
;
6585 tf
->feature
= g
->error
;
6586 tf
->nsect
= g
->nsect
;
6590 tf
->device
= g
->device
;
6591 tf
->command
= g
->status
;
6592 tf
->hob_nsect
= g
->hob_nsect
;
6593 tf
->hob_lbal
= g
->hob_lbal
;
6594 tf
->hob_lbam
= g
->hob_lbam
;
6595 tf
->hob_lbah
= g
->hob_lbah
;
6596 tf
->ctl
= g
->alt_status
;
6601 static struct ata_port_operations ipr_sata_ops
= {
6602 .phy_reset
= ipr_ata_phy_reset
,
6603 .hardreset
= ipr_sata_reset
,
6604 .post_internal_cmd
= ipr_ata_post_internal
,
6605 .qc_prep
= ata_noop_qc_prep
,
6606 .qc_defer
= ipr_qc_defer
,
6607 .qc_issue
= ipr_qc_issue
,
6608 .qc_fill_rtf
= ipr_qc_fill_rtf
,
6609 .port_start
= ata_sas_port_start
,
6610 .port_stop
= ata_sas_port_stop
6613 static struct ata_port_info sata_port_info
= {
6614 .flags
= ATA_FLAG_SATA
| ATA_FLAG_PIO_DMA
,
6615 .pio_mask
= ATA_PIO4_ONLY
,
6616 .mwdma_mask
= ATA_MWDMA2
,
6617 .udma_mask
= ATA_UDMA6
,
6618 .port_ops
= &ipr_sata_ops
#ifdef CONFIG_PPC_PSERIES
/* PVR values of processors on which early Gemstone adapters misbehave.
 * NOTE(review): the array contents were lost in extraction — restored from
 * upstream ipr.c; verify against the tree this file belongs to. */
static const u16 ipr_blocked_processors[] = {
	PVR_NORTHSTAR,
	PVR_PULSAR,
	PVR_POWER4,
	PVR_ICESTAR,
	PVR_SSTAR,
	PVR_POWER4p,
	PVR_630,
	PVR_630p
};

/**
 * ipr_invalid_adapter - Determine if this adapter is supported on this hardware
 * @ioa_cfg:	ioa cfg struct
 *
 * Adapters that use Gemstone revision < 3.1 do not work reliably on
 * certain pSeries hardware. This function determines if the given
 * adapter is in one of these confgurations or not.
 *
 * Return value:
 * 	1 if adapter is not supported / 0 if adapter is supported
 **/
static int ipr_invalid_adapter(struct ipr_ioa_cfg *ioa_cfg)
{
	int i;

	if ((ioa_cfg->type == 0x5702) && (ioa_cfg->pdev->revision < 4)) {
		for (i = 0; i < ARRAY_SIZE(ipr_blocked_processors); i++) {
			if (pvr_version_is(ipr_blocked_processors[i]))
				return 1;
		}
	}
	return 0;
}
#else
#define ipr_invalid_adapter(ioa_cfg) 0
#endif
6661 * ipr_ioa_bringdown_done - IOA bring down completion.
6662 * @ipr_cmd: ipr command struct
6664 * This function processes the completion of an adapter bring down.
6665 * It wakes any reset sleepers.
6670 static int ipr_ioa_bringdown_done(struct ipr_cmnd
*ipr_cmd
)
6672 struct ipr_ioa_cfg
*ioa_cfg
= ipr_cmd
->ioa_cfg
;
6675 ioa_cfg
->in_reset_reload
= 0;
6676 ioa_cfg
->reset_retries
= 0;
6677 list_add_tail(&ipr_cmd
->queue
, &ipr_cmd
->hrrq
->hrrq_free_q
);
6678 wake_up_all(&ioa_cfg
->reset_wait_q
);
6680 spin_unlock_irq(ioa_cfg
->host
->host_lock
);
6681 scsi_unblock_requests(ioa_cfg
->host
);
6682 spin_lock_irq(ioa_cfg
->host
->host_lock
);
6685 return IPR_RC_JOB_RETURN
;
6689 * ipr_ioa_reset_done - IOA reset completion.
6690 * @ipr_cmd: ipr command struct
6692 * This function processes the completion of an adapter reset.
6693 * It schedules any necessary mid-layer add/removes and
6694 * wakes any reset sleepers.
6699 static int ipr_ioa_reset_done(struct ipr_cmnd
*ipr_cmd
)
6701 struct ipr_ioa_cfg
*ioa_cfg
= ipr_cmd
->ioa_cfg
;
6702 struct ipr_resource_entry
*res
;
6703 struct ipr_hostrcb
*hostrcb
, *temp
;
6707 ioa_cfg
->in_reset_reload
= 0;
6708 for (j
= 0; j
< ioa_cfg
->hrrq_num
; j
++) {
6709 spin_lock(&ioa_cfg
->hrrq
[j
]._lock
);
6710 ioa_cfg
->hrrq
[j
].allow_cmds
= 1;
6711 spin_unlock(&ioa_cfg
->hrrq
[j
]._lock
);
6714 ioa_cfg
->reset_cmd
= NULL
;
6715 ioa_cfg
->doorbell
|= IPR_RUNTIME_RESET
;
6717 list_for_each_entry(res
, &ioa_cfg
->used_res_q
, queue
) {
6718 if (ioa_cfg
->allow_ml_add_del
&& (res
->add_to_ml
|| res
->del_from_ml
)) {
6723 schedule_work(&ioa_cfg
->work_q
);
6725 list_for_each_entry_safe(hostrcb
, temp
, &ioa_cfg
->hostrcb_free_q
, queue
) {
6726 list_del(&hostrcb
->queue
);
6727 if (i
++ < IPR_NUM_LOG_HCAMS
)
6728 ipr_send_hcam(ioa_cfg
, IPR_HCAM_CDB_OP_CODE_LOG_DATA
, hostrcb
);
6730 ipr_send_hcam(ioa_cfg
, IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE
, hostrcb
);
6733 scsi_report_bus_reset(ioa_cfg
->host
, IPR_VSET_BUS
);
6734 dev_info(&ioa_cfg
->pdev
->dev
, "IOA initialized.\n");
6736 ioa_cfg
->reset_retries
= 0;
6737 list_add_tail(&ipr_cmd
->queue
, &ipr_cmd
->hrrq
->hrrq_free_q
);
6738 wake_up_all(&ioa_cfg
->reset_wait_q
);
6740 spin_unlock(ioa_cfg
->host
->host_lock
);
6741 scsi_unblock_requests(ioa_cfg
->host
);
6742 spin_lock(ioa_cfg
->host
->host_lock
);
6744 if (!ioa_cfg
->hrrq
[IPR_INIT_HRRQ
].allow_cmds
)
6745 scsi_block_requests(ioa_cfg
->host
);
6748 return IPR_RC_JOB_RETURN
;
6752 * ipr_set_sup_dev_dflt - Initialize a Set Supported Device buffer
6753 * @supported_dev: supported device struct
6754 * @vpids: vendor product id struct
6759 static void ipr_set_sup_dev_dflt(struct ipr_supported_device
*supported_dev
,
6760 struct ipr_std_inq_vpids
*vpids
)
6762 memset(supported_dev
, 0, sizeof(struct ipr_supported_device
));
6763 memcpy(&supported_dev
->vpids
, vpids
, sizeof(struct ipr_std_inq_vpids
));
6764 supported_dev
->num_records
= 1;
6765 supported_dev
->data_length
=
6766 cpu_to_be16(sizeof(struct ipr_supported_device
));
6767 supported_dev
->reserved
= 0;
6771 * ipr_set_supported_devs - Send Set Supported Devices for a device
6772 * @ipr_cmd: ipr command struct
6774 * This function sends a Set Supported Devices to the adapter
6777 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
6779 static int ipr_set_supported_devs(struct ipr_cmnd
*ipr_cmd
)
6781 struct ipr_ioa_cfg
*ioa_cfg
= ipr_cmd
->ioa_cfg
;
6782 struct ipr_supported_device
*supp_dev
= &ioa_cfg
->vpd_cbs
->supp_dev
;
6783 struct ipr_ioarcb
*ioarcb
= &ipr_cmd
->ioarcb
;
6784 struct ipr_resource_entry
*res
= ipr_cmd
->u
.res
;
6786 ipr_cmd
->job_step
= ipr_ioa_reset_done
;
6788 list_for_each_entry_continue(res
, &ioa_cfg
->used_res_q
, queue
) {
6789 if (!ipr_is_scsi_disk(res
))
6792 ipr_cmd
->u
.res
= res
;
6793 ipr_set_sup_dev_dflt(supp_dev
, &res
->std_inq_data
.vpids
);
6795 ioarcb
->res_handle
= cpu_to_be32(IPR_IOA_RES_HANDLE
);
6796 ioarcb
->cmd_pkt
.flags_hi
|= IPR_FLAGS_HI_WRITE_NOT_READ
;
6797 ioarcb
->cmd_pkt
.request_type
= IPR_RQTYPE_IOACMD
;
6799 ioarcb
->cmd_pkt
.cdb
[0] = IPR_SET_SUPPORTED_DEVICES
;
6800 ioarcb
->cmd_pkt
.cdb
[1] = IPR_SET_ALL_SUPPORTED_DEVICES
;
6801 ioarcb
->cmd_pkt
.cdb
[7] = (sizeof(struct ipr_supported_device
) >> 8) & 0xff;
6802 ioarcb
->cmd_pkt
.cdb
[8] = sizeof(struct ipr_supported_device
) & 0xff;
6804 ipr_init_ioadl(ipr_cmd
,
6805 ioa_cfg
->vpd_cbs_dma
+
6806 offsetof(struct ipr_misc_cbs
, supp_dev
),
6807 sizeof(struct ipr_supported_device
),
6808 IPR_IOADL_FLAGS_WRITE_LAST
);
6810 ipr_do_req(ipr_cmd
, ipr_reset_ioa_job
, ipr_timeout
,
6811 IPR_SET_SUP_DEVICE_TIMEOUT
);
6813 if (!ioa_cfg
->sis64
)
6814 ipr_cmd
->job_step
= ipr_set_supported_devs
;
6816 return IPR_RC_JOB_RETURN
;
6820 return IPR_RC_JOB_CONTINUE
;
6824 * ipr_get_mode_page - Locate specified mode page
6825 * @mode_pages: mode page buffer
6826 * @page_code: page code to find
6827 * @len: minimum required length for mode page
6830 * pointer to mode page / NULL on failure
6832 static void *ipr_get_mode_page(struct ipr_mode_pages
*mode_pages
,
6833 u32 page_code
, u32 len
)
6835 struct ipr_mode_page_hdr
*mode_hdr
;
6839 if (!mode_pages
|| (mode_pages
->hdr
.length
== 0))
6842 length
= (mode_pages
->hdr
.length
+ 1) - 4 - mode_pages
->hdr
.block_desc_len
;
6843 mode_hdr
= (struct ipr_mode_page_hdr
*)
6844 (mode_pages
->data
+ mode_pages
->hdr
.block_desc_len
);
6847 if (IPR_GET_MODE_PAGE_CODE(mode_hdr
) == page_code
) {
6848 if (mode_hdr
->page_length
>= (len
- sizeof(struct ipr_mode_page_hdr
)))
6852 page_length
= (sizeof(struct ipr_mode_page_hdr
) +
6853 mode_hdr
->page_length
);
6854 length
-= page_length
;
6855 mode_hdr
= (struct ipr_mode_page_hdr
*)
6856 ((unsigned long)mode_hdr
+ page_length
);
6863 * ipr_check_term_power - Check for term power errors
6864 * @ioa_cfg: ioa config struct
6865 * @mode_pages: IOAFP mode pages buffer
6867 * Check the IOAFP's mode page 28 for term power errors
6872 static void ipr_check_term_power(struct ipr_ioa_cfg
*ioa_cfg
,
6873 struct ipr_mode_pages
*mode_pages
)
6877 struct ipr_dev_bus_entry
*bus
;
6878 struct ipr_mode_page28
*mode_page
;
6880 mode_page
= ipr_get_mode_page(mode_pages
, 0x28,
6881 sizeof(struct ipr_mode_page28
));
6883 entry_length
= mode_page
->entry_length
;
6885 bus
= mode_page
->bus
;
6887 for (i
= 0; i
< mode_page
->num_entries
; i
++) {
6888 if (bus
->flags
& IPR_SCSI_ATTR_NO_TERM_PWR
) {
6889 dev_err(&ioa_cfg
->pdev
->dev
,
6890 "Term power is absent on scsi bus %d\n",
6894 bus
= (struct ipr_dev_bus_entry
*)((char *)bus
+ entry_length
);
6899 * ipr_scsi_bus_speed_limit - Limit the SCSI speed based on SES table
6900 * @ioa_cfg: ioa config struct
6902 * Looks through the config table checking for SES devices. If
6903 * the SES device is in the SES table indicating a maximum SCSI
6904 * bus speed, the speed is limited for the bus.
6909 static void ipr_scsi_bus_speed_limit(struct ipr_ioa_cfg
*ioa_cfg
)
6914 for (i
= 0; i
< IPR_MAX_NUM_BUSES
; i
++) {
6915 max_xfer_rate
= ipr_get_max_scsi_speed(ioa_cfg
, i
,
6916 ioa_cfg
->bus_attr
[i
].bus_width
);
6918 if (max_xfer_rate
< ioa_cfg
->bus_attr
[i
].max_xfer_rate
)
6919 ioa_cfg
->bus_attr
[i
].max_xfer_rate
= max_xfer_rate
;
6924 * ipr_modify_ioafp_mode_page_28 - Modify IOAFP Mode Page 28
6925 * @ioa_cfg: ioa config struct
6926 * @mode_pages: mode page 28 buffer
6928 * Updates mode page 28 based on driver configuration
6933 static void ipr_modify_ioafp_mode_page_28(struct ipr_ioa_cfg
*ioa_cfg
,
6934 struct ipr_mode_pages
*mode_pages
)
6936 int i
, entry_length
;
6937 struct ipr_dev_bus_entry
*bus
;
6938 struct ipr_bus_attributes
*bus_attr
;
6939 struct ipr_mode_page28
*mode_page
;
6941 mode_page
= ipr_get_mode_page(mode_pages
, 0x28,
6942 sizeof(struct ipr_mode_page28
));
6944 entry_length
= mode_page
->entry_length
;
6946 /* Loop for each device bus entry */
6947 for (i
= 0, bus
= mode_page
->bus
;
6948 i
< mode_page
->num_entries
;
6949 i
++, bus
= (struct ipr_dev_bus_entry
*)((u8
*)bus
+ entry_length
)) {
6950 if (bus
->res_addr
.bus
> IPR_MAX_NUM_BUSES
) {
6951 dev_err(&ioa_cfg
->pdev
->dev
,
6952 "Invalid resource address reported: 0x%08X\n",
6953 IPR_GET_PHYS_LOC(bus
->res_addr
));
6957 bus_attr
= &ioa_cfg
->bus_attr
[i
];
6958 bus
->extended_reset_delay
= IPR_EXTENDED_RESET_DELAY
;
6959 bus
->bus_width
= bus_attr
->bus_width
;
6960 bus
->max_xfer_rate
= cpu_to_be32(bus_attr
->max_xfer_rate
);
6961 bus
->flags
&= ~IPR_SCSI_ATTR_QAS_MASK
;
6962 if (bus_attr
->qas_enabled
)
6963 bus
->flags
|= IPR_SCSI_ATTR_ENABLE_QAS
;
6965 bus
->flags
|= IPR_SCSI_ATTR_DISABLE_QAS
;
6970 * ipr_build_mode_select - Build a mode select command
6971 * @ipr_cmd: ipr command struct
6972 * @res_handle: resource handle to send command to
6973 * @parm: Byte 2 of Mode Sense command
6974 * @dma_addr: DMA buffer address
6975 * @xfer_len: data transfer length
6980 static void ipr_build_mode_select(struct ipr_cmnd
*ipr_cmd
,
6981 __be32 res_handle
, u8 parm
,
6982 dma_addr_t dma_addr
, u8 xfer_len
)
6984 struct ipr_ioarcb
*ioarcb
= &ipr_cmd
->ioarcb
;
6986 ioarcb
->res_handle
= res_handle
;
6987 ioarcb
->cmd_pkt
.request_type
= IPR_RQTYPE_SCSICDB
;
6988 ioarcb
->cmd_pkt
.flags_hi
|= IPR_FLAGS_HI_WRITE_NOT_READ
;
6989 ioarcb
->cmd_pkt
.cdb
[0] = MODE_SELECT
;
6990 ioarcb
->cmd_pkt
.cdb
[1] = parm
;
6991 ioarcb
->cmd_pkt
.cdb
[4] = xfer_len
;
6993 ipr_init_ioadl(ipr_cmd
, dma_addr
, xfer_len
, IPR_IOADL_FLAGS_WRITE_LAST
);
6997 * ipr_ioafp_mode_select_page28 - Issue Mode Select Page 28 to IOA
6998 * @ipr_cmd: ipr command struct
7000 * This function sets up the SCSI bus attributes and sends
7001 * a Mode Select for Page 28 to activate them.
7006 static int ipr_ioafp_mode_select_page28(struct ipr_cmnd
*ipr_cmd
)
7008 struct ipr_ioa_cfg
*ioa_cfg
= ipr_cmd
->ioa_cfg
;
7009 struct ipr_mode_pages
*mode_pages
= &ioa_cfg
->vpd_cbs
->mode_pages
;
7013 ipr_scsi_bus_speed_limit(ioa_cfg
);
7014 ipr_check_term_power(ioa_cfg
, mode_pages
);
7015 ipr_modify_ioafp_mode_page_28(ioa_cfg
, mode_pages
);
7016 length
= mode_pages
->hdr
.length
+ 1;
7017 mode_pages
->hdr
.length
= 0;
7019 ipr_build_mode_select(ipr_cmd
, cpu_to_be32(IPR_IOA_RES_HANDLE
), 0x11,
7020 ioa_cfg
->vpd_cbs_dma
+ offsetof(struct ipr_misc_cbs
, mode_pages
),
7023 ipr_cmd
->job_step
= ipr_set_supported_devs
;
7024 ipr_cmd
->u
.res
= list_entry(ioa_cfg
->used_res_q
.next
,
7025 struct ipr_resource_entry
, queue
);
7026 ipr_do_req(ipr_cmd
, ipr_reset_ioa_job
, ipr_timeout
, IPR_INTERNAL_TIMEOUT
);
7029 return IPR_RC_JOB_RETURN
;
7033 * ipr_build_mode_sense - Builds a mode sense command
7034 * @ipr_cmd: ipr command struct
7035 * @res: resource entry struct
7036 * @parm: Byte 2 of mode sense command
7037 * @dma_addr: DMA address of mode sense buffer
7038 * @xfer_len: Size of DMA buffer
7043 static void ipr_build_mode_sense(struct ipr_cmnd
*ipr_cmd
,
7045 u8 parm
, dma_addr_t dma_addr
, u8 xfer_len
)
7047 struct ipr_ioarcb
*ioarcb
= &ipr_cmd
->ioarcb
;
7049 ioarcb
->res_handle
= res_handle
;
7050 ioarcb
->cmd_pkt
.cdb
[0] = MODE_SENSE
;
7051 ioarcb
->cmd_pkt
.cdb
[2] = parm
;
7052 ioarcb
->cmd_pkt
.cdb
[4] = xfer_len
;
7053 ioarcb
->cmd_pkt
.request_type
= IPR_RQTYPE_SCSICDB
;
7055 ipr_init_ioadl(ipr_cmd
, dma_addr
, xfer_len
, IPR_IOADL_FLAGS_READ_LAST
);
7059 * ipr_reset_cmd_failed - Handle failure of IOA reset command
7060 * @ipr_cmd: ipr command struct
7062 * This function handles the failure of an IOA bringup command.
7067 static int ipr_reset_cmd_failed(struct ipr_cmnd
*ipr_cmd
)
7069 struct ipr_ioa_cfg
*ioa_cfg
= ipr_cmd
->ioa_cfg
;
7070 u32 ioasc
= be32_to_cpu(ipr_cmd
->s
.ioasa
.hdr
.ioasc
);
7072 dev_err(&ioa_cfg
->pdev
->dev
,
7073 "0x%02X failed with IOASC: 0x%08X\n",
7074 ipr_cmd
->ioarcb
.cmd_pkt
.cdb
[0], ioasc
);
7076 ipr_initiate_ioa_reset(ioa_cfg
, IPR_SHUTDOWN_NONE
);
7077 list_add_tail(&ipr_cmd
->queue
, &ipr_cmd
->hrrq
->hrrq_free_q
);
7078 return IPR_RC_JOB_RETURN
;
7082 * ipr_reset_mode_sense_failed - Handle failure of IOAFP mode sense
7083 * @ipr_cmd: ipr command struct
7085 * This function handles the failure of a Mode Sense to the IOAFP.
7086 * Some adapters do not handle all mode pages.
7089 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
7091 static int ipr_reset_mode_sense_failed(struct ipr_cmnd
*ipr_cmd
)
7093 struct ipr_ioa_cfg
*ioa_cfg
= ipr_cmd
->ioa_cfg
;
7094 u32 ioasc
= be32_to_cpu(ipr_cmd
->s
.ioasa
.hdr
.ioasc
);
7096 if (ioasc
== IPR_IOASC_IR_INVALID_REQ_TYPE_OR_PKT
) {
7097 ipr_cmd
->job_step
= ipr_set_supported_devs
;
7098 ipr_cmd
->u
.res
= list_entry(ioa_cfg
->used_res_q
.next
,
7099 struct ipr_resource_entry
, queue
);
7100 return IPR_RC_JOB_CONTINUE
;
7103 return ipr_reset_cmd_failed(ipr_cmd
);
7107 * ipr_ioafp_mode_sense_page28 - Issue Mode Sense Page 28 to IOA
7108 * @ipr_cmd: ipr command struct
7110 * This function send a Page 28 mode sense to the IOA to
7111 * retrieve SCSI bus attributes.
7116 static int ipr_ioafp_mode_sense_page28(struct ipr_cmnd
*ipr_cmd
)
7118 struct ipr_ioa_cfg
*ioa_cfg
= ipr_cmd
->ioa_cfg
;
7121 ipr_build_mode_sense(ipr_cmd
, cpu_to_be32(IPR_IOA_RES_HANDLE
),
7122 0x28, ioa_cfg
->vpd_cbs_dma
+
7123 offsetof(struct ipr_misc_cbs
, mode_pages
),
7124 sizeof(struct ipr_mode_pages
));
7126 ipr_cmd
->job_step
= ipr_ioafp_mode_select_page28
;
7127 ipr_cmd
->job_step_failed
= ipr_reset_mode_sense_failed
;
7129 ipr_do_req(ipr_cmd
, ipr_reset_ioa_job
, ipr_timeout
, IPR_INTERNAL_TIMEOUT
);
7132 return IPR_RC_JOB_RETURN
;
7136 * ipr_ioafp_mode_select_page24 - Issue Mode Select to IOA
7137 * @ipr_cmd: ipr command struct
7139 * This function enables dual IOA RAID support if possible.
7144 static int ipr_ioafp_mode_select_page24(struct ipr_cmnd
*ipr_cmd
)
7146 struct ipr_ioa_cfg
*ioa_cfg
= ipr_cmd
->ioa_cfg
;
7147 struct ipr_mode_pages
*mode_pages
= &ioa_cfg
->vpd_cbs
->mode_pages
;
7148 struct ipr_mode_page24
*mode_page
;
7152 mode_page
= ipr_get_mode_page(mode_pages
, 0x24,
7153 sizeof(struct ipr_mode_page24
));
7156 mode_page
->flags
|= IPR_ENABLE_DUAL_IOA_AF
;
7158 length
= mode_pages
->hdr
.length
+ 1;
7159 mode_pages
->hdr
.length
= 0;
7161 ipr_build_mode_select(ipr_cmd
, cpu_to_be32(IPR_IOA_RES_HANDLE
), 0x11,
7162 ioa_cfg
->vpd_cbs_dma
+ offsetof(struct ipr_misc_cbs
, mode_pages
),
7165 ipr_cmd
->job_step
= ipr_ioafp_mode_sense_page28
;
7166 ipr_do_req(ipr_cmd
, ipr_reset_ioa_job
, ipr_timeout
, IPR_INTERNAL_TIMEOUT
);
7169 return IPR_RC_JOB_RETURN
;
7173 * ipr_reset_mode_sense_page24_failed - Handle failure of IOAFP mode sense
7174 * @ipr_cmd: ipr command struct
7176 * This function handles the failure of a Mode Sense to the IOAFP.
7177 * Some adapters do not handle all mode pages.
7180 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
7182 static int ipr_reset_mode_sense_page24_failed(struct ipr_cmnd
*ipr_cmd
)
7184 u32 ioasc
= be32_to_cpu(ipr_cmd
->s
.ioasa
.hdr
.ioasc
);
7186 if (ioasc
== IPR_IOASC_IR_INVALID_REQ_TYPE_OR_PKT
) {
7187 ipr_cmd
->job_step
= ipr_ioafp_mode_sense_page28
;
7188 return IPR_RC_JOB_CONTINUE
;
7191 return ipr_reset_cmd_failed(ipr_cmd
);
7195 * ipr_ioafp_mode_sense_page24 - Issue Page 24 Mode Sense to IOA
7196 * @ipr_cmd: ipr command struct
7198 * This function send a mode sense to the IOA to retrieve
7199 * the IOA Advanced Function Control mode page.
7204 static int ipr_ioafp_mode_sense_page24(struct ipr_cmnd
*ipr_cmd
)
7206 struct ipr_ioa_cfg
*ioa_cfg
= ipr_cmd
->ioa_cfg
;
7209 ipr_build_mode_sense(ipr_cmd
, cpu_to_be32(IPR_IOA_RES_HANDLE
),
7210 0x24, ioa_cfg
->vpd_cbs_dma
+
7211 offsetof(struct ipr_misc_cbs
, mode_pages
),
7212 sizeof(struct ipr_mode_pages
));
7214 ipr_cmd
->job_step
= ipr_ioafp_mode_select_page24
;
7215 ipr_cmd
->job_step_failed
= ipr_reset_mode_sense_page24_failed
;
7217 ipr_do_req(ipr_cmd
, ipr_reset_ioa_job
, ipr_timeout
, IPR_INTERNAL_TIMEOUT
);
7220 return IPR_RC_JOB_RETURN
;
7224 * ipr_init_res_table - Initialize the resource table
7225 * @ipr_cmd: ipr command struct
7227 * This function looks through the existing resource table, comparing
7228 * it with the config table. This function will take care of old/new
7229 * devices and schedule adding/removing them from the mid-layer
7233 * IPR_RC_JOB_CONTINUE
7235 static int ipr_init_res_table(struct ipr_cmnd
*ipr_cmd
)
7237 struct ipr_ioa_cfg
*ioa_cfg
= ipr_cmd
->ioa_cfg
;
7238 struct ipr_resource_entry
*res
, *temp
;
7239 struct ipr_config_table_entry_wrapper cfgtew
;
7240 int entries
, found
, flag
, i
;
7245 flag
= ioa_cfg
->u
.cfg_table64
->hdr64
.flags
;
7247 flag
= ioa_cfg
->u
.cfg_table
->hdr
.flags
;
7249 if (flag
& IPR_UCODE_DOWNLOAD_REQ
)
7250 dev_err(&ioa_cfg
->pdev
->dev
, "Microcode download required\n");
7252 list_for_each_entry_safe(res
, temp
, &ioa_cfg
->used_res_q
, queue
)
7253 list_move_tail(&res
->queue
, &old_res
);
7256 entries
= be16_to_cpu(ioa_cfg
->u
.cfg_table64
->hdr64
.num_entries
);
7258 entries
= ioa_cfg
->u
.cfg_table
->hdr
.num_entries
;
7260 for (i
= 0; i
< entries
; i
++) {
7262 cfgtew
.u
.cfgte64
= &ioa_cfg
->u
.cfg_table64
->dev
[i
];
7264 cfgtew
.u
.cfgte
= &ioa_cfg
->u
.cfg_table
->dev
[i
];
7267 list_for_each_entry_safe(res
, temp
, &old_res
, queue
) {
7268 if (ipr_is_same_device(res
, &cfgtew
)) {
7269 list_move_tail(&res
->queue
, &ioa_cfg
->used_res_q
);
7276 if (list_empty(&ioa_cfg
->free_res_q
)) {
7277 dev_err(&ioa_cfg
->pdev
->dev
, "Too many devices attached\n");
7282 res
= list_entry(ioa_cfg
->free_res_q
.next
,
7283 struct ipr_resource_entry
, queue
);
7284 list_move_tail(&res
->queue
, &ioa_cfg
->used_res_q
);
7285 ipr_init_res_entry(res
, &cfgtew
);
7287 } else if (res
->sdev
&& (ipr_is_vset_device(res
) || ipr_is_scsi_disk(res
)))
7288 res
->sdev
->allow_restart
= 1;
7291 ipr_update_res_entry(res
, &cfgtew
);
7294 list_for_each_entry_safe(res
, temp
, &old_res
, queue
) {
7296 res
->del_from_ml
= 1;
7297 res
->res_handle
= IPR_INVALID_RES_HANDLE
;
7298 list_move_tail(&res
->queue
, &ioa_cfg
->used_res_q
);
7302 list_for_each_entry_safe(res
, temp
, &old_res
, queue
) {
7303 ipr_clear_res_target(res
);
7304 list_move_tail(&res
->queue
, &ioa_cfg
->free_res_q
);
7307 if (ioa_cfg
->dual_raid
&& ipr_dual_ioa_raid
)
7308 ipr_cmd
->job_step
= ipr_ioafp_mode_sense_page24
;
7310 ipr_cmd
->job_step
= ipr_ioafp_mode_sense_page28
;
7313 return IPR_RC_JOB_CONTINUE
;
7317 * ipr_ioafp_query_ioa_cfg - Send a Query IOA Config to the adapter.
7318 * @ipr_cmd: ipr command struct
7320 * This function sends a Query IOA Configuration command
7321 * to the adapter to retrieve the IOA configuration table.
7326 static int ipr_ioafp_query_ioa_cfg(struct ipr_cmnd
*ipr_cmd
)
7328 struct ipr_ioa_cfg
*ioa_cfg
= ipr_cmd
->ioa_cfg
;
7329 struct ipr_ioarcb
*ioarcb
= &ipr_cmd
->ioarcb
;
7330 struct ipr_inquiry_page3
*ucode_vpd
= &ioa_cfg
->vpd_cbs
->page3_data
;
7331 struct ipr_inquiry_cap
*cap
= &ioa_cfg
->vpd_cbs
->cap
;
7334 if (cap
->cap
& IPR_CAP_DUAL_IOA_RAID
)
7335 ioa_cfg
->dual_raid
= 1;
7336 dev_info(&ioa_cfg
->pdev
->dev
, "Adapter firmware version: %02X%02X%02X%02X\n",
7337 ucode_vpd
->major_release
, ucode_vpd
->card_type
,
7338 ucode_vpd
->minor_release
[0], ucode_vpd
->minor_release
[1]);
7339 ioarcb
->cmd_pkt
.request_type
= IPR_RQTYPE_IOACMD
;
7340 ioarcb
->res_handle
= cpu_to_be32(IPR_IOA_RES_HANDLE
);
7342 ioarcb
->cmd_pkt
.cdb
[0] = IPR_QUERY_IOA_CONFIG
;
7343 ioarcb
->cmd_pkt
.cdb
[6] = (ioa_cfg
->cfg_table_size
>> 16) & 0xff;
7344 ioarcb
->cmd_pkt
.cdb
[7] = (ioa_cfg
->cfg_table_size
>> 8) & 0xff;
7345 ioarcb
->cmd_pkt
.cdb
[8] = ioa_cfg
->cfg_table_size
& 0xff;
7347 ipr_init_ioadl(ipr_cmd
, ioa_cfg
->cfg_table_dma
, ioa_cfg
->cfg_table_size
,
7348 IPR_IOADL_FLAGS_READ_LAST
);
7350 ipr_cmd
->job_step
= ipr_init_res_table
;
7352 ipr_do_req(ipr_cmd
, ipr_reset_ioa_job
, ipr_timeout
, IPR_INTERNAL_TIMEOUT
);
7355 return IPR_RC_JOB_RETURN
;
7359 * ipr_ioafp_inquiry - Send an Inquiry to the adapter.
7360 * @ipr_cmd: ipr command struct
7362 * This utility function sends an inquiry to the adapter.
7367 static void ipr_ioafp_inquiry(struct ipr_cmnd
*ipr_cmd
, u8 flags
, u8 page
,
7368 dma_addr_t dma_addr
, u8 xfer_len
)
7370 struct ipr_ioarcb
*ioarcb
= &ipr_cmd
->ioarcb
;
7373 ioarcb
->cmd_pkt
.request_type
= IPR_RQTYPE_SCSICDB
;
7374 ioarcb
->res_handle
= cpu_to_be32(IPR_IOA_RES_HANDLE
);
7376 ioarcb
->cmd_pkt
.cdb
[0] = INQUIRY
;
7377 ioarcb
->cmd_pkt
.cdb
[1] = flags
;
7378 ioarcb
->cmd_pkt
.cdb
[2] = page
;
7379 ioarcb
->cmd_pkt
.cdb
[4] = xfer_len
;
7381 ipr_init_ioadl(ipr_cmd
, dma_addr
, xfer_len
, IPR_IOADL_FLAGS_READ_LAST
);
7383 ipr_do_req(ipr_cmd
, ipr_reset_ioa_job
, ipr_timeout
, IPR_INTERNAL_TIMEOUT
);
7388 * ipr_inquiry_page_supported - Is the given inquiry page supported
7389 * @page0: inquiry page 0 buffer
7392 * This function determines if the specified inquiry page is supported.
7395 * 1 if page is supported / 0 if not
7397 static int ipr_inquiry_page_supported(struct ipr_inquiry_page0
*page0
, u8 page
)
7401 for (i
= 0; i
< min_t(u8
, page0
->len
, IPR_INQUIRY_PAGE0_ENTRIES
); i
++)
7402 if (page0
->page
[i
] == page
)
7409 * ipr_ioafp_cap_inquiry - Send a Page 0xD0 Inquiry to the adapter.
7410 * @ipr_cmd: ipr command struct
7412 * This function sends a Page 0xD0 inquiry to the adapter
7413 * to retrieve adapter capabilities.
7416 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
7418 static int ipr_ioafp_cap_inquiry(struct ipr_cmnd
*ipr_cmd
)
7420 struct ipr_ioa_cfg
*ioa_cfg
= ipr_cmd
->ioa_cfg
;
7421 struct ipr_inquiry_page0
*page0
= &ioa_cfg
->vpd_cbs
->page0_data
;
7422 struct ipr_inquiry_cap
*cap
= &ioa_cfg
->vpd_cbs
->cap
;
7425 ipr_cmd
->job_step
= ipr_ioafp_query_ioa_cfg
;
7426 memset(cap
, 0, sizeof(*cap
));
7428 if (ipr_inquiry_page_supported(page0
, 0xD0)) {
7429 ipr_ioafp_inquiry(ipr_cmd
, 1, 0xD0,
7430 ioa_cfg
->vpd_cbs_dma
+ offsetof(struct ipr_misc_cbs
, cap
),
7431 sizeof(struct ipr_inquiry_cap
));
7432 return IPR_RC_JOB_RETURN
;
7436 return IPR_RC_JOB_CONTINUE
;
7440 * ipr_ioafp_page3_inquiry - Send a Page 3 Inquiry to the adapter.
7441 * @ipr_cmd: ipr command struct
7443 * This function sends a Page 3 inquiry to the adapter
7444 * to retrieve software VPD information.
7447 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
7449 static int ipr_ioafp_page3_inquiry(struct ipr_cmnd
*ipr_cmd
)
7451 struct ipr_ioa_cfg
*ioa_cfg
= ipr_cmd
->ioa_cfg
;
7455 ipr_cmd
->job_step
= ipr_ioafp_cap_inquiry
;
7457 ipr_ioafp_inquiry(ipr_cmd
, 1, 3,
7458 ioa_cfg
->vpd_cbs_dma
+ offsetof(struct ipr_misc_cbs
, page3_data
),
7459 sizeof(struct ipr_inquiry_page3
));
7462 return IPR_RC_JOB_RETURN
;
7466 * ipr_ioafp_page0_inquiry - Send a Page 0 Inquiry to the adapter.
7467 * @ipr_cmd: ipr command struct
7469 * This function sends a Page 0 inquiry to the adapter
7470 * to retrieve supported inquiry pages.
7473 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
7475 static int ipr_ioafp_page0_inquiry(struct ipr_cmnd
*ipr_cmd
)
7477 struct ipr_ioa_cfg
*ioa_cfg
= ipr_cmd
->ioa_cfg
;
7482 /* Grab the type out of the VPD and store it away */
7483 memcpy(type
, ioa_cfg
->vpd_cbs
->ioa_vpd
.std_inq_data
.vpids
.product_id
, 4);
7485 ioa_cfg
->type
= simple_strtoul((char *)type
, NULL
, 16);
7487 ipr_cmd
->job_step
= ipr_ioafp_page3_inquiry
;
7489 ipr_ioafp_inquiry(ipr_cmd
, 1, 0,
7490 ioa_cfg
->vpd_cbs_dma
+ offsetof(struct ipr_misc_cbs
, page0_data
),
7491 sizeof(struct ipr_inquiry_page0
));
7494 return IPR_RC_JOB_RETURN
;
7498 * ipr_ioafp_std_inquiry - Send a Standard Inquiry to the adapter.
7499 * @ipr_cmd: ipr command struct
7501 * This function sends a standard inquiry to the adapter.
7506 static int ipr_ioafp_std_inquiry(struct ipr_cmnd
*ipr_cmd
)
7508 struct ipr_ioa_cfg
*ioa_cfg
= ipr_cmd
->ioa_cfg
;
7511 ipr_cmd
->job_step
= ipr_ioafp_page0_inquiry
;
7513 ipr_ioafp_inquiry(ipr_cmd
, 0, 0,
7514 ioa_cfg
->vpd_cbs_dma
+ offsetof(struct ipr_misc_cbs
, ioa_vpd
),
7515 sizeof(struct ipr_ioa_vpd
));
7518 return IPR_RC_JOB_RETURN
;
7522 * ipr_ioafp_identify_hrrq - Send Identify Host RRQ.
7523 * @ipr_cmd: ipr command struct
7525 * This function send an Identify Host Request Response Queue
7526 * command to establish the HRRQ with the adapter.
7531 static int ipr_ioafp_identify_hrrq(struct ipr_cmnd
*ipr_cmd
)
7533 struct ipr_ioa_cfg
*ioa_cfg
= ipr_cmd
->ioa_cfg
;
7534 struct ipr_ioarcb
*ioarcb
= &ipr_cmd
->ioarcb
;
7535 struct ipr_hrr_queue
*hrrq
;
7538 ipr_cmd
->job_step
= ipr_ioafp_std_inquiry
;
7539 dev_info(&ioa_cfg
->pdev
->dev
, "Starting IOA initialization sequence.\n");
7541 if (ioa_cfg
->identify_hrrq_index
< ioa_cfg
->hrrq_num
) {
7542 hrrq
= &ioa_cfg
->hrrq
[ioa_cfg
->identify_hrrq_index
];
7544 ioarcb
->cmd_pkt
.cdb
[0] = IPR_ID_HOST_RR_Q
;
7545 ioarcb
->res_handle
= cpu_to_be32(IPR_IOA_RES_HANDLE
);
7547 ioarcb
->cmd_pkt
.request_type
= IPR_RQTYPE_IOACMD
;
7549 ioarcb
->cmd_pkt
.cdb
[1] = 0x1;
7551 if (ioa_cfg
->nvectors
== 1)
7552 ioarcb
->cmd_pkt
.cdb
[1] &= ~IPR_ID_HRRQ_SELE_ENABLE
;
7554 ioarcb
->cmd_pkt
.cdb
[1] |= IPR_ID_HRRQ_SELE_ENABLE
;
7556 ioarcb
->cmd_pkt
.cdb
[2] =
7557 ((u64
) hrrq
->host_rrq_dma
>> 24) & 0xff;
7558 ioarcb
->cmd_pkt
.cdb
[3] =
7559 ((u64
) hrrq
->host_rrq_dma
>> 16) & 0xff;
7560 ioarcb
->cmd_pkt
.cdb
[4] =
7561 ((u64
) hrrq
->host_rrq_dma
>> 8) & 0xff;
7562 ioarcb
->cmd_pkt
.cdb
[5] =
7563 ((u64
) hrrq
->host_rrq_dma
) & 0xff;
7564 ioarcb
->cmd_pkt
.cdb
[7] =
7565 ((sizeof(u32
) * hrrq
->size
) >> 8) & 0xff;
7566 ioarcb
->cmd_pkt
.cdb
[8] =
7567 (sizeof(u32
) * hrrq
->size
) & 0xff;
7569 if (ioarcb
->cmd_pkt
.cdb
[1] & IPR_ID_HRRQ_SELE_ENABLE
)
7570 ioarcb
->cmd_pkt
.cdb
[9] =
7571 ioa_cfg
->identify_hrrq_index
;
7573 if (ioa_cfg
->sis64
) {
7574 ioarcb
->cmd_pkt
.cdb
[10] =
7575 ((u64
) hrrq
->host_rrq_dma
>> 56) & 0xff;
7576 ioarcb
->cmd_pkt
.cdb
[11] =
7577 ((u64
) hrrq
->host_rrq_dma
>> 48) & 0xff;
7578 ioarcb
->cmd_pkt
.cdb
[12] =
7579 ((u64
) hrrq
->host_rrq_dma
>> 40) & 0xff;
7580 ioarcb
->cmd_pkt
.cdb
[13] =
7581 ((u64
) hrrq
->host_rrq_dma
>> 32) & 0xff;
7584 if (ioarcb
->cmd_pkt
.cdb
[1] & IPR_ID_HRRQ_SELE_ENABLE
)
7585 ioarcb
->cmd_pkt
.cdb
[14] =
7586 ioa_cfg
->identify_hrrq_index
;
7588 ipr_do_req(ipr_cmd
, ipr_reset_ioa_job
, ipr_timeout
,
7589 IPR_INTERNAL_TIMEOUT
);
7591 if (++ioa_cfg
->identify_hrrq_index
< ioa_cfg
->hrrq_num
)
7592 ipr_cmd
->job_step
= ipr_ioafp_identify_hrrq
;
7595 return IPR_RC_JOB_RETURN
;
7599 return IPR_RC_JOB_CONTINUE
;
7603 * ipr_reset_timer_done - Adapter reset timer function
7604 * @ipr_cmd: ipr command struct
7606 * Description: This function is used in adapter reset processing
7607 * for timing events. If the reset_cmd pointer in the IOA
7608 * config struct is not this adapter's we are doing nested
7609 * resets and fail_all_ops will take care of freeing the
7615 static void ipr_reset_timer_done(struct ipr_cmnd
*ipr_cmd
)
7617 struct ipr_ioa_cfg
*ioa_cfg
= ipr_cmd
->ioa_cfg
;
7618 unsigned long lock_flags
= 0;
7620 spin_lock_irqsave(ioa_cfg
->host
->host_lock
, lock_flags
);
7622 if (ioa_cfg
->reset_cmd
== ipr_cmd
) {
7623 list_del(&ipr_cmd
->queue
);
7624 ipr_cmd
->done(ipr_cmd
);
7627 spin_unlock_irqrestore(ioa_cfg
->host
->host_lock
, lock_flags
);
7631 * ipr_reset_start_timer - Start a timer for adapter reset job
7632 * @ipr_cmd: ipr command struct
7633 * @timeout: timeout value
7635 * Description: This function is used in adapter reset processing
7636 * for timing events. If the reset_cmd pointer in the IOA
7637 * config struct is not this adapter's we are doing nested
7638 * resets and fail_all_ops will take care of freeing the
7644 static void ipr_reset_start_timer(struct ipr_cmnd
*ipr_cmd
,
7645 unsigned long timeout
)
7649 list_add_tail(&ipr_cmd
->queue
, &ipr_cmd
->hrrq
->hrrq_pending_q
);
7650 ipr_cmd
->done
= ipr_reset_ioa_job
;
7652 ipr_cmd
->timer
.data
= (unsigned long) ipr_cmd
;
7653 ipr_cmd
->timer
.expires
= jiffies
+ timeout
;
7654 ipr_cmd
->timer
.function
= (void (*)(unsigned long))ipr_reset_timer_done
;
7655 add_timer(&ipr_cmd
->timer
);
7659 * ipr_init_ioa_mem - Initialize ioa_cfg control block
7660 * @ioa_cfg: ioa cfg struct
7665 static void ipr_init_ioa_mem(struct ipr_ioa_cfg
*ioa_cfg
)
7667 struct ipr_hrr_queue
*hrrq
;
7669 for_each_hrrq(hrrq
, ioa_cfg
) {
7670 spin_lock(&hrrq
->_lock
);
7671 memset(hrrq
->host_rrq
, 0, sizeof(u32
) * hrrq
->size
);
7673 /* Initialize Host RRQ pointers */
7674 hrrq
->hrrq_start
= hrrq
->host_rrq
;
7675 hrrq
->hrrq_end
= &hrrq
->host_rrq
[hrrq
->size
- 1];
7676 hrrq
->hrrq_curr
= hrrq
->hrrq_start
;
7677 hrrq
->toggle_bit
= 1;
7678 spin_unlock(&hrrq
->_lock
);
7682 ioa_cfg
->identify_hrrq_index
= 0;
7683 if (ioa_cfg
->hrrq_num
== 1)
7684 atomic_set(&ioa_cfg
->hrrq_index
, 0);
7686 atomic_set(&ioa_cfg
->hrrq_index
, 1);
7688 /* Zero out config table */
7689 memset(ioa_cfg
->u
.cfg_table
, 0, ioa_cfg
->cfg_table_size
);
7693 * ipr_reset_next_stage - Process IPL stage change based on feedback register.
7694 * @ipr_cmd: ipr command struct
7697 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
7699 static int ipr_reset_next_stage(struct ipr_cmnd
*ipr_cmd
)
7701 unsigned long stage
, stage_time
;
7703 volatile u32 int_reg
;
7704 struct ipr_ioa_cfg
*ioa_cfg
= ipr_cmd
->ioa_cfg
;
7707 feedback
= readl(ioa_cfg
->regs
.init_feedback_reg
);
7708 stage
= feedback
& IPR_IPL_INIT_STAGE_MASK
;
7709 stage_time
= feedback
& IPR_IPL_INIT_STAGE_TIME_MASK
;
7711 ipr_dbg("IPL stage = 0x%lx, IPL stage time = %ld\n", stage
, stage_time
);
7713 /* sanity check the stage_time value */
7714 if (stage_time
== 0)
7715 stage_time
= IPR_IPL_INIT_DEFAULT_STAGE_TIME
;
7716 else if (stage_time
< IPR_IPL_INIT_MIN_STAGE_TIME
)
7717 stage_time
= IPR_IPL_INIT_MIN_STAGE_TIME
;
7718 else if (stage_time
> IPR_LONG_OPERATIONAL_TIMEOUT
)
7719 stage_time
= IPR_LONG_OPERATIONAL_TIMEOUT
;
7721 if (stage
== IPR_IPL_INIT_STAGE_UNKNOWN
) {
7722 writel(IPR_PCII_IPL_STAGE_CHANGE
, ioa_cfg
->regs
.set_interrupt_mask_reg
);
7723 int_reg
= readl(ioa_cfg
->regs
.sense_interrupt_mask_reg
);
7724 stage_time
= ioa_cfg
->transop_timeout
;
7725 ipr_cmd
->job_step
= ipr_ioafp_identify_hrrq
;
7726 } else if (stage
== IPR_IPL_INIT_STAGE_TRANSOP
) {
7727 int_reg
= readl(ioa_cfg
->regs
.sense_interrupt_reg32
);
7728 if (int_reg
& IPR_PCII_IOA_TRANS_TO_OPER
) {
7729 ipr_cmd
->job_step
= ipr_ioafp_identify_hrrq
;
7730 maskval
= IPR_PCII_IPL_STAGE_CHANGE
;
7731 maskval
= (maskval
<< 32) | IPR_PCII_IOA_TRANS_TO_OPER
;
7732 writeq(maskval
, ioa_cfg
->regs
.set_interrupt_mask_reg
);
7733 int_reg
= readl(ioa_cfg
->regs
.sense_interrupt_mask_reg
);
7734 return IPR_RC_JOB_CONTINUE
;
7738 ipr_cmd
->timer
.data
= (unsigned long) ipr_cmd
;
7739 ipr_cmd
->timer
.expires
= jiffies
+ stage_time
* HZ
;
7740 ipr_cmd
->timer
.function
= (void (*)(unsigned long))ipr_oper_timeout
;
7741 ipr_cmd
->done
= ipr_reset_ioa_job
;
7742 add_timer(&ipr_cmd
->timer
);
7744 list_add_tail(&ipr_cmd
->queue
, &ipr_cmd
->hrrq
->hrrq_pending_q
);
7746 return IPR_RC_JOB_RETURN
;
7750 * ipr_reset_enable_ioa - Enable the IOA following a reset.
7751 * @ipr_cmd: ipr command struct
7753 * This function reinitializes some control blocks and
7754 * enables destructive diagnostics on the adapter.
7759 static int ipr_reset_enable_ioa(struct ipr_cmnd
*ipr_cmd
)
7761 struct ipr_ioa_cfg
*ioa_cfg
= ipr_cmd
->ioa_cfg
;
7762 volatile u32 int_reg
;
7763 volatile u64 maskval
;
7767 ipr_cmd
->job_step
= ipr_ioafp_identify_hrrq
;
7768 ipr_init_ioa_mem(ioa_cfg
);
7770 for (i
= 0; i
< ioa_cfg
->hrrq_num
; i
++) {
7771 spin_lock(&ioa_cfg
->hrrq
[i
]._lock
);
7772 ioa_cfg
->hrrq
[i
].allow_interrupts
= 1;
7773 spin_unlock(&ioa_cfg
->hrrq
[i
]._lock
);
7776 if (ioa_cfg
->sis64
) {
7777 /* Set the adapter to the correct endian mode. */
7778 writel(IPR_ENDIAN_SWAP_KEY
, ioa_cfg
->regs
.endian_swap_reg
);
7779 int_reg
= readl(ioa_cfg
->regs
.endian_swap_reg
);
7782 int_reg
= readl(ioa_cfg
->regs
.sense_interrupt_reg32
);
7784 if (int_reg
& IPR_PCII_IOA_TRANS_TO_OPER
) {
7785 writel((IPR_PCII_ERROR_INTERRUPTS
| IPR_PCII_HRRQ_UPDATED
),
7786 ioa_cfg
->regs
.clr_interrupt_mask_reg32
);
7787 int_reg
= readl(ioa_cfg
->regs
.sense_interrupt_mask_reg
);
7788 return IPR_RC_JOB_CONTINUE
;
7791 /* Enable destructive diagnostics on IOA */
7792 writel(ioa_cfg
->doorbell
, ioa_cfg
->regs
.set_uproc_interrupt_reg32
);
7794 if (ioa_cfg
->sis64
) {
7795 maskval
= IPR_PCII_IPL_STAGE_CHANGE
;
7796 maskval
= (maskval
<< 32) | IPR_PCII_OPER_INTERRUPTS
;
7797 writeq(maskval
, ioa_cfg
->regs
.clr_interrupt_mask_reg
);
7799 writel(IPR_PCII_OPER_INTERRUPTS
, ioa_cfg
->regs
.clr_interrupt_mask_reg32
);
7801 int_reg
= readl(ioa_cfg
->regs
.sense_interrupt_mask_reg
);
7803 dev_info(&ioa_cfg
->pdev
->dev
, "Initializing IOA.\n");
7805 if (ioa_cfg
->sis64
) {
7806 ipr_cmd
->job_step
= ipr_reset_next_stage
;
7807 return IPR_RC_JOB_CONTINUE
;
7810 ipr_cmd
->timer
.data
= (unsigned long) ipr_cmd
;
7811 ipr_cmd
->timer
.expires
= jiffies
+ (ioa_cfg
->transop_timeout
* HZ
);
7812 ipr_cmd
->timer
.function
= (void (*)(unsigned long))ipr_oper_timeout
;
7813 ipr_cmd
->done
= ipr_reset_ioa_job
;
7814 add_timer(&ipr_cmd
->timer
);
7815 list_add_tail(&ipr_cmd
->queue
, &ipr_cmd
->hrrq
->hrrq_pending_q
);
7818 return IPR_RC_JOB_RETURN
;
7822 * ipr_reset_wait_for_dump - Wait for a dump to timeout.
7823 * @ipr_cmd: ipr command struct
7825 * This function is invoked when an adapter dump has run out
7826 * of processing time.
7829 * IPR_RC_JOB_CONTINUE
7831 static int ipr_reset_wait_for_dump(struct ipr_cmnd
*ipr_cmd
)
7833 struct ipr_ioa_cfg
*ioa_cfg
= ipr_cmd
->ioa_cfg
;
7835 if (ioa_cfg
->sdt_state
== GET_DUMP
)
7836 ioa_cfg
->sdt_state
= WAIT_FOR_DUMP
;
7837 else if (ioa_cfg
->sdt_state
== READ_DUMP
)
7838 ioa_cfg
->sdt_state
= ABORT_DUMP
;
7840 ioa_cfg
->dump_timeout
= 1;
7841 ipr_cmd
->job_step
= ipr_reset_alert
;
7843 return IPR_RC_JOB_CONTINUE
;
7847 * ipr_unit_check_no_data - Log a unit check/no data error log
7848 * @ioa_cfg: ioa config struct
7850 * Logs an error indicating the adapter unit checked, but for some
7851 * reason, we were unable to fetch the unit check buffer.
7856 static void ipr_unit_check_no_data(struct ipr_ioa_cfg
*ioa_cfg
)
7858 ioa_cfg
->errors_logged
++;
7859 dev_err(&ioa_cfg
->pdev
->dev
, "IOA unit check with no data\n");
7863 * ipr_get_unit_check_buffer - Get the unit check buffer from the IOA
7864 * @ioa_cfg: ioa config struct
7866 * Fetches the unit check buffer from the adapter by clocking the data
7867 * through the mailbox register.
7872 static void ipr_get_unit_check_buffer(struct ipr_ioa_cfg
*ioa_cfg
)
7874 unsigned long mailbox
;
7875 struct ipr_hostrcb
*hostrcb
;
7876 struct ipr_uc_sdt sdt
;
7880 mailbox
= readl(ioa_cfg
->ioa_mailbox
);
7882 if (!ioa_cfg
->sis64
&& !ipr_sdt_is_fmt2(mailbox
)) {
7883 ipr_unit_check_no_data(ioa_cfg
);
7887 memset(&sdt
, 0, sizeof(struct ipr_uc_sdt
));
7888 rc
= ipr_get_ldump_data_section(ioa_cfg
, mailbox
, (__be32
*) &sdt
,
7889 (sizeof(struct ipr_uc_sdt
)) / sizeof(__be32
));
7891 if (rc
|| !(sdt
.entry
[0].flags
& IPR_SDT_VALID_ENTRY
) ||
7892 ((be32_to_cpu(sdt
.hdr
.state
) != IPR_FMT3_SDT_READY_TO_USE
) &&
7893 (be32_to_cpu(sdt
.hdr
.state
) != IPR_FMT2_SDT_READY_TO_USE
))) {
7894 ipr_unit_check_no_data(ioa_cfg
);
7898 /* Find length of the first sdt entry (UC buffer) */
7899 if (be32_to_cpu(sdt
.hdr
.state
) == IPR_FMT3_SDT_READY_TO_USE
)
7900 length
= be32_to_cpu(sdt
.entry
[0].end_token
);
7902 length
= (be32_to_cpu(sdt
.entry
[0].end_token
) -
7903 be32_to_cpu(sdt
.entry
[0].start_token
)) &
7904 IPR_FMT2_MBX_ADDR_MASK
;
7906 hostrcb
= list_entry(ioa_cfg
->hostrcb_free_q
.next
,
7907 struct ipr_hostrcb
, queue
);
7908 list_del(&hostrcb
->queue
);
7909 memset(&hostrcb
->hcam
, 0, sizeof(hostrcb
->hcam
));
7911 rc
= ipr_get_ldump_data_section(ioa_cfg
,
7912 be32_to_cpu(sdt
.entry
[0].start_token
),
7913 (__be32
*)&hostrcb
->hcam
,
7914 min(length
, (int)sizeof(hostrcb
->hcam
)) / sizeof(__be32
));
7917 ipr_handle_log_data(ioa_cfg
, hostrcb
);
7918 ioasc
= be32_to_cpu(hostrcb
->hcam
.u
.error
.fd_ioasc
);
7919 if (ioasc
== IPR_IOASC_NR_IOA_RESET_REQUIRED
&&
7920 ioa_cfg
->sdt_state
== GET_DUMP
)
7921 ioa_cfg
->sdt_state
= WAIT_FOR_DUMP
;
7923 ipr_unit_check_no_data(ioa_cfg
);
7925 list_add_tail(&hostrcb
->queue
, &ioa_cfg
->hostrcb_free_q
);
7929 * ipr_reset_get_unit_check_job - Call to get the unit check buffer.
7930 * @ipr_cmd: ipr command struct
7932 * Description: This function will call to get the unit check buffer.
7937 static int ipr_reset_get_unit_check_job(struct ipr_cmnd
*ipr_cmd
)
7939 struct ipr_ioa_cfg
*ioa_cfg
= ipr_cmd
->ioa_cfg
;
7942 ioa_cfg
->ioa_unit_checked
= 0;
7943 ipr_get_unit_check_buffer(ioa_cfg
);
7944 ipr_cmd
->job_step
= ipr_reset_alert
;
7945 ipr_reset_start_timer(ipr_cmd
, 0);
7948 return IPR_RC_JOB_RETURN
;
7952 * ipr_reset_restore_cfg_space - Restore PCI config space.
7953 * @ipr_cmd: ipr command struct
7955 * Description: This function restores the saved PCI config space of
7956 * the adapter, fails all outstanding ops back to the callers, and
7957 * fetches the dump/unit check if applicable to this reset.
7960 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
7962 static int ipr_reset_restore_cfg_space(struct ipr_cmnd
*ipr_cmd
)
7964 struct ipr_ioa_cfg
*ioa_cfg
= ipr_cmd
->ioa_cfg
;
7968 ioa_cfg
->pdev
->state_saved
= true;
7969 pci_restore_state(ioa_cfg
->pdev
);
7971 if (ipr_set_pcix_cmd_reg(ioa_cfg
)) {
7972 ipr_cmd
->s
.ioasa
.hdr
.ioasc
= cpu_to_be32(IPR_IOASC_PCI_ACCESS_ERROR
);
7973 return IPR_RC_JOB_CONTINUE
;
7976 ipr_fail_all_ops(ioa_cfg
);
7978 if (ioa_cfg
->sis64
) {
7979 /* Set the adapter to the correct endian mode. */
7980 writel(IPR_ENDIAN_SWAP_KEY
, ioa_cfg
->regs
.endian_swap_reg
);
7981 int_reg
= readl(ioa_cfg
->regs
.endian_swap_reg
);
7984 if (ioa_cfg
->ioa_unit_checked
) {
7985 if (ioa_cfg
->sis64
) {
7986 ipr_cmd
->job_step
= ipr_reset_get_unit_check_job
;
7987 ipr_reset_start_timer(ipr_cmd
, IPR_DUMP_DELAY_TIMEOUT
);
7988 return IPR_RC_JOB_RETURN
;
7990 ioa_cfg
->ioa_unit_checked
= 0;
7991 ipr_get_unit_check_buffer(ioa_cfg
);
7992 ipr_cmd
->job_step
= ipr_reset_alert
;
7993 ipr_reset_start_timer(ipr_cmd
, 0);
7994 return IPR_RC_JOB_RETURN
;
7998 if (ioa_cfg
->in_ioa_bringdown
) {
7999 ipr_cmd
->job_step
= ipr_ioa_bringdown_done
;
8001 ipr_cmd
->job_step
= ipr_reset_enable_ioa
;
8003 if (GET_DUMP
== ioa_cfg
->sdt_state
) {
8004 ioa_cfg
->sdt_state
= READ_DUMP
;
8005 ioa_cfg
->dump_timeout
= 0;
8007 ipr_reset_start_timer(ipr_cmd
, IPR_SIS64_DUMP_TIMEOUT
);
8009 ipr_reset_start_timer(ipr_cmd
, IPR_SIS32_DUMP_TIMEOUT
);
8010 ipr_cmd
->job_step
= ipr_reset_wait_for_dump
;
8011 schedule_work(&ioa_cfg
->work_q
);
8012 return IPR_RC_JOB_RETURN
;
8017 return IPR_RC_JOB_CONTINUE
;
8021 * ipr_reset_bist_done - BIST has completed on the adapter.
8022 * @ipr_cmd: ipr command struct
8024 * Description: Unblock config space and resume the reset process.
8027 * IPR_RC_JOB_CONTINUE
8029 static int ipr_reset_bist_done(struct ipr_cmnd
*ipr_cmd
)
8031 struct ipr_ioa_cfg
*ioa_cfg
= ipr_cmd
->ioa_cfg
;
8034 if (ioa_cfg
->cfg_locked
)
8035 pci_cfg_access_unlock(ioa_cfg
->pdev
);
8036 ioa_cfg
->cfg_locked
= 0;
8037 ipr_cmd
->job_step
= ipr_reset_restore_cfg_space
;
8039 return IPR_RC_JOB_CONTINUE
;
8043 * ipr_reset_start_bist - Run BIST on the adapter.
8044 * @ipr_cmd: ipr command struct
8046 * Description: This function runs BIST on the adapter, then delays 2 seconds.
8049 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
8051 static int ipr_reset_start_bist(struct ipr_cmnd
*ipr_cmd
)
8053 struct ipr_ioa_cfg
*ioa_cfg
= ipr_cmd
->ioa_cfg
;
8054 int rc
= PCIBIOS_SUCCESSFUL
;
8057 if (ioa_cfg
->ipr_chip
->bist_method
== IPR_MMIO
)
8058 writel(IPR_UPROCI_SIS64_START_BIST
,
8059 ioa_cfg
->regs
.set_uproc_interrupt_reg32
);
8061 rc
= pci_write_config_byte(ioa_cfg
->pdev
, PCI_BIST
, PCI_BIST_START
);
8063 if (rc
== PCIBIOS_SUCCESSFUL
) {
8064 ipr_cmd
->job_step
= ipr_reset_bist_done
;
8065 ipr_reset_start_timer(ipr_cmd
, IPR_WAIT_FOR_BIST_TIMEOUT
);
8066 rc
= IPR_RC_JOB_RETURN
;
8068 if (ioa_cfg
->cfg_locked
)
8069 pci_cfg_access_unlock(ipr_cmd
->ioa_cfg
->pdev
);
8070 ioa_cfg
->cfg_locked
= 0;
8071 ipr_cmd
->s
.ioasa
.hdr
.ioasc
= cpu_to_be32(IPR_IOASC_PCI_ACCESS_ERROR
);
8072 rc
= IPR_RC_JOB_CONTINUE
;
8080 * ipr_reset_slot_reset_done - Clear PCI reset to the adapter
8081 * @ipr_cmd: ipr command struct
8083 * Description: This clears PCI reset to the adapter and delays two seconds.
8088 static int ipr_reset_slot_reset_done(struct ipr_cmnd
*ipr_cmd
)
8091 pci_set_pcie_reset_state(ipr_cmd
->ioa_cfg
->pdev
, pcie_deassert_reset
);
8092 ipr_cmd
->job_step
= ipr_reset_bist_done
;
8093 ipr_reset_start_timer(ipr_cmd
, IPR_WAIT_FOR_BIST_TIMEOUT
);
8095 return IPR_RC_JOB_RETURN
;
8099 * ipr_reset_slot_reset - Reset the PCI slot of the adapter.
8100 * @ipr_cmd: ipr command struct
8102 * Description: This asserts PCI reset to the adapter.
8107 static int ipr_reset_slot_reset(struct ipr_cmnd
*ipr_cmd
)
8109 struct ipr_ioa_cfg
*ioa_cfg
= ipr_cmd
->ioa_cfg
;
8110 struct pci_dev
*pdev
= ioa_cfg
->pdev
;
8113 pci_set_pcie_reset_state(pdev
, pcie_warm_reset
);
8114 ipr_cmd
->job_step
= ipr_reset_slot_reset_done
;
8115 ipr_reset_start_timer(ipr_cmd
, IPR_PCI_RESET_TIMEOUT
);
8117 return IPR_RC_JOB_RETURN
;
8121 * ipr_reset_block_config_access_wait - Wait for permission to block config access
8122 * @ipr_cmd: ipr command struct
8124 * Description: This attempts to block config access to the IOA.
8127 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
8129 static int ipr_reset_block_config_access_wait(struct ipr_cmnd
*ipr_cmd
)
8131 struct ipr_ioa_cfg
*ioa_cfg
= ipr_cmd
->ioa_cfg
;
8132 int rc
= IPR_RC_JOB_CONTINUE
;
8134 if (pci_cfg_access_trylock(ioa_cfg
->pdev
)) {
8135 ioa_cfg
->cfg_locked
= 1;
8136 ipr_cmd
->job_step
= ioa_cfg
->reset
;
8138 if (ipr_cmd
->u
.time_left
) {
8139 rc
= IPR_RC_JOB_RETURN
;
8140 ipr_cmd
->u
.time_left
-= IPR_CHECK_FOR_RESET_TIMEOUT
;
8141 ipr_reset_start_timer(ipr_cmd
,
8142 IPR_CHECK_FOR_RESET_TIMEOUT
);
8144 ipr_cmd
->job_step
= ioa_cfg
->reset
;
8145 dev_err(&ioa_cfg
->pdev
->dev
,
8146 "Timed out waiting to lock config access. Resetting anyway.\n");
8154 * ipr_reset_block_config_access - Block config access to the IOA
8155 * @ipr_cmd: ipr command struct
8157 * Description: This attempts to block config access to the IOA
8160 * IPR_RC_JOB_CONTINUE
8162 static int ipr_reset_block_config_access(struct ipr_cmnd
*ipr_cmd
)
8164 ipr_cmd
->ioa_cfg
->cfg_locked
= 0;
8165 ipr_cmd
->job_step
= ipr_reset_block_config_access_wait
;
8166 ipr_cmd
->u
.time_left
= IPR_WAIT_FOR_RESET_TIMEOUT
;
8167 return IPR_RC_JOB_CONTINUE
;
8171 * ipr_reset_allowed - Query whether or not IOA can be reset
8172 * @ioa_cfg: ioa config struct
8175 * 0 if reset not allowed / non-zero if reset is allowed
8177 static int ipr_reset_allowed(struct ipr_ioa_cfg
*ioa_cfg
)
8179 volatile u32 temp_reg
;
8181 temp_reg
= readl(ioa_cfg
->regs
.sense_interrupt_reg
);
8182 return ((temp_reg
& IPR_PCII_CRITICAL_OPERATION
) == 0);
8186 * ipr_reset_wait_to_start_bist - Wait for permission to reset IOA.
8187 * @ipr_cmd: ipr command struct
8189 * Description: This function waits for adapter permission to run BIST,
8190 * then runs BIST. If the adapter does not give permission after a
8191 * reasonable time, we will reset the adapter anyway. The impact of
8192 * resetting the adapter without warning the adapter is the risk of
8193 * losing the persistent error log on the adapter. If the adapter is
8194 * reset while it is writing to the flash on the adapter, the flash
8195 * segment will have bad ECC and be zeroed.
8198 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
8200 static int ipr_reset_wait_to_start_bist(struct ipr_cmnd
*ipr_cmd
)
8202 struct ipr_ioa_cfg
*ioa_cfg
= ipr_cmd
->ioa_cfg
;
8203 int rc
= IPR_RC_JOB_RETURN
;
8205 if (!ipr_reset_allowed(ioa_cfg
) && ipr_cmd
->u
.time_left
) {
8206 ipr_cmd
->u
.time_left
-= IPR_CHECK_FOR_RESET_TIMEOUT
;
8207 ipr_reset_start_timer(ipr_cmd
, IPR_CHECK_FOR_RESET_TIMEOUT
);
8209 ipr_cmd
->job_step
= ipr_reset_block_config_access
;
8210 rc
= IPR_RC_JOB_CONTINUE
;
8217 * ipr_reset_alert - Alert the adapter of a pending reset
8218 * @ipr_cmd: ipr command struct
8220 * Description: This function alerts the adapter that it will be reset.
8221 * If memory space is not currently enabled, proceed directly
8222 * to running BIST on the adapter. The timer must always be started
8223 * so we guarantee we do not run BIST from ipr_isr.
8228 static int ipr_reset_alert(struct ipr_cmnd
*ipr_cmd
)
8230 struct ipr_ioa_cfg
*ioa_cfg
= ipr_cmd
->ioa_cfg
;
8235 rc
= pci_read_config_word(ioa_cfg
->pdev
, PCI_COMMAND
, &cmd_reg
);
8237 if ((rc
== PCIBIOS_SUCCESSFUL
) && (cmd_reg
& PCI_COMMAND_MEMORY
)) {
8238 ipr_mask_and_clear_interrupts(ioa_cfg
, ~0);
8239 writel(IPR_UPROCI_RESET_ALERT
, ioa_cfg
->regs
.set_uproc_interrupt_reg32
);
8240 ipr_cmd
->job_step
= ipr_reset_wait_to_start_bist
;
8242 ipr_cmd
->job_step
= ipr_reset_block_config_access
;
8245 ipr_cmd
->u
.time_left
= IPR_WAIT_FOR_RESET_TIMEOUT
;
8246 ipr_reset_start_timer(ipr_cmd
, IPR_CHECK_FOR_RESET_TIMEOUT
);
8249 return IPR_RC_JOB_RETURN
;
8253 * ipr_reset_ucode_download_done - Microcode download completion
8254 * @ipr_cmd: ipr command struct
8256 * Description: This function unmaps the microcode download buffer.
8259 * IPR_RC_JOB_CONTINUE
8261 static int ipr_reset_ucode_download_done(struct ipr_cmnd
*ipr_cmd
)
8263 struct ipr_ioa_cfg
*ioa_cfg
= ipr_cmd
->ioa_cfg
;
8264 struct ipr_sglist
*sglist
= ioa_cfg
->ucode_sglist
;
8266 pci_unmap_sg(ioa_cfg
->pdev
, sglist
->scatterlist
,
8267 sglist
->num_sg
, DMA_TO_DEVICE
);
8269 ipr_cmd
->job_step
= ipr_reset_alert
;
8270 return IPR_RC_JOB_CONTINUE
;
8274 * ipr_reset_ucode_download - Download microcode to the adapter
8275 * @ipr_cmd: ipr command struct
8277 * Description: This function checks to see if it there is microcode
8278 * to download to the adapter. If there is, a download is performed.
8281 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
8283 static int ipr_reset_ucode_download(struct ipr_cmnd
*ipr_cmd
)
8285 struct ipr_ioa_cfg
*ioa_cfg
= ipr_cmd
->ioa_cfg
;
8286 struct ipr_sglist
*sglist
= ioa_cfg
->ucode_sglist
;
8289 ipr_cmd
->job_step
= ipr_reset_alert
;
8292 return IPR_RC_JOB_CONTINUE
;
8294 ipr_cmd
->ioarcb
.res_handle
= cpu_to_be32(IPR_IOA_RES_HANDLE
);
8295 ipr_cmd
->ioarcb
.cmd_pkt
.request_type
= IPR_RQTYPE_SCSICDB
;
8296 ipr_cmd
->ioarcb
.cmd_pkt
.cdb
[0] = WRITE_BUFFER
;
8297 ipr_cmd
->ioarcb
.cmd_pkt
.cdb
[1] = IPR_WR_BUF_DOWNLOAD_AND_SAVE
;
8298 ipr_cmd
->ioarcb
.cmd_pkt
.cdb
[6] = (sglist
->buffer_len
& 0xff0000) >> 16;
8299 ipr_cmd
->ioarcb
.cmd_pkt
.cdb
[7] = (sglist
->buffer_len
& 0x00ff00) >> 8;
8300 ipr_cmd
->ioarcb
.cmd_pkt
.cdb
[8] = sglist
->buffer_len
& 0x0000ff;
8303 ipr_build_ucode_ioadl64(ipr_cmd
, sglist
);
8305 ipr_build_ucode_ioadl(ipr_cmd
, sglist
);
8306 ipr_cmd
->job_step
= ipr_reset_ucode_download_done
;
8308 ipr_do_req(ipr_cmd
, ipr_reset_ioa_job
, ipr_timeout
,
8309 IPR_WRITE_BUFFER_TIMEOUT
);
8312 return IPR_RC_JOB_RETURN
;
8316 * ipr_reset_shutdown_ioa - Shutdown the adapter
8317 * @ipr_cmd: ipr command struct
8319 * Description: This function issues an adapter shutdown of the
8320 * specified type to the specified adapter as part of the
8321 * adapter reset job.
8324 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
8326 static int ipr_reset_shutdown_ioa(struct ipr_cmnd
*ipr_cmd
)
8328 struct ipr_ioa_cfg
*ioa_cfg
= ipr_cmd
->ioa_cfg
;
8329 enum ipr_shutdown_type shutdown_type
= ipr_cmd
->u
.shutdown_type
;
8330 unsigned long timeout
;
8331 int rc
= IPR_RC_JOB_CONTINUE
;
8334 if (shutdown_type
!= IPR_SHUTDOWN_NONE
&&
8335 !ioa_cfg
->hrrq
[IPR_INIT_HRRQ
].ioa_is_dead
) {
8336 ipr_cmd
->ioarcb
.res_handle
= cpu_to_be32(IPR_IOA_RES_HANDLE
);
8337 ipr_cmd
->ioarcb
.cmd_pkt
.request_type
= IPR_RQTYPE_IOACMD
;
8338 ipr_cmd
->ioarcb
.cmd_pkt
.cdb
[0] = IPR_IOA_SHUTDOWN
;
8339 ipr_cmd
->ioarcb
.cmd_pkt
.cdb
[1] = shutdown_type
;
8341 if (shutdown_type
== IPR_SHUTDOWN_NORMAL
)
8342 timeout
= IPR_SHUTDOWN_TIMEOUT
;
8343 else if (shutdown_type
== IPR_SHUTDOWN_PREPARE_FOR_NORMAL
)
8344 timeout
= IPR_INTERNAL_TIMEOUT
;
8345 else if (ioa_cfg
->dual_raid
&& ipr_dual_ioa_raid
)
8346 timeout
= IPR_DUAL_IOA_ABBR_SHUTDOWN_TO
;
8348 timeout
= IPR_ABBREV_SHUTDOWN_TIMEOUT
;
8350 ipr_do_req(ipr_cmd
, ipr_reset_ioa_job
, ipr_timeout
, timeout
);
8352 rc
= IPR_RC_JOB_RETURN
;
8353 ipr_cmd
->job_step
= ipr_reset_ucode_download
;
8355 ipr_cmd
->job_step
= ipr_reset_alert
;
8362 * ipr_reset_ioa_job - Adapter reset job
8363 * @ipr_cmd: ipr command struct
8365 * Description: This function is the job router for the adapter reset job.
8370 static void ipr_reset_ioa_job(struct ipr_cmnd
*ipr_cmd
)
8373 struct ipr_ioa_cfg
*ioa_cfg
= ipr_cmd
->ioa_cfg
;
8376 ioasc
= be32_to_cpu(ipr_cmd
->s
.ioasa
.hdr
.ioasc
);
8378 if (ioa_cfg
->reset_cmd
!= ipr_cmd
) {
8380 * We are doing nested adapter resets and this is
8381 * not the current reset job.
8383 list_add_tail(&ipr_cmd
->queue
,
8384 &ipr_cmd
->hrrq
->hrrq_free_q
);
8388 if (IPR_IOASC_SENSE_KEY(ioasc
)) {
8389 rc
= ipr_cmd
->job_step_failed(ipr_cmd
);
8390 if (rc
== IPR_RC_JOB_RETURN
)
8394 ipr_reinit_ipr_cmnd(ipr_cmd
);
8395 ipr_cmd
->job_step_failed
= ipr_reset_cmd_failed
;
8396 rc
= ipr_cmd
->job_step(ipr_cmd
);
8397 } while (rc
== IPR_RC_JOB_CONTINUE
);
8401 * _ipr_initiate_ioa_reset - Initiate an adapter reset
8402 * @ioa_cfg: ioa config struct
8403 * @job_step: first job step of reset job
8404 * @shutdown_type: shutdown type
8406 * Description: This function will initiate the reset of the given adapter
8407 * starting at the selected job step.
8408 * If the caller needs to wait on the completion of the reset,
8409 * the caller must sleep on the reset_wait_q.
8414 static void _ipr_initiate_ioa_reset(struct ipr_ioa_cfg
*ioa_cfg
,
8415 int (*job_step
) (struct ipr_cmnd
*),
8416 enum ipr_shutdown_type shutdown_type
)
8418 struct ipr_cmnd
*ipr_cmd
;
8421 ioa_cfg
->in_reset_reload
= 1;
8422 for (i
= 0; i
< ioa_cfg
->hrrq_num
; i
++) {
8423 spin_lock(&ioa_cfg
->hrrq
[i
]._lock
);
8424 ioa_cfg
->hrrq
[i
].allow_cmds
= 0;
8425 spin_unlock(&ioa_cfg
->hrrq
[i
]._lock
);
8428 scsi_block_requests(ioa_cfg
->host
);
8430 ipr_cmd
= ipr_get_free_ipr_cmnd(ioa_cfg
);
8431 ioa_cfg
->reset_cmd
= ipr_cmd
;
8432 ipr_cmd
->job_step
= job_step
;
8433 ipr_cmd
->u
.shutdown_type
= shutdown_type
;
8435 ipr_reset_ioa_job(ipr_cmd
);
8439 * ipr_initiate_ioa_reset - Initiate an adapter reset
8440 * @ioa_cfg: ioa config struct
8441 * @shutdown_type: shutdown type
8443 * Description: This function will initiate the reset of the given adapter.
8444 * If the caller needs to wait on the completion of the reset,
8445 * the caller must sleep on the reset_wait_q.
8450 static void ipr_initiate_ioa_reset(struct ipr_ioa_cfg
*ioa_cfg
,
8451 enum ipr_shutdown_type shutdown_type
)
8455 if (ioa_cfg
->hrrq
[IPR_INIT_HRRQ
].ioa_is_dead
)
8458 if (ioa_cfg
->in_reset_reload
) {
8459 if (ioa_cfg
->sdt_state
== GET_DUMP
)
8460 ioa_cfg
->sdt_state
= WAIT_FOR_DUMP
;
8461 else if (ioa_cfg
->sdt_state
== READ_DUMP
)
8462 ioa_cfg
->sdt_state
= ABORT_DUMP
;
8465 if (ioa_cfg
->reset_retries
++ >= IPR_NUM_RESET_RELOAD_RETRIES
) {
8466 dev_err(&ioa_cfg
->pdev
->dev
,
8467 "IOA taken offline - error recovery failed\n");
8469 ioa_cfg
->reset_retries
= 0;
8470 for (i
= 0; i
< ioa_cfg
->hrrq_num
; i
++) {
8471 spin_lock(&ioa_cfg
->hrrq
[i
]._lock
);
8472 ioa_cfg
->hrrq
[i
].ioa_is_dead
= 1;
8473 spin_unlock(&ioa_cfg
->hrrq
[i
]._lock
);
8477 if (ioa_cfg
->in_ioa_bringdown
) {
8478 ioa_cfg
->reset_cmd
= NULL
;
8479 ioa_cfg
->in_reset_reload
= 0;
8480 ipr_fail_all_ops(ioa_cfg
);
8481 wake_up_all(&ioa_cfg
->reset_wait_q
);
8483 spin_unlock_irq(ioa_cfg
->host
->host_lock
);
8484 scsi_unblock_requests(ioa_cfg
->host
);
8485 spin_lock_irq(ioa_cfg
->host
->host_lock
);
8488 ioa_cfg
->in_ioa_bringdown
= 1;
8489 shutdown_type
= IPR_SHUTDOWN_NONE
;
8493 _ipr_initiate_ioa_reset(ioa_cfg
, ipr_reset_shutdown_ioa
,
8498 * ipr_reset_freeze - Hold off all I/O activity
8499 * @ipr_cmd: ipr command struct
8501 * Description: If the PCI slot is frozen, hold off all I/O
8502 * activity; then, as soon as the slot is available again,
8503 * initiate an adapter reset.
8505 static int ipr_reset_freeze(struct ipr_cmnd
*ipr_cmd
)
8507 struct ipr_ioa_cfg
*ioa_cfg
= ipr_cmd
->ioa_cfg
;
8510 /* Disallow new interrupts, avoid loop */
8511 for (i
= 0; i
< ioa_cfg
->hrrq_num
; i
++) {
8512 spin_lock(&ioa_cfg
->hrrq
[i
]._lock
);
8513 ioa_cfg
->hrrq
[i
].allow_interrupts
= 0;
8514 spin_unlock(&ioa_cfg
->hrrq
[i
]._lock
);
8517 list_add_tail(&ipr_cmd
->queue
, &ipr_cmd
->hrrq
->hrrq_pending_q
);
8518 ipr_cmd
->done
= ipr_reset_ioa_job
;
8519 return IPR_RC_JOB_RETURN
;
8523 * ipr_pci_frozen - Called when slot has experienced a PCI bus error.
8524 * @pdev: PCI device struct
8526 * Description: This routine is called to tell us that the PCI bus
8527 * is down. Can't do anything here, except put the device driver
8528 * into a holding pattern, waiting for the PCI bus to come back.
8530 static void ipr_pci_frozen(struct pci_dev
*pdev
)
8532 unsigned long flags
= 0;
8533 struct ipr_ioa_cfg
*ioa_cfg
= pci_get_drvdata(pdev
);
8535 spin_lock_irqsave(ioa_cfg
->host
->host_lock
, flags
);
8536 _ipr_initiate_ioa_reset(ioa_cfg
, ipr_reset_freeze
, IPR_SHUTDOWN_NONE
);
8537 spin_unlock_irqrestore(ioa_cfg
->host
->host_lock
, flags
);
8541 * ipr_pci_slot_reset - Called when PCI slot has been reset.
8542 * @pdev: PCI device struct
8544 * Description: This routine is called by the pci error recovery
8545 * code after the PCI slot has been reset, just before we
8546 * should resume normal operations.
8548 static pci_ers_result_t
ipr_pci_slot_reset(struct pci_dev
*pdev
)
8550 unsigned long flags
= 0;
8551 struct ipr_ioa_cfg
*ioa_cfg
= pci_get_drvdata(pdev
);
8553 spin_lock_irqsave(ioa_cfg
->host
->host_lock
, flags
);
8554 if (ioa_cfg
->needs_warm_reset
)
8555 ipr_initiate_ioa_reset(ioa_cfg
, IPR_SHUTDOWN_NONE
);
8557 _ipr_initiate_ioa_reset(ioa_cfg
, ipr_reset_restore_cfg_space
,
8559 spin_unlock_irqrestore(ioa_cfg
->host
->host_lock
, flags
);
8560 return PCI_ERS_RESULT_RECOVERED
;
8564 * ipr_pci_perm_failure - Called when PCI slot is dead for good.
8565 * @pdev: PCI device struct
8567 * Description: This routine is called when the PCI bus has
8568 * permanently failed.
8570 static void ipr_pci_perm_failure(struct pci_dev
*pdev
)
8572 unsigned long flags
= 0;
8573 struct ipr_ioa_cfg
*ioa_cfg
= pci_get_drvdata(pdev
);
8576 spin_lock_irqsave(ioa_cfg
->host
->host_lock
, flags
);
8577 if (ioa_cfg
->sdt_state
== WAIT_FOR_DUMP
)
8578 ioa_cfg
->sdt_state
= ABORT_DUMP
;
8579 ioa_cfg
->reset_retries
= IPR_NUM_RESET_RELOAD_RETRIES
;
8580 ioa_cfg
->in_ioa_bringdown
= 1;
8581 for (i
= 0; i
< ioa_cfg
->hrrq_num
; i
++) {
8582 spin_lock(&ioa_cfg
->hrrq
[i
]._lock
);
8583 ioa_cfg
->hrrq
[i
].allow_cmds
= 0;
8584 spin_unlock(&ioa_cfg
->hrrq
[i
]._lock
);
8587 ipr_initiate_ioa_reset(ioa_cfg
, IPR_SHUTDOWN_NONE
);
8588 spin_unlock_irqrestore(ioa_cfg
->host
->host_lock
, flags
);
8592 * ipr_pci_error_detected - Called when a PCI error is detected.
8593 * @pdev: PCI device struct
8594 * @state: PCI channel state
8596 * Description: Called when a PCI error is detected.
8599 * PCI_ERS_RESULT_NEED_RESET or PCI_ERS_RESULT_DISCONNECT
8601 static pci_ers_result_t
ipr_pci_error_detected(struct pci_dev
*pdev
,
8602 pci_channel_state_t state
)
8605 case pci_channel_io_frozen
:
8606 ipr_pci_frozen(pdev
);
8607 return PCI_ERS_RESULT_NEED_RESET
;
8608 case pci_channel_io_perm_failure
:
8609 ipr_pci_perm_failure(pdev
);
8610 return PCI_ERS_RESULT_DISCONNECT
;
8615 return PCI_ERS_RESULT_NEED_RESET
;
8619 * ipr_probe_ioa_part2 - Initializes IOAs found in ipr_probe_ioa(..)
8620 * @ioa_cfg: ioa cfg struct
8622 * Description: This is the second phase of adapter intialization
8623 * This function takes care of initilizing the adapter to the point
8624 * where it can accept new commands.
8627 * 0 on success / -EIO on failure
8629 static int ipr_probe_ioa_part2(struct ipr_ioa_cfg
*ioa_cfg
)
8632 unsigned long host_lock_flags
= 0;
8635 spin_lock_irqsave(ioa_cfg
->host
->host_lock
, host_lock_flags
);
8636 dev_dbg(&ioa_cfg
->pdev
->dev
, "ioa_cfg adx: 0x%p\n", ioa_cfg
);
8637 if (ioa_cfg
->needs_hard_reset
) {
8638 ioa_cfg
->needs_hard_reset
= 0;
8639 ipr_initiate_ioa_reset(ioa_cfg
, IPR_SHUTDOWN_NONE
);
8641 _ipr_initiate_ioa_reset(ioa_cfg
, ipr_reset_enable_ioa
,
8643 spin_unlock_irqrestore(ioa_cfg
->host
->host_lock
, host_lock_flags
);
8644 wait_event(ioa_cfg
->reset_wait_q
, !ioa_cfg
->in_reset_reload
);
8645 spin_lock_irqsave(ioa_cfg
->host
->host_lock
, host_lock_flags
);
8647 if (ioa_cfg
->hrrq
[IPR_INIT_HRRQ
].ioa_is_dead
) {
8649 } else if (ipr_invalid_adapter(ioa_cfg
)) {
8653 dev_err(&ioa_cfg
->pdev
->dev
,
8654 "Adapter not supported in this hardware configuration.\n");
8657 spin_unlock_irqrestore(ioa_cfg
->host
->host_lock
, host_lock_flags
);
8664 * ipr_free_cmd_blks - Frees command blocks allocated for an adapter
8665 * @ioa_cfg: ioa config struct
8670 static void ipr_free_cmd_blks(struct ipr_ioa_cfg
*ioa_cfg
)
8674 for (i
= 0; i
< IPR_NUM_CMD_BLKS
; i
++) {
8675 if (ioa_cfg
->ipr_cmnd_list
[i
])
8676 pci_pool_free(ioa_cfg
->ipr_cmd_pool
,
8677 ioa_cfg
->ipr_cmnd_list
[i
],
8678 ioa_cfg
->ipr_cmnd_list_dma
[i
]);
8680 ioa_cfg
->ipr_cmnd_list
[i
] = NULL
;
8683 if (ioa_cfg
->ipr_cmd_pool
)
8684 pci_pool_destroy(ioa_cfg
->ipr_cmd_pool
);
8686 kfree(ioa_cfg
->ipr_cmnd_list
);
8687 kfree(ioa_cfg
->ipr_cmnd_list_dma
);
8688 ioa_cfg
->ipr_cmnd_list
= NULL
;
8689 ioa_cfg
->ipr_cmnd_list_dma
= NULL
;
8690 ioa_cfg
->ipr_cmd_pool
= NULL
;
8694 * ipr_free_mem - Frees memory allocated for an adapter
8695 * @ioa_cfg: ioa cfg struct
8700 static void ipr_free_mem(struct ipr_ioa_cfg
*ioa_cfg
)
8704 kfree(ioa_cfg
->res_entries
);
8705 pci_free_consistent(ioa_cfg
->pdev
, sizeof(struct ipr_misc_cbs
),
8706 ioa_cfg
->vpd_cbs
, ioa_cfg
->vpd_cbs_dma
);
8707 ipr_free_cmd_blks(ioa_cfg
);
8709 for (i
= 0; i
< ioa_cfg
->hrrq_num
; i
++)
8710 pci_free_consistent(ioa_cfg
->pdev
,
8711 sizeof(u32
) * ioa_cfg
->hrrq
[i
].size
,
8712 ioa_cfg
->hrrq
[i
].host_rrq
,
8713 ioa_cfg
->hrrq
[i
].host_rrq_dma
);
8715 pci_free_consistent(ioa_cfg
->pdev
, ioa_cfg
->cfg_table_size
,
8716 ioa_cfg
->u
.cfg_table
,
8717 ioa_cfg
->cfg_table_dma
);
8719 for (i
= 0; i
< IPR_NUM_HCAMS
; i
++) {
8720 pci_free_consistent(ioa_cfg
->pdev
,
8721 sizeof(struct ipr_hostrcb
),
8722 ioa_cfg
->hostrcb
[i
],
8723 ioa_cfg
->hostrcb_dma
[i
]);
8726 ipr_free_dump(ioa_cfg
);
8727 kfree(ioa_cfg
->trace
);
8731 * ipr_free_all_resources - Free all allocated resources for an adapter.
8732 * @ipr_cmd: ipr command struct
8734 * This function frees all allocated resources for the
8735 * specified adapter.
8740 static void ipr_free_all_resources(struct ipr_ioa_cfg
*ioa_cfg
)
8742 struct pci_dev
*pdev
= ioa_cfg
->pdev
;
8745 if (ioa_cfg
->intr_flag
== IPR_USE_MSI
||
8746 ioa_cfg
->intr_flag
== IPR_USE_MSIX
) {
8748 for (i
= 0; i
< ioa_cfg
->nvectors
; i
++)
8749 free_irq(ioa_cfg
->vectors_info
[i
].vec
,
8752 free_irq(pdev
->irq
, &ioa_cfg
->hrrq
[0]);
8754 if (ioa_cfg
->intr_flag
== IPR_USE_MSI
) {
8755 pci_disable_msi(pdev
);
8756 ioa_cfg
->intr_flag
&= ~IPR_USE_MSI
;
8757 } else if (ioa_cfg
->intr_flag
== IPR_USE_MSIX
) {
8758 pci_disable_msix(pdev
);
8759 ioa_cfg
->intr_flag
&= ~IPR_USE_MSIX
;
8762 iounmap(ioa_cfg
->hdw_dma_regs
);
8763 pci_release_regions(pdev
);
8764 ipr_free_mem(ioa_cfg
);
8765 scsi_host_put(ioa_cfg
->host
);
8766 pci_disable_device(pdev
);
8771 * ipr_alloc_cmd_blks - Allocate command blocks for an adapter
8772 * @ioa_cfg: ioa config struct
8775 * 0 on success / -ENOMEM on allocation failure
8777 static int ipr_alloc_cmd_blks(struct ipr_ioa_cfg
*ioa_cfg
)
8779 struct ipr_cmnd
*ipr_cmd
;
8780 struct ipr_ioarcb
*ioarcb
;
8781 dma_addr_t dma_addr
;
8782 int i
, entries_each_hrrq
, hrrq_id
= 0;
8784 ioa_cfg
->ipr_cmd_pool
= pci_pool_create(IPR_NAME
, ioa_cfg
->pdev
,
8785 sizeof(struct ipr_cmnd
), 512, 0);
8787 if (!ioa_cfg
->ipr_cmd_pool
)
8790 ioa_cfg
->ipr_cmnd_list
= kcalloc(IPR_NUM_CMD_BLKS
, sizeof(struct ipr_cmnd
*), GFP_KERNEL
);
8791 ioa_cfg
->ipr_cmnd_list_dma
= kcalloc(IPR_NUM_CMD_BLKS
, sizeof(dma_addr_t
), GFP_KERNEL
);
8793 if (!ioa_cfg
->ipr_cmnd_list
|| !ioa_cfg
->ipr_cmnd_list_dma
) {
8794 ipr_free_cmd_blks(ioa_cfg
);
8798 for (i
= 0; i
< ioa_cfg
->hrrq_num
; i
++) {
8799 if (ioa_cfg
->hrrq_num
> 1) {
8801 entries_each_hrrq
= IPR_NUM_INTERNAL_CMD_BLKS
;
8802 ioa_cfg
->hrrq
[i
].min_cmd_id
= 0;
8803 ioa_cfg
->hrrq
[i
].max_cmd_id
=
8804 (entries_each_hrrq
- 1);
8807 IPR_NUM_BASE_CMD_BLKS
/
8808 (ioa_cfg
->hrrq_num
- 1);
8809 ioa_cfg
->hrrq
[i
].min_cmd_id
=
8810 IPR_NUM_INTERNAL_CMD_BLKS
+
8811 (i
- 1) * entries_each_hrrq
;
8812 ioa_cfg
->hrrq
[i
].max_cmd_id
=
8813 (IPR_NUM_INTERNAL_CMD_BLKS
+
8814 i
* entries_each_hrrq
- 1);
8817 entries_each_hrrq
= IPR_NUM_CMD_BLKS
;
8818 ioa_cfg
->hrrq
[i
].min_cmd_id
= 0;
8819 ioa_cfg
->hrrq
[i
].max_cmd_id
= (entries_each_hrrq
- 1);
8821 ioa_cfg
->hrrq
[i
].size
= entries_each_hrrq
;
8824 BUG_ON(ioa_cfg
->hrrq_num
== 0);
8826 i
= IPR_NUM_CMD_BLKS
-
8827 ioa_cfg
->hrrq
[ioa_cfg
->hrrq_num
- 1].max_cmd_id
- 1;
8829 ioa_cfg
->hrrq
[ioa_cfg
->hrrq_num
- 1].size
+= i
;
8830 ioa_cfg
->hrrq
[ioa_cfg
->hrrq_num
- 1].max_cmd_id
+= i
;
8833 for (i
= 0; i
< IPR_NUM_CMD_BLKS
; i
++) {
8834 ipr_cmd
= pci_pool_alloc(ioa_cfg
->ipr_cmd_pool
, GFP_KERNEL
, &dma_addr
);
8837 ipr_free_cmd_blks(ioa_cfg
);
8841 memset(ipr_cmd
, 0, sizeof(*ipr_cmd
));
8842 ioa_cfg
->ipr_cmnd_list
[i
] = ipr_cmd
;
8843 ioa_cfg
->ipr_cmnd_list_dma
[i
] = dma_addr
;
8845 ioarcb
= &ipr_cmd
->ioarcb
;
8846 ipr_cmd
->dma_addr
= dma_addr
;
8848 ioarcb
->a
.ioarcb_host_pci_addr64
= cpu_to_be64(dma_addr
);
8850 ioarcb
->a
.ioarcb_host_pci_addr
= cpu_to_be32(dma_addr
);
8852 ioarcb
->host_response_handle
= cpu_to_be32(i
<< 2);
8853 if (ioa_cfg
->sis64
) {
8854 ioarcb
->u
.sis64_addr_data
.data_ioadl_addr
=
8855 cpu_to_be64(dma_addr
+ offsetof(struct ipr_cmnd
, i
.ioadl64
));
8856 ioarcb
->u
.sis64_addr_data
.ioasa_host_pci_addr
=
8857 cpu_to_be64(dma_addr
+ offsetof(struct ipr_cmnd
, s
.ioasa64
));
8859 ioarcb
->write_ioadl_addr
=
8860 cpu_to_be32(dma_addr
+ offsetof(struct ipr_cmnd
, i
.ioadl
));
8861 ioarcb
->read_ioadl_addr
= ioarcb
->write_ioadl_addr
;
8862 ioarcb
->ioasa_host_pci_addr
=
8863 cpu_to_be32(dma_addr
+ offsetof(struct ipr_cmnd
, s
.ioasa
));
8865 ioarcb
->ioasa_len
= cpu_to_be16(sizeof(struct ipr_ioasa
));
8866 ipr_cmd
->cmd_index
= i
;
8867 ipr_cmd
->ioa_cfg
= ioa_cfg
;
8868 ipr_cmd
->sense_buffer_dma
= dma_addr
+
8869 offsetof(struct ipr_cmnd
, sense_buffer
);
8871 ipr_cmd
->ioarcb
.cmd_pkt
.hrrq_id
= hrrq_id
;
8872 ipr_cmd
->hrrq
= &ioa_cfg
->hrrq
[hrrq_id
];
8873 list_add_tail(&ipr_cmd
->queue
, &ipr_cmd
->hrrq
->hrrq_free_q
);
8874 if (i
>= ioa_cfg
->hrrq
[hrrq_id
].max_cmd_id
)
8882 * ipr_alloc_mem - Allocate memory for an adapter
8883 * @ioa_cfg: ioa config struct
8886 * 0 on success / non-zero for error
8888 static int ipr_alloc_mem(struct ipr_ioa_cfg
*ioa_cfg
)
8890 struct pci_dev
*pdev
= ioa_cfg
->pdev
;
8891 int i
, rc
= -ENOMEM
;
8894 ioa_cfg
->res_entries
= kzalloc(sizeof(struct ipr_resource_entry
) *
8895 ioa_cfg
->max_devs_supported
, GFP_KERNEL
);
8897 if (!ioa_cfg
->res_entries
)
8900 if (ioa_cfg
->sis64
) {
8901 ioa_cfg
->target_ids
= kzalloc(sizeof(unsigned long) *
8902 BITS_TO_LONGS(ioa_cfg
->max_devs_supported
), GFP_KERNEL
);
8903 ioa_cfg
->array_ids
= kzalloc(sizeof(unsigned long) *
8904 BITS_TO_LONGS(ioa_cfg
->max_devs_supported
), GFP_KERNEL
);
8905 ioa_cfg
->vset_ids
= kzalloc(sizeof(unsigned long) *
8906 BITS_TO_LONGS(ioa_cfg
->max_devs_supported
), GFP_KERNEL
);
8908 if (!ioa_cfg
->target_ids
|| !ioa_cfg
->array_ids
8909 || !ioa_cfg
->vset_ids
)
8910 goto out_free_res_entries
;
8913 for (i
= 0; i
< ioa_cfg
->max_devs_supported
; i
++) {
8914 list_add_tail(&ioa_cfg
->res_entries
[i
].queue
, &ioa_cfg
->free_res_q
);
8915 ioa_cfg
->res_entries
[i
].ioa_cfg
= ioa_cfg
;
8918 ioa_cfg
->vpd_cbs
= pci_alloc_consistent(ioa_cfg
->pdev
,
8919 sizeof(struct ipr_misc_cbs
),
8920 &ioa_cfg
->vpd_cbs_dma
);
8922 if (!ioa_cfg
->vpd_cbs
)
8923 goto out_free_res_entries
;
8925 for (i
= 0; i
< ioa_cfg
->hrrq_num
; i
++) {
8926 INIT_LIST_HEAD(&ioa_cfg
->hrrq
[i
].hrrq_free_q
);
8927 INIT_LIST_HEAD(&ioa_cfg
->hrrq
[i
].hrrq_pending_q
);
8928 spin_lock_init(&ioa_cfg
->hrrq
[i
]._lock
);
8930 ioa_cfg
->hrrq
[i
].lock
= ioa_cfg
->host
->host_lock
;
8932 ioa_cfg
->hrrq
[i
].lock
= &ioa_cfg
->hrrq
[i
]._lock
;
8935 if (ipr_alloc_cmd_blks(ioa_cfg
))
8936 goto out_free_vpd_cbs
;
8938 for (i
= 0; i
< ioa_cfg
->hrrq_num
; i
++) {
8939 ioa_cfg
->hrrq
[i
].host_rrq
= pci_alloc_consistent(ioa_cfg
->pdev
,
8940 sizeof(u32
) * ioa_cfg
->hrrq
[i
].size
,
8941 &ioa_cfg
->hrrq
[i
].host_rrq_dma
);
8943 if (!ioa_cfg
->hrrq
[i
].host_rrq
) {
8945 pci_free_consistent(pdev
,
8946 sizeof(u32
) * ioa_cfg
->hrrq
[i
].size
,
8947 ioa_cfg
->hrrq
[i
].host_rrq
,
8948 ioa_cfg
->hrrq
[i
].host_rrq_dma
);
8949 goto out_ipr_free_cmd_blocks
;
8951 ioa_cfg
->hrrq
[i
].ioa_cfg
= ioa_cfg
;
8954 ioa_cfg
->u
.cfg_table
= pci_alloc_consistent(ioa_cfg
->pdev
,
8955 ioa_cfg
->cfg_table_size
,
8956 &ioa_cfg
->cfg_table_dma
);
8958 if (!ioa_cfg
->u
.cfg_table
)
8959 goto out_free_host_rrq
;
8961 for (i
= 0; i
< IPR_NUM_HCAMS
; i
++) {
8962 ioa_cfg
->hostrcb
[i
] = pci_alloc_consistent(ioa_cfg
->pdev
,
8963 sizeof(struct ipr_hostrcb
),
8964 &ioa_cfg
->hostrcb_dma
[i
]);
8966 if (!ioa_cfg
->hostrcb
[i
])
8967 goto out_free_hostrcb_dma
;
8969 ioa_cfg
->hostrcb
[i
]->hostrcb_dma
=
8970 ioa_cfg
->hostrcb_dma
[i
] + offsetof(struct ipr_hostrcb
, hcam
);
8971 ioa_cfg
->hostrcb
[i
]->ioa_cfg
= ioa_cfg
;
8972 list_add_tail(&ioa_cfg
->hostrcb
[i
]->queue
, &ioa_cfg
->hostrcb_free_q
);
8975 ioa_cfg
->trace
= kzalloc(sizeof(struct ipr_trace_entry
) *
8976 IPR_NUM_TRACE_ENTRIES
, GFP_KERNEL
);
8978 if (!ioa_cfg
->trace
)
8979 goto out_free_hostrcb_dma
;
8986 out_free_hostrcb_dma
:
8988 pci_free_consistent(pdev
, sizeof(struct ipr_hostrcb
),
8989 ioa_cfg
->hostrcb
[i
],
8990 ioa_cfg
->hostrcb_dma
[i
]);
8992 pci_free_consistent(pdev
, ioa_cfg
->cfg_table_size
,
8993 ioa_cfg
->u
.cfg_table
,
8994 ioa_cfg
->cfg_table_dma
);
8996 for (i
= 0; i
< ioa_cfg
->hrrq_num
; i
++) {
8997 pci_free_consistent(pdev
,
8998 sizeof(u32
) * ioa_cfg
->hrrq
[i
].size
,
8999 ioa_cfg
->hrrq
[i
].host_rrq
,
9000 ioa_cfg
->hrrq
[i
].host_rrq_dma
);
9002 out_ipr_free_cmd_blocks
:
9003 ipr_free_cmd_blks(ioa_cfg
);
9005 pci_free_consistent(pdev
, sizeof(struct ipr_misc_cbs
),
9006 ioa_cfg
->vpd_cbs
, ioa_cfg
->vpd_cbs_dma
);
9007 out_free_res_entries
:
9008 kfree(ioa_cfg
->res_entries
);
9009 kfree(ioa_cfg
->target_ids
);
9010 kfree(ioa_cfg
->array_ids
);
9011 kfree(ioa_cfg
->vset_ids
);
9016 * ipr_initialize_bus_attr - Initialize SCSI bus attributes to default values
9017 * @ioa_cfg: ioa config struct
9022 static void ipr_initialize_bus_attr(struct ipr_ioa_cfg
*ioa_cfg
)
9026 for (i
= 0; i
< IPR_MAX_NUM_BUSES
; i
++) {
9027 ioa_cfg
->bus_attr
[i
].bus
= i
;
9028 ioa_cfg
->bus_attr
[i
].qas_enabled
= 0;
9029 ioa_cfg
->bus_attr
[i
].bus_width
= IPR_DEFAULT_BUS_WIDTH
;
9030 if (ipr_max_speed
< ARRAY_SIZE(ipr_max_bus_speeds
))
9031 ioa_cfg
->bus_attr
[i
].max_xfer_rate
= ipr_max_bus_speeds
[ipr_max_speed
];
9033 ioa_cfg
->bus_attr
[i
].max_xfer_rate
= IPR_U160_SCSI_RATE
;
9038 * ipr_init_ioa_cfg - Initialize IOA config struct
9039 * @ioa_cfg: ioa config struct
9040 * @host: scsi host struct
9041 * @pdev: PCI dev struct
9046 static void ipr_init_ioa_cfg(struct ipr_ioa_cfg
*ioa_cfg
,
9047 struct Scsi_Host
*host
, struct pci_dev
*pdev
)
9049 const struct ipr_interrupt_offsets
*p
;
9050 struct ipr_interrupts
*t
;
9053 ioa_cfg
->host
= host
;
9054 ioa_cfg
->pdev
= pdev
;
9055 ioa_cfg
->log_level
= ipr_log_level
;
9056 ioa_cfg
->doorbell
= IPR_DOORBELL
;
9057 sprintf(ioa_cfg
->eye_catcher
, IPR_EYECATCHER
);
9058 sprintf(ioa_cfg
->trace_start
, IPR_TRACE_START_LABEL
);
9059 sprintf(ioa_cfg
->cfg_table_start
, IPR_CFG_TBL_START
);
9060 sprintf(ioa_cfg
->resource_table_label
, IPR_RES_TABLE_LABEL
);
9061 sprintf(ioa_cfg
->ipr_hcam_label
, IPR_HCAM_LABEL
);
9062 sprintf(ioa_cfg
->ipr_cmd_label
, IPR_CMD_LABEL
);
9064 INIT_LIST_HEAD(&ioa_cfg
->hostrcb_free_q
);
9065 INIT_LIST_HEAD(&ioa_cfg
->hostrcb_pending_q
);
9066 INIT_LIST_HEAD(&ioa_cfg
->free_res_q
);
9067 INIT_LIST_HEAD(&ioa_cfg
->used_res_q
);
9068 INIT_WORK(&ioa_cfg
->work_q
, ipr_worker_thread
);
9069 init_waitqueue_head(&ioa_cfg
->reset_wait_q
);
9070 init_waitqueue_head(&ioa_cfg
->msi_wait_q
);
9071 ioa_cfg
->sdt_state
= INACTIVE
;
9073 ipr_initialize_bus_attr(ioa_cfg
);
9074 ioa_cfg
->max_devs_supported
= ipr_max_devs
;
9076 if (ioa_cfg
->sis64
) {
9077 host
->max_id
= IPR_MAX_SIS64_TARGETS_PER_BUS
;
9078 host
->max_lun
= IPR_MAX_SIS64_LUNS_PER_TARGET
;
9079 if (ipr_max_devs
> IPR_MAX_SIS64_DEVS
)
9080 ioa_cfg
->max_devs_supported
= IPR_MAX_SIS64_DEVS
;
9082 host
->max_id
= IPR_MAX_NUM_TARGETS_PER_BUS
;
9083 host
->max_lun
= IPR_MAX_NUM_LUNS_PER_TARGET
;
9084 if (ipr_max_devs
> IPR_MAX_PHYSICAL_DEVS
)
9085 ioa_cfg
->max_devs_supported
= IPR_MAX_PHYSICAL_DEVS
;
9087 host
->max_channel
= IPR_MAX_BUS_TO_SCAN
;
9088 host
->unique_id
= host
->host_no
;
9089 host
->max_cmd_len
= IPR_MAX_CDB_LEN
;
9090 host
->can_queue
= ioa_cfg
->max_cmds
;
9091 pci_set_drvdata(pdev
, ioa_cfg
);
9093 p
= &ioa_cfg
->chip_cfg
->regs
;
9095 base
= ioa_cfg
->hdw_dma_regs
;
9097 t
->set_interrupt_mask_reg
= base
+ p
->set_interrupt_mask_reg
;
9098 t
->clr_interrupt_mask_reg
= base
+ p
->clr_interrupt_mask_reg
;
9099 t
->clr_interrupt_mask_reg32
= base
+ p
->clr_interrupt_mask_reg32
;
9100 t
->sense_interrupt_mask_reg
= base
+ p
->sense_interrupt_mask_reg
;
9101 t
->sense_interrupt_mask_reg32
= base
+ p
->sense_interrupt_mask_reg32
;
9102 t
->clr_interrupt_reg
= base
+ p
->clr_interrupt_reg
;
9103 t
->clr_interrupt_reg32
= base
+ p
->clr_interrupt_reg32
;
9104 t
->sense_interrupt_reg
= base
+ p
->sense_interrupt_reg
;
9105 t
->sense_interrupt_reg32
= base
+ p
->sense_interrupt_reg32
;
9106 t
->ioarrin_reg
= base
+ p
->ioarrin_reg
;
9107 t
->sense_uproc_interrupt_reg
= base
+ p
->sense_uproc_interrupt_reg
;
9108 t
->sense_uproc_interrupt_reg32
= base
+ p
->sense_uproc_interrupt_reg32
;
9109 t
->set_uproc_interrupt_reg
= base
+ p
->set_uproc_interrupt_reg
;
9110 t
->set_uproc_interrupt_reg32
= base
+ p
->set_uproc_interrupt_reg32
;
9111 t
->clr_uproc_interrupt_reg
= base
+ p
->clr_uproc_interrupt_reg
;
9112 t
->clr_uproc_interrupt_reg32
= base
+ p
->clr_uproc_interrupt_reg32
;
9114 if (ioa_cfg
->sis64
) {
9115 t
->init_feedback_reg
= base
+ p
->init_feedback_reg
;
9116 t
->dump_addr_reg
= base
+ p
->dump_addr_reg
;
9117 t
->dump_data_reg
= base
+ p
->dump_data_reg
;
9118 t
->endian_swap_reg
= base
+ p
->endian_swap_reg
;
9123 * ipr_get_chip_info - Find adapter chip information
9124 * @dev_id: PCI device id struct
9127 * ptr to chip information on success / NULL on failure
9129 static const struct ipr_chip_t
*
9130 ipr_get_chip_info(const struct pci_device_id
*dev_id
)
9134 for (i
= 0; i
< ARRAY_SIZE(ipr_chip
); i
++)
9135 if (ipr_chip
[i
].vendor
== dev_id
->vendor
&&
9136 ipr_chip
[i
].device
== dev_id
->device
)
9137 return &ipr_chip
[i
];
/**
 * ipr_enable_msix - Enable MSI-X interrupts for the adapter
 * @ioa_cfg:	ioa config struct
 *
 * Requests up to ipr_number_of_msix MSI-X vectors from the PCI core,
 * retrying with fewer vectors when the core reports that only a smaller
 * number is available, and records the assigned vector numbers in
 * ioa_cfg->vectors_info[].
 *
 * Return value:
 * 	0 on success / non-zero on failure
 **/
static int ipr_enable_msix(struct ipr_ioa_cfg *ioa_cfg)
{
	struct msix_entry entries[IPR_MAX_MSIX_VECTORS];
	int i, err, vectors;

	/* Ask for vectors 0..N-1 in order. */
	for (i = 0; i < ARRAY_SIZE(entries); ++i)
		entries[i].entry = i;

	vectors = ipr_number_of_msix;

	/*
	 * A positive return from pci_enable_msix() is the number of vectors
	 * that could have been allocated; retry with that smaller count.
	 */
	while ((err = pci_enable_msix(ioa_cfg->pdev, entries, vectors)) > 0)
		vectors = err;

	if (err < 0) {
		/* MSI-X unavailable; undo any partial setup and bail. */
		pci_disable_msix(ioa_cfg->pdev);
		return err;
	}

	if (!err) {
		/* Publish the kernel-assigned vector numbers for request_irq(). */
		for (i = 0; i < vectors; i++)
			ioa_cfg->vectors_info[i].vec = entries[i].vector;
		ioa_cfg->nvectors = vectors;
	}

	return err;
}
/**
 * ipr_enable_msi - Enable plain MSI interrupts for the adapter
 * @ioa_cfg:	ioa config struct
 *
 * Requests a block of up to ipr_number_of_msix MSI vectors, retrying with
 * the smaller count the PCI core says is available.  With multi-message
 * MSI the vector numbers are consecutive starting at pdev->irq.
 *
 * Return value:
 * 	0 on success / non-zero on failure
 **/
static int ipr_enable_msi(struct ipr_ioa_cfg *ioa_cfg)
{
	int i, err, vectors;

	vectors = ipr_number_of_msix;

	/* Positive return = number of vectors that could be allocated; retry. */
	while ((err = pci_enable_msi_block(ioa_cfg->pdev, vectors)) > 0)
		vectors = err;

	if (err < 0) {
		/* MSI unavailable; undo any partial setup and bail. */
		pci_disable_msi(ioa_cfg->pdev);
		return err;
	}

	if (!err) {
		for (i = 0; i < vectors; i++)
			ioa_cfg->vectors_info[i].vec = ioa_cfg->pdev->irq + i;
		ioa_cfg->nvectors = vectors;
	}

	return err;
}
9191 static void name_msi_vectors(struct ipr_ioa_cfg
*ioa_cfg
)
9193 int vec_idx
, n
= sizeof(ioa_cfg
->vectors_info
[0].desc
) - 1;
9195 for (vec_idx
= 0; vec_idx
< ioa_cfg
->nvectors
; vec_idx
++) {
9196 snprintf(ioa_cfg
->vectors_info
[vec_idx
].desc
, n
,
9197 "host%d-%d", ioa_cfg
->host
->host_no
, vec_idx
);
9198 ioa_cfg
->vectors_info
[vec_idx
].
9199 desc
[strlen(ioa_cfg
->vectors_info
[vec_idx
].desc
)] = 0;
/**
 * ipr_request_other_msi_irqs - Request IRQs for vectors 1..nvectors-1
 * @ioa_cfg:	ioa config struct
 *
 * Vector 0 is requested separately by the caller (ipr_probe_ioa); this
 * routine wires the remaining MSI/MSI-X vectors to their HRRQs.  On
 * failure the vectors acquired so far are released again.
 *
 * Return value:
 * 	0 on success / non-zero on failure
 **/
static int ipr_request_other_msi_irqs(struct ipr_ioa_cfg *ioa_cfg)
{
	int i, rc;

	for (i = 1; i < ioa_cfg->nvectors; i++) {
		rc = request_irq(ioa_cfg->vectors_info[i].vec,
			ipr_isr_mhrrq,
			0,
			ioa_cfg->vectors_info[i].desc,
			&ioa_cfg->hrrq[i]);
		if (rc) {
			/*
			 * Unwind.  NOTE(review): the lower bound (>= 0 frees
			 * vector 0, which was requested by the caller with the
			 * same hrrq[0] cookie) was not visible in the mangled
			 * source — confirm against the original.
			 */
			while (--i >= 0)
				free_irq(ioa_cfg->vectors_info[i].vec,
					&ioa_cfg->hrrq[i]);
			return rc;
		}
	}
	return 0;
}
9224 * ipr_test_intr - Handle the interrupt generated in ipr_test_msi().
9225 * @pdev: PCI device struct
9227 * Description: Simply set the msi_received flag to 1 indicating that
9228 * Message Signaled Interrupts are supported.
9231 * 0 on success / non-zero on failure
9233 static irqreturn_t
ipr_test_intr(int irq
, void *devp
)
9235 struct ipr_ioa_cfg
*ioa_cfg
= (struct ipr_ioa_cfg
*)devp
;
9236 unsigned long lock_flags
= 0;
9237 irqreturn_t rc
= IRQ_HANDLED
;
9239 dev_info(&ioa_cfg
->pdev
->dev
, "Received IRQ : %d\n", irq
);
9240 spin_lock_irqsave(ioa_cfg
->host
->host_lock
, lock_flags
);
9242 ioa_cfg
->msi_received
= 1;
9243 wake_up(&ioa_cfg
->msi_wait_q
);
9245 spin_unlock_irqrestore(ioa_cfg
->host
->host_lock
, lock_flags
);
/**
 * ipr_test_msi - Test for Message Signaled Interrupt (MSI) support.
 * @ioa_cfg:	ioa config struct
 * @pdev:	PCI device struct
 *
 * Description: The return value from pci_enable_msi() can not always be
 * trusted.  This routine sets up and initiates a test interrupt to determine
 * if the interrupt is received via the ipr_test_intr() service routine.
 * If the test fails, the driver will fall back to LSI.
 *
 * Return value:
 * 	0 on success / non-zero on failure
 **/
static int ipr_test_msi(struct ipr_ioa_cfg *ioa_cfg, struct pci_dev *pdev)
{
	int rc;
	volatile u32 int_reg;
	unsigned long lock_flags = 0;

	ENTER;

	/* Arm the wait queue and unmask only the debug-ack interrupt. */
	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	init_waitqueue_head(&ioa_cfg->msi_wait_q);
	ioa_cfg->msi_received = 0;
	ipr_mask_and_clear_interrupts(ioa_cfg, ~IPR_PCII_IOA_TRANS_TO_OPER);
	writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE, ioa_cfg->regs.clr_interrupt_mask_reg32);
	/* Read back to flush the posted MMIO write. */
	int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);

	rc = request_irq(pdev->irq, ipr_test_intr, 0, IPR_NAME, ioa_cfg);
	if (rc) {
		dev_err(&pdev->dev, "Can not assign irq %d\n", pdev->irq);
		return rc;
	} else if (ipr_debug)
		dev_info(&pdev->dev, "IRQ assigned: %d\n", pdev->irq);

	/* Poke the debug-ack register to make the adapter raise an interrupt. */
	writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE, ioa_cfg->regs.sense_interrupt_reg32);
	int_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
	wait_event_timeout(ioa_cfg->msi_wait_q, ioa_cfg->msi_received, HZ);
	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	ipr_mask_and_clear_interrupts(ioa_cfg, ~IPR_PCII_IOA_TRANS_TO_OPER);

	if (!ioa_cfg->msi_received) {
		/* MSI test failed */
		dev_info(&pdev->dev, "MSI test failed.  Falling back to LSI.\n");
		rc = -EOPNOTSUPP;
	} else if (ipr_debug)
		dev_info(&pdev->dev, "MSI test succeeded.\n");

	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);

	free_irq(pdev->irq, ioa_cfg);

	LEAVE;

	return rc;
}
/* ipr_probe_ioa - Allocates memory and does first stage of initialization
 * @pdev:		PCI device struct
 * @dev_id:		PCI device id struct
 *
 * Return value:
 * 	0 on success / non-zero on failure
 **/
static int ipr_probe_ioa(struct pci_dev *pdev,
			 const struct pci_device_id *dev_id)
{
	struct ipr_ioa_cfg *ioa_cfg;
	struct Scsi_Host *host;
	unsigned long ipr_regs_pci;
	void __iomem *ipr_regs;
	int rc = PCIBIOS_SUCCESSFUL;
	volatile u32 mask, uproc, interrupts;
	unsigned long lock_flags;

	ENTER;

	if ((rc = pci_enable_device(pdev))) {
		dev_err(&pdev->dev, "Cannot enable adapter\n");
		goto out;
	}

	dev_info(&pdev->dev, "Found IOA with IRQ: %d\n", pdev->irq);

	host = scsi_host_alloc(&driver_template, sizeof(*ioa_cfg));

	if (!host) {
		dev_err(&pdev->dev, "call to scsi_host_alloc failed!\n");
		rc = -ENOMEM;
		goto out_disable;
	}

	/* ioa_cfg lives in the Scsi_Host's hostdata allocation. */
	ioa_cfg = (struct ipr_ioa_cfg *)host->hostdata;
	memset(ioa_cfg, 0, sizeof(struct ipr_ioa_cfg));
	ata_host_init(&ioa_cfg->ata_host, &pdev->dev, &ipr_sata_ops);

	ioa_cfg->ipr_chip = ipr_get_chip_info(dev_id);

	if (!ioa_cfg->ipr_chip) {
		dev_err(&pdev->dev, "Unknown adapter chipset 0x%04X 0x%04X\n",
			dev_id->vendor, dev_id->device);
		goto out_scsi_host_put;
	}

	/* set SIS 32 or SIS 64 */
	ioa_cfg->sis64 = ioa_cfg->ipr_chip->sis_type == IPR_SIS64 ? 1 : 0;
	ioa_cfg->chip_cfg = ioa_cfg->ipr_chip->cfg;
	ioa_cfg->clear_isr = ioa_cfg->chip_cfg->clear_isr;
	ioa_cfg->max_cmds = ioa_cfg->chip_cfg->max_cmds;

	/* Module parameter overrides the per-device default timeout. */
	if (ipr_transop_timeout)
		ioa_cfg->transop_timeout = ipr_transop_timeout;
	else if (dev_id->driver_data & IPR_USE_LONG_TRANSOP_TIMEOUT)
		ioa_cfg->transop_timeout = IPR_LONG_OPERATIONAL_TIMEOUT;
	else
		ioa_cfg->transop_timeout = IPR_OPERATIONAL_TIMEOUT;

	ioa_cfg->revid = pdev->revision;

	ipr_regs_pci = pci_resource_start(pdev, 0);

	rc = pci_request_regions(pdev, IPR_NAME);
	if (rc < 0) {
		dev_err(&pdev->dev,
			"Couldn't register memory range of registers\n");
		goto out_scsi_host_put;
	}

	ipr_regs = pci_ioremap_bar(pdev, 0);

	if (!ipr_regs) {
		dev_err(&pdev->dev,
			"Couldn't map memory range of registers\n");
		rc = -ENOMEM;
		goto out_release_regions;
	}

	ioa_cfg->hdw_dma_regs = ipr_regs;
	ioa_cfg->hdw_dma_regs_pci = ipr_regs_pci;
	ioa_cfg->ioa_mailbox = ioa_cfg->chip_cfg->mailbox + ipr_regs;

	ipr_init_ioa_cfg(ioa_cfg, host, pdev);

	pci_set_master(pdev);

	/* SIS64 adapters can DMA above 4GB; fall back to 32 bit otherwise. */
	if (ioa_cfg->sis64) {
		rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
		if (rc < 0) {
			dev_dbg(&pdev->dev, "Failed to set 64 bit PCI DMA mask\n");
			rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
		}
	} else
		rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));

	if (rc < 0) {
		dev_err(&pdev->dev, "Failed to set PCI DMA mask\n");
		goto cleanup_nomem;
	}

	rc = pci_write_config_byte(pdev, PCI_CACHE_LINE_SIZE,
				   ioa_cfg->chip_cfg->cache_line_size);

	if (rc != PCIBIOS_SUCCESSFUL) {
		dev_err(&pdev->dev, "Write of cache line size failed\n");
		rc = -EIO;
		goto cleanup_nomem;
	}

	/* Clamp the module parameter to what the driver supports. */
	if (ipr_number_of_msix > IPR_MAX_MSIX_VECTORS) {
		dev_err(&pdev->dev, "The max number of MSIX is %d\n",
			IPR_MAX_MSIX_VECTORS);
		ipr_number_of_msix = IPR_MAX_MSIX_VECTORS;
	}

	/* Prefer MSI-X, then MSI, then legacy (LSI) interrupts. */
	if (ioa_cfg->ipr_chip->intr_type == IPR_USE_MSI &&
			ipr_enable_msix(ioa_cfg) == 0)
		ioa_cfg->intr_flag = IPR_USE_MSIX;
	else if (ioa_cfg->ipr_chip->intr_type == IPR_USE_MSI &&
			ipr_enable_msi(ioa_cfg) == 0)
		ioa_cfg->intr_flag = IPR_USE_MSI;
	else {
		ioa_cfg->intr_flag = IPR_USE_LSI;
		ioa_cfg->nvectors = 1;
		dev_info(&pdev->dev, "Cannot enable MSI.\n");
	}

	/*
	 * pci_enable_msi()'s success cannot always be trusted; verify actual
	 * delivery with a test interrupt and fall back to LSI if it never
	 * arrives (see ipr_test_msi()).
	 */
	if (ioa_cfg->intr_flag == IPR_USE_MSI ||
	    ioa_cfg->intr_flag == IPR_USE_MSIX) {
		rc = ipr_test_msi(ioa_cfg, pdev);
		if (rc == -EOPNOTSUPP) {
			if (ioa_cfg->intr_flag == IPR_USE_MSI) {
				ioa_cfg->intr_flag &= ~IPR_USE_MSI;
				pci_disable_msi(pdev);
			} else if (ioa_cfg->intr_flag == IPR_USE_MSIX) {
				ioa_cfg->intr_flag &= ~IPR_USE_MSIX;
				pci_disable_msix(pdev);
			}

			ioa_cfg->intr_flag = IPR_USE_LSI;
			ioa_cfg->nvectors = 1;
		}
		else if (rc)
			goto out_msi_disable;
		else {
			if (ioa_cfg->intr_flag == IPR_USE_MSI)
				dev_info(&pdev->dev,
					"Request for %d MSIs succeeded with starting IRQ: %d\n",
					ioa_cfg->nvectors, pdev->irq);
			else if (ioa_cfg->intr_flag == IPR_USE_MSIX)
				dev_info(&pdev->dev,
					"Request for %d MSIXs succeeded.",
					ioa_cfg->nvectors);
		}
	}

	/* One HRRQ per vector, bounded by CPU count and the driver maximum. */
	ioa_cfg->hrrq_num = min3(ioa_cfg->nvectors,
				(unsigned int)num_online_cpus(),
				(unsigned int)IPR_MAX_HRRQ_NUM);

	/* Save away PCI config space for use following IOA reset */
	rc = pci_save_state(pdev);

	if (rc != PCIBIOS_SUCCESSFUL) {
		dev_err(&pdev->dev, "Failed to save PCI config space\n");
		rc = -EIO;
		goto out_msi_disable;
	}

	if ((rc = ipr_save_pcix_cmd_reg(ioa_cfg)))
		goto out_msi_disable;

	if ((rc = ipr_set_pcix_cmd_reg(ioa_cfg)))
		goto out_msi_disable;

	/* Config table entries are larger in the SIS64 format. */
	if (ioa_cfg->sis64)
		ioa_cfg->cfg_table_size = (sizeof(struct ipr_config_table_hdr64)
				+ ((sizeof(struct ipr_config_table_entry64)
				* ioa_cfg->max_devs_supported)));
	else
		ioa_cfg->cfg_table_size = (sizeof(struct ipr_config_table_hdr)
				+ ((sizeof(struct ipr_config_table_entry)
				* ioa_cfg->max_devs_supported)));

	rc = ipr_alloc_mem(ioa_cfg);
	if (rc < 0) {
		dev_err(&pdev->dev,
			"Couldn't allocate enough memory for device driver!\n");
		goto out_msi_disable;
	}

	/*
	 * If HRRQ updated interrupt is not masked, or reset alert is set,
	 * the card is in an unknown state and needs a hard reset
	 */
	mask = readl(ioa_cfg->regs.sense_interrupt_mask_reg32);
	interrupts = readl(ioa_cfg->regs.sense_interrupt_reg32);
	uproc = readl(ioa_cfg->regs.sense_uproc_interrupt_reg32);
	if ((mask & IPR_PCII_HRRQ_UPDATED) == 0 || (uproc & IPR_UPROCI_RESET_ALERT))
		ioa_cfg->needs_hard_reset = 1;
	if ((interrupts & IPR_PCII_ERROR_INTERRUPTS) || reset_devices)
		ioa_cfg->needs_hard_reset = 1;
	if (interrupts & IPR_PCII_IOA_UNIT_CHECKED)
		ioa_cfg->ioa_unit_checked = 1;

	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	ipr_mask_and_clear_interrupts(ioa_cfg, ~IPR_PCII_IOA_TRANS_TO_OPER);
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);

	if (ioa_cfg->intr_flag == IPR_USE_MSI
			|| ioa_cfg->intr_flag == IPR_USE_MSIX) {
		name_msi_vectors(ioa_cfg);
		rc = request_irq(ioa_cfg->vectors_info[0].vec, ipr_isr,
			0,
			ioa_cfg->vectors_info[0].desc,
			&ioa_cfg->hrrq[0]);
		if (!rc)
			rc = ipr_request_other_msi_irqs(ioa_cfg);
	} else {
		/* Legacy interrupt line may be shared with other devices. */
		rc = request_irq(pdev->irq, ipr_isr,
			 IRQF_SHARED,
			 IPR_NAME, &ioa_cfg->hrrq[0]);
	}
	if (rc) {
		dev_err(&pdev->dev, "Couldn't register IRQ %d! rc=%d\n",
			pdev->irq, rc);
		goto cleanup_nolog;
	}

	/* Some adapters need a warm (slot) reset rather than BIST. */
	if ((dev_id->driver_data & IPR_USE_PCI_WARM_RESET) ||
	    (dev_id->device == PCI_DEVICE_ID_IBM_OBSIDIAN_E && !ioa_cfg->revid)) {
		ioa_cfg->needs_warm_reset = 1;
		ioa_cfg->reset = ipr_reset_slot_reset;
	} else
		ioa_cfg->reset = ipr_reset_start_bist;

	spin_lock(&ipr_driver_lock);
	list_add_tail(&ioa_cfg->queue, &ipr_ioa_head);
	spin_unlock(&ipr_driver_lock);

	LEAVE;
out:
	return rc;

cleanup_nolog:
	ipr_free_mem(ioa_cfg);
out_msi_disable:
	if (ioa_cfg->intr_flag == IPR_USE_MSI)
		pci_disable_msi(pdev);
	else if (ioa_cfg->intr_flag == IPR_USE_MSIX)
		pci_disable_msix(pdev);
cleanup_nomem:
	iounmap(ipr_regs);
out_release_regions:
	pci_release_regions(pdev);
out_scsi_host_put:
	scsi_host_put(host);
out_disable:
	pci_disable_device(pdev);
	goto out;
}
9572 * ipr_scan_vsets - Scans for VSET devices
9573 * @ioa_cfg: ioa config struct
9575 * Description: Since the VSET resources do not follow SAM in that we can have
9576 * sparse LUNs with no LUN 0, we have to scan for these ourselves.
9581 static void ipr_scan_vsets(struct ipr_ioa_cfg
*ioa_cfg
)
9585 for (target
= 0; target
< IPR_MAX_NUM_TARGETS_PER_BUS
; target
++)
9586 for (lun
= 0; lun
< IPR_MAX_NUM_VSET_LUNS_PER_TARGET
; lun
++)
9587 scsi_add_device(ioa_cfg
->host
, IPR_VSET_BUS
, target
, lun
);
/**
 * ipr_initiate_ioa_bringdown - Bring down an adapter
 * @ioa_cfg:		ioa config struct
 * @shutdown_type:	shutdown type
 *
 * Description: This function will initiate bringing down the adapter.
 * This consists of issuing an IOA shutdown to the adapter
 * to flush the cache, and running BIST.
 * If the caller needs to wait on the completion of the reset,
 * the caller must sleep on the reset_wait_q.
 *
 * Return value:
 * 	none
 **/
static void ipr_initiate_ioa_bringdown(struct ipr_ioa_cfg *ioa_cfg,
				       enum ipr_shutdown_type shutdown_type)
{
	ENTER;
	/* A dump still being waited for cannot complete once we go down. */
	if (ioa_cfg->sdt_state == WAIT_FOR_DUMP)
		ioa_cfg->sdt_state = ABORT_DUMP;
	ioa_cfg->reset_retries = 0;
	ioa_cfg->in_ioa_bringdown = 1;
	ipr_initiate_ioa_reset(ioa_cfg, shutdown_type);
	LEAVE;
}
/**
 * __ipr_remove - Remove a single adapter
 * @pdev:	pci device struct
 *
 * Adapter hot plug remove entry point.
 *
 * Return value:
 * 	none
 **/
static void __ipr_remove(struct pci_dev *pdev)
{
	unsigned long host_lock_flags = 0;
	struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
	ENTER;

	spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);
	/* Let any in-flight reset/reload finish before starting bringdown. */
	while (ioa_cfg->in_reset_reload) {
		spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);
		wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
		spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);
	}

	ipr_initiate_ioa_bringdown(ioa_cfg, IPR_SHUTDOWN_NORMAL);

	/* Wait for the bringdown reset and any queued work to complete. */
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);
	wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
	flush_work(&ioa_cfg->work_q);
	spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);

	/* Unlink from the global adapter list under the driver lock. */
	spin_lock(&ipr_driver_lock);
	list_del(&ioa_cfg->queue);
	spin_unlock(&ipr_driver_lock);

	/*
	 * NOTE(review): presumably restored so dump resources are handled by
	 * ipr_free_all_resources() below — confirm against original intent.
	 */
	if (ioa_cfg->sdt_state == ABORT_DUMP)
		ioa_cfg->sdt_state = WAIT_FOR_DUMP;
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);

	ipr_free_all_resources(ioa_cfg);

	LEAVE;
}
/**
 * ipr_remove - IOA hot plug remove entry point
 * @pdev:	pci device struct
 *
 * Adapter hot plug remove entry point.
 *
 * Return value:
 * 	none
 **/
static void ipr_remove(struct pci_dev *pdev)
{
	struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);

	ENTER;

	/* Tear down the sysfs trace/dump attributes before the host goes. */
	ipr_remove_trace_file(&ioa_cfg->host->shost_dev.kobj,
			      &ipr_trace_attr);
	ipr_remove_dump_file(&ioa_cfg->host->shost_dev.kobj,
			     &ipr_dump_attr);
	scsi_remove_host(ioa_cfg->host);

	__ipr_remove(pdev);

	LEAVE;
}
/**
 * ipr_probe - Adapter hot plug add entry point
 * @pdev:	pci device struct
 * @dev_id:	pci device id struct
 *
 * Return value:
 * 	0 on success / non-zero on failure
 **/
static int ipr_probe(struct pci_dev *pdev, const struct pci_device_id *dev_id)
{
	struct ipr_ioa_cfg *ioa_cfg;
	int rc;

	/* First-stage init: allocation, register mapping, IRQ setup. */
	rc = ipr_probe_ioa(pdev, dev_id);

	if (rc)
		return rc;

	ioa_cfg = pci_get_drvdata(pdev);
	rc = ipr_probe_ioa_part2(ioa_cfg);

	if (rc) {
		__ipr_remove(pdev);
		return rc;
	}

	rc = scsi_add_host(ioa_cfg->host, &pdev->dev);

	if (rc) {
		__ipr_remove(pdev);
		return rc;
	}

	rc = ipr_create_trace_file(&ioa_cfg->host->shost_dev.kobj,
				   &ipr_trace_attr);

	if (rc) {
		scsi_remove_host(ioa_cfg->host);
		__ipr_remove(pdev);
		return rc;
	}

	rc = ipr_create_dump_file(&ioa_cfg->host->shost_dev.kobj,
				  &ipr_dump_attr);

	if (rc) {
		/* Undo everything set up so far, newest first. */
		ipr_remove_trace_file(&ioa_cfg->host->shost_dev.kobj,
				      &ipr_trace_attr);
		scsi_remove_host(ioa_cfg->host);
		__ipr_remove(pdev);
		return rc;
	}

	scsi_scan_host(ioa_cfg->host);
	/* VSETs can have sparse LUNs, so they need an explicit scan. */
	ipr_scan_vsets(ioa_cfg);
	scsi_add_device(ioa_cfg->host, IPR_IOA_BUS, IPR_IOA_TARGET, IPR_IOA_LUN);
	ioa_cfg->allow_ml_add_del = 1;
	ioa_cfg->host->max_channel = IPR_VSET_BUS;
	schedule_work(&ioa_cfg->work_q);
	return rc;
}
/**
 * ipr_shutdown - Shutdown handler.
 * @pdev:	pci device struct
 *
 * This function is invoked upon system shutdown/reboot. It will issue
 * an adapter shutdown to the adapter to flush the write cache.
 *
 * Return value:
 * 	none
 **/
static void ipr_shutdown(struct pci_dev *pdev)
{
	struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
	unsigned long lock_flags = 0;

	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	/* Wait out any reset/reload already in progress. */
	while (ioa_cfg->in_reset_reload) {
		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
		wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
		spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	}

	/* Initiate a normal bringdown and block until it completes. */
	ipr_initiate_ioa_bringdown(ioa_cfg, IPR_SHUTDOWN_NORMAL);
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
	wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
}
/*
 * PCI IDs this driver binds to.  Fields per entry: vendor, device,
 * subvendor, subdevice, class, class_mask, driver_data (IPR_USE_* flags).
 */
static struct pci_device_id ipr_pci_table[] = {
	{ PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_5702, 0, 0, 0 },
	{ PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_5703, 0, 0, 0 },
	{ PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_573D, 0, 0, 0 },
	{ PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_573E, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_571B, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572E, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_571A, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_575B, 0, 0,
		IPR_USE_LONG_TRANSOP_TIMEOUT },
	{ PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_OBSIDIAN,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572A, 0, 0, 0 },
	{ PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_OBSIDIAN,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572B, 0, 0,
		IPR_USE_LONG_TRANSOP_TIMEOUT },
	{ PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_OBSIDIAN,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_575C, 0, 0,
		IPR_USE_LONG_TRANSOP_TIMEOUT },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572A, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572B, 0, 0,
		IPR_USE_LONG_TRANSOP_TIMEOUT },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_575C, 0, 0,
		IPR_USE_LONG_TRANSOP_TIMEOUT },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_574E, 0, 0,
		IPR_USE_LONG_TRANSOP_TIMEOUT },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B3, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57CC, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B7, 0, 0,
		IPR_USE_LONG_TRANSOP_TIMEOUT | IPR_USE_PCI_WARM_RESET },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_SNIPE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_2780, 0, 0, 0 },
	{ PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_SCAMP,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_571E, 0, 0, 0 },
	{ PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_SCAMP,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_571F, 0, 0,
		IPR_USE_LONG_TRANSOP_TIMEOUT },
	{ PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_SCAMP,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572F, 0, 0,
		IPR_USE_LONG_TRANSOP_TIMEOUT },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B5, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_574D, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B2, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57C0, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57C3, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57C4, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B4, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B1, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57C6, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57C8, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57CE, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57D5, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57D6, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57D7, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57D8, 0, 0, 0 },
	{ }	/* terminator required by MODULE_DEVICE_TABLE */
};
MODULE_DEVICE_TABLE(pci, ipr_pci_table);
/* PCI error recovery (EEH/AER) callbacks. */
static const struct pci_error_handlers ipr_err_handler = {
	.error_detected = ipr_pci_error_detected,
	.slot_reset = ipr_pci_slot_reset,
};
/*
 * PCI driver registration.  NOTE(review): the .name and .probe entries were
 * not visible in the extracted source and are reconstructed — confirm.
 */
static struct pci_driver ipr_driver = {
	.name = IPR_NAME,
	.id_table = ipr_pci_table,
	.probe = ipr_probe,
	.remove = ipr_remove,
	.shutdown = ipr_shutdown,
	.err_handler = &ipr_err_handler,
};
/**
 * ipr_halt_done - Shutdown prepare completion
 * @ipr_cmd:	ipr command struct
 *
 * Return value:
 * 	none
 **/
static void ipr_halt_done(struct ipr_cmnd *ipr_cmd)
{
	/* Nothing to check at shutdown; just recycle the command block. */
	list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
}
/**
 * ipr_halt - Issue shutdown prepare to all adapters
 * @nb:		notifier block
 * @event:	reboot notifier event
 * @buf:	unused
 *
 * Return value:
 * 	NOTIFY_OK on success / NOTIFY_DONE on failure
 **/
static int ipr_halt(struct notifier_block *nb, ulong event, void *buf)
{
	struct ipr_cmnd *ipr_cmd;
	struct ipr_ioa_cfg *ioa_cfg;
	unsigned long flags = 0;

	/* Only act on events that actually take the system down. */
	if (event != SYS_RESTART && event != SYS_HALT && event != SYS_POWER_OFF)
		return NOTIFY_DONE;

	spin_lock(&ipr_driver_lock);

	list_for_each_entry(ioa_cfg, &ipr_ioa_head, queue) {
		spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
		if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].allow_cmds) {
			/* Adapter not operational; nothing to flush. */
			spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
			continue;
		}

		/* Build an IOA "shutdown prepare" command and fire it. */
		ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
		ipr_cmd->ioarcb.res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
		ipr_cmd->ioarcb.cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
		ipr_cmd->ioarcb.cmd_pkt.cdb[0] = IPR_IOA_SHUTDOWN;
		ipr_cmd->ioarcb.cmd_pkt.cdb[1] = IPR_SHUTDOWN_PREPARE_FOR_NORMAL;

		ipr_do_req(ipr_cmd, ipr_halt_done, ipr_timeout, IPR_DEVICE_RESET_TIMEOUT);
		spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
	}
	spin_unlock(&ipr_driver_lock);

	return NOTIFY_OK;
}
/*
 * Reboot notifier: flushes adapter write caches via ipr_halt() before the
 * system goes down.  NOTE(review): interior (notifier_call = ipr_halt) was
 * not visible in the extracted source and is reconstructed — confirm.
 */
static struct notifier_block ipr_notifier = {
	ipr_halt, NULL, 0
};
/**
 * ipr_init - Module entry point
 *
 * Return value:
 * 	0 on success / negative value on failure
 **/
static int __init ipr_init(void)
{
	ipr_info("IBM Power RAID SCSI Device Driver version: %s %s\n",
		 IPR_DRIVER_VERSION, IPR_DRIVER_DATE);

	/* Register for reboot/halt notification before exposing the driver. */
	register_reboot_notifier(&ipr_notifier);
	return pci_register_driver(&ipr_driver);
}
/**
 * ipr_exit - Module unload
 *
 * Module unload entry point.
 *
 * Return value:
 * 	none
 **/
static void __exit ipr_exit(void)
{
	/* Undo ipr_init() in reverse order. */
	unregister_reboot_notifier(&ipr_notifier);
	pci_unregister_driver(&ipr_driver);
}
/* Module entry/exit registration. */
module_init(ipr_init);
module_exit(ipr_exit);