/*
 * ipr.c -- driver for IBM Power Linux RAID adapters
 *
 * Written By: Brian King <brking@us.ibm.com>, IBM Corporation
 *
 * Copyright (C) 2003, 2004 IBM Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 *
 */

/*
 * Notes:
 *
 * This driver is used to control the following SCSI adapters:
 *
 * IBM iSeries: 5702, 5703, 2780, 5709, 570A, 570B
 *
 * IBM pSeries: PCI-X Dual Channel Ultra 320 SCSI RAID Adapter
 *              PCI-X Dual Channel Ultra 320 SCSI Adapter
 *              PCI-X Dual Channel Ultra 320 SCSI RAID Enablement Card
 *              Embedded SCSI adapter on p615 and p655 systems
 *
 * Supported Hardware Features:
 *	- Ultra 320 SCSI controller
 *	- PCI-X host interface
 *	- Embedded PowerPC RISC Processor and Hardware XOR DMA Engine
 *	- Non-Volatile Write Cache
 *	- Supports attachment of non-RAID disks, tape, and optical devices
 *	- RAID Levels 0, 5, 10
 *	- Hot spare
 *	- Background Parity Checking
 *	- Background Data Scrubbing
 *	- Ability to increase the capacity of an existing RAID 5 disk array
 *	  by adding disks
 *
 * Driver Features:
 *	- Tagged command queuing
 *	- Adapter microcode download
 *	- PCI hot plug
 *	- SCSI device hot plug
 *
 */

#include <linux/fs.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/ioport.h>
#include <linux/delay.h>
#include <linux/pci.h>
#include <linux/wait.h>
#include <linux/spinlock.h>
#include <linux/sched.h>
#include <linux/interrupt.h>
#include <linux/blkdev.h>
#include <linux/firmware.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/libata.h>
#include <linux/hdreg.h>
#include <linux/reboot.h>
#include <linux/stringify.h>
#include <asm/io.h>
#include <asm/irq.h>
#include <asm/processor.h>
#include <scsi/scsi.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_tcq.h>
#include <scsi/scsi_eh.h>
#include <scsi/scsi_cmnd.h>
#include "ipr.h"

/*
 * Global Data
 */
static LIST_HEAD(ipr_ioa_head);
static unsigned int ipr_log_level = IPR_DEFAULT_LOG_LEVEL;
static unsigned int ipr_max_speed = 1;
static int ipr_testmode = 0;
static unsigned int ipr_fastfail = 0;
static unsigned int ipr_transop_timeout = 0;
static unsigned int ipr_debug = 0;
static unsigned int ipr_max_devs = IPR_DEFAULT_SIS64_DEVS;
static unsigned int ipr_dual_ioa_raid = 1;
static DEFINE_SPINLOCK(ipr_driver_lock);

/* This table describes the differences between DMA controller chips */
static const struct ipr_chip_cfg_t ipr_chip_cfg[] = {
	{ /* Gemstone, Citrine, Obsidian, and Obsidian-E */
		.mailbox = 0x0042C,
		.cache_line_size = 0x20,
		{
			.set_interrupt_mask_reg = 0x0022C,
			.clr_interrupt_mask_reg = 0x00230,
			.clr_interrupt_mask_reg32 = 0x00230,
			.sense_interrupt_mask_reg = 0x0022C,
			.sense_interrupt_mask_reg32 = 0x0022C,
			.clr_interrupt_reg = 0x00228,
			.clr_interrupt_reg32 = 0x00228,
			.sense_interrupt_reg = 0x00224,
			.sense_interrupt_reg32 = 0x00224,
			.ioarrin_reg = 0x00404,
			.sense_uproc_interrupt_reg = 0x00214,
			.sense_uproc_interrupt_reg32 = 0x00214,
			.set_uproc_interrupt_reg = 0x00214,
			.set_uproc_interrupt_reg32 = 0x00214,
			.clr_uproc_interrupt_reg = 0x00218,
			.clr_uproc_interrupt_reg32 = 0x00218
		}
	},
	{ /* Snipe and Scamp */
		.mailbox = 0x0052C,
		.cache_line_size = 0x20,
		{
			.set_interrupt_mask_reg = 0x00288,
			.clr_interrupt_mask_reg = 0x0028C,
			.clr_interrupt_mask_reg32 = 0x0028C,
			.sense_interrupt_mask_reg = 0x00288,
			.sense_interrupt_mask_reg32 = 0x00288,
			.clr_interrupt_reg = 0x00284,
			.clr_interrupt_reg32 = 0x00284,
			.sense_interrupt_reg = 0x00280,
			.sense_interrupt_reg32 = 0x00280,
			.ioarrin_reg = 0x00504,
			.sense_uproc_interrupt_reg = 0x00290,
			.sense_uproc_interrupt_reg32 = 0x00290,
			.set_uproc_interrupt_reg = 0x00290,
			.set_uproc_interrupt_reg32 = 0x00290,
			.clr_uproc_interrupt_reg = 0x00294,
			.clr_uproc_interrupt_reg32 = 0x00294
		}
	},
	{ /* CRoC */
		.mailbox = 0x00040,
		.cache_line_size = 0x20,
		{
			.set_interrupt_mask_reg = 0x00010,
			.clr_interrupt_mask_reg = 0x00018,
			.clr_interrupt_mask_reg32 = 0x0001C,
			.sense_interrupt_mask_reg = 0x00010,
			.sense_interrupt_mask_reg32 = 0x00014,
			.clr_interrupt_reg = 0x00008,
			.clr_interrupt_reg32 = 0x0000C,
			.sense_interrupt_reg = 0x00000,
			.sense_interrupt_reg32 = 0x00004,
			.ioarrin_reg = 0x00070,
			.sense_uproc_interrupt_reg = 0x00020,
			.sense_uproc_interrupt_reg32 = 0x00024,
			.set_uproc_interrupt_reg = 0x00020,
			.set_uproc_interrupt_reg32 = 0x00024,
			.clr_uproc_interrupt_reg = 0x00028,
			.clr_uproc_interrupt_reg32 = 0x0002C,
			.init_feedback_reg = 0x0005C,
			.dump_addr_reg = 0x00064,
			.dump_data_reg = 0x00068
		}
	},
};
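
/*
 * ipr_chip below pairs each supported PCI vendor/device ID with the
 * interrupt mode the adapter should use (LSI vs. MSI), its SIS interface
 * level, and the register layout in ipr_chip_cfg that matches the chip.
 */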
static const struct ipr_chip_t ipr_chip[] = {
	{ PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE, IPR_USE_LSI, IPR_SIS32, &ipr_chip_cfg[0] },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE, IPR_USE_LSI, IPR_SIS32, &ipr_chip_cfg[0] },
	{ PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_OBSIDIAN, IPR_USE_LSI, IPR_SIS32, &ipr_chip_cfg[0] },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN, IPR_USE_LSI, IPR_SIS32, &ipr_chip_cfg[0] },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E, IPR_USE_MSI, IPR_SIS32, &ipr_chip_cfg[0] },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_SNIPE, IPR_USE_LSI, IPR_SIS32, &ipr_chip_cfg[1] },
	{ PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_SCAMP, IPR_USE_LSI, IPR_SIS32, &ipr_chip_cfg[1] }
};

static int ipr_max_bus_speeds[] = {
	IPR_80MBs_SCSI_RATE, IPR_U160_SCSI_RATE, IPR_U320_SCSI_RATE
};

MODULE_AUTHOR("Brian King <brking@us.ibm.com>");
MODULE_DESCRIPTION("IBM Power RAID SCSI Adapter Driver");
module_param_named(max_speed, ipr_max_speed, uint, 0);
MODULE_PARM_DESC(max_speed, "Maximum bus speed (0-2). Default: 1=U160. Speeds: 0=80 MB/s, 1=U160, 2=U320");
module_param_named(log_level, ipr_log_level, uint, 0);
MODULE_PARM_DESC(log_level, "Set to 0 - 4 for increasing verbosity of device driver");
module_param_named(testmode, ipr_testmode, int, 0);
MODULE_PARM_DESC(testmode, "DANGEROUS!!! Allows unsupported configurations");
module_param_named(fastfail, ipr_fastfail, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(fastfail, "Reduce timeouts and retries");
module_param_named(transop_timeout, ipr_transop_timeout, int, 0);
MODULE_PARM_DESC(transop_timeout, "Time in seconds to wait for adapter to come operational (default: 300)");
module_param_named(debug, ipr_debug, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(debug, "Enable device driver debug logging. Set to 1 to enable. (default: 0)");
module_param_named(dual_ioa_raid, ipr_dual_ioa_raid, int, 0);
MODULE_PARM_DESC(dual_ioa_raid, "Enable dual adapter RAID support. Set to 1 to enable. (default: 1)");
module_param_named(max_devs, ipr_max_devs, int, 0);
MODULE_PARM_DESC(max_devs, "Specify the maximum number of physical devices. "
		 "[Default=" __stringify(IPR_DEFAULT_SIS64_DEVS) "]");
MODULE_LICENSE("GPL");
MODULE_VERSION(IPR_DRIVER_VERSION);

/* A constant array of IOASCs/URCs/Error Messages */
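/*
 * Each entry pairs an IOASC with what appear to be an IOASA-logging flag
 * and a log level, followed by the message reported for that error.
 */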
static const
struct ipr_error_table_t ipr_error_table[] = {
	{0x00000000, 1, IPR_DEFAULT_LOG_LEVEL,
	"8155: An unknown error was received"},
	{0x00330000, 0, 0,
	"Soft underlength error"},
	{0x005A0000, 0, 0,
	"Command to be cancelled not found"},
	{0x00808000, 0, 0,
	"Qualified success"},
	{0x01080000, 1, IPR_DEFAULT_LOG_LEVEL,
	"FFFE: Soft device bus error recovered by the IOA"},
	{0x01088100, 0, IPR_DEFAULT_LOG_LEVEL,
	"4101: Soft device bus fabric error"},
	{0x01100100, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFFC: Logical block guard error recovered by the device"},
	{0x01100300, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFFC: Logical block reference tag error recovered by the device"},
	{0x01108300, 0, IPR_DEFAULT_LOG_LEVEL,
	"4171: Recovered scatter list tag / sequence number error"},
	{0x01109000, 0, IPR_DEFAULT_LOG_LEVEL,
	"FF3D: Recovered logical block CRC error on IOA to Host transfer"},
	{0x01109200, 0, IPR_DEFAULT_LOG_LEVEL,
	"4171: Recovered logical block sequence number error on IOA to Host transfer"},
	{0x0110A000, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFFD: Recovered logical block reference tag error detected by the IOA"},
	{0x0110A100, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFFD: Logical block guard error recovered by the IOA"},
	{0x01170600, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFF9: Device sector reassign successful"},
	{0x01170900, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFF7: Media error recovered by device rewrite procedures"},
	{0x01180200, 0, IPR_DEFAULT_LOG_LEVEL,
	"7001: IOA sector reassignment successful"},
	{0x01180500, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFF9: Soft media error. Sector reassignment recommended"},
	{0x01180600, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFF7: Media error recovered by IOA rewrite procedures"},
	{0x01418000, 0, IPR_DEFAULT_LOG_LEVEL,
	"FF3D: Soft PCI bus error recovered by the IOA"},
	{0x01440000, 1, IPR_DEFAULT_LOG_LEVEL,
	"FFF6: Device hardware error recovered by the IOA"},
	{0x01448100, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFF6: Device hardware error recovered by the device"},
	{0x01448200, 1, IPR_DEFAULT_LOG_LEVEL,
	"FF3D: Soft IOA error recovered by the IOA"},
	{0x01448300, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFFA: Undefined device response recovered by the IOA"},
	{0x014A0000, 1, IPR_DEFAULT_LOG_LEVEL,
	"FFF6: Device bus error, message or command phase"},
	{0x014A8000, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFFE: Task Management Function failed"},
	{0x015D0000, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFF6: Failure prediction threshold exceeded"},
	{0x015D9200, 0, IPR_DEFAULT_LOG_LEVEL,
	"8009: Impending cache battery pack failure"},
	{0x02040400, 0, 0,
	"34FF: Disk device format in progress"},
	{0x02048000, 0, IPR_DEFAULT_LOG_LEVEL,
	"9070: IOA requested reset"},
	{0x023F0000, 0, 0,
	"Synchronization required"},
	{0x024E0000, 0, 0,
	"Not ready, IOA shutdown"},
	{0x025A0000, 0, 0,
	"Not ready, IOA has been shutdown"},
	{0x02670100, 0, IPR_DEFAULT_LOG_LEVEL,
	"3020: Storage subsystem configuration error"},
	{0x03110B00, 0, 0,
	"FFF5: Medium error, data unreadable, recommend reassign"},
	{0x03110C00, 0, 0,
	"7000: Medium error, data unreadable, do not reassign"},
	{0x03310000, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFF3: Disk media format bad"},
	{0x04050000, 0, IPR_DEFAULT_LOG_LEVEL,
	"3002: Addressed device failed to respond to selection"},
	{0x04080000, 1, IPR_DEFAULT_LOG_LEVEL,
	"3100: Device bus error"},
	{0x04080100, 0, IPR_DEFAULT_LOG_LEVEL,
	"3109: IOA timed out a device command"},
	{0x04088000, 0, 0,
	"3120: SCSI bus is not operational"},
	{0x04088100, 0, IPR_DEFAULT_LOG_LEVEL,
	"4100: Hard device bus fabric error"},
	{0x04100100, 0, IPR_DEFAULT_LOG_LEVEL,
	"310C: Logical block guard error detected by the device"},
	{0x04100300, 0, IPR_DEFAULT_LOG_LEVEL,
	"310C: Logical block reference tag error detected by the device"},
	{0x04108300, 1, IPR_DEFAULT_LOG_LEVEL,
	"4170: Scatter list tag / sequence number error"},
	{0x04109000, 1, IPR_DEFAULT_LOG_LEVEL,
	"8150: Logical block CRC error on IOA to Host transfer"},
	{0x04109200, 1, IPR_DEFAULT_LOG_LEVEL,
	"4170: Logical block sequence number error on IOA to Host transfer"},
	{0x0410A000, 0, IPR_DEFAULT_LOG_LEVEL,
	"310D: Logical block reference tag error detected by the IOA"},
	{0x0410A100, 0, IPR_DEFAULT_LOG_LEVEL,
	"310D: Logical block guard error detected by the IOA"},
	{0x04118000, 0, IPR_DEFAULT_LOG_LEVEL,
	"9000: IOA reserved area data check"},
	{0x04118100, 0, IPR_DEFAULT_LOG_LEVEL,
	"9001: IOA reserved area invalid data pattern"},
	{0x04118200, 0, IPR_DEFAULT_LOG_LEVEL,
	"9002: IOA reserved area LRC error"},
	{0x04118300, 1, IPR_DEFAULT_LOG_LEVEL,
	"Hardware Error, IOA metadata access error"},
	{0x04320000, 0, IPR_DEFAULT_LOG_LEVEL,
	"102E: Out of alternate sectors for disk storage"},
	{0x04330000, 1, IPR_DEFAULT_LOG_LEVEL,
	"FFF4: Data transfer underlength error"},
	{0x04338000, 1, IPR_DEFAULT_LOG_LEVEL,
	"FFF4: Data transfer overlength error"},
	{0x043E0100, 0, IPR_DEFAULT_LOG_LEVEL,
	"3400: Logical unit failure"},
	{0x04408500, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFF4: Device microcode is corrupt"},
	{0x04418000, 1, IPR_DEFAULT_LOG_LEVEL,
	"8150: PCI bus error"},
	{0x04430000, 1, 0,
	"Unsupported device bus message received"},
	{0x04440000, 1, IPR_DEFAULT_LOG_LEVEL,
	"FFF4: Disk device problem"},
	{0x04448200, 1, IPR_DEFAULT_LOG_LEVEL,
	"8150: Permanent IOA failure"},
	{0x04448300, 0, IPR_DEFAULT_LOG_LEVEL,
	"3010: Disk device returned wrong response to IOA"},
	{0x04448400, 0, IPR_DEFAULT_LOG_LEVEL,
	"8151: IOA microcode error"},
	{0x04448500, 0, 0,
	"Device bus status error"},
	{0x04448600, 0, IPR_DEFAULT_LOG_LEVEL,
	"8157: IOA error requiring IOA reset to recover"},
	{0x04448700, 0, 0,
	"ATA device status error"},
	{0x04490000, 0, 0,
	"Message reject received from the device"},
	{0x04449200, 0, IPR_DEFAULT_LOG_LEVEL,
	"8008: A permanent cache battery pack failure occurred"},
	{0x0444A000, 0, IPR_DEFAULT_LOG_LEVEL,
	"9090: Disk unit has been modified after the last known status"},
	{0x0444A200, 0, IPR_DEFAULT_LOG_LEVEL,
	"9081: IOA detected device error"},
	{0x0444A300, 0, IPR_DEFAULT_LOG_LEVEL,
	"9082: IOA detected device error"},
	{0x044A0000, 1, IPR_DEFAULT_LOG_LEVEL,
	"3110: Device bus error, message or command phase"},
	{0x044A8000, 1, IPR_DEFAULT_LOG_LEVEL,
	"3110: SAS Command / Task Management Function failed"},
	{0x04670400, 0, IPR_DEFAULT_LOG_LEVEL,
	"9091: Incorrect hardware configuration change has been detected"},
	{0x04678000, 0, IPR_DEFAULT_LOG_LEVEL,
	"9073: Invalid multi-adapter configuration"},
	{0x04678100, 0, IPR_DEFAULT_LOG_LEVEL,
	"4010: Incorrect connection between cascaded expanders"},
	{0x04678200, 0, IPR_DEFAULT_LOG_LEVEL,
	"4020: Connections exceed IOA design limits"},
	{0x04678300, 0, IPR_DEFAULT_LOG_LEVEL,
	"4030: Incorrect multipath connection"},
	{0x04679000, 0, IPR_DEFAULT_LOG_LEVEL,
	"4110: Unsupported enclosure function"},
	{0x046E0000, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFF4: Command to logical unit failed"},
	{0x05240000, 1, 0,
	"Illegal request, invalid request type or request packet"},
	{0x05250000, 0, 0,
	"Illegal request, invalid resource handle"},
	{0x05258000, 0, 0,
	"Illegal request, commands not allowed to this device"},
	{0x05258100, 0, 0,
	"Illegal request, command not allowed to a secondary adapter"},
	{0x05258200, 0, 0,
	"Illegal request, command not allowed to a non-optimized resource"},
	{0x05260000, 0, 0,
	"Illegal request, invalid field in parameter list"},
	{0x05260100, 0, 0,
	"Illegal request, parameter not supported"},
	{0x05260200, 0, 0,
	"Illegal request, parameter value invalid"},
	{0x052C0000, 0, 0,
	"Illegal request, command sequence error"},
	{0x052C8000, 1, 0,
	"Illegal request, dual adapter support not enabled"},
	{0x06040500, 0, IPR_DEFAULT_LOG_LEVEL,
	"9031: Array protection temporarily suspended, protection resuming"},
	{0x06040600, 0, IPR_DEFAULT_LOG_LEVEL,
	"9040: Array protection temporarily suspended, protection resuming"},
	{0x06288000, 0, IPR_DEFAULT_LOG_LEVEL,
	"3140: Device bus not ready to ready transition"},
	{0x06290000, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFFB: SCSI bus was reset"},
	{0x06290500, 0, 0,
	"FFFE: SCSI bus transition to single ended"},
	{0x06290600, 0, 0,
	"FFFE: SCSI bus transition to LVD"},
	{0x06298000, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFFB: SCSI bus was reset by another initiator"},
	{0x063F0300, 0, IPR_DEFAULT_LOG_LEVEL,
	"3029: A device replacement has occurred"},
	{0x064C8000, 0, IPR_DEFAULT_LOG_LEVEL,
	"9051: IOA cache data exists for a missing or failed device"},
	{0x064C8100, 0, IPR_DEFAULT_LOG_LEVEL,
	"9055: Auxiliary cache IOA contains cache data needed by the primary IOA"},
	{0x06670100, 0, IPR_DEFAULT_LOG_LEVEL,
	"9025: Disk unit is not supported at its physical location"},
	{0x06670600, 0, IPR_DEFAULT_LOG_LEVEL,
	"3020: IOA detected a SCSI bus configuration error"},
	{0x06678000, 0, IPR_DEFAULT_LOG_LEVEL,
	"3150: SCSI bus configuration error"},
	{0x06678100, 0, IPR_DEFAULT_LOG_LEVEL,
	"9074: Asymmetric advanced function disk configuration"},
	{0x06678300, 0, IPR_DEFAULT_LOG_LEVEL,
	"4040: Incomplete multipath connection between IOA and enclosure"},
	{0x06678400, 0, IPR_DEFAULT_LOG_LEVEL,
	"4041: Incomplete multipath connection between enclosure and device"},
	{0x06678500, 0, IPR_DEFAULT_LOG_LEVEL,
	"9075: Incomplete multipath connection between IOA and remote IOA"},
	{0x06678600, 0, IPR_DEFAULT_LOG_LEVEL,
	"9076: Configuration error, missing remote IOA"},
	{0x06679100, 0, IPR_DEFAULT_LOG_LEVEL,
	"4050: Enclosure does not support a required multipath function"},
	{0x06690000, 0, IPR_DEFAULT_LOG_LEVEL,
	"4070: Logically bad block written on device"},
	{0x06690200, 0, IPR_DEFAULT_LOG_LEVEL,
	"9041: Array protection temporarily suspended"},
	{0x06698200, 0, IPR_DEFAULT_LOG_LEVEL,
	"9042: Corrupt array parity detected on specified device"},
	{0x066B0200, 0, IPR_DEFAULT_LOG_LEVEL,
	"9030: Array no longer protected due to missing or failed disk unit"},
	{0x066B8000, 0, IPR_DEFAULT_LOG_LEVEL,
	"9071: Link operational transition"},
	{0x066B8100, 0, IPR_DEFAULT_LOG_LEVEL,
	"9072: Link not operational transition"},
	{0x066B8200, 0, IPR_DEFAULT_LOG_LEVEL,
	"9032: Array exposed but still protected"},
	{0x066B8300, 0, IPR_DEFAULT_LOG_LEVEL + 1,
	"70DD: Device forced failed by disrupt device command"},
	{0x066B9100, 0, IPR_DEFAULT_LOG_LEVEL,
	"4061: Multipath redundancy level got better"},
	{0x066B9200, 0, IPR_DEFAULT_LOG_LEVEL,
	"4060: Multipath redundancy level got worse"},
	{0x07270000, 0, 0,
	"Failure due to other device"},
	{0x07278000, 0, IPR_DEFAULT_LOG_LEVEL,
	"9008: IOA does not support functions expected by devices"},
	{0x07278100, 0, IPR_DEFAULT_LOG_LEVEL,
	"9010: Cache data associated with attached devices cannot be found"},
	{0x07278200, 0, IPR_DEFAULT_LOG_LEVEL,
	"9011: Cache data belongs to devices other than those attached"},
	{0x07278400, 0, IPR_DEFAULT_LOG_LEVEL,
	"9020: Array missing 2 or more devices with only 1 device present"},
	{0x07278500, 0, IPR_DEFAULT_LOG_LEVEL,
	"9021: Array missing 2 or more devices with 2 or more devices present"},
	{0x07278600, 0, IPR_DEFAULT_LOG_LEVEL,
	"9022: Exposed array is missing a required device"},
	{0x07278700, 0, IPR_DEFAULT_LOG_LEVEL,
	"9023: Array member(s) not at required physical locations"},
	{0x07278800, 0, IPR_DEFAULT_LOG_LEVEL,
	"9024: Array not functional due to present hardware configuration"},
	{0x07278900, 0, IPR_DEFAULT_LOG_LEVEL,
	"9026: Array not functional due to present hardware configuration"},
	{0x07278A00, 0, IPR_DEFAULT_LOG_LEVEL,
	"9027: Array is missing a device and parity is out of sync"},
	{0x07278B00, 0, IPR_DEFAULT_LOG_LEVEL,
	"9028: Maximum number of arrays already exist"},
	{0x07278C00, 0, IPR_DEFAULT_LOG_LEVEL,
	"9050: Required cache data cannot be located for a disk unit"},
	{0x07278D00, 0, IPR_DEFAULT_LOG_LEVEL,
	"9052: Cache data exists for a device that has been modified"},
	{0x07278F00, 0, IPR_DEFAULT_LOG_LEVEL,
	"9054: IOA resources not available due to previous problems"},
	{0x07279100, 0, IPR_DEFAULT_LOG_LEVEL,
	"9092: Disk unit requires initialization before use"},
	{0x07279200, 0, IPR_DEFAULT_LOG_LEVEL,
	"9029: Incorrect hardware configuration change has been detected"},
	{0x07279600, 0, IPR_DEFAULT_LOG_LEVEL,
	"9060: One or more disk pairs are missing from an array"},
	{0x07279700, 0, IPR_DEFAULT_LOG_LEVEL,
	"9061: One or more disks are missing from an array"},
	{0x07279800, 0, IPR_DEFAULT_LOG_LEVEL,
	"9062: One or more disks are missing from an array"},
	{0x07279900, 0, IPR_DEFAULT_LOG_LEVEL,
	"9063: Maximum number of functional arrays has been exceeded"},
	{0x0B260000, 0, 0,
	"Aborted command, invalid descriptor"},
	{0x0B5A0000, 0, 0,
	"Command terminated by host"}
};

static const struct ipr_ses_table_entry ipr_ses_table[] = {
	{ "2104-DL1 ", "XXXXXXXXXXXXXXXX", 80 },
	{ "2104-TL1 ", "XXXXXXXXXXXXXXXX", 80 },
	{ "HSBP07M P U2SCSI", "XXXXXXXXXXXXXXXX", 80 }, /* Hidive 7 slot */
	{ "HSBP05M P U2SCSI", "XXXXXXXXXXXXXXXX", 80 }, /* Hidive 5 slot */
	{ "HSBP05M S U2SCSI", "XXXXXXXXXXXXXXXX", 80 }, /* Bowtie */
	{ "HSBP06E ASU2SCSI", "XXXXXXXXXXXXXXXX", 80 }, /* MartinFenning */
	{ "2104-DU3 ", "XXXXXXXXXXXXXXXX", 160 },
	{ "2104-TU3 ", "XXXXXXXXXXXXXXXX", 160 },
	{ "HSBP04C RSU2SCSI", "XXXXXXX*XXXXXXXX", 160 },
	{ "HSBP06E RSU2SCSI", "XXXXXXX*XXXXXXXX", 160 },
	{ "St V1S2 ", "XXXXXXXXXXXXXXXX", 160 },
	{ "HSBPD4M PU3SCSI", "XXXXXXX*XXXXXXXX", 160 },
	{ "VSBPD1H U3SCSI", "XXXXXXX*XXXXXXXX", 160 }
};

/*
 * Function Prototypes
 */
static int ipr_reset_alert(struct ipr_cmnd *);
static void ipr_process_ccn(struct ipr_cmnd *);
static void ipr_process_error(struct ipr_cmnd *);
static void ipr_reset_ioa_job(struct ipr_cmnd *);
static void ipr_initiate_ioa_reset(struct ipr_ioa_cfg *,
				   enum ipr_shutdown_type);

#ifdef CONFIG_SCSI_IPR_TRACE
/**
 * ipr_trc_hook - Add a trace entry to the driver trace
 * @ipr_cmd:	ipr command struct
 * @type:	trace type
 * @add_data:	additional data
 *
 * Return value:
 * 	none
 **/
static void ipr_trc_hook(struct ipr_cmnd *ipr_cmd,
			 u8 type, u32 add_data)
{
	struct ipr_trace_entry *trace_entry;
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;

	trace_entry = &ioa_cfg->trace[ioa_cfg->trace_index++];
	trace_entry->time = jiffies;
	trace_entry->op_code = ipr_cmd->ioarcb.cmd_pkt.cdb[0];
	trace_entry->type = type;
	if (ipr_cmd->ioa_cfg->sis64)
		trace_entry->ata_op_code = ipr_cmd->i.ata_ioadl.regs.command;
	else
		trace_entry->ata_op_code = ipr_cmd->ioarcb.u.add_data.u.regs.command;
	trace_entry->cmd_index = ipr_cmd->cmd_index & 0xff;
	trace_entry->res_handle = ipr_cmd->ioarcb.res_handle;
	trace_entry->u.add_data = add_data;
}
#else
#define ipr_trc_hook(ipr_cmd, type, add_data) do { } while (0)
#endif

/**
 * ipr_reinit_ipr_cmnd - Re-initialize an IPR Cmnd block for reuse
 * @ipr_cmd:	ipr command struct
 *
 * Return value:
 * 	none
 **/
static void ipr_reinit_ipr_cmnd(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
	struct ipr_ioasa *ioasa = &ipr_cmd->ioasa;
	dma_addr_t dma_addr = ipr_cmd->dma_addr;

	memset(&ioarcb->cmd_pkt, 0, sizeof(struct ipr_cmd_pkt));
	ioarcb->data_transfer_length = 0;
	ioarcb->read_data_transfer_length = 0;
	ioarcb->ioadl_len = 0;
	ioarcb->read_ioadl_len = 0;

	if (ipr_cmd->ioa_cfg->sis64)
		ioarcb->u.sis64_addr_data.data_ioadl_addr =
			cpu_to_be64(dma_addr + offsetof(struct ipr_cmnd, i.ioadl64));
	else {
		ioarcb->write_ioadl_addr =
			cpu_to_be32(dma_addr + offsetof(struct ipr_cmnd, i.ioadl));
		ioarcb->read_ioadl_addr = ioarcb->write_ioadl_addr;
	}

	ioasa->ioasc = 0;
	ioasa->residual_data_len = 0;
	ioasa->u.gata.status = 0;

	ipr_cmd->scsi_cmd = NULL;
	ipr_cmd->qc = NULL;
	ipr_cmd->sense_buffer[0] = 0;
	ipr_cmd->dma_use_sg = 0;
}

/**
 * ipr_init_ipr_cmnd - Initialize an IPR Cmnd block
 * @ipr_cmd:	ipr command struct
 *
 * Return value:
 * 	none
 **/
static void ipr_init_ipr_cmnd(struct ipr_cmnd *ipr_cmd)
{
	ipr_reinit_ipr_cmnd(ipr_cmd);
	ipr_cmd->u.scratch = 0;
	ipr_cmd->sibling = NULL;
	init_timer(&ipr_cmd->timer);
}

/**
 * ipr_get_free_ipr_cmnd - Get a free IPR Cmnd block
 * @ioa_cfg:	ioa config struct
 *
 * Return value:
 * 	pointer to ipr command struct
 **/
static
struct ipr_cmnd *ipr_get_free_ipr_cmnd(struct ipr_ioa_cfg *ioa_cfg)
{
	struct ipr_cmnd *ipr_cmd;

	ipr_cmd = list_entry(ioa_cfg->free_q.next, struct ipr_cmnd, queue);
	list_del(&ipr_cmd->queue);
	ipr_init_ipr_cmnd(ipr_cmd);

	return ipr_cmd;
}

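/*
 * Note: ipr_get_free_ipr_cmnd does no locking of its own and assumes
 * free_q is non-empty, so callers presumably hold the host lock and size
 * the free list for the worst case.
 */
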
/**
 * ipr_mask_and_clear_interrupts - Mask all and clear specified interrupts
 * @ioa_cfg:	ioa config struct
 * @clr_ints:	interrupts to clear
 *
 * This function masks all interrupts on the adapter, then clears the
 * interrupts specified in the mask
 *
 * Return value:
 * 	none
 **/
static void ipr_mask_and_clear_interrupts(struct ipr_ioa_cfg *ioa_cfg,
					  u32 clr_ints)
{
	volatile u32 int_reg;

	/* Stop new interrupts */
	ioa_cfg->allow_interrupts = 0;

	/* Set interrupt mask to stop all new interrupts */
	if (ioa_cfg->sis64)
		writeq(~0, ioa_cfg->regs.set_interrupt_mask_reg);
	else
		writel(~0, ioa_cfg->regs.set_interrupt_mask_reg);

	/* Clear any pending interrupts */
	if (ioa_cfg->sis64)
		writel(~0, ioa_cfg->regs.clr_interrupt_reg);
	writel(clr_ints, ioa_cfg->regs.clr_interrupt_reg32);
	int_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
}
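
/*
 * The trailing readl() in ipr_mask_and_clear_interrupts is presumably
 * there to flush the posted MMIO writes, so the mask and clear have
 * reached the adapter before the function returns.
 */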

/**
 * ipr_save_pcix_cmd_reg - Save PCI-X command register
 * @ioa_cfg:	ioa config struct
 *
 * Return value:
 * 	0 on success / -EIO on failure
 **/
static int ipr_save_pcix_cmd_reg(struct ipr_ioa_cfg *ioa_cfg)
{
	int pcix_cmd_reg = pci_find_capability(ioa_cfg->pdev, PCI_CAP_ID_PCIX);

	if (pcix_cmd_reg == 0)
		return 0;

	if (pci_read_config_word(ioa_cfg->pdev, pcix_cmd_reg + PCI_X_CMD,
				 &ioa_cfg->saved_pcix_cmd_reg) != PCIBIOS_SUCCESSFUL) {
		dev_err(&ioa_cfg->pdev->dev, "Failed to save PCI-X command register\n");
		return -EIO;
	}

	ioa_cfg->saved_pcix_cmd_reg |= PCI_X_CMD_DPERR_E | PCI_X_CMD_ERO;
	return 0;
}

/**
 * ipr_set_pcix_cmd_reg - Setup PCI-X command register
 * @ioa_cfg:	ioa config struct
 *
 * Return value:
 * 	0 on success / -EIO on failure
 **/
static int ipr_set_pcix_cmd_reg(struct ipr_ioa_cfg *ioa_cfg)
{
	int pcix_cmd_reg = pci_find_capability(ioa_cfg->pdev, PCI_CAP_ID_PCIX);

	if (pcix_cmd_reg) {
		if (pci_write_config_word(ioa_cfg->pdev, pcix_cmd_reg + PCI_X_CMD,
					  ioa_cfg->saved_pcix_cmd_reg) != PCIBIOS_SUCCESSFUL) {
			dev_err(&ioa_cfg->pdev->dev, "Failed to setup PCI-X command register\n");
			return -EIO;
		}
	}

	return 0;
}

/**
 * ipr_sata_eh_done - done function for aborted SATA commands
 * @ipr_cmd:	ipr command struct
 *
 * This function is invoked for ops generated to SATA
 * devices which are being aborted.
 *
 * Return value:
 * 	none
 **/
static void ipr_sata_eh_done(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
	struct ata_queued_cmd *qc = ipr_cmd->qc;
	struct ipr_sata_port *sata_port = qc->ap->private_data;

	qc->err_mask |= AC_ERR_OTHER;
	sata_port->ioasa.status |= ATA_BUSY;
	list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
	ata_qc_complete(qc);
}

/**
 * ipr_scsi_eh_done - mid-layer done function for aborted ops
 * @ipr_cmd:	ipr command struct
 *
 * This function is invoked by the interrupt handler for
 * ops generated by the SCSI mid-layer which are being aborted.
 *
 * Return value:
 * 	none
 **/
static void ipr_scsi_eh_done(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
	struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;

	scsi_cmd->result |= (DID_ERROR << 16);

	scsi_dma_unmap(ipr_cmd->scsi_cmd);
	scsi_cmd->scsi_done(scsi_cmd);
	list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
}

/**
 * ipr_fail_all_ops - Fails all outstanding ops.
 * @ioa_cfg:	ioa config struct
 *
 * This function fails all outstanding ops.
 *
 * Return value:
 * 	none
 **/
static void ipr_fail_all_ops(struct ipr_ioa_cfg *ioa_cfg)
{
	struct ipr_cmnd *ipr_cmd, *temp;

	ENTER;
	list_for_each_entry_safe(ipr_cmd, temp, &ioa_cfg->pending_q, queue) {
		list_del(&ipr_cmd->queue);

		ipr_cmd->ioasa.ioasc = cpu_to_be32(IPR_IOASC_IOA_WAS_RESET);
		ipr_cmd->ioasa.ilid = cpu_to_be32(IPR_DRIVER_ILID);

		if (ipr_cmd->scsi_cmd)
			ipr_cmd->done = ipr_scsi_eh_done;
		else if (ipr_cmd->qc)
			ipr_cmd->done = ipr_sata_eh_done;

		ipr_trc_hook(ipr_cmd, IPR_TRACE_FINISH, IPR_IOASC_IOA_WAS_RESET);
		del_timer(&ipr_cmd->timer);
		ipr_cmd->done(ipr_cmd);
	}

	LEAVE;
}

/**
 * ipr_send_command - Send driver initiated requests.
 * @ipr_cmd:	ipr command struct
 *
 * This function sends a command to the adapter using the correct write call.
 * In the case of sis64, calculate the ioarcb size required. Then or in the
 * appropriate bits.
 *
 * Return value:
 * 	none
 **/
static void ipr_send_command(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
	dma_addr_t send_dma_addr = ipr_cmd->dma_addr;

	if (ioa_cfg->sis64) {
		/* The default size is 256 bytes */
		send_dma_addr |= 0x1;

		/* If the number of ioadls * size of ioadl > 128 bytes,
		   then use a 512 byte ioarcb */
		if (ipr_cmd->dma_use_sg * sizeof(struct ipr_ioadl64_desc) > 128)
			send_dma_addr |= 0x4;
		writeq(send_dma_addr, ioa_cfg->regs.ioarrin_reg);
	} else
		writel(send_dma_addr, ioa_cfg->regs.ioarrin_reg);
}
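
/*
 * Sanity note on the cutoff above: assuming struct ipr_ioadl64_desc is 16
 * bytes, a command using more than eight ioadl64 descriptors no longer
 * fits the 128-byte window and is sent with the 512-byte IOARCB format
 * (bit 2 set) instead of the 256-byte default (bit 0 only).
 */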

/**
 * ipr_do_req - Send driver initiated requests.
 * @ipr_cmd:	ipr command struct
 * @done:	done function
 * @timeout_func:	timeout function
 * @timeout:	timeout value
 *
 * This function sends the specified command to the adapter with the
 * timeout given. The done function is invoked on command completion.
 *
 * Return value:
 * 	none
 **/
static void ipr_do_req(struct ipr_cmnd *ipr_cmd,
		       void (*done) (struct ipr_cmnd *),
		       void (*timeout_func) (struct ipr_cmnd *), u32 timeout)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;

	list_add_tail(&ipr_cmd->queue, &ioa_cfg->pending_q);

	ipr_cmd->done = done;

	ipr_cmd->timer.data = (unsigned long) ipr_cmd;
	ipr_cmd->timer.expires = jiffies + timeout;
	ipr_cmd->timer.function = (void (*)(unsigned long))timeout_func;

	add_timer(&ipr_cmd->timer);

	ipr_trc_hook(ipr_cmd, IPR_TRACE_START, 0);

	mb();

	ipr_send_command(ipr_cmd);
}
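
/*
 * The mb() in ipr_do_req is presumably what guarantees the IOARCB memory
 * writes are visible to the adapter before ipr_send_command() writes the
 * IOARRIN doorbell register.
 */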

/**
 * ipr_internal_cmd_done - Op done function for an internally generated op.
 * @ipr_cmd:	ipr command struct
 *
 * This function is the op done function for an internally generated,
 * blocking op. It simply wakes the sleeping thread.
 *
 * Return value:
 * 	none
 **/
static void ipr_internal_cmd_done(struct ipr_cmnd *ipr_cmd)
{
	if (ipr_cmd->sibling)
		ipr_cmd->sibling = NULL;
	else
		complete(&ipr_cmd->completion);
}

/**
 * ipr_init_ioadl - initialize the ioadl for the correct SIS type
 * @ipr_cmd:	ipr command struct
 * @dma_addr:	dma address
 * @len:	transfer length
 * @flags:	ioadl flag value
 *
 * This function initializes an ioadl in the case where there is only a single
 * descriptor.
 *
 * Return value:
 * 	nothing
 **/
static void ipr_init_ioadl(struct ipr_cmnd *ipr_cmd, dma_addr_t dma_addr,
			   u32 len, int flags)
{
	struct ipr_ioadl_desc *ioadl = ipr_cmd->i.ioadl;
	struct ipr_ioadl64_desc *ioadl64 = ipr_cmd->i.ioadl64;

	ipr_cmd->dma_use_sg = 1;

	if (ipr_cmd->ioa_cfg->sis64) {
		ioadl64->flags = cpu_to_be32(flags);
		ioadl64->data_len = cpu_to_be32(len);
		ioadl64->address = cpu_to_be64(dma_addr);

		ipr_cmd->ioarcb.ioadl_len =
			cpu_to_be32(sizeof(struct ipr_ioadl64_desc));
		ipr_cmd->ioarcb.data_transfer_length = cpu_to_be32(len);
	} else {
		ioadl->flags_and_data_len = cpu_to_be32(flags | len);
		ioadl->address = cpu_to_be32(dma_addr);

		if (flags == IPR_IOADL_FLAGS_READ_LAST) {
			ipr_cmd->ioarcb.read_ioadl_len =
				cpu_to_be32(sizeof(struct ipr_ioadl_desc));
			ipr_cmd->ioarcb.read_data_transfer_length = cpu_to_be32(len);
		} else {
			ipr_cmd->ioarcb.ioadl_len =
				cpu_to_be32(sizeof(struct ipr_ioadl_desc));
			ipr_cmd->ioarcb.data_transfer_length = cpu_to_be32(len);
		}
	}
}
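
/*
 * ipr_init_ioadl covers only the single-descriptor case; it is used for
 * driver-internal transfers such as the HCAM buffer set up in
 * ipr_send_hcam() below.
 */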

/**
 * ipr_send_blocking_cmd - Send command and sleep on its completion.
 * @ipr_cmd:	ipr command struct
 * @timeout_func:	function to invoke if command times out
 * @timeout:	timeout
 *
 * Return value:
 * 	none
 **/
static void ipr_send_blocking_cmd(struct ipr_cmnd *ipr_cmd,
				  void (*timeout_func) (struct ipr_cmnd *ipr_cmd),
				  u32 timeout)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;

	init_completion(&ipr_cmd->completion);
	ipr_do_req(ipr_cmd, ipr_internal_cmd_done, timeout_func, timeout);

	spin_unlock_irq(ioa_cfg->host->host_lock);
	wait_for_completion(&ipr_cmd->completion);
	spin_lock_irq(ioa_cfg->host->host_lock);
}
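
/*
 * ipr_send_blocking_cmd must be entered with the host lock held: it drops
 * the lock around wait_for_completion() and reacquires it before
 * returning.
 */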

/**
 * ipr_send_hcam - Send an HCAM to the adapter.
 * @ioa_cfg:	ioa config struct
 * @type:	HCAM type
 * @hostrcb:	hostrcb struct
 *
 * This function will send a Host Controlled Async command to the adapter.
 * If HCAMs are currently not allowed to be issued to the adapter, it will
 * place the hostrcb on the free queue.
 *
 * Return value:
 * 	none
 **/
static void ipr_send_hcam(struct ipr_ioa_cfg *ioa_cfg, u8 type,
			  struct ipr_hostrcb *hostrcb)
{
	struct ipr_cmnd *ipr_cmd;
	struct ipr_ioarcb *ioarcb;

	if (ioa_cfg->allow_cmds) {
		ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
		list_add_tail(&ipr_cmd->queue, &ioa_cfg->pending_q);
		list_add_tail(&hostrcb->queue, &ioa_cfg->hostrcb_pending_q);

		ipr_cmd->u.hostrcb = hostrcb;
		ioarcb = &ipr_cmd->ioarcb;

		ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
		ioarcb->cmd_pkt.request_type = IPR_RQTYPE_HCAM;
		ioarcb->cmd_pkt.cdb[0] = IPR_HOST_CONTROLLED_ASYNC;
		ioarcb->cmd_pkt.cdb[1] = type;
		ioarcb->cmd_pkt.cdb[7] = (sizeof(hostrcb->hcam) >> 8) & 0xff;
		ioarcb->cmd_pkt.cdb[8] = sizeof(hostrcb->hcam) & 0xff;

		ipr_init_ioadl(ipr_cmd, hostrcb->hostrcb_dma,
			       sizeof(hostrcb->hcam), IPR_IOADL_FLAGS_READ_LAST);

		if (type == IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE)
			ipr_cmd->done = ipr_process_ccn;
		else
			ipr_cmd->done = ipr_process_error;

		ipr_trc_hook(ipr_cmd, IPR_TRACE_START, IPR_IOA_RES_ADDR);

		mb();

		ipr_send_command(ipr_cmd);
	} else {
		list_add_tail(&hostrcb->queue, &ioa_cfg->hostrcb_free_q);
	}
}
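
/*
 * HCAMs are self-perpetuating: the done routines installed here hand the
 * hostrcb back to ipr_send_hcam once processing completes (as
 * ipr_process_ccn below does), so the adapter keeps notification buffers
 * outstanding while allow_cmds is set.
 */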

/**
 * ipr_update_ata_class - Update the ata class in the resource entry
 * @res:	resource entry struct
 * @proto:	cfgte device bus protocol value
 *
 * Return value:
 * 	none
 **/
static void ipr_update_ata_class(struct ipr_resource_entry *res, unsigned int proto)
{
	switch (proto) {
	case IPR_PROTO_SATA:
	case IPR_PROTO_SAS_STP:
		res->ata_class = ATA_DEV_ATA;
		break;
	case IPR_PROTO_SATA_ATAPI:
	case IPR_PROTO_SAS_STP_ATAPI:
		res->ata_class = ATA_DEV_ATAPI;
		break;
	default:
		res->ata_class = ATA_DEV_UNKNOWN;
		break;
	}
}

/**
 * ipr_init_res_entry - Initialize a resource entry struct.
 * @res:	resource entry struct
 * @cfgtew:	config table entry wrapper struct
 *
 * Return value:
 * 	none
 **/
static void ipr_init_res_entry(struct ipr_resource_entry *res,
			       struct ipr_config_table_entry_wrapper *cfgtew)
{
	int found = 0;
	unsigned int proto;
	struct ipr_ioa_cfg *ioa_cfg = res->ioa_cfg;
	struct ipr_resource_entry *gscsi_res = NULL;

	res->needs_sync_complete = 0;
	res->in_erp = 0;
	res->add_to_ml = 0;
	res->del_from_ml = 0;
	res->resetting_device = 0;
	res->sdev = NULL;
	res->sata_port = NULL;

	if (ioa_cfg->sis64) {
		proto = cfgtew->u.cfgte64->proto;
		res->res_flags = cfgtew->u.cfgte64->res_flags;
		res->qmodel = IPR_QUEUEING_MODEL64(res);
		res->type = cfgtew->u.cfgte64->res_type & 0x0f;

		memcpy(res->res_path, &cfgtew->u.cfgte64->res_path,
		       sizeof(res->res_path));

		res->bus = 0;
		res->lun = scsilun_to_int(&res->dev_lun);

		if (res->type == IPR_RES_TYPE_GENERIC_SCSI) {
			list_for_each_entry(gscsi_res, &ioa_cfg->used_res_q, queue) {
				if (gscsi_res->dev_id == cfgtew->u.cfgte64->dev_id) {
					found = 1;
					res->target = gscsi_res->target;
					break;
				}
			}
			if (!found) {
				res->target = find_first_zero_bit(ioa_cfg->target_ids,
								  ioa_cfg->max_devs_supported);
				set_bit(res->target, ioa_cfg->target_ids);
			}

			memcpy(&res->dev_lun.scsi_lun, &cfgtew->u.cfgte64->lun,
			       sizeof(res->dev_lun.scsi_lun));
		} else if (res->type == IPR_RES_TYPE_IOAFP) {
			res->bus = IPR_IOAFP_VIRTUAL_BUS;
			res->target = 0;
		} else if (res->type == IPR_RES_TYPE_ARRAY) {
			res->bus = IPR_ARRAY_VIRTUAL_BUS;
			res->target = find_first_zero_bit(ioa_cfg->array_ids,
							  ioa_cfg->max_devs_supported);
			set_bit(res->target, ioa_cfg->array_ids);
		} else if (res->type == IPR_RES_TYPE_VOLUME_SET) {
			res->bus = IPR_VSET_VIRTUAL_BUS;
			res->target = find_first_zero_bit(ioa_cfg->vset_ids,
							  ioa_cfg->max_devs_supported);
			set_bit(res->target, ioa_cfg->vset_ids);
		} else {
			res->target = find_first_zero_bit(ioa_cfg->target_ids,
							  ioa_cfg->max_devs_supported);
			set_bit(res->target, ioa_cfg->target_ids);
		}
	} else {
		proto = cfgtew->u.cfgte->proto;
		res->qmodel = IPR_QUEUEING_MODEL(res);
		res->flags = cfgtew->u.cfgte->flags;
		if (res->flags & IPR_IS_IOA_RESOURCE)
			res->type = IPR_RES_TYPE_IOAFP;
		else
			res->type = cfgtew->u.cfgte->rsvd_subtype & 0x0f;

		res->bus = cfgtew->u.cfgte->res_addr.bus;
		res->target = cfgtew->u.cfgte->res_addr.target;
		res->lun = cfgtew->u.cfgte->res_addr.lun;
	}

	ipr_update_ata_class(res, proto);
}

/**
 * ipr_is_same_device - Determine if two devices are the same.
 * @res:	resource entry struct
 * @cfgtew:	config table entry wrapper struct
 *
 * Return value:
 * 	1 if the devices are the same / 0 otherwise
 **/
static int ipr_is_same_device(struct ipr_resource_entry *res,
			      struct ipr_config_table_entry_wrapper *cfgtew)
{
	if (res->ioa_cfg->sis64) {
		if (!memcmp(&res->dev_id, &cfgtew->u.cfgte64->dev_id,
			    sizeof(cfgtew->u.cfgte64->dev_id)) &&
		    !memcmp(&res->lun, &cfgtew->u.cfgte64->lun,
			    sizeof(cfgtew->u.cfgte64->lun))) {
			return 1;
		}
	} else {
		if (res->bus == cfgtew->u.cfgte->res_addr.bus &&
		    res->target == cfgtew->u.cfgte->res_addr.target &&
		    res->lun == cfgtew->u.cfgte->res_addr.lun)
			return 1;
	}

	return 0;
}

/**
 * ipr_format_resource_path - Format the resource path for printing.
 * @res_path:	resource path
 * @buffer:	buffer
 *
 * Return value:
 * 	pointer to buffer
 **/
static char *ipr_format_resource_path(u8 *res_path, char *buffer)
{
	int i;
	char *p = buffer;

	/* Append in place rather than passing buffer as both source and
	   destination to sprintf(), which is undefined behavior. */
	p += sprintf(p, "%02X", res_path[0]);
	for (i = 1; res_path[i] != 0xff; i++)
		p += sprintf(p, "-%02X", res_path[i]);

	return buffer;
}

/**
 * ipr_update_res_entry - Update the resource entry.
 * @res:	resource entry struct
 * @cfgtew:	config table entry wrapper struct
 *
 * Return value:
 * 	none
 **/
static void ipr_update_res_entry(struct ipr_resource_entry *res,
				 struct ipr_config_table_entry_wrapper *cfgtew)
{
	char buffer[IPR_MAX_RES_PATH_LENGTH];
	unsigned int proto;
	int new_path = 0;

	if (res->ioa_cfg->sis64) {
		res->flags = cfgtew->u.cfgte64->flags;
		res->res_flags = cfgtew->u.cfgte64->res_flags;
		res->type = cfgtew->u.cfgte64->res_type & 0x0f;

		memcpy(&res->std_inq_data, &cfgtew->u.cfgte64->std_inq_data,
		       sizeof(struct ipr_std_inq_data));

		res->qmodel = IPR_QUEUEING_MODEL64(res);
		proto = cfgtew->u.cfgte64->proto;
		res->res_handle = cfgtew->u.cfgte64->res_handle;
		res->dev_id = cfgtew->u.cfgte64->dev_id;

		memcpy(&res->dev_lun.scsi_lun, &cfgtew->u.cfgte64->lun,
		       sizeof(res->dev_lun.scsi_lun));

		if (memcmp(res->res_path, &cfgtew->u.cfgte64->res_path,
			   sizeof(res->res_path))) {
			memcpy(res->res_path, &cfgtew->u.cfgte64->res_path,
			       sizeof(res->res_path));
			new_path = 1;
		}

		if (res->sdev && new_path)
			sdev_printk(KERN_INFO, res->sdev, "Resource path: %s\n",
				    ipr_format_resource_path(&res->res_path[0], &buffer[0]));
	} else {
		res->flags = cfgtew->u.cfgte->flags;
		if (res->flags & IPR_IS_IOA_RESOURCE)
			res->type = IPR_RES_TYPE_IOAFP;
		else
			res->type = cfgtew->u.cfgte->rsvd_subtype & 0x0f;

		memcpy(&res->std_inq_data, &cfgtew->u.cfgte->std_inq_data,
		       sizeof(struct ipr_std_inq_data));

		res->qmodel = IPR_QUEUEING_MODEL(res);
		proto = cfgtew->u.cfgte->proto;
		res->res_handle = cfgtew->u.cfgte->res_handle;
	}

	ipr_update_ata_class(res, proto);
}

/**
 * ipr_clear_res_target - Clear the bit in the bit map representing the target
 *			  for the resource.
 * @res:	resource entry struct
 *
 * Return value:
 * 	none
 **/
static void ipr_clear_res_target(struct ipr_resource_entry *res)
{
	struct ipr_resource_entry *gscsi_res = NULL;
	struct ipr_ioa_cfg *ioa_cfg = res->ioa_cfg;

	if (!ioa_cfg->sis64)
		return;

	if (res->bus == IPR_ARRAY_VIRTUAL_BUS)
		clear_bit(res->target, ioa_cfg->array_ids);
	else if (res->bus == IPR_VSET_VIRTUAL_BUS)
		clear_bit(res->target, ioa_cfg->vset_ids);
	else if (res->bus == 0 && res->type == IPR_RES_TYPE_GENERIC_SCSI) {
		list_for_each_entry(gscsi_res, &ioa_cfg->used_res_q, queue)
			if (gscsi_res->dev_id == res->dev_id && gscsi_res != res)
				return;
		clear_bit(res->target, ioa_cfg->target_ids);

	} else if (res->bus == 0)
		clear_bit(res->target, ioa_cfg->target_ids);
}

/**
 * ipr_handle_config_change - Handle a config change from the adapter
 * @ioa_cfg:	ioa config struct
 * @hostrcb:	hostrcb
 *
 * Return value:
 * 	none
 **/
static void ipr_handle_config_change(struct ipr_ioa_cfg *ioa_cfg,
				     struct ipr_hostrcb *hostrcb)
{
	struct ipr_resource_entry *res = NULL;
	struct ipr_config_table_entry_wrapper cfgtew;
	__be32 cc_res_handle;

	u32 is_ndn = 1;

	if (ioa_cfg->sis64) {
		cfgtew.u.cfgte64 = &hostrcb->hcam.u.ccn.u.cfgte64;
		cc_res_handle = cfgtew.u.cfgte64->res_handle;
	} else {
		cfgtew.u.cfgte = &hostrcb->hcam.u.ccn.u.cfgte;
		cc_res_handle = cfgtew.u.cfgte->res_handle;
	}

	list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
		if (res->res_handle == cc_res_handle) {
			is_ndn = 0;
			break;
		}
	}

	if (is_ndn) {
		if (list_empty(&ioa_cfg->free_res_q)) {
			ipr_send_hcam(ioa_cfg,
				      IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE,
				      hostrcb);
			return;
		}

		res = list_entry(ioa_cfg->free_res_q.next,
				 struct ipr_resource_entry, queue);

		list_del(&res->queue);
		ipr_init_res_entry(res, &cfgtew);
		list_add_tail(&res->queue, &ioa_cfg->used_res_q);
	}

	ipr_update_res_entry(res, &cfgtew);

	if (hostrcb->hcam.notify_type == IPR_HOST_RCB_NOTIF_TYPE_REM_ENTRY) {
		if (res->sdev) {
			res->del_from_ml = 1;
			res->res_handle = IPR_INVALID_RES_HANDLE;
			if (ioa_cfg->allow_ml_add_del)
				schedule_work(&ioa_cfg->work_q);
		} else {
			ipr_clear_res_target(res);
			list_move_tail(&res->queue, &ioa_cfg->free_res_q);
		}
	} else if (!res->sdev) {
		res->add_to_ml = 1;
		if (ioa_cfg->allow_ml_add_del)
			schedule_work(&ioa_cfg->work_q);
	}

	ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE, hostrcb);
}

/**
 * ipr_process_ccn - Op done function for a CCN.
 * @ipr_cmd:	ipr command struct
 *
 * This function is the op done function for a configuration
 * change notification host controlled async from the adapter.
 *
 * Return value:
 * 	none
 **/
static void ipr_process_ccn(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
	struct ipr_hostrcb *hostrcb = ipr_cmd->u.hostrcb;
	u32 ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);

	list_del(&hostrcb->queue);
	list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);

	if (ioasc) {
		if (ioasc != IPR_IOASC_IOA_WAS_RESET)
			dev_err(&ioa_cfg->pdev->dev,
				"Host RCB failed with IOASC: 0x%08X\n", ioasc);

		ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE, hostrcb);
	} else {
		ipr_handle_config_change(ioa_cfg, hostrcb);
	}
}

/**
 * strip_and_pad_whitespace - Strip and pad trailing whitespace.
 * @i:		index into buffer
 * @buf:	string to modify
 *
 * This function will strip all trailing whitespace, pad the end
 * of the string with a single space, and NULL terminate the string.
 *
 * Return value:
 * 	new length of string
 **/
static int strip_and_pad_whitespace(int i, char *buf)
{
	while (i && buf[i] == ' ')
		i--;
	buf[i+1] = ' ';
	buf[i+2] = '\0';
	return i + 2;
}
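
/*
 * For example, strip_and_pad_whitespace(7, buf) with buf = "IBM     "
 * leaves "IBM " (single trailing space, NUL terminated) and returns 4.
 */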

/**
 * ipr_log_vpd_compact - Log the passed extended VPD compactly.
 * @prefix:	string to print at start of printk
 * @hostrcb:	hostrcb pointer
 * @vpd:	vendor/product id/sn struct
 *
 * Return value:
 * 	none
 **/
static void ipr_log_vpd_compact(char *prefix, struct ipr_hostrcb *hostrcb,
				struct ipr_vpd *vpd)
{
	char buffer[IPR_VENDOR_ID_LEN + IPR_PROD_ID_LEN + IPR_SERIAL_NUM_LEN + 3];
	int i = 0;

	memcpy(buffer, vpd->vpids.vendor_id, IPR_VENDOR_ID_LEN);
	i = strip_and_pad_whitespace(IPR_VENDOR_ID_LEN - 1, buffer);

	memcpy(&buffer[i], vpd->vpids.product_id, IPR_PROD_ID_LEN);
	i = strip_and_pad_whitespace(i + IPR_PROD_ID_LEN - 1, buffer);

	memcpy(&buffer[i], vpd->sn, IPR_SERIAL_NUM_LEN);
	buffer[IPR_SERIAL_NUM_LEN + i] = '\0';

	ipr_hcam_err(hostrcb, "%s VPID/SN: %s\n", prefix, buffer);
}

/**
 * ipr_log_vpd - Log the passed VPD to the error log.
 * @vpd:	vendor/product id/sn struct
 *
 * Return value:
 * 	none
 **/
static void ipr_log_vpd(struct ipr_vpd *vpd)
{
	char buffer[IPR_VENDOR_ID_LEN + IPR_PROD_ID_LEN
		    + IPR_SERIAL_NUM_LEN];

	memcpy(buffer, vpd->vpids.vendor_id, IPR_VENDOR_ID_LEN);
	memcpy(buffer + IPR_VENDOR_ID_LEN, vpd->vpids.product_id,
	       IPR_PROD_ID_LEN);
	buffer[IPR_VENDOR_ID_LEN + IPR_PROD_ID_LEN] = '\0';
	ipr_err("Vendor/Product ID: %s\n", buffer);

	memcpy(buffer, vpd->sn, IPR_SERIAL_NUM_LEN);
	buffer[IPR_SERIAL_NUM_LEN] = '\0';
	ipr_err(" Serial Number: %s\n", buffer);
}

/**
 * ipr_log_ext_vpd_compact - Log the passed extended VPD compactly.
 * @prefix:	string to print at start of printk
 * @hostrcb:	hostrcb pointer
 * @vpd:	vendor/product id/sn/wwn struct
 *
 * Return value:
 * 	none
 **/
static void ipr_log_ext_vpd_compact(char *prefix, struct ipr_hostrcb *hostrcb,
				    struct ipr_ext_vpd *vpd)
{
	ipr_log_vpd_compact(prefix, hostrcb, &vpd->vpd);
	ipr_hcam_err(hostrcb, "%s WWN: %08X%08X\n", prefix,
		     be32_to_cpu(vpd->wwid[0]), be32_to_cpu(vpd->wwid[1]));
}

/**
 * ipr_log_ext_vpd - Log the passed extended VPD to the error log.
 * @vpd:	vendor/product id/sn/wwn struct
 *
 * Return value:
 * 	none
 **/
static void ipr_log_ext_vpd(struct ipr_ext_vpd *vpd)
{
	ipr_log_vpd(&vpd->vpd);
	ipr_err(" WWN: %08X%08X\n", be32_to_cpu(vpd->wwid[0]),
		be32_to_cpu(vpd->wwid[1]));
}

/**
 * ipr_log_enhanced_cache_error - Log a cache error.
 * @ioa_cfg:	ioa config struct
 * @hostrcb:	hostrcb struct
 *
 * Return value:
 * 	none
 **/
static void ipr_log_enhanced_cache_error(struct ipr_ioa_cfg *ioa_cfg,
					 struct ipr_hostrcb *hostrcb)
{
	struct ipr_hostrcb_type_12_error *error;

	if (ioa_cfg->sis64)
		error = &hostrcb->hcam.u.error64.u.type_12_error;
	else
		error = &hostrcb->hcam.u.error.u.type_12_error;

	ipr_err("-----Current Configuration-----\n");
	ipr_err("Cache Directory Card Information:\n");
	ipr_log_ext_vpd(&error->ioa_vpd);
	ipr_err("Adapter Card Information:\n");
	ipr_log_ext_vpd(&error->cfc_vpd);

	ipr_err("-----Expected Configuration-----\n");
	ipr_err("Cache Directory Card Information:\n");
	ipr_log_ext_vpd(&error->ioa_last_attached_to_cfc_vpd);
	ipr_err("Adapter Card Information:\n");
	ipr_log_ext_vpd(&error->cfc_last_attached_to_ioa_vpd);

	ipr_err("Additional IOA Data: %08X %08X %08X\n",
		be32_to_cpu(error->ioa_data[0]),
		be32_to_cpu(error->ioa_data[1]),
		be32_to_cpu(error->ioa_data[2]));
}

/**
 * ipr_log_cache_error - Log a cache error.
 * @ioa_cfg:	ioa config struct
 * @hostrcb:	hostrcb struct
 *
 * Return value:
 * 	none
 **/
static void ipr_log_cache_error(struct ipr_ioa_cfg *ioa_cfg,
				struct ipr_hostrcb *hostrcb)
{
	struct ipr_hostrcb_type_02_error *error =
		&hostrcb->hcam.u.error.u.type_02_error;

	ipr_err("-----Current Configuration-----\n");
	ipr_err("Cache Directory Card Information:\n");
	ipr_log_vpd(&error->ioa_vpd);
	ipr_err("Adapter Card Information:\n");
	ipr_log_vpd(&error->cfc_vpd);

	ipr_err("-----Expected Configuration-----\n");
	ipr_err("Cache Directory Card Information:\n");
	ipr_log_vpd(&error->ioa_last_attached_to_cfc_vpd);
	ipr_err("Adapter Card Information:\n");
	ipr_log_vpd(&error->cfc_last_attached_to_ioa_vpd);

	ipr_err("Additional IOA Data: %08X %08X %08X\n",
		be32_to_cpu(error->ioa_data[0]),
		be32_to_cpu(error->ioa_data[1]),
		be32_to_cpu(error->ioa_data[2]));
}

/**
 * ipr_log_enhanced_config_error - Log a configuration error.
 * @ioa_cfg:	ioa config struct
 * @hostrcb:	hostrcb struct
 *
 * Return value:
 * 	none
 **/
static void ipr_log_enhanced_config_error(struct ipr_ioa_cfg *ioa_cfg,
					  struct ipr_hostrcb *hostrcb)
{
	int errors_logged, i;
	struct ipr_hostrcb_device_data_entry_enhanced *dev_entry;
	struct ipr_hostrcb_type_13_error *error;

	error = &hostrcb->hcam.u.error.u.type_13_error;
	errors_logged = be32_to_cpu(error->errors_logged);

	ipr_err("Device Errors Detected/Logged: %d/%d\n",
		be32_to_cpu(error->errors_detected), errors_logged);

	dev_entry = error->dev;

	for (i = 0; i < errors_logged; i++, dev_entry++) {
		ipr_err_separator;

		ipr_phys_res_err(ioa_cfg, dev_entry->dev_res_addr, "Device %d", i + 1);
		ipr_log_ext_vpd(&dev_entry->vpd);

		ipr_err("-----New Device Information-----\n");
		ipr_log_ext_vpd(&dev_entry->new_vpd);

		ipr_err("Cache Directory Card Information:\n");
		ipr_log_ext_vpd(&dev_entry->ioa_last_with_dev_vpd);

		ipr_err("Adapter Card Information:\n");
		ipr_log_ext_vpd(&dev_entry->cfc_last_with_dev_vpd);
	}
}

/**
 * ipr_log_sis64_config_error - Log a configuration error.
 * @ioa_cfg:	ioa config struct
 * @hostrcb:	hostrcb struct
 *
 * Return value:
 * 	none
 **/
static void ipr_log_sis64_config_error(struct ipr_ioa_cfg *ioa_cfg,
				       struct ipr_hostrcb *hostrcb)
{
	int errors_logged, i;
	struct ipr_hostrcb64_device_data_entry_enhanced *dev_entry;
	struct ipr_hostrcb_type_23_error *error;
	char buffer[IPR_MAX_RES_PATH_LENGTH];

	error = &hostrcb->hcam.u.error64.u.type_23_error;
	errors_logged = be32_to_cpu(error->errors_logged);

	ipr_err("Device Errors Detected/Logged: %d/%d\n",
		be32_to_cpu(error->errors_detected), errors_logged);

	dev_entry = error->dev;

	for (i = 0; i < errors_logged; i++, dev_entry++) {
		ipr_err_separator;

		ipr_err("Device %d : %s", i + 1,
			ipr_format_resource_path(&dev_entry->res_path[0], &buffer[0]));
		ipr_log_ext_vpd(&dev_entry->vpd);

		ipr_err("-----New Device Information-----\n");
		ipr_log_ext_vpd(&dev_entry->new_vpd);

		ipr_err("Cache Directory Card Information:\n");
		ipr_log_ext_vpd(&dev_entry->ioa_last_with_dev_vpd);

		ipr_err("Adapter Card Information:\n");
		ipr_log_ext_vpd(&dev_entry->cfc_last_with_dev_vpd);
	}
}

1586/**
1587 * ipr_log_config_error - Log a configuration error.
1588 * @ioa_cfg: ioa config struct
1589 * @hostrcb: hostrcb struct
1590 *
1591 * Return value:
1592 * none
1593 **/
1594static void ipr_log_config_error(struct ipr_ioa_cfg *ioa_cfg,
1595 struct ipr_hostrcb *hostrcb)
1596{
1597 int errors_logged, i;
1598 struct ipr_hostrcb_device_data_entry *dev_entry;
1599 struct ipr_hostrcb_type_03_error *error;
1600
1601 error = &hostrcb->hcam.u.error.u.type_03_error;
1602 errors_logged = be32_to_cpu(error->errors_logged);
1603
1604 ipr_err("Device Errors Detected/Logged: %d/%d\n",
1605 be32_to_cpu(error->errors_detected), errors_logged);
1606
1607 dev_entry = error->dev;
1608
1609 for (i = 0; i < errors_logged; i++, dev_entry++) {
1610 ipr_err_separator;
1611
1612 ipr_phys_res_err(ioa_cfg, dev_entry->dev_res_addr, "Device %d", i + 1);
1613 ipr_log_vpd(&dev_entry->vpd);
1614
1615 ipr_err("-----New Device Information-----\n");
1616 ipr_log_vpd(&dev_entry->new_vpd);
1617
1618 ipr_err("Cache Directory Card Information:\n");
1619 ipr_log_vpd(&dev_entry->ioa_last_with_dev_vpd);
1620
1621 ipr_err("Adapter Card Information:\n");
1622 ipr_log_vpd(&dev_entry->cfc_last_with_dev_vpd);
1623
1624 ipr_err("Additional IOA Data: %08X %08X %08X %08X %08X\n",
1625 be32_to_cpu(dev_entry->ioa_data[0]),
1626 be32_to_cpu(dev_entry->ioa_data[1]),
1627 be32_to_cpu(dev_entry->ioa_data[2]),
1628 be32_to_cpu(dev_entry->ioa_data[3]),
1629 be32_to_cpu(dev_entry->ioa_data[4]));
1630 }
1631}
1632
1633/**
1634 * ipr_log_enhanced_array_error - Log an array configuration error.
1635 * @ioa_cfg: ioa config struct
1636 * @hostrcb: hostrcb struct
1637 *
1638 * Return value:
1639 * none
1640 **/
1641static void ipr_log_enhanced_array_error(struct ipr_ioa_cfg *ioa_cfg,
1642 struct ipr_hostrcb *hostrcb)
1643{
1644 int i, num_entries;
1645 struct ipr_hostrcb_type_14_error *error;
1646 struct ipr_hostrcb_array_data_entry_enhanced *array_entry;
1647 const u8 zero_sn[IPR_SERIAL_NUM_LEN] = { [0 ... IPR_SERIAL_NUM_LEN-1] = '0' };
1648
1649 error = &hostrcb->hcam.u.error.u.type_14_error;
1650
1651 ipr_err_separator;
1652
1653 ipr_err("RAID %s Array Configuration: %d:%d:%d:%d\n",
1654 error->protection_level,
1655 ioa_cfg->host->host_no,
1656 error->last_func_vset_res_addr.bus,
1657 error->last_func_vset_res_addr.target,
1658 error->last_func_vset_res_addr.lun);
1659
1660 ipr_err_separator;
1661
1662 array_entry = error->array_member;
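/* Bound the walk by the number of entries the local array can actually hold */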
1663 num_entries = min_t(u32, be32_to_cpu(error->num_entries),
1664 ARRAY_SIZE(error->array_member));
1665
1666 for (i = 0; i < num_entries; i++, array_entry++) {
1667 if (!memcmp(array_entry->vpd.vpd.sn, zero_sn, IPR_SERIAL_NUM_LEN))
1668 continue;
1669
1670 if (be32_to_cpu(error->exposed_mode_adn) == i)
1671 ipr_err("Exposed Array Member %d:\n", i);
1672 else
1673 ipr_err("Array Member %d:\n", i);
1674
1675 ipr_log_ext_vpd(&array_entry->vpd);
1676 ipr_phys_res_err(ioa_cfg, array_entry->dev_res_addr, "Current Location");
1677 ipr_phys_res_err(ioa_cfg, array_entry->expected_dev_res_addr,
1678 "Expected Location");
1679
1680 ipr_err_separator;
1681 }
1682}
1683
1684/**
1685 * ipr_log_array_error - Log an array configuration error.
1686 * @ioa_cfg: ioa config struct
1687 * @hostrcb: hostrcb struct
1688 *
1689 * Return value:
1690 * none
1691 **/
1692static void ipr_log_array_error(struct ipr_ioa_cfg *ioa_cfg,
1693 struct ipr_hostrcb *hostrcb)
1694{
1695 int i;
1696 struct ipr_hostrcb_type_04_error *error;
1697 struct ipr_hostrcb_array_data_entry *array_entry;
1698 const u8 zero_sn[IPR_SERIAL_NUM_LEN] = { [0 ... IPR_SERIAL_NUM_LEN-1] = '0' };
1699
1700 error = &hostrcb->hcam.u.error.u.type_04_error;
1701
1702 ipr_err_separator;
1703
1704 ipr_err("RAID %s Array Configuration: %d:%d:%d:%d\n",
1705 error->protection_level,
1706 ioa_cfg->host->host_no,
1707 error->last_func_vset_res_addr.bus,
1708 error->last_func_vset_res_addr.target,
1709 error->last_func_vset_res_addr.lun);
1710
1711 ipr_err_separator;
1712
1713 array_entry = error->array_member;
1714
1715 for (i = 0; i < 18; i++) {
1716 if (!memcmp(array_entry->vpd.sn, zero_sn, IPR_SERIAL_NUM_LEN))
1717 continue;
1718
1719 if (be32_to_cpu(error->exposed_mode_adn) == i)
1720 ipr_err("Exposed Array Member %d:\n", i);
1721 else
1722 ipr_err("Array Member %d:\n", i);
1723
1724 ipr_log_vpd(&array_entry->vpd);
1725
1726 ipr_phys_res_err(ioa_cfg, array_entry->dev_res_addr, "Current Location");
1727 ipr_phys_res_err(ioa_cfg, array_entry->expected_dev_res_addr,
1728 "Expected Location");
1729
1730 ipr_err_separator;
1731
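/* The 18 members are split across two arrays: entries 0-9 live in array_member, 10-17 in array_member2 */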
1732 if (i == 9)
1733 array_entry = error->array_member2;
1734 else
1735 array_entry++;
1736 }
1737}
1738
1739/**
1740 * ipr_log_hex_data - Log additional hex IOA error data.
1741 * @ioa_cfg: ioa config struct
1742 * @data: IOA error data
1743 * @len: data length
1744 *
1745 * Return value:
1746 * none
1747 **/
1748static void ipr_log_hex_data(struct ipr_ioa_cfg *ioa_cfg, u32 *data, int len)
1749 {
1750 int i;
1751
1752 if (len == 0)
1753 return;
1754
1755 if (ioa_cfg->log_level <= IPR_DEFAULT_LOG_LEVEL)
1756 len = min_t(int, len, IPR_DEFAULT_MAX_ERROR_DUMP);
1757
1758 for (i = 0; i < len / 4; i += 4) {
1759 ipr_err("%08X: %08X %08X %08X %08X\n", i*4,
1760 be32_to_cpu(data[i]),
1761 be32_to_cpu(data[i+1]),
1762 be32_to_cpu(data[i+2]),
1763 be32_to_cpu(data[i+3]));
1764 }
1765}
1766
1767/**
1768 * ipr_log_enhanced_dual_ioa_error - Log an enhanced dual adapter error.
1769 * @ioa_cfg: ioa config struct
1770 * @hostrcb: hostrcb struct
1771 *
1772 * Return value:
1773 * none
1774 **/
1775static void ipr_log_enhanced_dual_ioa_error(struct ipr_ioa_cfg *ioa_cfg,
1776 struct ipr_hostrcb *hostrcb)
1777{
1778 struct ipr_hostrcb_type_17_error *error;
1779
1780 if (ioa_cfg->sis64)
1781 error = &hostrcb->hcam.u.error64.u.type_17_error;
1782 else
1783 error = &hostrcb->hcam.u.error.u.type_17_error;
1784
1785 error->failure_reason[sizeof(error->failure_reason) - 1] = '\0';
1786 strim(error->failure_reason);
1787
1788 ipr_hcam_err(hostrcb, "%s [PRC: %08X]\n", error->failure_reason,
1789 be32_to_cpu(hostrcb->hcam.u.error.prc));
1790 ipr_log_ext_vpd_compact("Remote IOA", hostrcb, &error->vpd);
1791 ipr_log_hex_data(ioa_cfg, error->data,
1792 be32_to_cpu(hostrcb->hcam.length) -
1793 (offsetof(struct ipr_hostrcb_error, u) +
1794 offsetof(struct ipr_hostrcb_type_17_error, data)));
1795}
1796
1797/**
1798 * ipr_log_dual_ioa_error - Log a dual adapter error.
1799 * @ioa_cfg: ioa config struct
1800 * @hostrcb: hostrcb struct
1801 *
1802 * Return value:
1803 * none
1804 **/
1805static void ipr_log_dual_ioa_error(struct ipr_ioa_cfg *ioa_cfg,
1806 struct ipr_hostrcb *hostrcb)
1807{
1808 struct ipr_hostrcb_type_07_error *error;
1809
1810 error = &hostrcb->hcam.u.error.u.type_07_error;
1811 error->failure_reason[sizeof(error->failure_reason) - 1] = '\0';
1812 strim(error->failure_reason);
1813
1814 ipr_hcam_err(hostrcb, "%s [PRC: %08X]\n", error->failure_reason,
1815 be32_to_cpu(hostrcb->hcam.u.error.prc));
1816 ipr_log_vpd_compact("Remote IOA", hostrcb, &error->vpd);
1817 ipr_log_hex_data(ioa_cfg, error->data,
1818 be32_to_cpu(hostrcb->hcam.length) -
1819 (offsetof(struct ipr_hostrcb_error, u) +
1820 offsetof(struct ipr_hostrcb_type_07_error, data)));
1821}
1822
1823static const struct {
1824 u8 active;
1825 char *desc;
1826} path_active_desc[] = {
1827 { IPR_PATH_NO_INFO, "Path" },
1828 { IPR_PATH_ACTIVE, "Active path" },
1829 { IPR_PATH_NOT_ACTIVE, "Inactive path" }
1830};
1831
1832static const struct {
1833 u8 state;
1834 char *desc;
1835} path_state_desc[] = {
1836 { IPR_PATH_STATE_NO_INFO, "has no path state information available" },
1837 { IPR_PATH_HEALTHY, "is healthy" },
1838 { IPR_PATH_DEGRADED, "is degraded" },
1839 { IPR_PATH_FAILED, "is failed" }
1840};
1841
1842/**
1843 * ipr_log_fabric_path - Log a fabric path error
1844 * @hostrcb: hostrcb struct
1845 * @fabric: fabric descriptor
1846 *
1847 * Return value:
1848 * none
1849 **/
1850static void ipr_log_fabric_path(struct ipr_hostrcb *hostrcb,
1851 struct ipr_hostrcb_fabric_desc *fabric)
1852{
1853 int i, j;
1854 u8 path_state = fabric->path_state;
1855 u8 active = path_state & IPR_PATH_ACTIVE_MASK;
1856 u8 state = path_state & IPR_PATH_STATE_MASK;
1857
1858 for (i = 0; i < ARRAY_SIZE(path_active_desc); i++) {
1859 if (path_active_desc[i].active != active)
1860 continue;
1861
1862 for (j = 0; j < ARRAY_SIZE(path_state_desc); j++) {
1863 if (path_state_desc[j].state != state)
1864 continue;
1865
1866 if (fabric->cascaded_expander == 0xff && fabric->phy == 0xff) {
1867 ipr_hcam_err(hostrcb, "%s %s: IOA Port=%d\n",
1868 path_active_desc[i].desc, path_state_desc[j].desc,
1869 fabric->ioa_port);
1870 } else if (fabric->cascaded_expander == 0xff) {
1871 ipr_hcam_err(hostrcb, "%s %s: IOA Port=%d, Phy=%d\n",
1872 path_active_desc[i].desc, path_state_desc[j].desc,
1873 fabric->ioa_port, fabric->phy);
1874 } else if (fabric->phy == 0xff) {
1875 ipr_hcam_err(hostrcb, "%s %s: IOA Port=%d, Cascade=%d\n",
1876 path_active_desc[i].desc, path_state_desc[j].desc,
1877 fabric->ioa_port, fabric->cascaded_expander);
1878 } else {
1879 ipr_hcam_err(hostrcb, "%s %s: IOA Port=%d, Cascade=%d, Phy=%d\n",
1880 path_active_desc[i].desc, path_state_desc[j].desc,
1881 fabric->ioa_port, fabric->cascaded_expander, fabric->phy);
1882 }
1883 return;
1884 }
1885 }
1886
1887 ipr_err("Path state=%02X IOA Port=%d Cascade=%d Phy=%d\n", path_state,
1888 fabric->ioa_port, fabric->cascaded_expander, fabric->phy);
1889}
1890
1891/**
1892 * ipr_log64_fabric_path - Log a fabric path error
1893 * @hostrcb: hostrcb struct
1894 * @fabric: fabric descriptor
1895 *
1896 * Return value:
1897 * none
1898 **/
1899static void ipr_log64_fabric_path(struct ipr_hostrcb *hostrcb,
1900 struct ipr_hostrcb64_fabric_desc *fabric)
1901{
1902 int i, j;
1903 u8 path_state = fabric->path_state;
1904 u8 active = path_state & IPR_PATH_ACTIVE_MASK;
1905 u8 state = path_state & IPR_PATH_STATE_MASK;
1906 char buffer[IPR_MAX_RES_PATH_LENGTH];
1907
1908 for (i = 0; i < ARRAY_SIZE(path_active_desc); i++) {
1909 if (path_active_desc[i].active != active)
1910 continue;
1911
1912 for (j = 0; j < ARRAY_SIZE(path_state_desc); j++) {
1913 if (path_state_desc[j].state != state)
1914 continue;
1915
1916 ipr_hcam_err(hostrcb, "%s %s: Resource Path=%s\n",
1917 path_active_desc[i].desc, path_state_desc[j].desc,
1918 ipr_format_resource_path(&fabric->res_path[0], &buffer[0]));
1919 return;
1920 }
1921 }
1922
1923 ipr_err("Path state=%02X Resource Path=%s\n", path_state,
1924 ipr_format_resource_path(&fabric->res_path[0], &buffer[0]));
1925}
1926
1927static const struct {
1928 u8 type;
1929 char *desc;
1930} path_type_desc[] = {
1931 { IPR_PATH_CFG_IOA_PORT, "IOA port" },
1932 { IPR_PATH_CFG_EXP_PORT, "Expander port" },
1933 { IPR_PATH_CFG_DEVICE_PORT, "Device port" },
1934 { IPR_PATH_CFG_DEVICE_LUN, "Device LUN" }
1935};
1936
1937static const struct {
1938 u8 status;
1939 char *desc;
1940} path_status_desc[] = {
1941 { IPR_PATH_CFG_NO_PROB, "Functional" },
1942 { IPR_PATH_CFG_DEGRADED, "Degraded" },
1943 { IPR_PATH_CFG_FAILED, "Failed" },
1944 { IPR_PATH_CFG_SUSPECT, "Suspect" },
1945 { IPR_PATH_NOT_DETECTED, "Missing" },
1946 { IPR_PATH_INCORRECT_CONN, "Incorrectly connected" }
1947};
1948
1949static const char *link_rate[] = {
1950 "unknown",
1951 "disabled",
1952 "phy reset problem",
1953 "spinup hold",
1954 "port selector",
1955 "unknown",
1956 "unknown",
1957 "unknown",
1958 "1.5Gbps",
1959 "3.0Gbps",
1960 "unknown",
1961 "unknown",
1962 "unknown",
1963 "unknown",
1964 "unknown",
1965 "unknown"
1966};
1967
1968/**
1969 * ipr_log_path_elem - Log a fabric path element.
1970 * @hostrcb: hostrcb struct
1971 * @cfg: fabric path element struct
1972 *
1973 * Return value:
1974 * none
1975 **/
1976static void ipr_log_path_elem(struct ipr_hostrcb *hostrcb,
1977 struct ipr_hostrcb_config_element *cfg)
1978{
1979 int i, j;
1980 u8 type = cfg->type_status & IPR_PATH_CFG_TYPE_MASK;
1981 u8 status = cfg->type_status & IPR_PATH_CFG_STATUS_MASK;
1982
1983 if (type == IPR_PATH_CFG_NOT_EXIST)
1984 return;
1985
1986 for (i = 0; i < ARRAY_SIZE(path_type_desc); i++) {
1987 if (path_type_desc[i].type != type)
1988 continue;
1989
1990 for (j = 0; j < ARRAY_SIZE(path_status_desc); j++) {
1991 if (path_status_desc[j].status != status)
1992 continue;
1993
1994 if (type == IPR_PATH_CFG_IOA_PORT) {
1995 ipr_hcam_err(hostrcb, "%s %s: Phy=%d, Link rate=%s, WWN=%08X%08X\n",
1996 path_status_desc[j].desc, path_type_desc[i].desc,
1997 cfg->phy, link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
1998 be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
1999 } else {
2000 if (cfg->cascaded_expander == 0xff && cfg->phy == 0xff) {
2001 ipr_hcam_err(hostrcb, "%s %s: Link rate=%s, WWN=%08X%08X\n",
2002 path_status_desc[j].desc, path_type_desc[i].desc,
2003 link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
2004 be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
2005 } else if (cfg->cascaded_expander == 0xff) {
2006 ipr_hcam_err(hostrcb, "%s %s: Phy=%d, Link rate=%s, "
2007 "WWN=%08X%08X\n", path_status_desc[j].desc,
2008 path_type_desc[i].desc, cfg->phy,
2009 link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
2010 be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
2011 } else if (cfg->phy == 0xff) {
2012 ipr_hcam_err(hostrcb, "%s %s: Cascade=%d, Link rate=%s, "
2013 "WWN=%08X%08X\n", path_status_desc[j].desc,
2014 path_type_desc[i].desc, cfg->cascaded_expander,
2015 link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
2016 be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
2017 } else {
2018 ipr_hcam_err(hostrcb, "%s %s: Cascade=%d, Phy=%d, Link rate=%s, "
2019 "WWN=%08X%08X\n", path_status_desc[j].desc,
2020 path_type_desc[i].desc, cfg->cascaded_expander, cfg->phy,
2021 link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
2022 be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
2023 }
2024 }
2025 return;
2026 }
2027 }
2028
2029 ipr_hcam_err(hostrcb, "Path element=%02X: Cascade=%d Phy=%d Link rate=%s "
2030 "WWN=%08X%08X\n", cfg->type_status, cfg->cascaded_expander, cfg->phy,
2031 link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
2032 be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
2033}
2034
2035/**
2036 * ipr_log64_path_elem - Log a fabric path element.
2037 * @hostrcb: hostrcb struct
2038 * @cfg: fabric path element struct
2039 *
2040 * Return value:
2041 * none
2042 **/
2043static void ipr_log64_path_elem(struct ipr_hostrcb *hostrcb,
2044 struct ipr_hostrcb64_config_element *cfg)
2045{
2046 int i, j;
2047 u8 desc_id = cfg->descriptor_id & IPR_DESCRIPTOR_MASK;
2048 u8 type = cfg->type_status & IPR_PATH_CFG_TYPE_MASK;
2049 u8 status = cfg->type_status & IPR_PATH_CFG_STATUS_MASK;
2050 char buffer[IPR_MAX_RES_PATH_LENGTH];
2051
2052 if (type == IPR_PATH_CFG_NOT_EXIST || desc_id != IPR_DESCRIPTOR_SIS64)
2053 return;
2054
2055 for (i = 0; i < ARRAY_SIZE(path_type_desc); i++) {
2056 if (path_type_desc[i].type != type)
2057 continue;
2058
2059 for (j = 0; j < ARRAY_SIZE(path_status_desc); j++) {
2060 if (path_status_desc[j].status != status)
2061 continue;
2062
2063 ipr_hcam_err(hostrcb, "%s %s: Resource Path=%s, Link rate=%s, WWN=%08X%08X\n",
2064 path_status_desc[j].desc, path_type_desc[i].desc,
2065 ipr_format_resource_path(&cfg->res_path[0], &buffer[0]),
2066 link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
2067 be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
2068 return;
2069 }
2070 }
2071 ipr_hcam_err(hostrcb, "Path element=%02X: Resource Path=%s, Link rate=%s, "
2072 "WWN=%08X%08X\n", cfg->type_status,
2073 ipr_format_resource_path(&cfg->res_path[0], &buffer[0]),
2074 link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
2075 be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
2076}
2077
2078/**
2079 * ipr_log_fabric_error - Log a fabric error.
2080 * @ioa_cfg: ioa config struct
2081 * @hostrcb: hostrcb struct
2082 *
2083 * Return value:
2084 * none
2085 **/
2086static void ipr_log_fabric_error(struct ipr_ioa_cfg *ioa_cfg,
2087 struct ipr_hostrcb *hostrcb)
2088{
2089 struct ipr_hostrcb_type_20_error *error;
2090 struct ipr_hostrcb_fabric_desc *fabric;
2091 struct ipr_hostrcb_config_element *cfg;
2092 int i, add_len;
2093
2094 error = &hostrcb->hcam.u.error.u.type_20_error;
2095 error->failure_reason[sizeof(error->failure_reason) - 1] = '\0';
2096 ipr_hcam_err(hostrcb, "%s\n", error->failure_reason);
2097
2098 add_len = be32_to_cpu(hostrcb->hcam.length) -
2099 (offsetof(struct ipr_hostrcb_error, u) +
2100 offsetof(struct ipr_hostrcb_type_20_error, desc));
2101
2102 for (i = 0, fabric = error->desc; i < error->num_entries; i++) {
2103 ipr_log_fabric_path(hostrcb, fabric);
2104 for_each_fabric_cfg(fabric, cfg)
2105 ipr_log_path_elem(hostrcb, cfg);
2106
2107 add_len -= be16_to_cpu(fabric->length);
2108 fabric = (struct ipr_hostrcb_fabric_desc *)
2109 ((unsigned long)fabric + be16_to_cpu(fabric->length));
2110 }
2111
2112 ipr_log_hex_data(ioa_cfg, (u32 *)fabric, add_len);
2113}
2114
2115/**
2116 * ipr_log_sis64_array_error - Log a sis64 array error.
2117 * @ioa_cfg: ioa config struct
2118 * @hostrcb: hostrcb struct
2119 *
2120 * Return value:
2121 * none
2122 **/
2123static void ipr_log_sis64_array_error(struct ipr_ioa_cfg *ioa_cfg,
2124 struct ipr_hostrcb *hostrcb)
2125{
2126 int i, num_entries;
2127 struct ipr_hostrcb_type_24_error *error;
2128 struct ipr_hostrcb64_array_data_entry *array_entry;
2129 char buffer[IPR_MAX_RES_PATH_LENGTH];
2130 const u8 zero_sn[IPR_SERIAL_NUM_LEN] = { [0 ... IPR_SERIAL_NUM_LEN-1] = '0' };
2131
2132 error = &hostrcb->hcam.u.error64.u.type_24_error;
2133
2134 ipr_err_separator;
2135
2136 ipr_err("RAID %s Array Configuration: %s\n",
2137 error->protection_level,
2138 ipr_format_resource_path(&error->last_res_path[0], &buffer[0]));
2139
2140 ipr_err_separator;
2141
2142 array_entry = error->array_member;
2143 num_entries = min_t(u32, be32_to_cpu(error->num_entries),
2144 ARRAY_SIZE(error->array_member));
2145
2146 for (i = 0; i < num_entries; i++, array_entry++) {
2147
2148 if (!memcmp(array_entry->vpd.vpd.sn, zero_sn, IPR_SERIAL_NUM_LEN))
2149 continue;
2150
2151 if (error->exposed_mode_adn == i)
2152 ipr_err("Exposed Array Member %d:\n", i);
2153 else
2154 ipr_err("Array Member %d:\n", i);
2155
2157 ipr_log_ext_vpd(&array_entry->vpd);
2158 ipr_err("Current Location: %s\n",
2159 ipr_format_resource_path(&array_entry->res_path[0], &buffer[0]));
2160 ipr_err("Expected Location: %s\n",
2161 ipr_format_resource_path(&array_entry->expected_res_path[0], &buffer[0]));
2162
2163 ipr_err_separator;
2164 }
2165}
2166
2167/**
2168 * ipr_log_sis64_fabric_error - Log a sis64 fabric error.
2169 * @ioa_cfg: ioa config struct
2170 * @hostrcb: hostrcb struct
2171 *
2172 * Return value:
2173 * none
2174 **/
2175static void ipr_log_sis64_fabric_error(struct ipr_ioa_cfg *ioa_cfg,
2176 struct ipr_hostrcb *hostrcb)
2177{
2178 struct ipr_hostrcb_type_30_error *error;
2179 struct ipr_hostrcb64_fabric_desc *fabric;
2180 struct ipr_hostrcb64_config_element *cfg;
2181 int i, add_len;
2182
2183 error = &hostrcb->hcam.u.error64.u.type_30_error;
2184
2185 error->failure_reason[sizeof(error->failure_reason) - 1] = '\0';
2186 ipr_hcam_err(hostrcb, "%s\n", error->failure_reason);
2187
2188 add_len = be32_to_cpu(hostrcb->hcam.length) -
2189 (offsetof(struct ipr_hostrcb64_error, u) +
2190 offsetof(struct ipr_hostrcb_type_30_error, desc));
2191
2192 for (i = 0, fabric = error->desc; i < error->num_entries; i++) {
2193 ipr_log64_fabric_path(hostrcb, fabric);
2194 for_each_fabric_cfg(fabric, cfg)
2195 ipr_log64_path_elem(hostrcb, cfg);
2196
2197 add_len -= be16_to_cpu(fabric->length);
2198 fabric = (struct ipr_hostrcb64_fabric_desc *)
2199 ((unsigned long)fabric + be16_to_cpu(fabric->length));
2200 }
2201
2202 ipr_log_hex_data(ioa_cfg, (u32 *)fabric, add_len);
2203}
2204
2205/**
2206 * ipr_log_generic_error - Log an adapter error.
2207 * @ioa_cfg: ioa config struct
2208 * @hostrcb: hostrcb struct
2209 *
2210 * Return value:
2211 * none
2212 **/
2213static void ipr_log_generic_error(struct ipr_ioa_cfg *ioa_cfg,
2214 struct ipr_hostrcb *hostrcb)
2215{
2216 ipr_log_hex_data(ioa_cfg, hostrcb->hcam.u.raw.data,
2217 be32_to_cpu(hostrcb->hcam.length));
2218}
2219
2220/**
2221 * ipr_get_error - Find the specified IOASC in the ipr_error_table.
2222 * @ioasc: IOASC
2223 *
2224 * This function will return the index into the ipr_error_table
2225 * for the specified IOASC. If the IOASC is not in the table,
2226 * 0 will be returned, which points to the entry used for unknown errors.
2227 *
2228 * Return value:
2229 * index into the ipr_error_table
2230 **/
2231static u32 ipr_get_error(u32 ioasc)
2232{
2233 int i;
2234
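/* Compare with the IOASC mask applied so codes differing only in low-order status bits share a table entry */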
2235 for (i = 0; i < ARRAY_SIZE(ipr_error_table); i++)
2236 if (ipr_error_table[i].ioasc == (ioasc & IPR_IOASC_IOASC_MASK))
2237 return i;
2238
2239 return 0;
2240}
2241
2242/**
2243 * ipr_handle_log_data - Log an adapter error.
2244 * @ioa_cfg: ioa config struct
2245 * @hostrcb: hostrcb struct
2246 *
2247 * This function logs an adapter error to the system.
2248 *
2249 * Return value:
2250 * none
2251 **/
2252static void ipr_handle_log_data(struct ipr_ioa_cfg *ioa_cfg,
2253 struct ipr_hostrcb *hostrcb)
2254{
2255 u32 ioasc;
2256 int error_index;
2257
2258 if (hostrcb->hcam.notify_type != IPR_HOST_RCB_NOTIF_TYPE_ERROR_LOG_ENTRY)
2259 return;
2260
2261 if (hostrcb->hcam.notifications_lost == IPR_HOST_RCB_NOTIFICATIONS_LOST)
2262 dev_err(&ioa_cfg->pdev->dev, "Error notifications lost\n");
2263
2264 if (ioa_cfg->sis64)
2265 ioasc = be32_to_cpu(hostrcb->hcam.u.error64.fd_ioasc);
2266 else
2267 ioasc = be32_to_cpu(hostrcb->hcam.u.error.fd_ioasc);
2268
2269 if (!ioa_cfg->sis64 && (ioasc == IPR_IOASC_BUS_WAS_RESET ||
2270 ioasc == IPR_IOASC_BUS_WAS_RESET_BY_OTHER)) {
2271 /* Tell the midlayer we had a bus reset so it will handle the UA properly */
2272 scsi_report_bus_reset(ioa_cfg->host,
2273 hostrcb->hcam.u.error.fd_res_addr.bus);
2274 }
2275
2276 error_index = ipr_get_error(ioasc);
2277
2278 if (!ipr_error_table[error_index].log_hcam)
2279 return;
2280
2281 ipr_hcam_err(hostrcb, "%s\n", ipr_error_table[error_index].error);
2282
2283 /* Set indication we have logged an error */
2284 ioa_cfg->errors_logged++;
2285
2286 if (ioa_cfg->log_level < ipr_error_table[error_index].log_hcam)
2287 return;
2288 if (be32_to_cpu(hostrcb->hcam.length) > sizeof(hostrcb->hcam.u.raw))
2289 hostrcb->hcam.length = cpu_to_be32(sizeof(hostrcb->hcam.u.raw));
2290
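/* Dispatch on the hostrcb overlay ID to the formatter that understands that error layout */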
2291 switch (hostrcb->hcam.overlay_id) {
2292 case IPR_HOST_RCB_OVERLAY_ID_2:
2293 ipr_log_cache_error(ioa_cfg, hostrcb);
2294 break;
2295 case IPR_HOST_RCB_OVERLAY_ID_3:
2296 ipr_log_config_error(ioa_cfg, hostrcb);
2297 break;
2298 case IPR_HOST_RCB_OVERLAY_ID_4:
2299 case IPR_HOST_RCB_OVERLAY_ID_6:
2300 ipr_log_array_error(ioa_cfg, hostrcb);
2301 break;
2302 case IPR_HOST_RCB_OVERLAY_ID_7:
2303 ipr_log_dual_ioa_error(ioa_cfg, hostrcb);
2304 break;
2305 case IPR_HOST_RCB_OVERLAY_ID_12:
2306 ipr_log_enhanced_cache_error(ioa_cfg, hostrcb);
2307 break;
2308 case IPR_HOST_RCB_OVERLAY_ID_13:
2309 ipr_log_enhanced_config_error(ioa_cfg, hostrcb);
2310 break;
2311 case IPR_HOST_RCB_OVERLAY_ID_14:
2312 case IPR_HOST_RCB_OVERLAY_ID_16:
2313 ipr_log_enhanced_array_error(ioa_cfg, hostrcb);
2314 break;
2315 case IPR_HOST_RCB_OVERLAY_ID_17:
2316 ipr_log_enhanced_dual_ioa_error(ioa_cfg, hostrcb);
2317 break;
2318 case IPR_HOST_RCB_OVERLAY_ID_20:
2319 ipr_log_fabric_error(ioa_cfg, hostrcb);
2320 break;
2321 case IPR_HOST_RCB_OVERLAY_ID_23:
2322 ipr_log_sis64_config_error(ioa_cfg, hostrcb);
2323 break;
2324 case IPR_HOST_RCB_OVERLAY_ID_24:
2325 case IPR_HOST_RCB_OVERLAY_ID_26:
2326 ipr_log_sis64_array_error(ioa_cfg, hostrcb);
2327 break;
2328 case IPR_HOST_RCB_OVERLAY_ID_30:
2329 ipr_log_sis64_fabric_error(ioa_cfg, hostrcb);
2330 break;
2331 case IPR_HOST_RCB_OVERLAY_ID_1:
2332 case IPR_HOST_RCB_OVERLAY_ID_DEFAULT:
2333 default:
2334 ipr_log_generic_error(ioa_cfg, hostrcb);
2335 break;
2336 }
2337}
2338
2339/**
2340 * ipr_process_error - Op done function for an adapter error log.
2341 * @ipr_cmd: ipr command struct
2342 *
2343 * This function is the op done function for an error log host
2344 * controlled async from the adapter. It will log the error and
2345 * send the HCAM back to the adapter.
2346 *
2347 * Return value:
2348 * none
2349 **/
2350static void ipr_process_error(struct ipr_cmnd *ipr_cmd)
2351{
2352 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
2353 struct ipr_hostrcb *hostrcb = ipr_cmd->u.hostrcb;
2354 u32 ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);
2355 u32 fd_ioasc;
2356
2357 if (ioa_cfg->sis64)
2358 fd_ioasc = be32_to_cpu(hostrcb->hcam.u.error64.fd_ioasc);
2359 else
2360 fd_ioasc = be32_to_cpu(hostrcb->hcam.u.error.fd_ioasc);
2361
2362 list_del(&hostrcb->queue);
2363 list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
2364
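/* Only log when the HCAM itself completed cleanly; a reset-required IOASC also kicks off an abbreviated IOA reset */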
2365 if (!ioasc) {
2366 ipr_handle_log_data(ioa_cfg, hostrcb);
2367 if (fd_ioasc == IPR_IOASC_NR_IOA_RESET_REQUIRED)
2368 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_ABBREV);
2369 } else if (ioasc != IPR_IOASC_IOA_WAS_RESET) {
2370 dev_err(&ioa_cfg->pdev->dev,
2371 "Host RCB failed with IOASC: 0x%08X\n", ioasc);
2372 }
2373
2374 ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_LOG_DATA, hostrcb);
2375}
2376
2377/**
2378 * ipr_timeout - An internally generated op has timed out.
2379 * @ipr_cmd: ipr command struct
2380 *
2381 * This function blocks host requests and initiates an
2382 * adapter reset.
2383 *
2384 * Return value:
2385 * none
2386 **/
2387static void ipr_timeout(struct ipr_cmnd *ipr_cmd)
2388{
2389 unsigned long lock_flags = 0;
2390 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
2391
2392 ENTER;
2393 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2394
2395 ioa_cfg->errors_logged++;
2396 dev_err(&ioa_cfg->pdev->dev,
2397 "Adapter being reset due to command timeout.\n");
2398
2399 if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
2400 ioa_cfg->sdt_state = GET_DUMP;
2401
2402 if (!ioa_cfg->in_reset_reload || ioa_cfg->reset_cmd == ipr_cmd)
2403 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
2404
2405 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2406 LEAVE;
2407}
2408
2409/**
2410 * ipr_oper_timeout - Adapter timed out transitioning to operational
2411 * @ipr_cmd: ipr command struct
2412 *
2413 * This function blocks host requests and initiates an
2414 * adapter reset.
2415 *
2416 * Return value:
2417 * none
2418 **/
2419static void ipr_oper_timeout(struct ipr_cmnd *ipr_cmd)
2420{
2421 unsigned long lock_flags = 0;
2422 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
2423
2424 ENTER;
2425 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2426
2427 ioa_cfg->errors_logged++;
2428 dev_err(&ioa_cfg->pdev->dev,
2429 "Adapter timed out transitioning to operational.\n");
2430
2431 if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
2432 ioa_cfg->sdt_state = GET_DUMP;
2433
2434 if (!ioa_cfg->in_reset_reload || ioa_cfg->reset_cmd == ipr_cmd) {
2435 if (ipr_fastfail)
2436 ioa_cfg->reset_retries += IPR_NUM_RESET_RELOAD_RETRIES;
2437 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
2438 }
2439
2440 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2441 LEAVE;
2442}
2443
2444/**
2445 * ipr_reset_reload - Reset/Reload the IOA
2446 * @ioa_cfg: ioa config struct
2447 * @shutdown_type: shutdown type
2448 *
2449 * This function resets the adapter and re-initializes it.
2450 * This function assumes that all new host commands have been stopped.
2451 * Return value:
2452 * SUCCESS / FAILED
2453 **/
2454static int ipr_reset_reload(struct ipr_ioa_cfg *ioa_cfg,
2455 enum ipr_shutdown_type shutdown_type)
2456{
2457 if (!ioa_cfg->in_reset_reload)
2458 ipr_initiate_ioa_reset(ioa_cfg, shutdown_type);
2459
2460 spin_unlock_irq(ioa_cfg->host->host_lock);
2461 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
2462 spin_lock_irq(ioa_cfg->host->host_lock);
2463
2464 /* If we got hit with a host reset while we were already resetting
2465 the adapter for some reason, and that reset failed, the IOA is dead; fail. */
2466 if (ioa_cfg->ioa_is_dead) {
2467 ipr_trace;
2468 return FAILED;
2469 }
2470
2471 return SUCCESS;
2472}
2473
2474/**
2475 * ipr_find_ses_entry - Find matching SES in SES table
2476 * @res: resource entry struct of SES
2477 *
2478 * Return value:
2479 * pointer to SES table entry / NULL on failure
2480 **/
2481static const struct ipr_ses_table_entry *
2482ipr_find_ses_entry(struct ipr_resource_entry *res)
2483{
2484 int i, j, matches;
2485 struct ipr_std_inq_vpids *vpids;
2486 const struct ipr_ses_table_entry *ste = ipr_ses_table;
2487
2488 for (i = 0; i < ARRAY_SIZE(ipr_ses_table); i++, ste++) {
2489 for (j = 0, matches = 0; j < IPR_PROD_ID_LEN; j++) {
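/* An 'X' in the compare mask marks a product ID byte that must match; all other bytes are don't-care */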
2490 if (ste->compare_product_id_byte[j] == 'X') {
2491 vpids = &res->std_inq_data.vpids;
2492 if (vpids->product_id[j] == ste->product_id[j])
2493 matches++;
2494 else
2495 break;
2496 } else
2497 matches++;
2498 }
2499
2500 if (matches == IPR_PROD_ID_LEN)
2501 return ste;
2502 }
2503
2504 return NULL;
2505}
2506
2507/**
2508 * ipr_get_max_scsi_speed - Determine max SCSI speed for a given bus
2509 * @ioa_cfg: ioa config struct
2510 * @bus: SCSI bus
2511 * @bus_width: bus width
2512 *
2513 * Return value:
2514 * SCSI bus speed in units of 100KHz, 1600 is 160 MHz
2515 * For a 2-byte wide SCSI bus, the maximum transfer speed is
2516 * twice the maximum transfer rate (e.g. for a wide enabled bus,
2517 * max 160MHz = max 320MB/sec).
2518 **/
2519static u32 ipr_get_max_scsi_speed(struct ipr_ioa_cfg *ioa_cfg, u8 bus, u8 bus_width)
2520{
2521 struct ipr_resource_entry *res;
2522 const struct ipr_ses_table_entry *ste;
2523 u32 max_xfer_rate = IPR_MAX_SCSI_RATE(bus_width);
2524
2525 /* Loop through each config table entry in the config table buffer */
2526 list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
2527 if (!(IPR_IS_SES_DEVICE(res->std_inq_data)))
2528 continue;
2529
2530 if (bus != res->bus)
2531 continue;
2532
2533 if (!(ste = ipr_find_ses_entry(res)))
2534 continue;
2535
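/* Scale the SES speed limit (presumably in MB/sec) down to 100KHz bus-clock units for this bus width */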
2536 max_xfer_rate = (ste->max_bus_speed_limit * 10) / (bus_width / 8);
2537 }
2538
2539 return max_xfer_rate;
2540}
2541
2542/**
2543 * ipr_wait_iodbg_ack - Wait for an IODEBUG ACK from the IOA
2544 * @ioa_cfg: ioa config struct
2545 * @max_delay: max delay in micro-seconds to wait
2546 *
2547 * Waits for an IODEBUG ACK from the IOA, doing busy looping.
2548 *
2549 * Return value:
2550 * 0 on success / other on failure
2551 **/
2552static int ipr_wait_iodbg_ack(struct ipr_ioa_cfg *ioa_cfg, int max_delay)
2553{
2554 volatile u32 pcii_reg;
2555 int delay = 1;
2556
2557 /* Read interrupt reg until IOA signals IO Debug Acknowledge */
2558 while (delay < max_delay) {
2559 pcii_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
2560
2561 if (pcii_reg & IPR_PCII_IO_DEBUG_ACKNOWLEDGE)
2562 return 0;
2563
2564 /* udelay cannot be used if delay is more than a few milliseconds */
2565 if ((delay / 1000) > MAX_UDELAY_MS)
2566 mdelay(delay / 1000);
2567 else
2568 udelay(delay);
2569
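/* Exponential backoff: double the delay on every pass until max_delay expires */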
2570 delay += delay;
2571 }
2572 return -EIO;
2573}
2574
2575/**
2576 * ipr_get_sis64_dump_data_section - Dump IOA memory
2577 * @ioa_cfg: ioa config struct
2578 * @start_addr: adapter address to dump
2579 * @dest: destination kernel buffer
2580 * @length_in_words: length to dump in 4 byte words
2581 *
2582 * Return value:
2583 * 0 on success
2584 **/
2585static int ipr_get_sis64_dump_data_section(struct ipr_ioa_cfg *ioa_cfg,
2586 u32 start_addr,
2587 __be32 *dest, u32 length_in_words)
2588{
2589 int i;
2590
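/* Each dump word is fetched by writing its adapter address, then reading the data register back */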
2591 for (i = 0; i < length_in_words; i++) {
2592 writel(start_addr+(i*4), ioa_cfg->regs.dump_addr_reg);
2593 *dest = cpu_to_be32(readl(ioa_cfg->regs.dump_data_reg));
2594 dest++;
2595 }
2596
2597 return 0;
2598}
2599
2600/**
2601 * ipr_get_ldump_data_section - Dump IOA memory
2602 * @ioa_cfg: ioa config struct
2603 * @start_addr: adapter address to dump
2604 * @dest: destination kernel buffer
2605 * @length_in_words: length to dump in 4 byte words
2606 *
2607 * Return value:
2608 * 0 on success / -EIO on failure
2609 **/
2610static int ipr_get_ldump_data_section(struct ipr_ioa_cfg *ioa_cfg,
2611 u32 start_addr,
2612 __be32 *dest, u32 length_in_words)
2613{
2614 volatile u32 temp_pcii_reg;
2615 int i, delay = 0;
2616
2617 if (ioa_cfg->sis64)
2618 return ipr_get_sis64_dump_data_section(ioa_cfg, start_addr,
2619 dest, length_in_words);
2620
2621 /* Write IOA interrupt reg starting LDUMP state */
2622 writel((IPR_UPROCI_RESET_ALERT | IPR_UPROCI_IO_DEBUG_ALERT),
2623 ioa_cfg->regs.set_uproc_interrupt_reg32);
2624
2625 /* Wait for IO debug acknowledge */
2626 if (ipr_wait_iodbg_ack(ioa_cfg,
2627 IPR_LDUMP_MAX_LONG_ACK_DELAY_IN_USEC)) {
2628 dev_err(&ioa_cfg->pdev->dev,
2629 "IOA dump long data transfer timeout\n");
2630 return -EIO;
2631 }
2632
2633 /* Signal LDUMP interlocked - clear IO debug ack */
2634 writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE,
2635 ioa_cfg->regs.clr_interrupt_reg);
2636
2637 /* Write Mailbox with starting address */
2638 writel(start_addr, ioa_cfg->ioa_mailbox);
2639
2640 /* Signal address valid - clear IOA Reset alert */
2641 writel(IPR_UPROCI_RESET_ALERT,
2642 ioa_cfg->regs.clr_uproc_interrupt_reg32);
2643
2644 for (i = 0; i < length_in_words; i++) {
2645 /* Wait for IO debug acknowledge */
2646 if (ipr_wait_iodbg_ack(ioa_cfg,
2647 IPR_LDUMP_MAX_SHORT_ACK_DELAY_IN_USEC)) {
2648 dev_err(&ioa_cfg->pdev->dev,
2649 "IOA dump short data transfer timeout\n");
2650 return -EIO;
2651 }
2652
2653 /* Read data from mailbox and increment destination pointer */
2654 *dest = cpu_to_be32(readl(ioa_cfg->ioa_mailbox));
2655 dest++;
2656
2657 /* For all but the last word of data, signal data received */
2658 if (i < (length_in_words - 1)) {
2659 /* Signal dump data received - Clear IO debug Ack */
2660 writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE,
2661 ioa_cfg->regs.clr_interrupt_reg);
2662 }
2663 }
2664
2665 /* Signal end of block transfer. Set reset alert then clear IO debug ack */
2666 writel(IPR_UPROCI_RESET_ALERT,
2667 ioa_cfg->regs.set_uproc_interrupt_reg32);
2668
2669 writel(IPR_UPROCI_IO_DEBUG_ALERT,
2670 ioa_cfg->regs.clr_uproc_interrupt_reg32);
2671
2672 /* Signal dump data received - Clear IO debug Ack */
2673 writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE,
2674 ioa_cfg->regs.clr_interrupt_reg);
2675
2676 /* Wait for IOA to signal LDUMP exit - IOA reset alert will be cleared */
2677 while (delay < IPR_LDUMP_MAX_SHORT_ACK_DELAY_IN_USEC) {
2678 temp_pcii_reg =
2679 readl(ioa_cfg->regs.sense_uproc_interrupt_reg32);
2680
2681 if (!(temp_pcii_reg & IPR_UPROCI_RESET_ALERT))
2682 return 0;
2683
2684 udelay(10);
2685 delay += 10;
2686 }
2687
2688 return 0;
2689}
2690
2691#ifdef CONFIG_SCSI_IPR_DUMP
2692/**
2693 * ipr_sdt_copy - Copy Smart Dump Table to kernel buffer
2694 * @ioa_cfg: ioa config struct
2695 * @pci_address: adapter address
2696 * @length: length of data to copy
2697 *
2698 * Copy data from PCI adapter to kernel buffer.
2699 * Note: length MUST be a 4 byte multiple
2700 * Return value:
2701 * 0 on success / other on failure
2702 **/
2703static int ipr_sdt_copy(struct ipr_ioa_cfg *ioa_cfg,
2704 unsigned long pci_address, u32 length)
2705{
2706 int bytes_copied = 0;
2707 int cur_len, rc, rem_len, rem_page_len;
2708 __be32 *page;
2709 unsigned long lock_flags = 0;
2710 struct ipr_ioa_dump *ioa_dump = &ioa_cfg->dump->ioa_dump;
2711
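/* Copy the dump out in page-sized chunks, grabbing a fresh page whenever the current one fills up */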
2712 while (bytes_copied < length &&
2713 (ioa_dump->hdr.len + bytes_copied) < IPR_MAX_IOA_DUMP_SIZE) {
2714 if (ioa_dump->page_offset >= PAGE_SIZE ||
2715 ioa_dump->page_offset == 0) {
2716 page = (__be32 *)__get_free_page(GFP_ATOMIC);
2717
2718 if (!page) {
2719 ipr_trace;
2720 return bytes_copied;
2721 }
2722
2723 ioa_dump->page_offset = 0;
2724 ioa_dump->ioa_data[ioa_dump->next_page_index] = page;
2725 ioa_dump->next_page_index++;
2726 } else
2727 page = ioa_dump->ioa_data[ioa_dump->next_page_index - 1];
2728
2729 rem_len = length - bytes_copied;
2730 rem_page_len = PAGE_SIZE - ioa_dump->page_offset;
2731 cur_len = min(rem_len, rem_page_len);
2732
2733 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2734 if (ioa_cfg->sdt_state == ABORT_DUMP) {
2735 rc = -EIO;
2736 } else {
2737 rc = ipr_get_ldump_data_section(ioa_cfg,
2738 pci_address + bytes_copied,
2739 &page[ioa_dump->page_offset / 4],
2740 (cur_len / sizeof(u32)));
2741 }
2742 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2743
2744 if (!rc) {
2745 ioa_dump->page_offset += cur_len;
2746 bytes_copied += cur_len;
2747 } else {
2748 ipr_trace;
2749 break;
2750 }
2751 schedule();
2752 }
2753
2754 return bytes_copied;
2755}
2756
2757/**
2758 * ipr_init_dump_entry_hdr - Initialize a dump entry header.
2759 * @hdr: dump entry header struct
2760 *
2761 * Return value:
2762 * nothing
2763 **/
2764static void ipr_init_dump_entry_hdr(struct ipr_dump_entry_header *hdr)
2765{
2766 hdr->eye_catcher = IPR_DUMP_EYE_CATCHER;
2767 hdr->num_elems = 1;
2768 hdr->offset = sizeof(*hdr);
2769 hdr->status = IPR_DUMP_STATUS_SUCCESS;
2770}
2771
2772/**
2773 * ipr_dump_ioa_type_data - Fill in the adapter type in the dump.
2774 * @ioa_cfg: ioa config struct
2775 * @driver_dump: driver dump struct
2776 *
2777 * Return value:
2778 * nothing
2779 **/
2780static void ipr_dump_ioa_type_data(struct ipr_ioa_cfg *ioa_cfg,
2781 struct ipr_driver_dump *driver_dump)
2782{
2783 struct ipr_inquiry_page3 *ucode_vpd = &ioa_cfg->vpd_cbs->page3_data;
2784
2785 ipr_init_dump_entry_hdr(&driver_dump->ioa_type_entry.hdr);
2786 driver_dump->ioa_type_entry.hdr.len =
2787 sizeof(struct ipr_dump_ioa_type_entry) -
2788 sizeof(struct ipr_dump_entry_header);
2789 driver_dump->ioa_type_entry.hdr.data_type = IPR_DUMP_DATA_TYPE_BINARY;
2790 driver_dump->ioa_type_entry.hdr.id = IPR_DUMP_DRIVER_TYPE_ID;
2791 driver_dump->ioa_type_entry.type = ioa_cfg->type;
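/* Pack the firmware version one byte per field: major release, card type, then the two minor release bytes */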
2792 driver_dump->ioa_type_entry.fw_version = (ucode_vpd->major_release << 24) |
2793 (ucode_vpd->card_type << 16) | (ucode_vpd->minor_release[0] << 8) |
2794 ucode_vpd->minor_release[1];
2795 driver_dump->hdr.num_entries++;
2796}
2797
2798/**
2799 * ipr_dump_version_data - Fill in the driver version in the dump.
2800 * @ioa_cfg: ioa config struct
2801 * @driver_dump: driver dump struct
2802 *
2803 * Return value:
2804 * nothing
2805 **/
2806static void ipr_dump_version_data(struct ipr_ioa_cfg *ioa_cfg,
2807 struct ipr_driver_dump *driver_dump)
2808{
2809 ipr_init_dump_entry_hdr(&driver_dump->version_entry.hdr);
2810 driver_dump->version_entry.hdr.len =
2811 sizeof(struct ipr_dump_version_entry) -
2812 sizeof(struct ipr_dump_entry_header);
2813 driver_dump->version_entry.hdr.data_type = IPR_DUMP_DATA_TYPE_ASCII;
2814 driver_dump->version_entry.hdr.id = IPR_DUMP_DRIVER_VERSION_ID;
2815 strcpy(driver_dump->version_entry.version, IPR_DRIVER_VERSION);
2816 driver_dump->hdr.num_entries++;
2817}
2818
2819/**
2820 * ipr_dump_trace_data - Fill in the IOA trace in the dump.
2821 * @ioa_cfg: ioa config struct
2822 * @driver_dump: driver dump struct
2823 *
2824 * Return value:
2825 * nothing
2826 **/
2827static void ipr_dump_trace_data(struct ipr_ioa_cfg *ioa_cfg,
2828 struct ipr_driver_dump *driver_dump)
2829{
2830 ipr_init_dump_entry_hdr(&driver_dump->trace_entry.hdr);
2831 driver_dump->trace_entry.hdr.len =
2832 sizeof(struct ipr_dump_trace_entry) -
2833 sizeof(struct ipr_dump_entry_header);
2834 driver_dump->trace_entry.hdr.data_type = IPR_DUMP_DATA_TYPE_BINARY;
2835 driver_dump->trace_entry.hdr.id = IPR_DUMP_TRACE_ID;
2836 memcpy(driver_dump->trace_entry.trace, ioa_cfg->trace, IPR_TRACE_SIZE);
2837 driver_dump->hdr.num_entries++;
2838}
2839
2840/**
2841 * ipr_dump_location_data - Fill in the IOA location in the dump.
2842 * @ioa_cfg: ioa config struct
2843 * @driver_dump: driver dump struct
2844 *
2845 * Return value:
2846 * nothing
2847 **/
2848static void ipr_dump_location_data(struct ipr_ioa_cfg *ioa_cfg,
2849 struct ipr_driver_dump *driver_dump)
2850{
2851 ipr_init_dump_entry_hdr(&driver_dump->location_entry.hdr);
2852 driver_dump->location_entry.hdr.len =
2853 sizeof(struct ipr_dump_location_entry) -
2854 sizeof(struct ipr_dump_entry_header);
2855 driver_dump->location_entry.hdr.data_type = IPR_DUMP_DATA_TYPE_ASCII;
2856 driver_dump->location_entry.hdr.id = IPR_DUMP_LOCATION_ID;
2857 strcpy(driver_dump->location_entry.location, dev_name(&ioa_cfg->pdev->dev));
2858 driver_dump->hdr.num_entries++;
2859}
2860
2861/**
2862 * ipr_get_ioa_dump - Perform a dump of the driver and adapter.
2863 * @ioa_cfg: ioa config struct
2864 * @dump: dump struct
2865 *
2866 * Return value:
2867 * nothing
2868 **/
2869static void ipr_get_ioa_dump(struct ipr_ioa_cfg *ioa_cfg, struct ipr_dump *dump)
2870{
2871 unsigned long start_addr, sdt_word;
2872 unsigned long lock_flags = 0;
2873 struct ipr_driver_dump *driver_dump = &dump->driver_dump;
2874 struct ipr_ioa_dump *ioa_dump = &dump->ioa_dump;
2875 u32 num_entries, start_off, end_off;
2876 u32 bytes_to_copy, bytes_copied, rc;
2877 struct ipr_sdt *sdt;
2878 int valid = 1;
2879 int i;
2880
2881 ENTER;
2882
2883 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2884
2885 if (ioa_cfg->sdt_state != GET_DUMP) {
2886 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2887 return;
2888 }
2889
2890 start_addr = readl(ioa_cfg->ioa_mailbox);
2891
2892 if (!ioa_cfg->sis64 && !ipr_sdt_is_fmt2(start_addr)) {
2893 dev_err(&ioa_cfg->pdev->dev,
2894 "Invalid dump table format: %lx\n", start_addr);
2895 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2896 return;
2897 }
2898
2899 dev_err(&ioa_cfg->pdev->dev, "Dump of IOA initiated\n");
2900
2901 driver_dump->hdr.eye_catcher = IPR_DUMP_EYE_CATCHER;
2902
2903 /* Initialize the overall dump header */
2904 driver_dump->hdr.len = sizeof(struct ipr_driver_dump);
2905 driver_dump->hdr.num_entries = 1;
2906 driver_dump->hdr.first_entry_offset = sizeof(struct ipr_dump_header);
2907 driver_dump->hdr.status = IPR_DUMP_STATUS_SUCCESS;
2908 driver_dump->hdr.os = IPR_DUMP_OS_LINUX;
2909 driver_dump->hdr.driver_name = IPR_DUMP_DRIVER_NAME;
2910
2911 ipr_dump_version_data(ioa_cfg, driver_dump);
2912 ipr_dump_location_data(ioa_cfg, driver_dump);
2913 ipr_dump_ioa_type_data(ioa_cfg, driver_dump);
2914 ipr_dump_trace_data(ioa_cfg, driver_dump);
2915
2916 /* Update dump_header */
2917 driver_dump->hdr.len += sizeof(struct ipr_dump_entry_header);
2918
2919 /* IOA Dump entry */
2920 ipr_init_dump_entry_hdr(&ioa_dump->hdr);
2921 ioa_dump->hdr.len = 0;
2922 ioa_dump->hdr.data_type = IPR_DUMP_DATA_TYPE_BINARY;
2923 ioa_dump->hdr.id = IPR_DUMP_IOA_DUMP_ID;
2924
2925 /* First entries in sdt are actually a list of dump addresses and
2926 lengths to gather the real dump data. sdt represents the pointer
2927 to the ioa generated dump table. Dump data will be extracted based
2928 on entries in this table */
2929 sdt = &ioa_dump->sdt;
2930
2931 rc = ipr_get_ldump_data_section(ioa_cfg, start_addr, (__be32 *)sdt,
2932 sizeof(struct ipr_sdt) / sizeof(__be32));
2933
2934 /* Smart Dump table is ready to use and the first entry is valid */
2935 if (rc || ((be32_to_cpu(sdt->hdr.state) != IPR_FMT3_SDT_READY_TO_USE) &&
2936 (be32_to_cpu(sdt->hdr.state) != IPR_FMT2_SDT_READY_TO_USE))) {
2937 dev_err(&ioa_cfg->pdev->dev,
2938 "Dump of IOA failed. Dump table not valid: %d, %X.\n",
2939 rc, be32_to_cpu(sdt->hdr.state));
2940 driver_dump->hdr.status = IPR_DUMP_STATUS_FAILED;
2941 ioa_cfg->sdt_state = DUMP_OBTAINED;
2942 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2943 return;
2944 }
2945
2946 num_entries = be32_to_cpu(sdt->hdr.num_entries_used);
2947
2948 if (num_entries > IPR_NUM_SDT_ENTRIES)
2949 num_entries = IPR_NUM_SDT_ENTRIES;
2950
2951 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2952
2953 for (i = 0; i < num_entries; i++) {
2954 if (ioa_dump->hdr.len > IPR_MAX_IOA_DUMP_SIZE) {
2955 driver_dump->hdr.status = IPR_DUMP_STATUS_QUAL_SUCCESS;
2956 break;
2957 }
2958
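/* sis64 entries carry the byte count directly; fmt2 entries encode start/end offsets instead */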
2959 if (sdt->entry[i].flags & IPR_SDT_VALID_ENTRY) {
2960 sdt_word = be32_to_cpu(sdt->entry[i].start_token);
2961 if (ioa_cfg->sis64)
2962 bytes_to_copy = be32_to_cpu(sdt->entry[i].end_token);
2963 else {
2964 start_off = sdt_word & IPR_FMT2_MBX_ADDR_MASK;
2965 end_off = be32_to_cpu(sdt->entry[i].end_token);
2966
2967 if (ipr_sdt_is_fmt2(sdt_word) && sdt_word)
2968 bytes_to_copy = end_off - start_off;
2969 else
2970 valid = 0;
2971 }
2972 if (valid) {
2973 if (bytes_to_copy > IPR_MAX_IOA_DUMP_SIZE) {
2974 sdt->entry[i].flags &= ~IPR_SDT_VALID_ENTRY;
2975 continue;
2976 }
2977
2978 /* Copy data from adapter to driver buffers */
2979 bytes_copied = ipr_sdt_copy(ioa_cfg, sdt_word,
2980 bytes_to_copy);
2981
2982 ioa_dump->hdr.len += bytes_copied;
2983
2984 if (bytes_copied != bytes_to_copy) {
2985 driver_dump->hdr.status = IPR_DUMP_STATUS_QUAL_SUCCESS;
2986 break;
2987 }
2988 }
2989 }
2990 }
2991
2992 dev_err(&ioa_cfg->pdev->dev, "Dump of IOA completed.\n");
2993
2994 /* Update dump_header */
2995 driver_dump->hdr.len += ioa_dump->hdr.len;
2996 wmb();
2997 ioa_cfg->sdt_state = DUMP_OBTAINED;
2998 LEAVE;
2999}
3000
3001#else
3002#define ipr_get_ioa_dump(ioa_cfg, dump) do { } while(0)
3003#endif
3004
3005/**
3006 * ipr_release_dump - Free adapter dump memory
3007 * @kref: kref struct
3008 *
3009 * Return value:
3010 * nothing
3011 **/
3012static void ipr_release_dump(struct kref *kref)
3013{
3014 struct ipr_dump *dump = container_of(kref,struct ipr_dump,kref);
3015 struct ipr_ioa_cfg *ioa_cfg = dump->ioa_cfg;
3016 unsigned long lock_flags = 0;
3017 int i;
3018
3019 ENTER;
3020 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3021 ioa_cfg->dump = NULL;
3022 ioa_cfg->sdt_state = INACTIVE;
3023 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3024
3025 for (i = 0; i < dump->ioa_dump.next_page_index; i++)
3026 free_page((unsigned long) dump->ioa_dump.ioa_data[i]);
3027
3028 kfree(dump);
3029 LEAVE;
3030}
3031
3032/**
3033 * ipr_worker_thread - Worker thread
3034 * @work: work struct
3035 *
3036 * Called at task level from a work thread. This function takes care
3037 * of adding and removing device from the mid-layer as configuration
3038 * changes are detected by the adapter.
3039 *
3040 * Return value:
3041 * nothing
3042 **/
3043static void ipr_worker_thread(struct work_struct *work)
3044{
3045 unsigned long lock_flags;
3046 struct ipr_resource_entry *res;
3047 struct scsi_device *sdev;
3048 struct ipr_dump *dump;
3049 struct ipr_ioa_cfg *ioa_cfg =
3050 container_of(work, struct ipr_ioa_cfg, work_q);
1da177e4
LT
3051 u8 bus, target, lun;
3052 int did_work;
3053
3054 ENTER;
3055 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3056
3057 if (ioa_cfg->sdt_state == GET_DUMP) {
3058 dump = ioa_cfg->dump;
3059 if (!dump) {
3060 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3061 return;
3062 }
3063 kref_get(&dump->kref);
3064 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3065 ipr_get_ioa_dump(ioa_cfg, dump);
3066 kref_put(&dump->kref, ipr_release_dump);
3067
3068 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3069 if (ioa_cfg->sdt_state == DUMP_OBTAINED)
3070 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
3071 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3072 return;
3073 }
3074
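/* First pass: pull devices flagged for deletion out of the midlayer, then add any new ones below */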
3075restart:
3076 do {
3077 did_work = 0;
3078 if (!ioa_cfg->allow_cmds || !ioa_cfg->allow_ml_add_del) {
3079 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3080 return;
3081 }
3082
3083 list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
3084 if (res->del_from_ml && res->sdev) {
3085 did_work = 1;
3086 sdev = res->sdev;
3087 if (!scsi_device_get(sdev)) {
3088 list_move_tail(&res->queue, &ioa_cfg->free_res_q);
3089 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3090 scsi_remove_device(sdev);
3091 scsi_device_put(sdev);
3092 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3093 }
3094 break;
3095 }
3096 }
3097 } while(did_work);
3098
3099 list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
3100 if (res->add_to_ml) {
3101 bus = res->bus;
3102 target = res->target;
3103 lun = res->lun;
3104 res->add_to_ml = 0;
3105 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3106 scsi_add_device(ioa_cfg->host, bus, target, lun);
3107 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3108 goto restart;
3109 }
3110 }
3111
3112 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3113 kobject_uevent(&ioa_cfg->host->shost_dev.kobj, KOBJ_CHANGE);
3114 LEAVE;
3115}
3116
3117#ifdef CONFIG_SCSI_IPR_TRACE
3118/**
3119 * ipr_read_trace - Dump the adapter trace
3120 * @kobj: kobject struct
3121 * @bin_attr: bin_attribute struct
1da177e4
LT
3122 * @buf: buffer
3123 * @off: offset
3124 * @count: buffer size
3125 *
3126 * Return value:
3127 * number of bytes printed to buffer
3128 **/
3129static ssize_t ipr_read_trace(struct kobject *kobj,
3130 struct bin_attribute *bin_attr,
3131 char *buf, loff_t off, size_t count)
3132 {
3133 struct device *dev = container_of(kobj, struct device, kobj);
3134 struct Scsi_Host *shost = class_to_shost(dev);
3135 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3136 unsigned long lock_flags = 0;
3137 ssize_t ret;
3138
3139 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3140 ret = memory_read_from_buffer(buf, count, &off, ioa_cfg->trace,
3141 IPR_TRACE_SIZE);
3142 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3143
3144 return ret;
3145}
3146
3147static struct bin_attribute ipr_trace_attr = {
3148 .attr = {
3149 .name = "trace",
3150 .mode = S_IRUGO,
3151 },
3152 .size = 0,
3153 .read = ipr_read_trace,
3154};
3155#endif
3156
3157/**
3158 * ipr_show_fw_version - Show the firmware version
3159 * @dev: class device struct
3160 * @buf: buffer
3161 *
3162 * Return value:
3163 * number of bytes printed to buffer
3164 **/
3165static ssize_t ipr_show_fw_version(struct device *dev,
3166 struct device_attribute *attr, char *buf)
1da177e4 3167{
ee959b00 3168 struct Scsi_Host *shost = class_to_shost(dev);
1da177e4
LT
3169 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3170 struct ipr_inquiry_page3 *ucode_vpd = &ioa_cfg->vpd_cbs->page3_data;
3171 unsigned long lock_flags = 0;
3172 int len;
3173
3174 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3175 len = snprintf(buf, PAGE_SIZE, "%02X%02X%02X%02X\n",
3176 ucode_vpd->major_release, ucode_vpd->card_type,
3177 ucode_vpd->minor_release[0],
3178 ucode_vpd->minor_release[1]);
3179 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3180 return len;
3181}
3182
3183static struct device_attribute ipr_fw_version_attr = {
3184 .attr = {
3185 .name = "fw_version",
3186 .mode = S_IRUGO,
3187 },
3188 .show = ipr_show_fw_version,
3189};
3190
3191/**
3192 * ipr_show_log_level - Show the adapter's error logging level
3193 * @dev: class device struct
3194 * @buf: buffer
3195 *
3196 * Return value:
3197 * number of bytes printed to buffer
3198 **/
3199static ssize_t ipr_show_log_level(struct device *dev,
3200 struct device_attribute *attr, char *buf)
3201 {
3202 struct Scsi_Host *shost = class_to_shost(dev);
3203 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3204 unsigned long lock_flags = 0;
3205 int len;
3206
3207 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3208 len = snprintf(buf, PAGE_SIZE, "%d\n", ioa_cfg->log_level);
3209 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3210 return len;
3211}
3212
3213/**
3214 * ipr_store_log_level - Change the adapter's error logging level
3215 * @dev: class device struct
3216 * @buf: buffer
3217 *
3218 * Return value:
3219 * number of bytes consumed from the buffer
3220 **/
3221static ssize_t ipr_store_log_level(struct device *dev,
3222 struct device_attribute *attr,
3223 const char *buf, size_t count)
3224{
3225 struct Scsi_Host *shost = class_to_shost(dev);
3226 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3227 unsigned long lock_flags = 0;
3228
3229 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3230 ioa_cfg->log_level = simple_strtoul(buf, NULL, 10);
3231 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3232 return strlen(buf);
3233}
3234
3235static struct device_attribute ipr_log_level_attr = {
3236 .attr = {
3237 .name = "log_level",
3238 .mode = S_IRUGO | S_IWUSR,
3239 },
3240 .show = ipr_show_log_level,
3241 .store = ipr_store_log_level
3242};
3243
3244/**
3245 * ipr_store_diagnostics - IOA Diagnostics interface
3246 * @dev: device struct
3247 * @buf: buffer
3248 * @count: buffer size
3249 *
3250 * This function will reset the adapter and wait a reasonable
3251 * amount of time for any errors that the adapter might log.
3252 *
3253 * Return value:
3254 * count on success / other on failure
3255 **/
3256static ssize_t ipr_store_diagnostics(struct device *dev,
3257 struct device_attribute *attr,
3258 const char *buf, size_t count)
3259{
3260 struct Scsi_Host *shost = class_to_shost(dev);
3261 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3262 unsigned long lock_flags = 0;
3263 int rc = count;
3264
3265 if (!capable(CAP_SYS_ADMIN))
3266 return -EACCES;
3267
3268 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
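/* If a reset is already in progress, wait for it to finish before kicking off our own */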
3269 while(ioa_cfg->in_reset_reload) {
3270 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3271 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
3272 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3273 }
3274
3275 ioa_cfg->errors_logged = 0;
3276 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NORMAL);
3277
3278 if (ioa_cfg->in_reset_reload) {
3279 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3280 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
3281
3282 /* Wait for a second for any errors to be logged */
3283 msleep(1000);
3284 } else {
3285 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3286 return -EIO;
3287 }
3288
3289 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3290 if (ioa_cfg->in_reset_reload || ioa_cfg->errors_logged)
3291 rc = -EIO;
3292 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3293
3294 return rc;
3295}
3296
ee959b00 3297static struct device_attribute ipr_diagnostics_attr = {
1da177e4
LT
3298 .attr = {
3299 .name = "run_diagnostics",
3300 .mode = S_IWUSR,
3301 },
3302 .store = ipr_store_diagnostics
3303};

/**
 * ipr_show_adapter_state - Show the adapter's state
 * @dev:	device struct
 * @attr:	device attribute struct
 * @buf:	buffer
 *
 * Return value:
 * 	number of bytes printed to buffer
 **/
static ssize_t ipr_show_adapter_state(struct device *dev,
				      struct device_attribute *attr, char *buf)
{
	struct Scsi_Host *shost = class_to_shost(dev);
	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
	unsigned long lock_flags = 0;
	int len;

	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	if (ioa_cfg->ioa_is_dead)
		len = snprintf(buf, PAGE_SIZE, "offline\n");
	else
		len = snprintf(buf, PAGE_SIZE, "online\n");
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
	return len;
}

/**
 * ipr_store_adapter_state - Change adapter state
 * @dev:	device struct
 * @attr:	device attribute struct
 * @buf:	buffer
 * @count:	buffer size
 *
 * This function will change the adapter's state.
 *
 * Return value:
 * 	count on success / other on failure
 **/
static ssize_t ipr_store_adapter_state(struct device *dev,
				       struct device_attribute *attr,
				       const char *buf, size_t count)
{
	struct Scsi_Host *shost = class_to_shost(dev);
	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
	unsigned long lock_flags;
	int result = count;

	if (!capable(CAP_SYS_ADMIN))
		return -EACCES;

	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	if (ioa_cfg->ioa_is_dead && !strncmp(buf, "online", 6)) {
		ioa_cfg->ioa_is_dead = 0;
		ioa_cfg->reset_retries = 0;
		ioa_cfg->in_ioa_bringdown = 0;
		ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
	}
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
	wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);

	return result;
}

static struct device_attribute ipr_ioa_state_attr = {
	.attr = {
		.name =		"online_state",
		.mode =		S_IRUGO | S_IWUSR,
	},
	.show = ipr_show_adapter_state,
	.store = ipr_store_adapter_state
};

/**
 * ipr_store_reset_adapter - Reset the adapter
 * @dev:	device struct
 * @attr:	device attribute struct
 * @buf:	buffer
 * @count:	buffer size
 *
 * This function will reset the adapter.
 *
 * Return value:
 * 	count on success / other on failure
 **/
static ssize_t ipr_store_reset_adapter(struct device *dev,
				       struct device_attribute *attr,
				       const char *buf, size_t count)
{
	struct Scsi_Host *shost = class_to_shost(dev);
	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
	unsigned long lock_flags;
	int result = count;

	if (!capable(CAP_SYS_ADMIN))
		return -EACCES;

	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	if (!ioa_cfg->in_reset_reload)
		ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NORMAL);
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
	wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);

	return result;
}

static struct device_attribute ipr_ioa_reset_attr = {
	.attr = {
		.name =		"reset_host",
		.mode =		S_IWUSR,
	},
	.store = ipr_store_reset_adapter
};

/**
 * ipr_alloc_ucode_buffer - Allocates a microcode download buffer
 * @buf_len:	buffer length
 *
 * Allocates a DMA'able buffer in chunks and assembles a scatter/gather
 * list to use for microcode download
 *
 * Return value:
 * 	pointer to sglist / NULL on failure
 **/
static struct ipr_sglist *ipr_alloc_ucode_buffer(int buf_len)
{
	int sg_size, order, bsize_elem, num_elem, i, j;
	struct ipr_sglist *sglist;
	struct scatterlist *scatterlist;
	struct page *page;

	/* Get the minimum size per scatter/gather element */
	sg_size = buf_len / (IPR_MAX_SGLIST - 1);

	/* Get the actual size per element */
	order = get_order(sg_size);

	/* Determine the actual number of bytes per element */
	bsize_elem = PAGE_SIZE * (1 << order);

	/* Determine the actual number of sg entries needed */
	if (buf_len % bsize_elem)
		num_elem = (buf_len / bsize_elem) + 1;
	else
		num_elem = buf_len / bsize_elem;

	/* Allocate a scatter/gather list for the DMA */
	sglist = kzalloc(sizeof(struct ipr_sglist) +
			 (sizeof(struct scatterlist) * (num_elem - 1)),
			 GFP_KERNEL);

	if (sglist == NULL) {
		ipr_trace;
		return NULL;
	}

	scatterlist = sglist->scatterlist;
	sg_init_table(scatterlist, num_elem);

	sglist->order = order;
	sglist->num_sg = num_elem;

	/* Allocate a bunch of sg elements */
	for (i = 0; i < num_elem; i++) {
		page = alloc_pages(GFP_KERNEL, order);
		if (!page) {
			ipr_trace;

			/* Free up what we already allocated */
			for (j = i - 1; j >= 0; j--)
				__free_pages(sg_page(&scatterlist[j]), order);
			kfree(sglist);
			return NULL;
		}

		sg_set_page(&scatterlist[i], page, 0, 0);
	}

	return sglist;
}
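
/*
 * Worked sizing example for ipr_alloc_ucode_buffer() (illustrative only;
 * the real IPR_MAX_SGLIST and PAGE_SIZE come from ipr.h and the arch).
 * Assuming a 1 MiB image, IPR_MAX_SGLIST == 64 and 4 KiB pages:
 *
 *	sg_size    = 1048576 / 63     = 16644 bytes
 *	order      = get_order(16644) = 3  (an 8 page, 32 KiB chunk)
 *	bsize_elem = 4096 << 3        = 32768 bytes
 *	num_elem   = 1048576 / 32768  = 32 sg entries
 */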

/**
 * ipr_free_ucode_buffer - Frees a microcode download buffer
 * @sglist:	scatter/gather list pointer
 *
 * Free a DMA'able ucode download buffer previously allocated with
 * ipr_alloc_ucode_buffer
 *
 * Return value:
 * 	nothing
 **/
static void ipr_free_ucode_buffer(struct ipr_sglist *sglist)
{
	int i;

	for (i = 0; i < sglist->num_sg; i++)
		__free_pages(sg_page(&sglist->scatterlist[i]), sglist->order);

	kfree(sglist);
}

/**
 * ipr_copy_ucode_buffer - Copy user buffer to kernel buffer
 * @sglist:		scatter/gather list pointer
 * @buffer:		buffer pointer
 * @len:		buffer length
 *
 * Copy a microcode image from a user buffer into a buffer allocated by
 * ipr_alloc_ucode_buffer
 *
 * Return value:
 * 	0 on success / other on failure
 **/
static int ipr_copy_ucode_buffer(struct ipr_sglist *sglist,
				 u8 *buffer, u32 len)
{
	int bsize_elem, i, result = 0;
	struct scatterlist *scatterlist;
	void *kaddr;

	/* Determine the actual number of bytes per element */
	bsize_elem = PAGE_SIZE * (1 << sglist->order);

	scatterlist = sglist->scatterlist;

	for (i = 0; i < (len / bsize_elem); i++, buffer += bsize_elem) {
		struct page *page = sg_page(&scatterlist[i]);

		kaddr = kmap(page);
		memcpy(kaddr, buffer, bsize_elem);
		kunmap(page);

		scatterlist[i].length = bsize_elem;

		if (result != 0) {
			ipr_trace;
			return result;
		}
	}

	if (len % bsize_elem) {
		struct page *page = sg_page(&scatterlist[i]);

		kaddr = kmap(page);
		memcpy(kaddr, buffer, len % bsize_elem);
		kunmap(page);

		scatterlist[i].length = len % bsize_elem;
	}

	sglist->buffer_len = len;
	return result;
}

/**
 * ipr_build_ucode_ioadl64 - Build a microcode download IOADL
 * @ipr_cmd:	ipr command struct
 * @sglist:	scatter/gather list
 *
 * Builds a microcode download IOA data list (IOADL).
 *
 **/
static void ipr_build_ucode_ioadl64(struct ipr_cmnd *ipr_cmd,
				    struct ipr_sglist *sglist)
{
	struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
	struct ipr_ioadl64_desc *ioadl64 = ipr_cmd->i.ioadl64;
	struct scatterlist *scatterlist = sglist->scatterlist;
	int i;

	ipr_cmd->dma_use_sg = sglist->num_dma_sg;
	ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
	ioarcb->data_transfer_length = cpu_to_be32(sglist->buffer_len);

	ioarcb->ioadl_len =
		cpu_to_be32(sizeof(struct ipr_ioadl64_desc) * ipr_cmd->dma_use_sg);
	for (i = 0; i < ipr_cmd->dma_use_sg; i++) {
		ioadl64[i].flags = cpu_to_be32(IPR_IOADL_FLAGS_WRITE);
		ioadl64[i].data_len = cpu_to_be32(sg_dma_len(&scatterlist[i]));
		ioadl64[i].address = cpu_to_be64(sg_dma_address(&scatterlist[i]));
	}

	ioadl64[i-1].flags |= cpu_to_be32(IPR_IOADL_FLAGS_LAST);
}

/**
 * ipr_build_ucode_ioadl - Build a microcode download IOADL
 * @ipr_cmd:	ipr command struct
 * @sglist:	scatter/gather list
 *
 * Builds a microcode download IOA data list (IOADL).
 *
 **/
static void ipr_build_ucode_ioadl(struct ipr_cmnd *ipr_cmd,
				  struct ipr_sglist *sglist)
{
	struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
	struct ipr_ioadl_desc *ioadl = ipr_cmd->i.ioadl;
	struct scatterlist *scatterlist = sglist->scatterlist;
	int i;

	ipr_cmd->dma_use_sg = sglist->num_dma_sg;
	ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
	ioarcb->data_transfer_length = cpu_to_be32(sglist->buffer_len);

	ioarcb->ioadl_len =
		cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);

	for (i = 0; i < ipr_cmd->dma_use_sg; i++) {
		ioadl[i].flags_and_data_len =
			cpu_to_be32(IPR_IOADL_FLAGS_WRITE | sg_dma_len(&scatterlist[i]));
		ioadl[i].address =
			cpu_to_be32(sg_dma_address(&scatterlist[i]));
	}

	ioadl[i-1].flags_and_data_len |=
		cpu_to_be32(IPR_IOADL_FLAGS_LAST);
}

/**
 * ipr_update_ioa_ucode - Update IOA's microcode
 * @ioa_cfg:	ioa config struct
 * @sglist:	scatter/gather list
 *
 * Initiate an adapter reset to update the IOA's microcode
 *
 * Return value:
 * 	0 on success / -EIO on failure
 **/
static int ipr_update_ioa_ucode(struct ipr_ioa_cfg *ioa_cfg,
				struct ipr_sglist *sglist)
{
	unsigned long lock_flags;

	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	while (ioa_cfg->in_reset_reload) {
		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
		wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
		spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	}

	if (ioa_cfg->ucode_sglist) {
		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
		dev_err(&ioa_cfg->pdev->dev,
			"Microcode download already in progress\n");
		return -EIO;
	}

	sglist->num_dma_sg = pci_map_sg(ioa_cfg->pdev, sglist->scatterlist,
					sglist->num_sg, DMA_TO_DEVICE);

	if (!sglist->num_dma_sg) {
		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
		dev_err(&ioa_cfg->pdev->dev,
			"Failed to map microcode download buffer!\n");
		return -EIO;
	}

	ioa_cfg->ucode_sglist = sglist;
	ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NORMAL);
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
	wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);

	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	ioa_cfg->ucode_sglist = NULL;
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
	return 0;
}

/**
 * ipr_store_update_fw - Update the firmware on the adapter
 * @dev:	device struct
 * @attr:	device attribute struct
 * @buf:	buffer
 * @count:	buffer size
 *
 * This function will update the firmware on the adapter.
 *
 * Return value:
 * 	count on success / other on failure
 **/
static ssize_t ipr_store_update_fw(struct device *dev,
				   struct device_attribute *attr,
				   const char *buf, size_t count)
{
	struct Scsi_Host *shost = class_to_shost(dev);
	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
	struct ipr_ucode_image_header *image_hdr;
	const struct firmware *fw_entry;
	struct ipr_sglist *sglist;
	char fname[100];
	char *src;
	int len, result, dnld_size;

	if (!capable(CAP_SYS_ADMIN))
		return -EACCES;

	/* Copy the filename and chop the trailing newline from the sysfs write */
	len = snprintf(fname, 99, "%s", buf);
	fname[len-1] = '\0';

	if (request_firmware(&fw_entry, fname, &ioa_cfg->pdev->dev)) {
		dev_err(&ioa_cfg->pdev->dev, "Firmware file %s not found\n", fname);
		return -EIO;
	}

	image_hdr = (struct ipr_ucode_image_header *)fw_entry->data;

	if (be32_to_cpu(image_hdr->header_length) > fw_entry->size ||
	    (ioa_cfg->vpd_cbs->page3_data.card_type &&
	     ioa_cfg->vpd_cbs->page3_data.card_type != image_hdr->card_type)) {
		dev_err(&ioa_cfg->pdev->dev, "Invalid microcode buffer\n");
		release_firmware(fw_entry);
		return -EINVAL;
	}

	src = (u8 *)image_hdr + be32_to_cpu(image_hdr->header_length);
	dnld_size = fw_entry->size - be32_to_cpu(image_hdr->header_length);
	sglist = ipr_alloc_ucode_buffer(dnld_size);

	if (!sglist) {
		dev_err(&ioa_cfg->pdev->dev, "Microcode buffer allocation failed\n");
		release_firmware(fw_entry);
		return -ENOMEM;
	}

	result = ipr_copy_ucode_buffer(sglist, src, dnld_size);

	if (result) {
		dev_err(&ioa_cfg->pdev->dev,
			"Microcode buffer copy to DMA buffer failed\n");
		goto out;
	}

	result = ipr_update_ioa_ucode(ioa_cfg, sglist);

	if (!result)
		result = count;
out:
	ipr_free_ucode_buffer(sglist);
	release_firmware(fw_entry);
	return result;
}

static struct device_attribute ipr_update_fw_attr = {
	.attr = {
		.name =		"update_fw",
		.mode =		S_IWUSR,
	},
	.store = ipr_store_update_fw
};
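
/*
 * Illustrative firmware update (file and host names hypothetical): the
 * image is fetched with request_firmware(), so it must be visible to the
 * firmware loader, typically under /lib/firmware:
 *
 *	# cp ibm-adapter-ucode.bin /lib/firmware/
 *	# echo ibm-adapter-ucode.bin > /sys/class/scsi_host/host0/update_fw
 */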

static struct device_attribute *ipr_ioa_attrs[] = {
	&ipr_fw_version_attr,
	&ipr_log_level_attr,
	&ipr_diagnostics_attr,
	&ipr_ioa_state_attr,
	&ipr_ioa_reset_attr,
	&ipr_update_fw_attr,
	NULL,
};
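
/*
 * All of the attributes above are registered via the ipr_ioa_attrs table,
 * so on a live system they should surface together in the host's sysfs
 * directory (host number illustrative; writes need CAP_SYS_ADMIN):
 *
 *	# cat /sys/class/scsi_host/host0/fw_version
 *	# echo 1 > /sys/class/scsi_host/host0/run_diagnostics
 *	# echo 1 > /sys/class/scsi_host/host0/reset_host
 *	# echo online > /sys/class/scsi_host/host0/online_state
 */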

#ifdef CONFIG_SCSI_IPR_DUMP
/**
 * ipr_read_dump - Dump the adapter
 * @kobj:		kobject struct
 * @bin_attr:		bin_attribute struct
 * @buf:		buffer
 * @off:		offset
 * @count:		buffer size
 *
 * Return value:
 * 	number of bytes read from the dump
 **/
static ssize_t ipr_read_dump(struct kobject *kobj,
			     struct bin_attribute *bin_attr,
			     char *buf, loff_t off, size_t count)
{
	struct device *cdev = container_of(kobj, struct device, kobj);
	struct Scsi_Host *shost = class_to_shost(cdev);
	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
	struct ipr_dump *dump;
	unsigned long lock_flags = 0;
	char *src;
	int len;
	size_t rc = count;

	if (!capable(CAP_SYS_ADMIN))
		return -EACCES;

	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	dump = ioa_cfg->dump;

	if (ioa_cfg->sdt_state != DUMP_OBTAINED || !dump) {
		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
		return 0;
	}
	kref_get(&dump->kref);
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);

	if (off > dump->driver_dump.hdr.len) {
		kref_put(&dump->kref, ipr_release_dump);
		return 0;
	}

	if (off + count > dump->driver_dump.hdr.len) {
		count = dump->driver_dump.hdr.len - off;
		rc = count;
	}

	if (count && off < sizeof(dump->driver_dump)) {
		if (off + count > sizeof(dump->driver_dump))
			len = sizeof(dump->driver_dump) - off;
		else
			len = count;
		src = (u8 *)&dump->driver_dump + off;
		memcpy(buf, src, len);
		buf += len;
		off += len;
		count -= len;
	}

	off -= sizeof(dump->driver_dump);

	if (count && off < offsetof(struct ipr_ioa_dump, ioa_data)) {
		if (off + count > offsetof(struct ipr_ioa_dump, ioa_data))
			len = offsetof(struct ipr_ioa_dump, ioa_data) - off;
		else
			len = count;
		src = (u8 *)&dump->ioa_dump + off;
		memcpy(buf, src, len);
		buf += len;
		off += len;
		count -= len;
	}

	off -= offsetof(struct ipr_ioa_dump, ioa_data);

	while (count) {
		if ((off & PAGE_MASK) != ((off + count) & PAGE_MASK))
			len = PAGE_ALIGN(off) - off;
		else
			len = count;
		src = (u8 *)dump->ioa_dump.ioa_data[(off & PAGE_MASK) >> PAGE_SHIFT];
		src += off & ~PAGE_MASK;
		memcpy(buf, src, len);
		buf += len;
		off += len;
		count -= len;
	}

	kref_put(&dump->kref, ipr_release_dump);
	return rc;
}

/**
 * ipr_alloc_dump - Prepare for adapter dump
 * @ioa_cfg:	ioa config struct
 *
 * Return value:
 * 	0 on success / other on failure
 **/
static int ipr_alloc_dump(struct ipr_ioa_cfg *ioa_cfg)
{
	struct ipr_dump *dump;
	unsigned long lock_flags = 0;

	dump = kzalloc(sizeof(struct ipr_dump), GFP_KERNEL);

	if (!dump) {
		ipr_err("Dump memory allocation failed\n");
		return -ENOMEM;
	}

	kref_init(&dump->kref);
	dump->ioa_cfg = ioa_cfg;

	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);

	if (INACTIVE != ioa_cfg->sdt_state) {
		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
		kfree(dump);
		return 0;
	}

	ioa_cfg->dump = dump;
	ioa_cfg->sdt_state = WAIT_FOR_DUMP;
	if (ioa_cfg->ioa_is_dead && !ioa_cfg->dump_taken) {
		ioa_cfg->dump_taken = 1;
		schedule_work(&ioa_cfg->work_q);
	}
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);

	return 0;
}

/**
 * ipr_free_dump - Free adapter dump memory
 * @ioa_cfg:	ioa config struct
 *
 * Return value:
 * 	0 on success / other on failure
 **/
static int ipr_free_dump(struct ipr_ioa_cfg *ioa_cfg)
{
	struct ipr_dump *dump;
	unsigned long lock_flags = 0;

	ENTER;

	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	dump = ioa_cfg->dump;
	if (!dump) {
		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
		return 0;
	}

	ioa_cfg->dump = NULL;
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);

	kref_put(&dump->kref, ipr_release_dump);

	LEAVE;
	return 0;
}

/**
 * ipr_write_dump - Setup dump state of adapter
 * @kobj:		kobject struct
 * @bin_attr:		bin_attribute struct
 * @buf:		buffer
 * @off:		offset
 * @count:		buffer size
 *
 * Return value:
 * 	number of bytes written
 **/
static ssize_t ipr_write_dump(struct kobject *kobj,
			      struct bin_attribute *bin_attr,
			      char *buf, loff_t off, size_t count)
{
	struct device *cdev = container_of(kobj, struct device, kobj);
	struct Scsi_Host *shost = class_to_shost(cdev);
	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
	int rc;

	if (!capable(CAP_SYS_ADMIN))
		return -EACCES;

	if (buf[0] == '1')
		rc = ipr_alloc_dump(ioa_cfg);
	else if (buf[0] == '0')
		rc = ipr_free_dump(ioa_cfg);
	else
		return -EINVAL;

	if (rc)
		return rc;
	else
		return count;
}

static struct bin_attribute ipr_dump_attr = {
	.attr =	{
		.name = "dump",
		.mode = S_IRUSR | S_IWUSR,
	},
	.size = 0,
	.read = ipr_read_dump,
	.write = ipr_write_dump
};
#else
static int ipr_free_dump(struct ipr_ioa_cfg *ioa_cfg) { return 0; }
#endif
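
/*
 * Illustrative dump retrieval (requires CONFIG_SCSI_IPR_DUMP and
 * CAP_SYS_ADMIN; paths hypothetical): per ipr_write_dump() above,
 * writing '1' prepares dump collection and '0' releases the buffer,
 * while reads stream out the collected dump:
 *
 *	# echo 1 > /sys/class/scsi_host/host0/dump
 *	# dd if=/sys/class/scsi_host/host0/dump of=/tmp/ipr_dump.bin
 *	# echo 0 > /sys/class/scsi_host/host0/dump
 */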

/**
 * ipr_change_queue_depth - Change the device's queue depth
 * @sdev:	scsi device struct
 * @qdepth:	depth to set
 * @reason:	calling context
 *
 * Return value:
 * 	actual depth set
 **/
static int ipr_change_queue_depth(struct scsi_device *sdev, int qdepth,
				  int reason)
{
	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
	struct ipr_resource_entry *res;
	unsigned long lock_flags = 0;

	if (reason != SCSI_QDEPTH_DEFAULT)
		return -EOPNOTSUPP;

	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	res = (struct ipr_resource_entry *)sdev->hostdata;

	if (res && ipr_is_gata(res) && qdepth > IPR_MAX_CMD_PER_ATA_LUN)
		qdepth = IPR_MAX_CMD_PER_ATA_LUN;
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);

	scsi_adjust_queue_depth(sdev, scsi_get_tag_type(sdev), qdepth);
	return sdev->queue_depth;
}
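
/*
 * Illustrative trigger (device name hypothetical): the midlayer invokes
 * this hook when the per-device queue depth is changed through sysfs:
 *
 *	# echo 16 > /sys/block/sdb/device/queue_depth
 */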

/**
 * ipr_change_queue_type - Change the device's queue type
 * @sdev:	scsi device struct
 * @tag_type:	type of tags to use
 *
 * Return value:
 * 	actual queue type set
 **/
static int ipr_change_queue_type(struct scsi_device *sdev, int tag_type)
{
	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
	struct ipr_resource_entry *res;
	unsigned long lock_flags = 0;

	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	res = (struct ipr_resource_entry *)sdev->hostdata;

	if (res) {
		if (ipr_is_gscsi(res) && sdev->tagged_supported) {
			/*
			 * We don't bother quiescing the device here since the
			 * adapter firmware does it for us.
			 */
			scsi_set_tag_type(sdev, tag_type);

			if (tag_type)
				scsi_activate_tcq(sdev, sdev->queue_depth);
			else
				scsi_deactivate_tcq(sdev, sdev->queue_depth);
		} else
			tag_type = 0;
	} else
		tag_type = 0;

	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
	return tag_type;
}
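
/*
 * Illustrative trigger (device name hypothetical): queue_type writes from
 * sysfs land here, and tagging is only honoured for generic SCSI
 * resources that report tagged-queuing support:
 *
 *	# echo simple > /sys/block/sdb/device/queue_type
 *	# echo none > /sys/block/sdb/device/queue_type
 */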

/**
 * ipr_show_adapter_handle - Show the adapter's resource handle for this device
 * @dev:	device struct
 * @attr:	device attribute struct
 * @buf:	buffer
 *
 * Return value:
 * 	number of bytes printed to buffer
 **/
static ssize_t ipr_show_adapter_handle(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct scsi_device *sdev = to_scsi_device(dev);
	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
	struct ipr_resource_entry *res;
	unsigned long lock_flags = 0;
	ssize_t len = -ENXIO;

	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	res = (struct ipr_resource_entry *)sdev->hostdata;
	if (res)
		len = snprintf(buf, PAGE_SIZE, "%08X\n", res->res_handle);
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
	return len;
}

static struct device_attribute ipr_adapter_handle_attr = {
	.attr = {
		.name =		"adapter_handle",
		.mode =		S_IRUSR,
	},
	.show = ipr_show_adapter_handle
};

/**
 * ipr_show_resource_path - Show the resource path for this device.
 * @dev:	device struct
 * @attr:	device attribute struct
 * @buf:	buffer
 *
 * Return value:
 * 	number of bytes printed to buffer
 **/
static ssize_t ipr_show_resource_path(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct scsi_device *sdev = to_scsi_device(dev);
	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
	struct ipr_resource_entry *res;
	unsigned long lock_flags = 0;
	ssize_t len = -ENXIO;
	char buffer[IPR_MAX_RES_PATH_LENGTH];

	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	res = (struct ipr_resource_entry *)sdev->hostdata;
	if (res)
		len = snprintf(buf, PAGE_SIZE, "%s\n",
			       ipr_format_resource_path(&res->res_path[0], &buffer[0]));
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
	return len;
}

static struct device_attribute ipr_resource_path_attr = {
	.attr = {
		.name =		"resource_path",
		.mode =		S_IRUSR,
	},
	.show = ipr_show_resource_path
};
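
/*
 * Illustrative reads (SCSI address hypothetical): the two attributes
 * above are per-device, so they appear in each scsi_device directory:
 *
 *	# cat /sys/bus/scsi/devices/0:0:1:0/adapter_handle
 *	# cat /sys/bus/scsi/devices/0:0:1:0/resource_path
 */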

static struct device_attribute *ipr_dev_attrs[] = {
	&ipr_adapter_handle_attr,
	&ipr_resource_path_attr,
	NULL,
};

/**
 * ipr_biosparam - Return the HSC mapping
 * @sdev:		scsi device struct
 * @block_device:	block device pointer
 * @capacity:		capacity of the device
 * @parm:		Array containing returned HSC values.
 *
 * This function generates the HSC parms that fdisk uses.
 * We want to make sure we return something that places partitions
 * on 4k boundaries for best performance with the IOA.
 *
 * Return value:
 * 	0 on success
 **/
static int ipr_biosparam(struct scsi_device *sdev,
			 struct block_device *block_device,
			 sector_t capacity, int *parm)
{
	int heads, sectors;
	sector_t cylinders;

	heads = 128;
	sectors = 32;

	cylinders = capacity;
	sector_div(cylinders, (128 * 32));

	/* return result */
	parm[0] = heads;
	parm[1] = sectors;
	parm[2] = cylinders;

	return 0;
}
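
/*
 * Worked example for the mapping above: 128 heads * 32 sectors/track is
 * 4096 sectors (2 MiB of 512-byte sectors) per cylinder, so a disk of
 * 209715200 sectors (100 GiB) reports 209715200 / 4096 = 51200 cylinders,
 * and cylinder-aligned partitions always begin on a 4k boundary.
 */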

/**
 * ipr_find_starget - Find target based on bus/target.
 * @starget:	scsi target struct
 *
 * Return value:
 * 	resource entry pointer if found / NULL if not found
 **/
static struct ipr_resource_entry *ipr_find_starget(struct scsi_target *starget)
{
	struct Scsi_Host *shost = dev_to_shost(&starget->dev);
	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) shost->hostdata;
	struct ipr_resource_entry *res;

	list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
		if ((res->bus == starget->channel) &&
		    (res->target == starget->id) &&
		    (res->lun == 0)) {
			return res;
		}
	}

	return NULL;
}

static struct ata_port_info sata_port_info;

/**
 * ipr_target_alloc - Prepare for commands to a SCSI target
 * @starget:	scsi target struct
 *
 * If the device is a SATA device, this function allocates an
 * ATA port with libata, else it does nothing.
 *
 * Return value:
 * 	0 on success / non-0 on failure
 **/
static int ipr_target_alloc(struct scsi_target *starget)
{
	struct Scsi_Host *shost = dev_to_shost(&starget->dev);
	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) shost->hostdata;
	struct ipr_sata_port *sata_port;
	struct ata_port *ap;
	struct ipr_resource_entry *res;
	unsigned long lock_flags;

	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	res = ipr_find_starget(starget);
	starget->hostdata = NULL;

	if (res && ipr_is_gata(res)) {
		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
		sata_port = kzalloc(sizeof(*sata_port), GFP_KERNEL);
		if (!sata_port)
			return -ENOMEM;

		ap = ata_sas_port_alloc(&ioa_cfg->ata_host, &sata_port_info, shost);
		if (ap) {
			spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
			sata_port->ioa_cfg = ioa_cfg;
			sata_port->ap = ap;
			sata_port->res = res;

			res->sata_port = sata_port;
			ap->private_data = sata_port;
			starget->hostdata = sata_port;
		} else {
			kfree(sata_port);
			return -ENOMEM;
		}
	}
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);

	return 0;
}

/**
 * ipr_target_destroy - Destroy a SCSI target
 * @starget:	scsi target struct
 *
 * If the device was a SATA device, this function frees the libata
 * ATA port, else it does nothing.
 *
 **/
static void ipr_target_destroy(struct scsi_target *starget)
{
	struct ipr_sata_port *sata_port = starget->hostdata;
	struct Scsi_Host *shost = dev_to_shost(&starget->dev);
	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) shost->hostdata;

	if (ioa_cfg->sis64) {
		if (starget->channel == IPR_ARRAY_VIRTUAL_BUS)
			clear_bit(starget->id, ioa_cfg->array_ids);
		else if (starget->channel == IPR_VSET_VIRTUAL_BUS)
			clear_bit(starget->id, ioa_cfg->vset_ids);
		else if (starget->channel == 0)
			clear_bit(starget->id, ioa_cfg->target_ids);
	}

	if (sata_port) {
		starget->hostdata = NULL;
		ata_sas_port_destroy(sata_port->ap);
		kfree(sata_port);
	}
}

/**
 * ipr_find_sdev - Find device based on bus/target/lun.
 * @sdev:	scsi device struct
 *
 * Return value:
 * 	resource entry pointer if found / NULL if not found
 **/
static struct ipr_resource_entry *ipr_find_sdev(struct scsi_device *sdev)
{
	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) sdev->host->hostdata;
	struct ipr_resource_entry *res;

	list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
		if ((res->bus == sdev->channel) &&
		    (res->target == sdev->id) &&
		    (res->lun == sdev->lun))
			return res;
	}

	return NULL;
}

/**
 * ipr_slave_destroy - Unconfigure a SCSI device
 * @sdev:	scsi device struct
 *
 * Return value:
 * 	nothing
 **/
static void ipr_slave_destroy(struct scsi_device *sdev)
{
	struct ipr_resource_entry *res;
	struct ipr_ioa_cfg *ioa_cfg;
	unsigned long lock_flags = 0;

	ioa_cfg = (struct ipr_ioa_cfg *) sdev->host->hostdata;

	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	res = (struct ipr_resource_entry *) sdev->hostdata;
	if (res) {
		if (res->sata_port)
			ata_port_disable(res->sata_port->ap);
		sdev->hostdata = NULL;
		res->sdev = NULL;
		res->sata_port = NULL;
	}
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
}

/**
 * ipr_slave_configure - Configure a SCSI device
 * @sdev:	scsi device struct
 *
 * This function configures the specified scsi device.
 *
 * Return value:
 * 	0 on success
 **/
static int ipr_slave_configure(struct scsi_device *sdev)
{
	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) sdev->host->hostdata;
	struct ipr_resource_entry *res;
	struct ata_port *ap = NULL;
	unsigned long lock_flags = 0;
	char buffer[IPR_MAX_RES_PATH_LENGTH];

	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	res = sdev->hostdata;
	if (res) {
		if (ipr_is_af_dasd_device(res))
			sdev->type = TYPE_RAID;
		if (ipr_is_af_dasd_device(res) || ipr_is_ioa_resource(res)) {
			sdev->scsi_level = 4;
			sdev->no_uld_attach = 1;
		}
		if (ipr_is_vset_device(res)) {
			blk_queue_rq_timeout(sdev->request_queue,
					     IPR_VSET_RW_TIMEOUT);
			blk_queue_max_hw_sectors(sdev->request_queue, IPR_VSET_MAX_SECTORS);
		}
		if (ipr_is_vset_device(res) || ipr_is_scsi_disk(res))
			sdev->allow_restart = 1;
		if (ipr_is_gata(res) && res->sata_port)
			ap = res->sata_port->ap;
		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);

		if (ap) {
			scsi_adjust_queue_depth(sdev, 0, IPR_MAX_CMD_PER_ATA_LUN);
			ata_sas_slave_configure(sdev, ap);
		} else
			scsi_adjust_queue_depth(sdev, 0, sdev->host->cmd_per_lun);
		if (ioa_cfg->sis64)
			sdev_printk(KERN_INFO, sdev, "Resource path: %s\n",
				    ipr_format_resource_path(&res->res_path[0], &buffer[0]));
		return 0;
	}
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
	return 0;
}

/**
 * ipr_ata_slave_alloc - Prepare for commands to a SATA device
 * @sdev:	scsi device struct
 *
 * This function initializes an ATA port so that future commands
 * sent through queuecommand will work.
 *
 * Return value:
 * 	0 on success
 **/
static int ipr_ata_slave_alloc(struct scsi_device *sdev)
{
	struct ipr_sata_port *sata_port = NULL;
	int rc = -ENXIO;

	ENTER;
	if (sdev->sdev_target)
		sata_port = sdev->sdev_target->hostdata;
	if (sata_port)
		rc = ata_sas_port_init(sata_port->ap);
	if (rc)
		ipr_slave_destroy(sdev);

	LEAVE;
	return rc;
}

/**
 * ipr_slave_alloc - Prepare for commands to a device.
 * @sdev:	scsi device struct
 *
 * This function saves a pointer to the resource entry
 * in the scsi device struct if the device exists. We
 * can then use this pointer in ipr_queuecommand when
 * handling new commands.
 *
 * Return value:
 * 	0 on success / -ENXIO if device does not exist
 **/
static int ipr_slave_alloc(struct scsi_device *sdev)
{
	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) sdev->host->hostdata;
	struct ipr_resource_entry *res;
	unsigned long lock_flags;
	int rc = -ENXIO;

	sdev->hostdata = NULL;

	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);

	res = ipr_find_sdev(sdev);
	if (res) {
		res->sdev = sdev;
		res->add_to_ml = 0;
		res->in_erp = 0;
		sdev->hostdata = res;
		if (!ipr_is_naca_model(res))
			res->needs_sync_complete = 1;
		rc = 0;
		if (ipr_is_gata(res)) {
			spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
			return ipr_ata_slave_alloc(sdev);
		}
	}

	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);

	return rc;
}

/**
 * ipr_eh_host_reset - Reset the host adapter
 * @scsi_cmd:	scsi command struct
 *
 * Return value:
 * 	SUCCESS / FAILED
 **/
static int __ipr_eh_host_reset(struct scsi_cmnd *scsi_cmd)
{
	struct ipr_ioa_cfg *ioa_cfg;
	int rc;

	ENTER;
	ioa_cfg = (struct ipr_ioa_cfg *) scsi_cmd->device->host->hostdata;

	dev_err(&ioa_cfg->pdev->dev,
		"Adapter being reset as a result of error recovery.\n");

	if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
		ioa_cfg->sdt_state = GET_DUMP;

	rc = ipr_reset_reload(ioa_cfg, IPR_SHUTDOWN_ABBREV);

	LEAVE;
	return rc;
}

static int ipr_eh_host_reset(struct scsi_cmnd *cmd)
{
	int rc;

	spin_lock_irq(cmd->device->host->host_lock);
	rc = __ipr_eh_host_reset(cmd);
	spin_unlock_irq(cmd->device->host->host_lock);

	return rc;
}

/**
 * ipr_device_reset - Reset the device
 * @ioa_cfg:	ioa config struct
 * @res:	resource entry struct
 *
 * This function issues a device reset to the affected device.
 * If the device is a SCSI device, a LUN reset will be sent
 * to the device first. If that does not work, a target reset
 * will be sent. If the device is a SATA device, a PHY reset will
 * be sent.
 *
 * Return value:
 *	0 on success / non-zero on failure
 **/
static int ipr_device_reset(struct ipr_ioa_cfg *ioa_cfg,
			    struct ipr_resource_entry *res)
{
	struct ipr_cmnd *ipr_cmd;
	struct ipr_ioarcb *ioarcb;
	struct ipr_cmd_pkt *cmd_pkt;
	struct ipr_ioarcb_ata_regs *regs;
	u32 ioasc;

	ENTER;
	ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
	ioarcb = &ipr_cmd->ioarcb;
	cmd_pkt = &ioarcb->cmd_pkt;

	if (ipr_cmd->ioa_cfg->sis64) {
		regs = &ipr_cmd->i.ata_ioadl.regs;
		ioarcb->add_cmd_parms_offset = cpu_to_be16(sizeof(*ioarcb));
	} else
		regs = &ioarcb->u.add_data.u.regs;

	ioarcb->res_handle = res->res_handle;
	cmd_pkt->request_type = IPR_RQTYPE_IOACMD;
	cmd_pkt->cdb[0] = IPR_RESET_DEVICE;
	if (ipr_is_gata(res)) {
		cmd_pkt->cdb[2] = IPR_ATA_PHY_RESET;
		ioarcb->add_cmd_parms_len = cpu_to_be16(sizeof(regs->flags));
		regs->flags |= IPR_ATA_FLAG_STATUS_ON_GOOD_COMPLETION;
	}

	ipr_send_blocking_cmd(ipr_cmd, ipr_timeout, IPR_DEVICE_RESET_TIMEOUT);
	ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);
	list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
	if (ipr_is_gata(res) && res->sata_port && ioasc != IPR_IOASC_IOA_WAS_RESET)
		memcpy(&res->sata_port->ioasa, &ipr_cmd->ioasa.u.gata,
		       sizeof(struct ipr_ioasa_gata));

	LEAVE;
	return (IPR_IOASC_SENSE_KEY(ioasc) ? -EIO : 0);
}

/**
 * ipr_sata_reset - Reset the SATA port
 * @link:	SATA link to reset
 * @classes:	class of the attached device
 * @deadline:	deadline jiffies for the reset
 *
 * This function issues a SATA phy reset to the affected ATA link.
 *
 * Return value:
 *	0 on success / non-zero on failure
 **/
static int ipr_sata_reset(struct ata_link *link, unsigned int *classes,
			  unsigned long deadline)
{
	struct ipr_sata_port *sata_port = link->ap->private_data;
	struct ipr_ioa_cfg *ioa_cfg = sata_port->ioa_cfg;
	struct ipr_resource_entry *res;
	unsigned long lock_flags = 0;
	int rc = -ENXIO;

	ENTER;
	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	while (ioa_cfg->in_reset_reload) {
		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
		wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
		spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	}

	res = sata_port->res;
	if (res) {
		rc = ipr_device_reset(ioa_cfg, res);
		*classes = res->ata_class;
	}

	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
	LEAVE;
	return rc;
}

/**
 * ipr_eh_dev_reset - Reset the device
 * @scsi_cmd:	scsi command struct
 *
 * This function issues a device reset to the affected device.
 * A LUN reset will be sent to the device first. If that does
 * not work, a target reset will be sent.
 *
 * Return value:
 *	SUCCESS / FAILED
 **/
static int __ipr_eh_dev_reset(struct scsi_cmnd *scsi_cmd)
{
	struct ipr_cmnd *ipr_cmd;
	struct ipr_ioa_cfg *ioa_cfg;
	struct ipr_resource_entry *res;
	struct ata_port *ap;
	int rc = 0;

	ENTER;
	ioa_cfg = (struct ipr_ioa_cfg *) scsi_cmd->device->host->hostdata;
	res = scsi_cmd->device->hostdata;

	if (!res)
		return FAILED;

	/*
	 * If we are currently going through reset/reload, return failed. This will force the
	 * mid-layer to call ipr_eh_host_reset, which will then go to sleep and wait for the
	 * reset to complete
	 */
	if (ioa_cfg->in_reset_reload)
		return FAILED;
	if (ioa_cfg->ioa_is_dead)
		return FAILED;

	list_for_each_entry(ipr_cmd, &ioa_cfg->pending_q, queue) {
		if (ipr_cmd->ioarcb.res_handle == res->res_handle) {
			if (ipr_cmd->scsi_cmd)
				ipr_cmd->done = ipr_scsi_eh_done;
			if (ipr_cmd->qc)
				ipr_cmd->done = ipr_sata_eh_done;
			if (ipr_cmd->qc && !(ipr_cmd->qc->flags & ATA_QCFLAG_FAILED)) {
				ipr_cmd->qc->err_mask |= AC_ERR_TIMEOUT;
				ipr_cmd->qc->flags |= ATA_QCFLAG_FAILED;
			}
		}
	}

	res->resetting_device = 1;
	scmd_printk(KERN_ERR, scsi_cmd, "Resetting device\n");

	if (ipr_is_gata(res) && res->sata_port) {
		ap = res->sata_port->ap;
		spin_unlock_irq(scsi_cmd->device->host->host_lock);
		ata_std_error_handler(ap);
		spin_lock_irq(scsi_cmd->device->host->host_lock);

		list_for_each_entry(ipr_cmd, &ioa_cfg->pending_q, queue) {
			if (ipr_cmd->ioarcb.res_handle == res->res_handle) {
				rc = -EIO;
				break;
			}
		}
	} else
		rc = ipr_device_reset(ioa_cfg, res);
	res->resetting_device = 0;

	LEAVE;
	return (rc ? FAILED : SUCCESS);
}

static int ipr_eh_dev_reset(struct scsi_cmnd *cmd)
{
	int rc;

	spin_lock_irq(cmd->device->host->host_lock);
	rc = __ipr_eh_dev_reset(cmd);
	spin_unlock_irq(cmd->device->host->host_lock);

	return rc;
}

/**
 * ipr_bus_reset_done - Op done function for bus reset.
 * @ipr_cmd:	ipr command struct
 *
 * This function is the op done function for a bus reset
 *
 * Return value:
 * 	none
 **/
static void ipr_bus_reset_done(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
	struct ipr_resource_entry *res;

	ENTER;
	if (!ioa_cfg->sis64)
		list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
			if (res->res_handle == ipr_cmd->ioarcb.res_handle) {
				scsi_report_bus_reset(ioa_cfg->host, res->bus);
				break;
			}
		}

	/*
	 * If abort has not completed, indicate the reset has, else call the
	 * abort's done function to wake the sleeping eh thread
	 */
	if (ipr_cmd->sibling->sibling)
		ipr_cmd->sibling->sibling = NULL;
	else
		ipr_cmd->sibling->done(ipr_cmd->sibling);

	list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
	LEAVE;
}

/**
 * ipr_abort_timeout - An abort task has timed out
 * @ipr_cmd:	ipr command struct
 *
 * This function handles when an abort task times out. If this
 * happens we issue a bus reset since we have resources tied
 * up that must be freed before returning to the midlayer.
 *
 * Return value:
 *	none
 **/
static void ipr_abort_timeout(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_cmnd *reset_cmd;
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
	struct ipr_cmd_pkt *cmd_pkt;
	unsigned long lock_flags = 0;

	ENTER;
	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	if (ipr_cmd->completion.done || ioa_cfg->in_reset_reload) {
		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
		return;
	}

	sdev_printk(KERN_ERR, ipr_cmd->u.sdev, "Abort timed out. Resetting bus.\n");
	reset_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
	ipr_cmd->sibling = reset_cmd;
	reset_cmd->sibling = ipr_cmd;
	reset_cmd->ioarcb.res_handle = ipr_cmd->ioarcb.res_handle;
	cmd_pkt = &reset_cmd->ioarcb.cmd_pkt;
	cmd_pkt->request_type = IPR_RQTYPE_IOACMD;
	cmd_pkt->cdb[0] = IPR_RESET_DEVICE;
	cmd_pkt->cdb[2] = IPR_RESET_TYPE_SELECT | IPR_BUS_RESET;

	ipr_do_req(reset_cmd, ipr_bus_reset_done, ipr_timeout, IPR_DEVICE_RESET_TIMEOUT);
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
	LEAVE;
}

/**
 * ipr_cancel_op - Cancel specified op
 * @scsi_cmd:	scsi command struct
 *
 * This function cancels specified op.
 *
 * Return value:
 *	SUCCESS / FAILED
 **/
static int ipr_cancel_op(struct scsi_cmnd *scsi_cmd)
{
	struct ipr_cmnd *ipr_cmd;
	struct ipr_ioa_cfg *ioa_cfg;
	struct ipr_resource_entry *res;
	struct ipr_cmd_pkt *cmd_pkt;
	u32 ioasc;
	int op_found = 0;

	ENTER;
	ioa_cfg = (struct ipr_ioa_cfg *)scsi_cmd->device->host->hostdata;
	res = scsi_cmd->device->hostdata;

	/* If we are currently going through reset/reload, return failed.
	 * This will force the mid-layer to call ipr_eh_host_reset,
	 * which will then go to sleep and wait for the reset to complete
	 */
	if (ioa_cfg->in_reset_reload || ioa_cfg->ioa_is_dead)
		return FAILED;
	if (!res || !ipr_is_gscsi(res))
		return FAILED;

	list_for_each_entry(ipr_cmd, &ioa_cfg->pending_q, queue) {
		if (ipr_cmd->scsi_cmd == scsi_cmd) {
			ipr_cmd->done = ipr_scsi_eh_done;
			op_found = 1;
			break;
		}
	}

	if (!op_found)
		return SUCCESS;

	ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
	ipr_cmd->ioarcb.res_handle = res->res_handle;
	cmd_pkt = &ipr_cmd->ioarcb.cmd_pkt;
	cmd_pkt->request_type = IPR_RQTYPE_IOACMD;
	cmd_pkt->cdb[0] = IPR_CANCEL_ALL_REQUESTS;
	ipr_cmd->u.sdev = scsi_cmd->device;

	scmd_printk(KERN_ERR, scsi_cmd, "Aborting command: %02X\n",
		    scsi_cmd->cmnd[0]);
	ipr_send_blocking_cmd(ipr_cmd, ipr_abort_timeout, IPR_CANCEL_ALL_TIMEOUT);
	ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);

	/*
	 * If the abort task timed out and we sent a bus reset, we will get
	 * one of the following responses to the abort
	 */
	if (ioasc == IPR_IOASC_BUS_WAS_RESET || ioasc == IPR_IOASC_SYNC_REQUIRED) {
		ioasc = 0;
		ipr_trace;
	}

	list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
	if (!ipr_is_naca_model(res))
		res->needs_sync_complete = 1;

	LEAVE;
	return (IPR_IOASC_SENSE_KEY(ioasc) ? FAILED : SUCCESS);
}

/**
 * ipr_eh_abort - Abort a single op
 * @scsi_cmd:	scsi command struct
 *
 * Return value:
 *	SUCCESS / FAILED
 **/
static int ipr_eh_abort(struct scsi_cmnd *scsi_cmd)
{
	unsigned long flags;
	int rc;

	ENTER;

	spin_lock_irqsave(scsi_cmd->device->host->host_lock, flags);
	rc = ipr_cancel_op(scsi_cmd);
	spin_unlock_irqrestore(scsi_cmd->device->host->host_lock, flags);

	LEAVE;
	return rc;
}

/**
 * ipr_handle_other_interrupt - Handle "other" interrupts
 * @ioa_cfg:	ioa config struct
 * @int_reg:	interrupt register
 *
 * Return value:
 * 	IRQ_NONE / IRQ_HANDLED
 **/
static irqreturn_t ipr_handle_other_interrupt(struct ipr_ioa_cfg *ioa_cfg,
					      volatile u32 int_reg)
{
	irqreturn_t rc = IRQ_HANDLED;

	if (int_reg & IPR_PCII_IOA_TRANS_TO_OPER) {
		/* Mask the interrupt */
		writel(IPR_PCII_IOA_TRANS_TO_OPER, ioa_cfg->regs.set_interrupt_mask_reg);

		/* Clear the interrupt */
		writel(IPR_PCII_IOA_TRANS_TO_OPER, ioa_cfg->regs.clr_interrupt_reg);
		int_reg = readl(ioa_cfg->regs.sense_interrupt_reg);

		list_del(&ioa_cfg->reset_cmd->queue);
		del_timer(&ioa_cfg->reset_cmd->timer);
		ipr_reset_ioa_job(ioa_cfg->reset_cmd);
	} else {
		if (int_reg & IPR_PCII_IOA_UNIT_CHECKED)
			ioa_cfg->ioa_unit_checked = 1;
		else
			dev_err(&ioa_cfg->pdev->dev,
				"Permanent IOA failure. 0x%08X\n", int_reg);

		if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
			ioa_cfg->sdt_state = GET_DUMP;

		ipr_mask_and_clear_interrupts(ioa_cfg, ~0);
		ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
	}

	return rc;
}

/**
 * ipr_isr_eh - Interrupt service routine error handler
 * @ioa_cfg:	ioa config struct
 * @msg:	message to log
 *
 * Return value:
 * 	none
 **/
static void ipr_isr_eh(struct ipr_ioa_cfg *ioa_cfg, char *msg)
{
	ioa_cfg->errors_logged++;
	dev_err(&ioa_cfg->pdev->dev, "%s\n", msg);

	if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
		ioa_cfg->sdt_state = GET_DUMP;

	ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
}

/**
 * ipr_isr - Interrupt service routine
 * @irq:	irq number
 * @devp:	pointer to ioa config struct
 *
 * Return value:
 * 	IRQ_NONE / IRQ_HANDLED
 **/
static irqreturn_t ipr_isr(int irq, void *devp)
{
	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)devp;
	unsigned long lock_flags = 0;
	volatile u32 int_reg, int_mask_reg;
	u32 ioasc;
	u16 cmd_index;
	int num_hrrq = 0;
	struct ipr_cmnd *ipr_cmd;
	irqreturn_t rc = IRQ_NONE;

	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);

	/* If interrupts are disabled, ignore the interrupt */
	if (!ioa_cfg->allow_interrupts) {
		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
		return IRQ_NONE;
	}

	int_mask_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg32);
	int_reg = readl(ioa_cfg->regs.sense_interrupt_reg32) & ~int_mask_reg;

	/* If an interrupt on the adapter did not occur, ignore it.
	 * Or in the case of SIS 64, check for a stage change interrupt.
	 */
	if (unlikely((int_reg & IPR_PCII_OPER_INTERRUPTS) == 0)) {
		if (ioa_cfg->sis64) {
			int_mask_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
			int_reg = readl(ioa_cfg->regs.sense_interrupt_reg) & ~int_mask_reg;
			if (int_reg & IPR_PCII_IPL_STAGE_CHANGE) {

				/* clear stage change */
				writel(IPR_PCII_IPL_STAGE_CHANGE, ioa_cfg->regs.clr_interrupt_reg);
				int_reg = readl(ioa_cfg->regs.sense_interrupt_reg) & ~int_mask_reg;
				list_del(&ioa_cfg->reset_cmd->queue);
				del_timer(&ioa_cfg->reset_cmd->timer);
				ipr_reset_ioa_job(ioa_cfg->reset_cmd);
				spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
				return IRQ_HANDLED;
			}
		}

		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
		return IRQ_NONE;
	}

	while (1) {
		ipr_cmd = NULL;

		while ((be32_to_cpu(*ioa_cfg->hrrq_curr) & IPR_HRRQ_TOGGLE_BIT) ==
		       ioa_cfg->toggle_bit) {

			cmd_index = (be32_to_cpu(*ioa_cfg->hrrq_curr) &
				     IPR_HRRQ_REQ_RESP_HANDLE_MASK) >> IPR_HRRQ_REQ_RESP_HANDLE_SHIFT;

			if (unlikely(cmd_index >= IPR_NUM_CMD_BLKS)) {
				ipr_isr_eh(ioa_cfg, "Invalid response handle from IOA");
				spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
				return IRQ_HANDLED;
			}

			ipr_cmd = ioa_cfg->ipr_cmnd_list[cmd_index];

			ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);

			ipr_trc_hook(ipr_cmd, IPR_TRACE_FINISH, ioasc);

			list_del(&ipr_cmd->queue);
			del_timer(&ipr_cmd->timer);
			ipr_cmd->done(ipr_cmd);

			rc = IRQ_HANDLED;

			if (ioa_cfg->hrrq_curr < ioa_cfg->hrrq_end) {
				ioa_cfg->hrrq_curr++;
			} else {
				ioa_cfg->hrrq_curr = ioa_cfg->hrrq_start;
				ioa_cfg->toggle_bit ^= 1u;
			}
		}

		if (ipr_cmd != NULL) {
			/* Clear the PCI interrupt */
			do {
				writel(IPR_PCII_HRRQ_UPDATED, ioa_cfg->regs.clr_interrupt_reg32);
				int_reg = readl(ioa_cfg->regs.sense_interrupt_reg32) & ~int_mask_reg;
			} while (int_reg & IPR_PCII_HRRQ_UPDATED &&
				 num_hrrq++ < IPR_MAX_HRRQ_RETRIES);

			if (int_reg & IPR_PCII_HRRQ_UPDATED) {
				ipr_isr_eh(ioa_cfg, "Error clearing HRRQ");
				spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
				return IRQ_HANDLED;
			}

		} else
			break;
	}

	if (unlikely(rc == IRQ_NONE))
		rc = ipr_handle_other_interrupt(ioa_cfg, int_reg);

	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
	return rc;
}
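
/*
 * HRRQ consumption sketch (terminology from ipr_isr() above; behaviour
 * inferred from that code): the host response queue is a ring of 32-bit
 * words the adapter fills with completions. Each word carries a command
 * index plus a toggle bit that flips on every wrap of the ring. The ISR
 * consumes entries while the entry's toggle bit matches
 * ioa_cfg->toggle_bit, and flips its own copy when hrrq_curr wraps from
 * hrrq_end back to hrrq_start, so stale words left over from the
 * previous pass are never mistaken for new completions.
 */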

/**
 * ipr_build_ioadl64 - Build a scatter/gather list and map the buffer
 * @ioa_cfg:	ioa config struct
 * @ipr_cmd:	ipr command struct
 *
 * Return value:
 * 	0 on success / -1 on failure
 **/
static int ipr_build_ioadl64(struct ipr_ioa_cfg *ioa_cfg,
			     struct ipr_cmnd *ipr_cmd)
{
	int i, nseg;
	struct scatterlist *sg;
	u32 length;
	u32 ioadl_flags = 0;
	struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
	struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
	struct ipr_ioadl64_desc *ioadl64 = ipr_cmd->i.ioadl64;

	length = scsi_bufflen(scsi_cmd);
	if (!length)
		return 0;

	nseg = scsi_dma_map(scsi_cmd);
	if (nseg < 0) {
		dev_err(&ioa_cfg->pdev->dev, "pci_map_sg failed!\n");
		return -1;
	}

	ipr_cmd->dma_use_sg = nseg;

	if (scsi_cmd->sc_data_direction == DMA_TO_DEVICE) {
		ioadl_flags = IPR_IOADL_FLAGS_WRITE;
		ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
	} else if (scsi_cmd->sc_data_direction == DMA_FROM_DEVICE)
		ioadl_flags = IPR_IOADL_FLAGS_READ;

	scsi_for_each_sg(scsi_cmd, sg, ipr_cmd->dma_use_sg, i) {
		ioadl64[i].flags = cpu_to_be32(ioadl_flags);
		ioadl64[i].data_len = cpu_to_be32(sg_dma_len(sg));
		ioadl64[i].address = cpu_to_be64(sg_dma_address(sg));
	}

	ioadl64[i-1].flags |= cpu_to_be32(IPR_IOADL_FLAGS_LAST);
	return 0;
}

/**
 * ipr_build_ioadl - Build a scatter/gather list and map the buffer
 * @ioa_cfg:	ioa config struct
 * @ipr_cmd:	ipr command struct
 *
 * Return value:
 * 	0 on success / -1 on failure
 **/
static int ipr_build_ioadl(struct ipr_ioa_cfg *ioa_cfg,
			   struct ipr_cmnd *ipr_cmd)
{
	int i, nseg;
	struct scatterlist *sg;
	u32 length;
	u32 ioadl_flags = 0;
	struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
	struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
	struct ipr_ioadl_desc *ioadl = ipr_cmd->i.ioadl;

	length = scsi_bufflen(scsi_cmd);
	if (!length)
		return 0;

	nseg = scsi_dma_map(scsi_cmd);
	if (nseg < 0) {
		dev_err(&ioa_cfg->pdev->dev, "pci_map_sg failed!\n");
		return -1;
	}

	ipr_cmd->dma_use_sg = nseg;

	if (scsi_cmd->sc_data_direction == DMA_TO_DEVICE) {
		ioadl_flags = IPR_IOADL_FLAGS_WRITE;
		ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
		ioarcb->data_transfer_length = cpu_to_be32(length);
		ioarcb->ioadl_len =
			cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);
	} else if (scsi_cmd->sc_data_direction == DMA_FROM_DEVICE) {
		ioadl_flags = IPR_IOADL_FLAGS_READ;
		ioarcb->read_data_transfer_length = cpu_to_be32(length);
		ioarcb->read_ioadl_len =
			cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);
	}

	if (ipr_cmd->dma_use_sg <= ARRAY_SIZE(ioarcb->u.add_data.u.ioadl)) {
		ioadl = ioarcb->u.add_data.u.ioadl;
		ioarcb->write_ioadl_addr = cpu_to_be32((ipr_cmd->dma_addr) +
				offsetof(struct ipr_ioarcb, u.add_data));
		ioarcb->read_ioadl_addr = ioarcb->write_ioadl_addr;
	}

	scsi_for_each_sg(scsi_cmd, sg, ipr_cmd->dma_use_sg, i) {
		ioadl[i].flags_and_data_len =
			cpu_to_be32(ioadl_flags | sg_dma_len(sg));
		ioadl[i].address = cpu_to_be32(sg_dma_address(sg));
	}

	ioadl[i-1].flags_and_data_len |= cpu_to_be32(IPR_IOADL_FLAGS_LAST);
	return 0;
}

/**
 * ipr_get_task_attributes - Translate SPI Q-Tag to task attributes
 * @scsi_cmd:	scsi command struct
 *
 * Return value:
 * 	task attributes
 **/
static u8 ipr_get_task_attributes(struct scsi_cmnd *scsi_cmd)
{
	u8 tag[2];
	u8 rc = IPR_FLAGS_LO_UNTAGGED_TASK;

	if (scsi_populate_tag_msg(scsi_cmd, tag)) {
		switch (tag[0]) {
		case MSG_SIMPLE_TAG:
			rc = IPR_FLAGS_LO_SIMPLE_TASK;
			break;
		case MSG_HEAD_TAG:
			rc = IPR_FLAGS_LO_HEAD_OF_Q_TASK;
			break;
		case MSG_ORDERED_TAG:
			rc = IPR_FLAGS_LO_ORDERED_TASK;
			break;
		}
	}

	return rc;
}

/**
 * ipr_erp_done - Process completion of ERP for a device
 * @ipr_cmd:		ipr command struct
 *
 * This function copies the sense buffer into the scsi_cmd
 * struct and pushes the scsi_done function.
 *
 * Return value:
 * 	nothing
 **/
static void ipr_erp_done(struct ipr_cmnd *ipr_cmd)
{
	struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
	struct ipr_resource_entry *res = scsi_cmd->device->hostdata;
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
	u32 ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);

	if (IPR_IOASC_SENSE_KEY(ioasc) > 0) {
		scsi_cmd->result |= (DID_ERROR << 16);
		scmd_printk(KERN_ERR, scsi_cmd,
			    "Request Sense failed with IOASC: 0x%08X\n", ioasc);
	} else {
		memcpy(scsi_cmd->sense_buffer, ipr_cmd->sense_buffer,
		       SCSI_SENSE_BUFFERSIZE);
	}

	if (res) {
		if (!ipr_is_naca_model(res))
			res->needs_sync_complete = 1;
		res->in_erp = 0;
	}
	scsi_dma_unmap(ipr_cmd->scsi_cmd);
	list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
	scsi_cmd->scsi_done(scsi_cmd);
}
5152
5153/**
5154 * ipr_reinit_ipr_cmnd_for_erp - Re-initialize a cmnd block to be used for ERP
5155 * @ipr_cmd: ipr command struct
5156 *
5157 * Return value:
5158 * none
5159 **/
5160static void ipr_reinit_ipr_cmnd_for_erp(struct ipr_cmnd *ipr_cmd)
5161{
51b1c7e1
BK
5162 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
5163 struct ipr_ioasa *ioasa = &ipr_cmd->ioasa;
a32c055f 5164 dma_addr_t dma_addr = ipr_cmd->dma_addr;
1da177e4
LT
5165
5166 memset(&ioarcb->cmd_pkt, 0, sizeof(struct ipr_cmd_pkt));
a32c055f 5167 ioarcb->data_transfer_length = 0;
1da177e4 5168 ioarcb->read_data_transfer_length = 0;
a32c055f 5169 ioarcb->ioadl_len = 0;
1da177e4
LT
5170 ioarcb->read_ioadl_len = 0;
5171 ioasa->ioasc = 0;
5172 ioasa->residual_data_len = 0;
a32c055f
WB
5173
5174 if (ipr_cmd->ioa_cfg->sis64)
5175 ioarcb->u.sis64_addr_data.data_ioadl_addr =
5176 cpu_to_be64(dma_addr + offsetof(struct ipr_cmnd, i.ioadl64));
5177 else {
5178 ioarcb->write_ioadl_addr =
5179 cpu_to_be32(dma_addr + offsetof(struct ipr_cmnd, i.ioadl));
5180 ioarcb->read_ioadl_addr = ioarcb->write_ioadl_addr;
5181 }
1da177e4
LT
5182}
5183
5184/**
5185 * ipr_erp_request_sense - Send request sense to a device
5186 * @ipr_cmd: ipr command struct
5187 *
5188 * This function sends a request sense to a device as a result
5189 * of a check condition.
5190 *
5191 * Return value:
5192 * nothing
5193 **/
5194static void ipr_erp_request_sense(struct ipr_cmnd *ipr_cmd)
5195{
5196 struct ipr_cmd_pkt *cmd_pkt = &ipr_cmd->ioarcb.cmd_pkt;
5197 u32 ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);
5198
5199 if (IPR_IOASC_SENSE_KEY(ioasc) > 0) {
5200 ipr_erp_done(ipr_cmd);
5201 return;
5202 }
5203
5204 ipr_reinit_ipr_cmnd_for_erp(ipr_cmd);
5205
5206 cmd_pkt->request_type = IPR_RQTYPE_SCSICDB;
5207 cmd_pkt->cdb[0] = REQUEST_SENSE;
5208 cmd_pkt->cdb[4] = SCSI_SENSE_BUFFERSIZE;
5209 cmd_pkt->flags_hi |= IPR_FLAGS_HI_SYNC_OVERRIDE;
5210 cmd_pkt->flags_hi |= IPR_FLAGS_HI_NO_ULEN_CHK;
5211 cmd_pkt->timeout = cpu_to_be16(IPR_REQUEST_SENSE_TIMEOUT / HZ);
5212
a32c055f
WB
5213 ipr_init_ioadl(ipr_cmd, ipr_cmd->sense_buffer_dma,
5214 SCSI_SENSE_BUFFERSIZE, IPR_IOADL_FLAGS_READ_LAST);
1da177e4
LT
5215
5216 ipr_do_req(ipr_cmd, ipr_erp_done, ipr_timeout,
5217 IPR_REQUEST_SENSE_TIMEOUT * 2);
5218}
5219
5220/**
5221 * ipr_erp_cancel_all - Send cancel all to a device
5222 * @ipr_cmd: ipr command struct
5223 *
5224 * This function sends a cancel all to a device to clear the
5225 * queue. If we are running TCQ on the device, QERR is set to 1,
5226 * which means all outstanding ops have been dropped on the floor.
5227 * Cancel all will return them to us.
5228 *
5229 * Return value:
5230 * nothing
5231 **/
5232static void ipr_erp_cancel_all(struct ipr_cmnd *ipr_cmd)
5233{
5234 struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
5235 struct ipr_resource_entry *res = scsi_cmd->device->hostdata;
5236 struct ipr_cmd_pkt *cmd_pkt;
5237
5238 res->in_erp = 1;
5239
5240 ipr_reinit_ipr_cmnd_for_erp(ipr_cmd);
5241
5242 if (!scsi_get_tag_type(scsi_cmd->device)) {
5243 ipr_erp_request_sense(ipr_cmd);
5244 return;
5245 }
5246
5247 cmd_pkt = &ipr_cmd->ioarcb.cmd_pkt;
5248 cmd_pkt->request_type = IPR_RQTYPE_IOACMD;
5249 cmd_pkt->cdb[0] = IPR_CANCEL_ALL_REQUESTS;
5250
5251 ipr_do_req(ipr_cmd, ipr_erp_request_sense, ipr_timeout,
5252 IPR_CANCEL_ALL_TIMEOUT);
5253}
5254
5255/**
5256 * ipr_dump_ioasa - Dump contents of IOASA
5257 * @ioa_cfg: ioa config struct
5258 * @ipr_cmd: ipr command struct
fe964d0a 5259 * @res: resource entry struct
1da177e4
LT
5260 *
5261 * This function is invoked by the interrupt handler when ops
5262 * fail. It will log the IOASA if appropriate. Only called
5263 * for GPDD ops.
5264 *
5265 * Return value:
5266 * none
5267 **/
5268static void ipr_dump_ioasa(struct ipr_ioa_cfg *ioa_cfg,
fe964d0a 5269 struct ipr_cmnd *ipr_cmd, struct ipr_resource_entry *res)
1da177e4
LT
5270{
5271 int i;
5272 u16 data_len;
b0692dd4 5273 u32 ioasc, fd_ioasc;
1da177e4
LT
5274 struct ipr_ioasa *ioasa = &ipr_cmd->ioasa;
5275 __be32 *ioasa_data = (__be32 *)ioasa;
5276 int error_index;
5277
5278 ioasc = be32_to_cpu(ioasa->ioasc) & IPR_IOASC_IOASC_MASK;
b0692dd4 5279 fd_ioasc = be32_to_cpu(ioasa->fd_ioasc) & IPR_IOASC_IOASC_MASK;
1da177e4
LT
5280
5281 if (0 == ioasc)
5282 return;
5283
5284 if (ioa_cfg->log_level < IPR_DEFAULT_LOG_LEVEL)
5285 return;
5286
b0692dd4
BK
5287 if (ioasc == IPR_IOASC_BUS_WAS_RESET && fd_ioasc)
5288 error_index = ipr_get_error(fd_ioasc);
5289 else
5290 error_index = ipr_get_error(ioasc);
1da177e4
LT
5291
5292 if (ioa_cfg->log_level < IPR_MAX_LOG_LEVEL) {
5293 /* Don't log an error if the IOA already logged one */
5294 if (ioasa->ilid != 0)
5295 return;
5296
cc9bd5d4
BK
5297 if (!ipr_is_gscsi(res))
5298 return;
5299
1da177e4
LT
5300 if (ipr_error_table[error_index].log_ioasa == 0)
5301 return;
5302 }
5303
fe964d0a 5304 ipr_res_err(ioa_cfg, res, "%s\n", ipr_error_table[error_index].error);
1da177e4
LT
5305
5306 if (sizeof(struct ipr_ioasa) < be16_to_cpu(ioasa->ret_stat_len))
5307 data_len = sizeof(struct ipr_ioasa);
5308 else
5309 data_len = be16_to_cpu(ioasa->ret_stat_len);
5310
5311 ipr_err("IOASA Dump:\n");
5312
5313 for (i = 0; i < data_len / 4; i += 4) {
5314 ipr_err("%08X: %08X %08X %08X %08X\n", i*4,
5315 be32_to_cpu(ioasa_data[i]),
5316 be32_to_cpu(ioasa_data[i+1]),
5317 be32_to_cpu(ioasa_data[i+2]),
5318 be32_to_cpu(ioasa_data[i+3]));
5319 }
5320}
5321
5322/**
5323 * ipr_gen_sense - Generate SCSI sense data from an IOASA
5324 * @ipr_cmd: ipr command struct containing the IOASA and the
5325 * scsi_cmd whose sense buffer is filled in
5326 *
5327 * Return value:
5328 * none
5329 **/
5330static void ipr_gen_sense(struct ipr_cmnd *ipr_cmd)
5331{
5332 u32 failing_lba;
5333 u8 *sense_buf = ipr_cmd->scsi_cmd->sense_buffer;
5334 struct ipr_resource_entry *res = ipr_cmd->scsi_cmd->device->hostdata;
5335 struct ipr_ioasa *ioasa = &ipr_cmd->ioasa;
5336 u32 ioasc = be32_to_cpu(ioasa->ioasc);
5337
5338 memset(sense_buf, 0, SCSI_SENSE_BUFFERSIZE);
5339
5340 if (ioasc >= IPR_FIRST_DRIVER_IOASC)
5341 return;
5342
5343 ipr_cmd->scsi_cmd->result = SAM_STAT_CHECK_CONDITION;
5344
5345 if (ipr_is_vset_device(res) &&
5346 ioasc == IPR_IOASC_MED_DO_NOT_REALLOC &&
5347 ioasa->u.vset.failing_lba_hi != 0) {
5348 sense_buf[0] = 0x72;
5349 sense_buf[1] = IPR_IOASC_SENSE_KEY(ioasc);
5350 sense_buf[2] = IPR_IOASC_SENSE_CODE(ioasc);
5351 sense_buf[3] = IPR_IOASC_SENSE_QUAL(ioasc);
5352
5353 sense_buf[7] = 12;
5354 sense_buf[8] = 0;
5355 sense_buf[9] = 0x0A;
5356 sense_buf[10] = 0x80;
5357
5358 failing_lba = be32_to_cpu(ioasa->u.vset.failing_lba_hi);
5359
5360 sense_buf[12] = (failing_lba & 0xff000000) >> 24;
5361 sense_buf[13] = (failing_lba & 0x00ff0000) >> 16;
5362 sense_buf[14] = (failing_lba & 0x0000ff00) >> 8;
5363 sense_buf[15] = failing_lba & 0x000000ff;
5364
5365 failing_lba = be32_to_cpu(ioasa->u.vset.failing_lba_lo);
5366
5367 sense_buf[16] = (failing_lba & 0xff000000) >> 24;
5368 sense_buf[17] = (failing_lba & 0x00ff0000) >> 16;
5369 sense_buf[18] = (failing_lba & 0x0000ff00) >> 8;
5370 sense_buf[19] = failing_lba & 0x000000ff;
5371 } else {
5372 sense_buf[0] = 0x70;
5373 sense_buf[2] = IPR_IOASC_SENSE_KEY(ioasc);
5374 sense_buf[12] = IPR_IOASC_SENSE_CODE(ioasc);
5375 sense_buf[13] = IPR_IOASC_SENSE_QUAL(ioasc);
5376
5377 /* Illegal request */
5378 if ((IPR_IOASC_SENSE_KEY(ioasc) == 0x05) &&
5379 (be32_to_cpu(ioasa->ioasc_specific) & IPR_FIELD_POINTER_VALID)) {
5380 sense_buf[7] = 10; /* additional length */
5381
5382 /* IOARCB was in error */
5383 if (IPR_IOASC_SENSE_CODE(ioasc) == 0x24)
5384 sense_buf[15] = 0xC0;
5385 else /* Parameter data was invalid */
5386 sense_buf[15] = 0x80;
5387
5388 sense_buf[16] =
5389 ((IPR_FIELD_POINTER_MASK &
5390 be32_to_cpu(ioasa->ioasc_specific)) >> 8) & 0xff;
5391 sense_buf[17] =
5392 (IPR_FIELD_POINTER_MASK &
5393 be32_to_cpu(ioasa->ioasc_specific)) & 0xff;
5394 } else {
5395 if (ioasc == IPR_IOASC_MED_DO_NOT_REALLOC) {
5396 if (ipr_is_vset_device(res))
5397 failing_lba = be32_to_cpu(ioasa->u.vset.failing_lba_lo);
5398 else
5399 failing_lba = be32_to_cpu(ioasa->u.dasd.failing_lba);
5400
5401 sense_buf[0] |= 0x80; /* Or in the Valid bit */
5402 sense_buf[3] = (failing_lba & 0xff000000) >> 24;
5403 sense_buf[4] = (failing_lba & 0x00ff0000) >> 16;
5404 sense_buf[5] = (failing_lba & 0x0000ff00) >> 8;
5405 sense_buf[6] = failing_lba & 0x000000ff;
5406 }
5407
5408 sense_buf[7] = 6; /* additional length */
5409 }
5410 }
5411}
5412
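Editor's note: on the vset path above the driver builds descriptor-format sense data by hand: response code 0x72, an Information descriptor (type 0x00, additional length 0x0A) starting at byte 8 with its VALID bit (0x80) set at byte 10, and the 64-bit failing LBA stored big-endian in bytes 12-19. A small illustrative decoder for that exact layout, with offsets taken straight from the assignments above (not driver code):

#include <stdint.h>
#include <stdio.h>

/* Extract the 64-bit failing LBA packed by the 0x72 path above. */
static uint64_t failing_lba_from_sense(const uint8_t *sense)
{
	uint64_t lba = 0;

	if (sense[0] != 0x72 || sense[8] != 0x00 || sense[9] != 0x0A ||
	    sense[10] != 0x80)
		return ~0ULL;			/* not the layout built above */

	for (int i = 12; i <= 19; i++)		/* bytes 12..19, big-endian */
		lba = (lba << 8) | sense[i];
	return lba;
}

int main(void)
{
	uint8_t sense[20] = { 0x72, [7] = 12, [9] = 0x0A, [10] = 0x80 };

	sense[18] = 0x12;			/* failing LBA 0x1234 */
	sense[19] = 0x34;
	printf("failing LBA: 0x%llx\n",
	       (unsigned long long)failing_lba_from_sense(sense));
	return 0;
}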
ee0a90fa
BK
5413/**
5414 * ipr_get_autosense - Copy autosense data to sense buffer
5415 * @ipr_cmd: ipr command struct
5416 *
5417 * This function copies the autosense buffer to the buffer
5418 * in the scsi_cmd, if there is autosense available.
5419 *
5420 * Return value:
5421 * 1 if autosense was available / 0 if not
5422 **/
5423static int ipr_get_autosense(struct ipr_cmnd *ipr_cmd)
5424{
5425 struct ipr_ioasa *ioasa = &ipr_cmd->ioasa;
5426
117d2ce1 5427 if ((be32_to_cpu(ioasa->ioasc_specific) & IPR_AUTOSENSE_VALID) == 0)
ee0a90fa
BK
5428 return 0;
5429
5430 memcpy(ipr_cmd->scsi_cmd->sense_buffer, ioasa->auto_sense.data,
5431 min_t(u16, be16_to_cpu(ioasa->auto_sense.auto_sense_len),
5432 SCSI_SENSE_BUFFERSIZE));
5433 return 1;
5434}
5435
1da177e4
LT
5436/**
5437 * ipr_erp_start - Process an error response for a SCSI op
5438 * @ioa_cfg: ioa config struct
5439 * @ipr_cmd: ipr command struct
5440 *
5441 * This function determines whether or not to initiate ERP
5442 * on the affected device.
5443 *
5444 * Return value:
5445 * nothing
5446 **/
5447static void ipr_erp_start(struct ipr_ioa_cfg *ioa_cfg,
5448 struct ipr_cmnd *ipr_cmd)
5449{
5450 struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
5451 struct ipr_resource_entry *res = scsi_cmd->device->hostdata;
5452 u32 ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);
8a048994 5453 u32 masked_ioasc = ioasc & IPR_IOASC_IOASC_MASK;
1da177e4
LT
5454
5455 if (!res) {
5456 ipr_scsi_eh_done(ipr_cmd);
5457 return;
5458 }
5459
8a048994 5460 if (!ipr_is_gscsi(res) && masked_ioasc != IPR_IOASC_HW_DEV_BUS_STATUS)
1da177e4
LT
5461 ipr_gen_sense(ipr_cmd);
5462
cc9bd5d4
BK
5463 ipr_dump_ioasa(ioa_cfg, ipr_cmd, res);
5464
8a048994 5465 switch (masked_ioasc) {
1da177e4 5466 case IPR_IOASC_ABORTED_CMD_TERM_BY_HOST:
ee0a90fa
BK
5467 if (ipr_is_naca_model(res))
5468 scsi_cmd->result |= (DID_ABORT << 16);
5469 else
5470 scsi_cmd->result |= (DID_IMM_RETRY << 16);
1da177e4
LT
5471 break;
5472 case IPR_IOASC_IR_RESOURCE_HANDLE:
b0df54bb 5473 case IPR_IOASC_IR_NO_CMDS_TO_2ND_IOA:
1da177e4
LT
5474 scsi_cmd->result |= (DID_NO_CONNECT << 16);
5475 break;
5476 case IPR_IOASC_HW_SEL_TIMEOUT:
5477 scsi_cmd->result |= (DID_NO_CONNECT << 16);
ee0a90fa
BK
5478 if (!ipr_is_naca_model(res))
5479 res->needs_sync_complete = 1;
1da177e4
LT
5480 break;
5481 case IPR_IOASC_SYNC_REQUIRED:
5482 if (!res->in_erp)
5483 res->needs_sync_complete = 1;
5484 scsi_cmd->result |= (DID_IMM_RETRY << 16);
5485 break;
5486 case IPR_IOASC_MED_DO_NOT_REALLOC: /* prevent retries */
b0df54bb 5487 case IPR_IOASA_IR_DUAL_IOA_DISABLED:
1da177e4
LT
5488 scsi_cmd->result |= (DID_PASSTHROUGH << 16);
5489 break;
5490 case IPR_IOASC_BUS_WAS_RESET:
5491 case IPR_IOASC_BUS_WAS_RESET_BY_OTHER:
5492 /*
5493 * Report the bus reset and ask for a retry. The device
5494 * will give CC/UA the next command.
5495 */
5496 if (!res->resetting_device)
5497 scsi_report_bus_reset(ioa_cfg->host, scsi_cmd->device->channel);
5498 scsi_cmd->result |= (DID_ERROR << 16);
ee0a90fa
BK
5499 if (!ipr_is_naca_model(res))
5500 res->needs_sync_complete = 1;
1da177e4
LT
5501 break;
5502 case IPR_IOASC_HW_DEV_BUS_STATUS:
5503 scsi_cmd->result |= IPR_IOASC_SENSE_STATUS(ioasc);
5504 if (IPR_IOASC_SENSE_STATUS(ioasc) == SAM_STAT_CHECK_CONDITION) {
ee0a90fa
BK
5505 if (!ipr_get_autosense(ipr_cmd)) {
5506 if (!ipr_is_naca_model(res)) {
5507 ipr_erp_cancel_all(ipr_cmd);
5508 return;
5509 }
5510 }
1da177e4 5511 }
ee0a90fa
BK
5512 if (!ipr_is_naca_model(res))
5513 res->needs_sync_complete = 1;
1da177e4
LT
5514 break;
5515 case IPR_IOASC_NR_INIT_CMD_REQUIRED:
5516 break;
5517 default:
5b7304fb
BK
5518 if (IPR_IOASC_SENSE_KEY(ioasc) > RECOVERED_ERROR)
5519 scsi_cmd->result |= (DID_ERROR << 16);
ee0a90fa 5520 if (!ipr_is_vset_device(res) && !ipr_is_naca_model(res))
1da177e4
LT
5521 res->needs_sync_complete = 1;
5522 break;
5523 }
5524
63015bc9 5525 scsi_dma_unmap(ipr_cmd->scsi_cmd);
1da177e4
LT
5526 list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
5527 scsi_cmd->scsi_done(scsi_cmd);
5528}
5529
5530/**
5531 * ipr_scsi_done - mid-layer done function
5532 * @ipr_cmd: ipr command struct
5533 *
5534 * This function is invoked by the interrupt handler for
5535 * ops generated by the SCSI mid-layer
5536 *
5537 * Return value:
5538 * none
5539 **/
5540static void ipr_scsi_done(struct ipr_cmnd *ipr_cmd)
5541{
5542 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
5543 struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
5544 u32 ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);
5545
63015bc9 5546 scsi_set_resid(scsi_cmd, be32_to_cpu(ipr_cmd->ioasa.residual_data_len));
1da177e4
LT
5547
5548 if (likely(IPR_IOASC_SENSE_KEY(ioasc) == 0)) {
63015bc9 5549 scsi_dma_unmap(ipr_cmd->scsi_cmd);
1da177e4
LT
5550 list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
5551 scsi_cmd->scsi_done(scsi_cmd);
5552 } else
5553 ipr_erp_start(ioa_cfg, ipr_cmd);
5554}
5555
1da177e4
LT
5556/**
5557 * ipr_queuecommand - Queue a mid-layer request
5558 * @scsi_cmd: scsi command struct
5559 * @done: done function
5560 *
5561 * This function queues a request generated by the mid-layer.
5562 *
5563 * Return value:
5564 * 0 on success
5565 * SCSI_MLQUEUE_DEVICE_BUSY if device is busy
5566 * SCSI_MLQUEUE_HOST_BUSY if host is busy
5567 **/
5568static int ipr_queuecommand(struct scsi_cmnd *scsi_cmd,
5569 void (*done) (struct scsi_cmnd *))
5570{
5571 struct ipr_ioa_cfg *ioa_cfg;
5572 struct ipr_resource_entry *res;
5573 struct ipr_ioarcb *ioarcb;
5574 struct ipr_cmnd *ipr_cmd;
5575 int rc = 0;
5576
5577 scsi_cmd->scsi_done = done;
5578 ioa_cfg = (struct ipr_ioa_cfg *)scsi_cmd->device->host->hostdata;
5579 res = scsi_cmd->device->hostdata;
5580 scsi_cmd->result = (DID_OK << 16);
5581
5582 /*
5583 * We are currently blocking all devices due to a host reset
5584 * We have told the host to stop giving us new requests, but
5585 * ERP ops don't count. FIXME
5586 */
5587 if (unlikely(!ioa_cfg->allow_cmds && !ioa_cfg->ioa_is_dead))
5588 return SCSI_MLQUEUE_HOST_BUSY;
5589
5590 /*
5591 * FIXME - Create scsi_set_host_offline interface
5592 * and the ioa_is_dead check can be removed
5593 */
5594 if (unlikely(ioa_cfg->ioa_is_dead || !res)) {
5595 memset(scsi_cmd->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);
5596 scsi_cmd->result = (DID_NO_CONNECT << 16);
5597 scsi_cmd->scsi_done(scsi_cmd);
5598 return 0;
5599 }
5600
35a39691
BK
5601 if (ipr_is_gata(res) && res->sata_port)
5602 return ata_sas_queuecmd(scsi_cmd, done, res->sata_port->ap);
5603
1da177e4
LT
5604 ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
5605 ioarcb = &ipr_cmd->ioarcb;
5606 list_add_tail(&ipr_cmd->queue, &ioa_cfg->pending_q);
5607
5608 memcpy(ioarcb->cmd_pkt.cdb, scsi_cmd->cmnd, scsi_cmd->cmd_len);
5609 ipr_cmd->scsi_cmd = scsi_cmd;
3e7ebdfa 5610 ioarcb->res_handle = res->res_handle;
1da177e4 5611 ipr_cmd->done = ipr_scsi_done;
3e7ebdfa 5612 ipr_trc_hook(ipr_cmd, IPR_TRACE_START, IPR_GET_RES_PHYS_LOC(res));
1da177e4
LT
5613
5614 if (ipr_is_gscsi(res) || ipr_is_vset_device(res)) {
5615 if (scsi_cmd->underflow == 0)
5616 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_NO_ULEN_CHK;
5617
5618 if (res->needs_sync_complete) {
5619 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_SYNC_COMPLETE;
5620 res->needs_sync_complete = 0;
5621 }
5622
5623 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_NO_LINK_DESC;
5624 ioarcb->cmd_pkt.flags_lo |= IPR_FLAGS_LO_DELAY_AFTER_RST;
5625 ioarcb->cmd_pkt.flags_lo |= IPR_FLAGS_LO_ALIGNED_BFR;
5626 ioarcb->cmd_pkt.flags_lo |= ipr_get_task_attributes(scsi_cmd);
5627 }
5628
5629 if (scsi_cmd->cmnd[0] >= 0xC0 &&
5630 (!ipr_is_gscsi(res) || scsi_cmd->cmnd[0] == IPR_QUERY_RSRC_STATE))
5631 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
5632
a32c055f
WB
5633 if (likely(rc == 0)) {
5634 if (ioa_cfg->sis64)
5635 rc = ipr_build_ioadl64(ioa_cfg, ipr_cmd);
5636 else
5637 rc = ipr_build_ioadl(ioa_cfg, ipr_cmd);
5638 }
1da177e4
LT
5639
5640 if (likely(rc == 0)) {
5641 mb();
a32c055f 5642 ipr_send_command(ipr_cmd);
1da177e4
LT
5643 } else {
5644 list_move_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
5645 return SCSI_MLQUEUE_HOST_BUSY;
5646 }
5647
5648 return 0;
5649}
5650
35a39691
BK
5651/**
5652 * ipr_ioctl - IOCTL handler
5653 * @sdev: scsi device struct
5654 * @cmd: IOCTL cmd
5655 * @arg: IOCTL arg
5656 *
5657 * Return value:
5658 * 0 on success / other on failure
5659 **/
bd705f2d 5660static int ipr_ioctl(struct scsi_device *sdev, int cmd, void __user *arg)
35a39691
BK
5661{
5662 struct ipr_resource_entry *res;
5663
5664 res = (struct ipr_resource_entry *)sdev->hostdata;
0ce3a7e5
BK
5665 if (res && ipr_is_gata(res)) {
5666 if (cmd == HDIO_GET_IDENTITY)
5667 return -ENOTTY;
94be9a58 5668 return ata_sas_scsi_ioctl(res->sata_port->ap, sdev, cmd, arg);
0ce3a7e5 5669 }
35a39691
BK
5670
5671 return -EINVAL;
5672}
5673
1da177e4
LT
5674/**
5675 * ipr_ioa_info - Get information about the card/driver
5676 * @host: scsi host struct
5677 *
5678 * Return value:
5679 * pointer to buffer with description string
5680 **/
5681static const char * ipr_ioa_info(struct Scsi_Host *host)
5682{
5683 static char buffer[512];
5684 struct ipr_ioa_cfg *ioa_cfg;
5685 unsigned long lock_flags = 0;
5686
5687 ioa_cfg = (struct ipr_ioa_cfg *) host->hostdata;
5688
5689 spin_lock_irqsave(host->host_lock, lock_flags);
5690 sprintf(buffer, "IBM %X Storage Adapter", ioa_cfg->type);
5691 spin_unlock_irqrestore(host->host_lock, lock_flags);
5692
5693 return buffer;
5694}
5695
5696static struct scsi_host_template driver_template = {
5697 .module = THIS_MODULE,
5698 .name = "IPR",
5699 .info = ipr_ioa_info,
35a39691 5700 .ioctl = ipr_ioctl,
1da177e4
LT
5701 .queuecommand = ipr_queuecommand,
5702 .eh_abort_handler = ipr_eh_abort,
5703 .eh_device_reset_handler = ipr_eh_dev_reset,
5704 .eh_host_reset_handler = ipr_eh_host_reset,
5705 .slave_alloc = ipr_slave_alloc,
5706 .slave_configure = ipr_slave_configure,
5707 .slave_destroy = ipr_slave_destroy,
35a39691
BK
5708 .target_alloc = ipr_target_alloc,
5709 .target_destroy = ipr_target_destroy,
1da177e4
LT
5710 .change_queue_depth = ipr_change_queue_depth,
5711 .change_queue_type = ipr_change_queue_type,
5712 .bios_param = ipr_biosparam,
5713 .can_queue = IPR_MAX_COMMANDS,
5714 .this_id = -1,
5715 .sg_tablesize = IPR_MAX_SGLIST,
5716 .max_sectors = IPR_IOA_MAX_SECTORS,
5717 .cmd_per_lun = IPR_MAX_CMD_PER_LUN,
5718 .use_clustering = ENABLE_CLUSTERING,
5719 .shost_attrs = ipr_ioa_attrs,
5720 .sdev_attrs = ipr_dev_attrs,
5721 .proc_name = IPR_NAME
5722};
5723
35a39691
BK
5724/**
5725 * ipr_ata_phy_reset - libata phy_reset handler
5726 * @ap: ata port to reset
5727 *
5728 **/
5729static void ipr_ata_phy_reset(struct ata_port *ap)
5730{
5731 unsigned long flags;
5732 struct ipr_sata_port *sata_port = ap->private_data;
5733 struct ipr_resource_entry *res = sata_port->res;
5734 struct ipr_ioa_cfg *ioa_cfg = sata_port->ioa_cfg;
5735 int rc;
5736
5737 ENTER;
5738 spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
5739 while(ioa_cfg->in_reset_reload) {
5740 spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
5741 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
5742 spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
5743 }
5744
5745 if (!ioa_cfg->allow_cmds)
5746 goto out_unlock;
5747
5748 rc = ipr_device_reset(ioa_cfg, res);
5749
5750 if (rc) {
ac8869d5 5751 ata_port_disable(ap);
35a39691
BK
5752 goto out_unlock;
5753 }
5754
3e7ebdfa
WB
5755 ap->link.device[0].class = res->ata_class;
5756 if (ap->link.device[0].class == ATA_DEV_UNKNOWN)
ac8869d5 5757 ata_port_disable(ap);
35a39691
BK
5758
5759out_unlock:
5760 spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
5761 LEAVE;
5762}
5763
5764/**
5765 * ipr_ata_post_internal - Cleanup after an internal command
5766 * @qc: ATA queued command
5767 *
5768 * Return value:
5769 * none
5770 **/
5771static void ipr_ata_post_internal(struct ata_queued_cmd *qc)
5772{
5773 struct ipr_sata_port *sata_port = qc->ap->private_data;
5774 struct ipr_ioa_cfg *ioa_cfg = sata_port->ioa_cfg;
5775 struct ipr_cmnd *ipr_cmd;
5776 unsigned long flags;
5777
5778 spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
73d98ff0
BK
5779 while(ioa_cfg->in_reset_reload) {
5780 spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
5781 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
5782 spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
5783 }
5784
35a39691
BK
5785 list_for_each_entry(ipr_cmd, &ioa_cfg->pending_q, queue) {
5786 if (ipr_cmd->qc == qc) {
5787 ipr_device_reset(ioa_cfg, sata_port->res);
5788 break;
5789 }
5790 }
5791 spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
5792}
5793
35a39691
BK
5794/**
5795 * ipr_copy_sata_tf - Copy a SATA taskfile to an IOA data structure
5796 * @regs: destination
5797 * @tf: source ATA taskfile
5798 *
5799 * Return value:
5800 * none
5801 **/
5802static void ipr_copy_sata_tf(struct ipr_ioarcb_ata_regs *regs,
5803 struct ata_taskfile *tf)
5804{
5805 regs->feature = tf->feature;
5806 regs->nsect = tf->nsect;
5807 regs->lbal = tf->lbal;
5808 regs->lbam = tf->lbam;
5809 regs->lbah = tf->lbah;
5810 regs->device = tf->device;
5811 regs->command = tf->command;
5812 regs->hob_feature = tf->hob_feature;
5813 regs->hob_nsect = tf->hob_nsect;
5814 regs->hob_lbal = tf->hob_lbal;
5815 regs->hob_lbam = tf->hob_lbam;
5816 regs->hob_lbah = tf->hob_lbah;
5817 regs->ctl = tf->ctl;
5818}
5819
5820/**
5821 * ipr_sata_done - done function for SATA commands
5822 * @ipr_cmd: ipr command struct
5823 *
5824 * This function is invoked by the interrupt handler for
5825 * ops generated by the SCSI mid-layer to SATA devices
5826 *
5827 * Return value:
5828 * none
5829 **/
5830static void ipr_sata_done(struct ipr_cmnd *ipr_cmd)
5831{
5832 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
5833 struct ata_queued_cmd *qc = ipr_cmd->qc;
5834 struct ipr_sata_port *sata_port = qc->ap->private_data;
5835 struct ipr_resource_entry *res = sata_port->res;
5836 u32 ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);
5837
5838 memcpy(&sata_port->ioasa, &ipr_cmd->ioasa.u.gata,
5839 sizeof(struct ipr_ioasa_gata));
5840 ipr_dump_ioasa(ioa_cfg, ipr_cmd, res);
5841
5842 if (be32_to_cpu(ipr_cmd->ioasa.ioasc_specific) & IPR_ATA_DEVICE_WAS_RESET)
3e7ebdfa 5843 scsi_report_device_reset(ioa_cfg->host, res->bus, res->target);
35a39691
BK
5844
5845 if (IPR_IOASC_SENSE_KEY(ioasc) > RECOVERED_ERROR)
5846 qc->err_mask |= __ac_err_mask(ipr_cmd->ioasa.u.gata.status);
5847 else
5848 qc->err_mask |= ac_err_mask(ipr_cmd->ioasa.u.gata.status);
5849 list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
5850 ata_qc_complete(qc);
5851}
5852
a32c055f
WB
5853/**
5854 * ipr_build_ata_ioadl64 - Build an ATA scatter/gather list
5855 * @ipr_cmd: ipr command struct
5856 * @qc: ATA queued command
5857 *
5858 **/
5859static void ipr_build_ata_ioadl64(struct ipr_cmnd *ipr_cmd,
5860 struct ata_queued_cmd *qc)
5861{
5862 u32 ioadl_flags = 0;
5863 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
5864 struct ipr_ioadl64_desc *ioadl64 = ipr_cmd->i.ioadl64;
5865 struct ipr_ioadl64_desc *last_ioadl64 = NULL;
5866 int len = qc->nbytes;
5867 struct scatterlist *sg;
5868 unsigned int si;
5869 dma_addr_t dma_addr = ipr_cmd->dma_addr;
5870
5871 if (len == 0)
5872 return;
5873
5874 if (qc->dma_dir == DMA_TO_DEVICE) {
5875 ioadl_flags = IPR_IOADL_FLAGS_WRITE;
5876 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
5877 } else if (qc->dma_dir == DMA_FROM_DEVICE)
5878 ioadl_flags = IPR_IOADL_FLAGS_READ;
5879
5880 ioarcb->data_transfer_length = cpu_to_be32(len);
5881 ioarcb->ioadl_len =
5882 cpu_to_be32(sizeof(struct ipr_ioadl64_desc) * ipr_cmd->dma_use_sg);
5883 ioarcb->u.sis64_addr_data.data_ioadl_addr =
5884 cpu_to_be64(dma_addr + offsetof(struct ipr_cmnd, i.ata_ioadl));
5885
5886 for_each_sg(qc->sg, sg, qc->n_elem, si) {
5887 ioadl64->flags = cpu_to_be32(ioadl_flags);
5888 ioadl64->data_len = cpu_to_be32(sg_dma_len(sg));
5889 ioadl64->address = cpu_to_be64(sg_dma_address(sg));
5890
5891 last_ioadl64 = ioadl64;
5892 ioadl64++;
5893 }
5894
5895 if (likely(last_ioadl64))
5896 last_ioadl64->flags |= cpu_to_be32(IPR_IOADL_FLAGS_LAST);
5897}
5898
35a39691
BK
5899/**
5900 * ipr_build_ata_ioadl - Build an ATA scatter/gather list
5901 * @ipr_cmd: ipr command struct
5902 * @qc: ATA queued command
5903 *
5904 **/
5905static void ipr_build_ata_ioadl(struct ipr_cmnd *ipr_cmd,
5906 struct ata_queued_cmd *qc)
5907{
5908 u32 ioadl_flags = 0;
5909 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
a32c055f 5910 struct ipr_ioadl_desc *ioadl = ipr_cmd->i.ioadl;
3be6cbd7 5911 struct ipr_ioadl_desc *last_ioadl = NULL;
dde20207 5912 int len = qc->nbytes;
35a39691 5913 struct scatterlist *sg;
ff2aeb1e 5914 unsigned int si;
35a39691
BK
5915
5916 if (len == 0)
5917 return;
5918
5919 if (qc->dma_dir == DMA_TO_DEVICE) {
5920 ioadl_flags = IPR_IOADL_FLAGS_WRITE;
5921 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
a32c055f
WB
5922 ioarcb->data_transfer_length = cpu_to_be32(len);
5923 ioarcb->ioadl_len =
35a39691
BK
5924 cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);
5925 } else if (qc->dma_dir == DMA_FROM_DEVICE) {
5926 ioadl_flags = IPR_IOADL_FLAGS_READ;
5927 ioarcb->read_data_transfer_length = cpu_to_be32(len);
5928 ioarcb->read_ioadl_len =
5929 cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);
5930 }
5931
ff2aeb1e 5932 for_each_sg(qc->sg, sg, qc->n_elem, si) {
35a39691
BK
5933 ioadl->flags_and_data_len = cpu_to_be32(ioadl_flags | sg_dma_len(sg));
5934 ioadl->address = cpu_to_be32(sg_dma_address(sg));
3be6cbd7
JG
5935
5936 last_ioadl = ioadl;
5937 ioadl++;
35a39691 5938 }
3be6cbd7
JG
5939
5940 if (likely(last_ioadl))
5941 last_ioadl->flags_and_data_len |= cpu_to_be32(IPR_IOADL_FLAGS_LAST);
35a39691
BK
5942}
5943
5944/**
5945 * ipr_qc_issue - Issue a SATA qc to a device
5946 * @qc: queued command
5947 *
5948 * Return value:
5949 * 0 if success
5950 **/
5951static unsigned int ipr_qc_issue(struct ata_queued_cmd *qc)
5952{
5953 struct ata_port *ap = qc->ap;
5954 struct ipr_sata_port *sata_port = ap->private_data;
5955 struct ipr_resource_entry *res = sata_port->res;
5956 struct ipr_ioa_cfg *ioa_cfg = sata_port->ioa_cfg;
5957 struct ipr_cmnd *ipr_cmd;
5958 struct ipr_ioarcb *ioarcb;
5959 struct ipr_ioarcb_ata_regs *regs;
5960
5961 if (unlikely(!ioa_cfg->allow_cmds || ioa_cfg->ioa_is_dead))
0feeed82 5962 return AC_ERR_SYSTEM;
35a39691
BK
5963
5964 ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
5965 ioarcb = &ipr_cmd->ioarcb;
35a39691 5966
a32c055f
WB
5967 if (ioa_cfg->sis64) {
5968 regs = &ipr_cmd->i.ata_ioadl.regs;
5969 ioarcb->add_cmd_parms_offset = cpu_to_be16(sizeof(*ioarcb));
5970 } else
5971 regs = &ioarcb->u.add_data.u.regs;
5972
5973 memset(regs, 0, sizeof(*regs));
5974 ioarcb->add_cmd_parms_len = cpu_to_be16(sizeof(*regs));
35a39691
BK
5975
5976 list_add_tail(&ipr_cmd->queue, &ioa_cfg->pending_q);
5977 ipr_cmd->qc = qc;
5978 ipr_cmd->done = ipr_sata_done;
3e7ebdfa 5979 ipr_cmd->ioarcb.res_handle = res->res_handle;
35a39691
BK
5980 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_ATA_PASSTHRU;
5981 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_NO_LINK_DESC;
5982 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_NO_ULEN_CHK;
dde20207 5983 ipr_cmd->dma_use_sg = qc->n_elem;
35a39691 5984
a32c055f
WB
5985 if (ioa_cfg->sis64)
5986 ipr_build_ata_ioadl64(ipr_cmd, qc);
5987 else
5988 ipr_build_ata_ioadl(ipr_cmd, qc);
5989
35a39691
BK
5990 regs->flags |= IPR_ATA_FLAG_STATUS_ON_GOOD_COMPLETION;
5991 ipr_copy_sata_tf(regs, &qc->tf);
5992 memcpy(ioarcb->cmd_pkt.cdb, qc->cdb, IPR_MAX_CDB_LEN);
3e7ebdfa 5993 ipr_trc_hook(ipr_cmd, IPR_TRACE_START, IPR_GET_RES_PHYS_LOC(res));
35a39691
BK
5994
5995 switch (qc->tf.protocol) {
5996 case ATA_PROT_NODATA:
5997 case ATA_PROT_PIO:
5998 break;
5999
6000 case ATA_PROT_DMA:
6001 regs->flags |= IPR_ATA_FLAG_XFER_TYPE_DMA;
6002 break;
6003
0dc36888
TH
6004 case ATAPI_PROT_PIO:
6005 case ATAPI_PROT_NODATA:
35a39691
BK
6006 regs->flags |= IPR_ATA_FLAG_PACKET_CMD;
6007 break;
6008
0dc36888 6009 case ATAPI_PROT_DMA:
35a39691
BK
6010 regs->flags |= IPR_ATA_FLAG_PACKET_CMD;
6011 regs->flags |= IPR_ATA_FLAG_XFER_TYPE_DMA;
6012 break;
6013
6014 default:
6015 WARN_ON(1);
0feeed82 6016 return AC_ERR_INVALID;
35a39691
BK
6017 }
6018
6019 mb();
a32c055f
WB
6020
6021 ipr_send_command(ipr_cmd);
6022
35a39691
BK
6023 return 0;
6024}
6025
4c9bf4e7
TH
6026/**
6027 * ipr_qc_fill_rtf - Read result TF
6028 * @qc: ATA queued command
6029 *
6030 * Return value:
6031 * true
6032 **/
6033static bool ipr_qc_fill_rtf(struct ata_queued_cmd *qc)
6034{
6035 struct ipr_sata_port *sata_port = qc->ap->private_data;
6036 struct ipr_ioasa_gata *g = &sata_port->ioasa;
6037 struct ata_taskfile *tf = &qc->result_tf;
6038
6039 tf->feature = g->error;
6040 tf->nsect = g->nsect;
6041 tf->lbal = g->lbal;
6042 tf->lbam = g->lbam;
6043 tf->lbah = g->lbah;
6044 tf->device = g->device;
6045 tf->command = g->status;
6046 tf->hob_nsect = g->hob_nsect;
6047 tf->hob_lbal = g->hob_lbal;
6048 tf->hob_lbam = g->hob_lbam;
6049 tf->hob_lbah = g->hob_lbah;
6050 tf->ctl = g->alt_status;
6051
6052 return true;
6053}
6054
35a39691 6055static struct ata_port_operations ipr_sata_ops = {
35a39691 6056 .phy_reset = ipr_ata_phy_reset,
a1efdaba 6057 .hardreset = ipr_sata_reset,
35a39691 6058 .post_internal_cmd = ipr_ata_post_internal,
35a39691
BK
6059 .qc_prep = ata_noop_qc_prep,
6060 .qc_issue = ipr_qc_issue,
4c9bf4e7 6061 .qc_fill_rtf = ipr_qc_fill_rtf,
35a39691
BK
6062 .port_start = ata_sas_port_start,
6063 .port_stop = ata_sas_port_stop
6064};
6065
6066static struct ata_port_info sata_port_info = {
6067 .flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY | ATA_FLAG_SATA_RESET |
6068 ATA_FLAG_MMIO | ATA_FLAG_PIO_DMA,
6069 .pio_mask = 0x10, /* pio4 */
6070 .mwdma_mask = 0x07,
6071 .udma_mask = 0x7f, /* udma0-6 */
6072 .port_ops = &ipr_sata_ops
6073};
6074
1da177e4
LT
6075#ifdef CONFIG_PPC_PSERIES
6076static const u16 ipr_blocked_processors[] = {
6077 PV_NORTHSTAR,
6078 PV_PULSAR,
6079 PV_POWER4,
6080 PV_ICESTAR,
6081 PV_SSTAR,
6082 PV_POWER4p,
6083 PV_630,
6084 PV_630p
6085};
6086
6087/**
6088 * ipr_invalid_adapter - Determine if this adapter is supported on this hardware
6089 * @ioa_cfg: ioa cfg struct
6090 *
6091 * Adapters that use Gemstone revision < 3.1 do not work reliably on
6092 * certain pSeries hardware. This function determines if the given
6093 * adapter is in one of these configurations or not.
6094 *
6095 * Return value:
6096 * 1 if adapter is not supported / 0 if adapter is supported
6097 **/
6098static int ipr_invalid_adapter(struct ipr_ioa_cfg *ioa_cfg)
6099{
1da177e4
LT
6100 int i;
6101
44c10138
AK
6102 if ((ioa_cfg->type == 0x5702) && (ioa_cfg->pdev->revision < 4)) {
6103 for (i = 0; i < ARRAY_SIZE(ipr_blocked_processors); i++){
6104 if (__is_processor(ipr_blocked_processors[i]))
6105 return 1;
1da177e4
LT
6106 }
6107 }
6108 return 0;
6109}
6110#else
6111#define ipr_invalid_adapter(ioa_cfg) 0
6112#endif
6113
6114/**
6115 * ipr_ioa_bringdown_done - IOA bring down completion.
6116 * @ipr_cmd: ipr command struct
6117 *
6118 * This function processes the completion of an adapter bring down.
6119 * It wakes any reset sleepers.
6120 *
6121 * Return value:
6122 * IPR_RC_JOB_RETURN
6123 **/
6124static int ipr_ioa_bringdown_done(struct ipr_cmnd *ipr_cmd)
6125{
6126 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6127
6128 ENTER;
6129 ioa_cfg->in_reset_reload = 0;
6130 ioa_cfg->reset_retries = 0;
6131 list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
6132 wake_up_all(&ioa_cfg->reset_wait_q);
6133
6134 spin_unlock_irq(ioa_cfg->host->host_lock);
6135 scsi_unblock_requests(ioa_cfg->host);
6136 spin_lock_irq(ioa_cfg->host->host_lock);
6137 LEAVE;
6138
6139 return IPR_RC_JOB_RETURN;
6140}
6141
6142/**
6143 * ipr_ioa_reset_done - IOA reset completion.
6144 * @ipr_cmd: ipr command struct
6145 *
6146 * This function processes the completion of an adapter reset.
6147 * It schedules any necessary mid-layer add/removes and
6148 * wakes any reset sleepers.
6149 *
6150 * Return value:
6151 * IPR_RC_JOB_RETURN
6152 **/
6153static int ipr_ioa_reset_done(struct ipr_cmnd *ipr_cmd)
6154{
6155 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6156 struct ipr_resource_entry *res;
6157 struct ipr_hostrcb *hostrcb, *temp;
6158 int i = 0;
6159
6160 ENTER;
6161 ioa_cfg->in_reset_reload = 0;
6162 ioa_cfg->allow_cmds = 1;
6163 ioa_cfg->reset_cmd = NULL;
3d1d0da6 6164 ioa_cfg->doorbell |= IPR_RUNTIME_RESET;
1da177e4
LT
6165
6166 list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
6167 if (ioa_cfg->allow_ml_add_del && (res->add_to_ml || res->del_from_ml)) {
6168 ipr_trace;
6169 break;
6170 }
6171 }
6172 schedule_work(&ioa_cfg->work_q);
6173
6174 list_for_each_entry_safe(hostrcb, temp, &ioa_cfg->hostrcb_free_q, queue) {
6175 list_del(&hostrcb->queue);
6176 if (i++ < IPR_NUM_LOG_HCAMS)
6177 ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_LOG_DATA, hostrcb);
6178 else
6179 ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE, hostrcb);
6180 }
6181
6bb04170 6182 scsi_report_bus_reset(ioa_cfg->host, IPR_VSET_BUS);
1da177e4
LT
6183 dev_info(&ioa_cfg->pdev->dev, "IOA initialized.\n");
6184
6185 ioa_cfg->reset_retries = 0;
6186 list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
6187 wake_up_all(&ioa_cfg->reset_wait_q);
6188
30237853 6189 spin_unlock(ioa_cfg->host->host_lock);
1da177e4 6190 scsi_unblock_requests(ioa_cfg->host);
30237853 6191 spin_lock(ioa_cfg->host->host_lock);
1da177e4
LT
6192
6193 if (!ioa_cfg->allow_cmds)
6194 scsi_block_requests(ioa_cfg->host);
6195
6196 LEAVE;
6197 return IPR_RC_JOB_RETURN;
6198}
6199
6200/**
6201 * ipr_set_sup_dev_dflt - Initialize a Set Supported Device buffer
6202 * @supported_dev: supported device struct
6203 * @vpids: vendor product id struct
6204 *
6205 * Return value:
6206 * none
6207 **/
6208static void ipr_set_sup_dev_dflt(struct ipr_supported_device *supported_dev,
6209 struct ipr_std_inq_vpids *vpids)
6210{
6211 memset(supported_dev, 0, sizeof(struct ipr_supported_device));
6212 memcpy(&supported_dev->vpids, vpids, sizeof(struct ipr_std_inq_vpids));
6213 supported_dev->num_records = 1;
6214 supported_dev->data_length =
6215 cpu_to_be16(sizeof(struct ipr_supported_device));
6216 supported_dev->reserved = 0;
6217}
6218
6219/**
6220 * ipr_set_supported_devs - Send Set Supported Devices for a device
6221 * @ipr_cmd: ipr command struct
6222 *
a32c055f 6223 * This function sends a Set Supported Devices to the adapter
1da177e4
LT
6224 *
6225 * Return value:
6226 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
6227 **/
6228static int ipr_set_supported_devs(struct ipr_cmnd *ipr_cmd)
6229{
6230 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6231 struct ipr_supported_device *supp_dev = &ioa_cfg->vpd_cbs->supp_dev;
1da177e4
LT
6232 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
6233 struct ipr_resource_entry *res = ipr_cmd->u.res;
6234
6235 ipr_cmd->job_step = ipr_ioa_reset_done;
6236
6237 list_for_each_entry_continue(res, &ioa_cfg->used_res_q, queue) {
e4fbf44e 6238 if (!ipr_is_scsi_disk(res))
1da177e4
LT
6239 continue;
6240
6241 ipr_cmd->u.res = res;
3e7ebdfa 6242 ipr_set_sup_dev_dflt(supp_dev, &res->std_inq_data.vpids);
1da177e4
LT
6243
6244 ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
6245 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
6246 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
6247
6248 ioarcb->cmd_pkt.cdb[0] = IPR_SET_SUPPORTED_DEVICES;
3e7ebdfa 6249 ioarcb->cmd_pkt.cdb[1] = IPR_SET_ALL_SUPPORTED_DEVICES;
1da177e4
LT
6250 ioarcb->cmd_pkt.cdb[7] = (sizeof(struct ipr_supported_device) >> 8) & 0xff;
6251 ioarcb->cmd_pkt.cdb[8] = sizeof(struct ipr_supported_device) & 0xff;
6252
a32c055f
WB
6253 ipr_init_ioadl(ipr_cmd,
6254 ioa_cfg->vpd_cbs_dma +
6255 offsetof(struct ipr_misc_cbs, supp_dev),
6256 sizeof(struct ipr_supported_device),
6257 IPR_IOADL_FLAGS_WRITE_LAST);
1da177e4
LT
6258
6259 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout,
6260 IPR_SET_SUP_DEVICE_TIMEOUT);
6261
3e7ebdfa
WB
6262 if (!ioa_cfg->sis64)
6263 ipr_cmd->job_step = ipr_set_supported_devs;
1da177e4
LT
6264 return IPR_RC_JOB_RETURN;
6265 }
6266
6267 return IPR_RC_JOB_CONTINUE;
6268}
6269
6270/**
6271 * ipr_get_mode_page - Locate specified mode page
6272 * @mode_pages: mode page buffer
6273 * @page_code: page code to find
6274 * @len: minimum required length for mode page
6275 *
6276 * Return value:
6277 * pointer to mode page / NULL on failure
6278 **/
6279static void *ipr_get_mode_page(struct ipr_mode_pages *mode_pages,
6280 u32 page_code, u32 len)
6281{
6282 struct ipr_mode_page_hdr *mode_hdr;
6283 u32 page_length;
6284 u32 length;
6285
6286 if (!mode_pages || (mode_pages->hdr.length == 0))
6287 return NULL;
6288
6289 length = (mode_pages->hdr.length + 1) - 4 - mode_pages->hdr.block_desc_len;
6290 mode_hdr = (struct ipr_mode_page_hdr *)
6291 (mode_pages->data + mode_pages->hdr.block_desc_len);
6292
6293 while (length) {
6294 if (IPR_GET_MODE_PAGE_CODE(mode_hdr) == page_code) {
6295 if (mode_hdr->page_length >= (len - sizeof(struct ipr_mode_page_hdr)))
6296 return mode_hdr;
6297 break;
6298 } else {
6299 page_length = (sizeof(struct ipr_mode_page_hdr) +
6300 mode_hdr->page_length);
6301 length -= page_length;
6302 mode_hdr = (struct ipr_mode_page_hdr *)
6303 ((unsigned long)mode_hdr + page_length);
6304 }
6305 }
6306 return NULL;
6307}
6308
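Editor's note: ipr_get_mode_page() leans on the MODE SENSE parameter-list layout: hdr.length counts every byte after itself, so the page data remaining is (length + 1) minus the fixed header minus the block descriptors, and each page is a 2-byte header (page code, page_length) followed by page_length bytes. A standalone sketch that walks the same layout over a raw MODE SENSE(6) buffer, assuming the spec's 4-byte header rather than the driver's ipr_mode_page_hdr types:

#include <stdint.h>
#include <stddef.h>
#include <stdio.h>

/* Walk a MODE SENSE(6) response looking for a page code, mirroring
 * ipr_get_mode_page(): buf[0] = mode data length (excludes itself),
 * buf[3] = block descriptor length, pages start after both. */
static const uint8_t *find_mode_page(const uint8_t *buf, uint8_t code)
{
	size_t remaining = (size_t)buf[0] + 1 - 4 - buf[3];
	const uint8_t *p = buf + 4 + buf[3];

	while (remaining >= 2) {
		size_t page_len = 2 + p[1];	/* 2-byte header + payload */

		if ((p[0] & 0x3F) == code)	/* mask off PS/SPF bits */
			return p;
		if (page_len > remaining)
			break;
		remaining -= page_len;
		p += page_len;
	}
	return NULL;
}

int main(void)
{
	/* 4-byte header, no block descriptors, one page 0x28 with 2 data bytes */
	uint8_t buf[] = { 7, 0, 0, 0, 0x28, 2, 0xAA, 0xBB };

	printf("page 0x28 %sfound\n", find_mode_page(buf, 0x28) ? "" : "not ");
	return 0;
}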
6309/**
6310 * ipr_check_term_power - Check for term power errors
6311 * @ioa_cfg: ioa config struct
6312 * @mode_pages: IOAFP mode pages buffer
6313 *
6314 * Check the IOAFP's mode page 28 for term power errors
6315 *
6316 * Return value:
6317 * nothing
6318 **/
6319static void ipr_check_term_power(struct ipr_ioa_cfg *ioa_cfg,
6320 struct ipr_mode_pages *mode_pages)
6321{
6322 int i;
6323 int entry_length;
6324 struct ipr_dev_bus_entry *bus;
6325 struct ipr_mode_page28 *mode_page;
6326
6327 mode_page = ipr_get_mode_page(mode_pages, 0x28,
6328 sizeof(struct ipr_mode_page28));
6329
6330 entry_length = mode_page->entry_length;
6331
6332 bus = mode_page->bus;
6333
6334 for (i = 0; i < mode_page->num_entries; i++) {
6335 if (bus->flags & IPR_SCSI_ATTR_NO_TERM_PWR) {
6336 dev_err(&ioa_cfg->pdev->dev,
6337 "Term power is absent on scsi bus %d\n",
6338 bus->res_addr.bus);
6339 }
6340
6341 bus = (struct ipr_dev_bus_entry *)((char *)bus + entry_length);
6342 }
6343}
6344
6345/**
6346 * ipr_scsi_bus_speed_limit - Limit the SCSI speed based on SES table
6347 * @ioa_cfg: ioa config struct
6348 *
6349 * Looks through the config table checking for SES devices. If
6350 * the SES device is in the SES table indicating a maximum SCSI
6351 * bus speed, the speed is limited for the bus.
6352 *
6353 * Return value:
6354 * none
6355 **/
6356static void ipr_scsi_bus_speed_limit(struct ipr_ioa_cfg *ioa_cfg)
6357{
6358 u32 max_xfer_rate;
6359 int i;
6360
6361 for (i = 0; i < IPR_MAX_NUM_BUSES; i++) {
6362 max_xfer_rate = ipr_get_max_scsi_speed(ioa_cfg, i,
6363 ioa_cfg->bus_attr[i].bus_width);
6364
6365 if (max_xfer_rate < ioa_cfg->bus_attr[i].max_xfer_rate)
6366 ioa_cfg->bus_attr[i].max_xfer_rate = max_xfer_rate;
6367 }
6368}
6369
6370/**
6371 * ipr_modify_ioafp_mode_page_28 - Modify IOAFP Mode Page 28
6372 * @ioa_cfg: ioa config struct
6373 * @mode_pages: mode page 28 buffer
6374 *
6375 * Updates mode page 28 based on driver configuration
6376 *
6377 * Return value:
6378 * none
6379 **/
6380static void ipr_modify_ioafp_mode_page_28(struct ipr_ioa_cfg *ioa_cfg,
6381 struct ipr_mode_pages *mode_pages)
6382{
6383 int i, entry_length;
6384 struct ipr_dev_bus_entry *bus;
6385 struct ipr_bus_attributes *bus_attr;
6386 struct ipr_mode_page28 *mode_page;
6387
6388 mode_page = ipr_get_mode_page(mode_pages, 0x28,
6389 sizeof(struct ipr_mode_page28));
6390
6391 entry_length = mode_page->entry_length;
6392
6393 /* Loop for each device bus entry */
6394 for (i = 0, bus = mode_page->bus;
6395 i < mode_page->num_entries;
6396 i++, bus = (struct ipr_dev_bus_entry *)((u8 *)bus + entry_length)) {
6397 if (bus->res_addr.bus > IPR_MAX_NUM_BUSES) {
6398 dev_err(&ioa_cfg->pdev->dev,
6399 "Invalid resource address reported: 0x%08X\n",
6400 IPR_GET_PHYS_LOC(bus->res_addr));
6401 continue;
6402 }
6403
6404 bus_attr = &ioa_cfg->bus_attr[i];
6405 bus->extended_reset_delay = IPR_EXTENDED_RESET_DELAY;
6406 bus->bus_width = bus_attr->bus_width;
6407 bus->max_xfer_rate = cpu_to_be32(bus_attr->max_xfer_rate);
6408 bus->flags &= ~IPR_SCSI_ATTR_QAS_MASK;
6409 if (bus_attr->qas_enabled)
6410 bus->flags |= IPR_SCSI_ATTR_ENABLE_QAS;
6411 else
6412 bus->flags |= IPR_SCSI_ATTR_DISABLE_QAS;
6413 }
6414}
6415
6416/**
6417 * ipr_build_mode_select - Build a mode select command
6418 * @ipr_cmd: ipr command struct
6419 * @res_handle: resource handle to send command to
6420 * @parm: Byte 2 of Mode Select command
6421 * @dma_addr: DMA buffer address
6422 * @xfer_len: data transfer length
6423 *
6424 * Return value:
6425 * none
6426 **/
6427static void ipr_build_mode_select(struct ipr_cmnd *ipr_cmd,
a32c055f
WB
6428 __be32 res_handle, u8 parm,
6429 dma_addr_t dma_addr, u8 xfer_len)
1da177e4 6430{
1da177e4
LT
6431 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
6432
6433 ioarcb->res_handle = res_handle;
6434 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_SCSICDB;
6435 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
6436 ioarcb->cmd_pkt.cdb[0] = MODE_SELECT;
6437 ioarcb->cmd_pkt.cdb[1] = parm;
6438 ioarcb->cmd_pkt.cdb[4] = xfer_len;
6439
a32c055f 6440 ipr_init_ioadl(ipr_cmd, dma_addr, xfer_len, IPR_IOADL_FLAGS_WRITE_LAST);
1da177e4
LT
6441}
6442
6443/**
6444 * ipr_ioafp_mode_select_page28 - Issue Mode Select Page 28 to IOA
6445 * @ipr_cmd: ipr command struct
6446 *
6447 * This function sets up the SCSI bus attributes and sends
6448 * a Mode Select for Page 28 to activate them.
6449 *
6450 * Return value:
6451 * IPR_RC_JOB_RETURN
6452 **/
6453static int ipr_ioafp_mode_select_page28(struct ipr_cmnd *ipr_cmd)
6454{
6455 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6456 struct ipr_mode_pages *mode_pages = &ioa_cfg->vpd_cbs->mode_pages;
6457 int length;
6458
6459 ENTER;
4733804c
BK
6460 ipr_scsi_bus_speed_limit(ioa_cfg);
6461 ipr_check_term_power(ioa_cfg, mode_pages);
6462 ipr_modify_ioafp_mode_page_28(ioa_cfg, mode_pages);
6463 length = mode_pages->hdr.length + 1;
6464 mode_pages->hdr.length = 0;
1da177e4
LT
6465
6466 ipr_build_mode_select(ipr_cmd, cpu_to_be32(IPR_IOA_RES_HANDLE), 0x11,
6467 ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, mode_pages),
6468 length);
6469
f72919ec
WB
6470 ipr_cmd->job_step = ipr_set_supported_devs;
6471 ipr_cmd->u.res = list_entry(ioa_cfg->used_res_q.next,
6472 struct ipr_resource_entry, queue);
1da177e4
LT
6473 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
6474
6475 LEAVE;
6476 return IPR_RC_JOB_RETURN;
6477}
6478
6479/**
6480 * ipr_build_mode_sense - Builds a mode sense command
6481 * @ipr_cmd: ipr command struct
6482 * @res: resource entry struct
6483 * @parm: Byte 2 of mode sense command
6484 * @dma_addr: DMA address of mode sense buffer
6485 * @xfer_len: Size of DMA buffer
6486 *
6487 * Return value:
6488 * none
6489 **/
6490static void ipr_build_mode_sense(struct ipr_cmnd *ipr_cmd,
6491 __be32 res_handle,
a32c055f 6492 u8 parm, dma_addr_t dma_addr, u8 xfer_len)
1da177e4 6493{
1da177e4
LT
6494 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
6495
6496 ioarcb->res_handle = res_handle;
6497 ioarcb->cmd_pkt.cdb[0] = MODE_SENSE;
6498 ioarcb->cmd_pkt.cdb[2] = parm;
6499 ioarcb->cmd_pkt.cdb[4] = xfer_len;
6500 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_SCSICDB;
6501
a32c055f 6502 ipr_init_ioadl(ipr_cmd, dma_addr, xfer_len, IPR_IOADL_FLAGS_READ_LAST);
1da177e4
LT
6503}
6504
dfed823e
BK
6505/**
6506 * ipr_reset_cmd_failed - Handle failure of IOA reset command
6507 * @ipr_cmd: ipr command struct
6508 *
6509 * This function handles the failure of an IOA bringup command.
6510 *
6511 * Return value:
6512 * IPR_RC_JOB_RETURN
6513 **/
6514static int ipr_reset_cmd_failed(struct ipr_cmnd *ipr_cmd)
6515{
6516 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6517 u32 ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);
6518
6519 dev_err(&ioa_cfg->pdev->dev,
6520 "0x%02X failed with IOASC: 0x%08X\n",
6521 ipr_cmd->ioarcb.cmd_pkt.cdb[0], ioasc);
6522
6523 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
6524 list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
6525 return IPR_RC_JOB_RETURN;
6526}
6527
6528/**
6529 * ipr_reset_mode_sense_failed - Handle failure of IOAFP mode sense
6530 * @ipr_cmd: ipr command struct
6531 *
6532 * This function handles the failure of a Mode Sense to the IOAFP.
6533 * Some adapters do not handle all mode pages.
6534 *
6535 * Return value:
6536 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
6537 **/
6538static int ipr_reset_mode_sense_failed(struct ipr_cmnd *ipr_cmd)
6539{
f72919ec 6540 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
dfed823e
BK
6541 u32 ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);
6542
6543 if (ioasc == IPR_IOASC_IR_INVALID_REQ_TYPE_OR_PKT) {
f72919ec
WB
6544 ipr_cmd->job_step = ipr_set_supported_devs;
6545 ipr_cmd->u.res = list_entry(ioa_cfg->used_res_q.next,
6546 struct ipr_resource_entry, queue);
dfed823e
BK
6547 return IPR_RC_JOB_CONTINUE;
6548 }
6549
6550 return ipr_reset_cmd_failed(ipr_cmd);
6551}
6552
1da177e4
LT
6553/**
6554 * ipr_ioafp_mode_sense_page28 - Issue Mode Sense Page 28 to IOA
6555 * @ipr_cmd: ipr command struct
6556 *
6557 * This function sends a Page 28 mode sense to the IOA to
6558 * retrieve SCSI bus attributes.
6559 *
6560 * Return value:
6561 * IPR_RC_JOB_RETURN
6562 **/
6563static int ipr_ioafp_mode_sense_page28(struct ipr_cmnd *ipr_cmd)
6564{
6565 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6566
6567 ENTER;
6568 ipr_build_mode_sense(ipr_cmd, cpu_to_be32(IPR_IOA_RES_HANDLE),
6569 0x28, ioa_cfg->vpd_cbs_dma +
6570 offsetof(struct ipr_misc_cbs, mode_pages),
6571 sizeof(struct ipr_mode_pages));
6572
6573 ipr_cmd->job_step = ipr_ioafp_mode_select_page28;
dfed823e 6574 ipr_cmd->job_step_failed = ipr_reset_mode_sense_failed;
1da177e4
LT
6575
6576 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
6577
6578 LEAVE;
6579 return IPR_RC_JOB_RETURN;
6580}
6581
ac09c349
BK
6582/**
6583 * ipr_ioafp_mode_select_page24 - Issue Mode Select to IOA
6584 * @ipr_cmd: ipr command struct
6585 *
6586 * This function enables dual IOA RAID support if possible.
6587 *
6588 * Return value:
6589 * IPR_RC_JOB_RETURN
6590 **/
6591static int ipr_ioafp_mode_select_page24(struct ipr_cmnd *ipr_cmd)
6592{
6593 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6594 struct ipr_mode_pages *mode_pages = &ioa_cfg->vpd_cbs->mode_pages;
6595 struct ipr_mode_page24 *mode_page;
6596 int length;
6597
6598 ENTER;
6599 mode_page = ipr_get_mode_page(mode_pages, 0x24,
6600 sizeof(struct ipr_mode_page24));
6601
6602 if (mode_page)
6603 mode_page->flags |= IPR_ENABLE_DUAL_IOA_AF;
6604
6605 length = mode_pages->hdr.length + 1;
6606 mode_pages->hdr.length = 0;
6607
6608 ipr_build_mode_select(ipr_cmd, cpu_to_be32(IPR_IOA_RES_HANDLE), 0x11,
6609 ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, mode_pages),
6610 length);
6611
6612 ipr_cmd->job_step = ipr_ioafp_mode_sense_page28;
6613 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
6614
6615 LEAVE;
6616 return IPR_RC_JOB_RETURN;
6617}
6618
6619/**
6620 * ipr_reset_mode_sense_page24_failed - Handle failure of IOAFP mode sense
6621 * @ipr_cmd: ipr command struct
6622 *
6623 * This function handles the failure of a Mode Sense to the IOAFP.
6624 * Some adapters do not handle all mode pages.
6625 *
6626 * Return value:
6627 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
6628 **/
6629static int ipr_reset_mode_sense_page24_failed(struct ipr_cmnd *ipr_cmd)
6630{
6631 u32 ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);
6632
6633 if (ioasc == IPR_IOASC_IR_INVALID_REQ_TYPE_OR_PKT) {
6634 ipr_cmd->job_step = ipr_ioafp_mode_sense_page28;
6635 return IPR_RC_JOB_CONTINUE;
6636 }
6637
6638 return ipr_reset_cmd_failed(ipr_cmd);
6639}
6640
6641/**
6642 * ipr_ioafp_mode_sense_page24 - Issue Page 24 Mode Sense to IOA
6643 * @ipr_cmd: ipr command struct
6644 *
6645 * This function sends a mode sense to the IOA to retrieve
6646 * the IOA Advanced Function Control mode page.
6647 *
6648 * Return value:
6649 * IPR_RC_JOB_RETURN
6650 **/
6651static int ipr_ioafp_mode_sense_page24(struct ipr_cmnd *ipr_cmd)
6652{
6653 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6654
6655 ENTER;
6656 ipr_build_mode_sense(ipr_cmd, cpu_to_be32(IPR_IOA_RES_HANDLE),
6657 0x24, ioa_cfg->vpd_cbs_dma +
6658 offsetof(struct ipr_misc_cbs, mode_pages),
6659 sizeof(struct ipr_mode_pages));
6660
6661 ipr_cmd->job_step = ipr_ioafp_mode_select_page24;
6662 ipr_cmd->job_step_failed = ipr_reset_mode_sense_page24_failed;
6663
6664 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
6665
6666 LEAVE;
6667 return IPR_RC_JOB_RETURN;
6668}
6669
1da177e4
LT
6670/**
6671 * ipr_init_res_table - Initialize the resource table
6672 * @ipr_cmd: ipr command struct
6673 *
6674 * This function looks through the existing resource table, comparing
6675 * it with the config table. This function will take care of old/new
6676 * devices and schedule adding/removing them from the mid-layer
6677 * as appropriate.
6678 *
6679 * Return value:
6680 * IPR_RC_JOB_CONTINUE
6681 **/
6682static int ipr_init_res_table(struct ipr_cmnd *ipr_cmd)
6683{
6684 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6685 struct ipr_resource_entry *res, *temp;
3e7ebdfa
WB
6686 struct ipr_config_table_entry_wrapper cfgtew;
6687 int entries, found, flag, i;
1da177e4
LT
6688 LIST_HEAD(old_res);
6689
6690 ENTER;
3e7ebdfa
WB
6691 if (ioa_cfg->sis64)
6692 flag = ioa_cfg->u.cfg_table64->hdr64.flags;
6693 else
6694 flag = ioa_cfg->u.cfg_table->hdr.flags;
6695
6696 if (flag & IPR_UCODE_DOWNLOAD_REQ)
1da177e4
LT
6697 dev_err(&ioa_cfg->pdev->dev, "Microcode download required\n");
6698
6699 list_for_each_entry_safe(res, temp, &ioa_cfg->used_res_q, queue)
6700 list_move_tail(&res->queue, &old_res);
6701
3e7ebdfa
WB
6702 if (ioa_cfg->sis64)
6703 entries = ioa_cfg->u.cfg_table64->hdr64.num_entries;
6704 else
6705 entries = ioa_cfg->u.cfg_table->hdr.num_entries;
6706
6707 for (i = 0; i < entries; i++) {
6708 if (ioa_cfg->sis64)
6709 cfgtew.u.cfgte64 = &ioa_cfg->u.cfg_table64->dev[i];
6710 else
6711 cfgtew.u.cfgte = &ioa_cfg->u.cfg_table->dev[i];
1da177e4
LT
6712 found = 0;
6713
6714 list_for_each_entry_safe(res, temp, &old_res, queue) {
3e7ebdfa 6715 if (ipr_is_same_device(res, &cfgtew)) {
1da177e4
LT
6716 list_move_tail(&res->queue, &ioa_cfg->used_res_q);
6717 found = 1;
6718 break;
6719 }
6720 }
6721
6722 if (!found) {
6723 if (list_empty(&ioa_cfg->free_res_q)) {
6724 dev_err(&ioa_cfg->pdev->dev, "Too many devices attached\n");
6725 break;
6726 }
6727
6728 found = 1;
6729 res = list_entry(ioa_cfg->free_res_q.next,
6730 struct ipr_resource_entry, queue);
6731 list_move_tail(&res->queue, &ioa_cfg->used_res_q);
3e7ebdfa 6732 ipr_init_res_entry(res, &cfgtew);
1da177e4
LT
6733 res->add_to_ml = 1;
6734 }
6735
6736 if (found)
3e7ebdfa 6737 ipr_update_res_entry(res, &cfgtew);
1da177e4
LT
6738 }
6739
6740 list_for_each_entry_safe(res, temp, &old_res, queue) {
6741 if (res->sdev) {
6742 res->del_from_ml = 1;
3e7ebdfa 6743 res->res_handle = IPR_INVALID_RES_HANDLE;
1da177e4 6744 list_move_tail(&res->queue, &ioa_cfg->used_res_q);
1da177e4
LT
6745 }
6746 }
6747
3e7ebdfa
WB
6748 list_for_each_entry_safe(res, temp, &old_res, queue) {
6749 ipr_clear_res_target(res);
6750 list_move_tail(&res->queue, &ioa_cfg->free_res_q);
6751 }
6752
ac09c349
BK
6753 if (ioa_cfg->dual_raid && ipr_dual_ioa_raid)
6754 ipr_cmd->job_step = ipr_ioafp_mode_sense_page24;
6755 else
6756 ipr_cmd->job_step = ipr_ioafp_mode_sense_page28;
1da177e4
LT
6757
6758 LEAVE;
6759 return IPR_RC_JOB_CONTINUE;
6760}
6761
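Editor's note: the function above is a reconcile-by-diff: every known resource is parked on old_res, each new config-table entry is matched back (hits move to used_res_q, misses take a free entry and set add_to_ml), and whatever is still on old_res afterwards has vanished from the adapter and is flagged del_from_ml or returned to the free list. The sketch below shows the same pattern in miniature, with hypothetical integer handles standing in for resource entries:

#include <stdio.h>

/* Reconcile a driver-side table against a fresh config table, mirroring
 * ipr_init_res_table(): unmatched old entries are the removals, unmatched
 * new entries are the additions. Names and values here are made up. */
#define MAX_RES 8

int main(void)
{
	int old_tbl[] = { 1, 2, 3 };	/* handles known before the reset */
	int new_tbl[] = { 2, 3, 4 };	/* handles the adapter reports now */
	int nold = 3, nnew = 3, matched[MAX_RES] = { 0 };

	for (int i = 0; i < nnew; i++) {
		int found = 0;

		for (int j = 0; j < nold; j++)
			if (old_tbl[j] == new_tbl[i]) {
				matched[j] = 1;
				found = 1;
				break;
			}
		if (!found)	/* analogous to res->add_to_ml = 1 */
			printf("add to mid-layer: %d\n", new_tbl[i]);
	}
	for (int j = 0; j < nold; j++)
		if (!matched[j])	/* analogous to res->del_from_ml = 1 */
			printf("remove from mid-layer: %d\n", old_tbl[j]);
	return 0;
}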
6762/**
6763 * ipr_ioafp_query_ioa_cfg - Send a Query IOA Config to the adapter.
6764 * @ipr_cmd: ipr command struct
6765 *
6766 * This function sends a Query IOA Configuration command
6767 * to the adapter to retrieve the IOA configuration table.
6768 *
6769 * Return value:
6770 * IPR_RC_JOB_RETURN
6771 **/
6772static int ipr_ioafp_query_ioa_cfg(struct ipr_cmnd *ipr_cmd)
6773{
6774 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6775 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
1da177e4 6776 struct ipr_inquiry_page3 *ucode_vpd = &ioa_cfg->vpd_cbs->page3_data;
ac09c349 6777 struct ipr_inquiry_cap *cap = &ioa_cfg->vpd_cbs->cap;
1da177e4
LT
6778
6779 ENTER;
ac09c349
BK
6780 if (cap->cap & IPR_CAP_DUAL_IOA_RAID)
6781 ioa_cfg->dual_raid = 1;
1da177e4
LT
6782 dev_info(&ioa_cfg->pdev->dev, "Adapter firmware version: %02X%02X%02X%02X\n",
6783 ucode_vpd->major_release, ucode_vpd->card_type,
6784 ucode_vpd->minor_release[0], ucode_vpd->minor_release[1]);
6785 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
6786 ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
6787
6788 ioarcb->cmd_pkt.cdb[0] = IPR_QUERY_IOA_CONFIG;
3e7ebdfa
WB
6789 ioarcb->cmd_pkt.cdb[7] = (ioa_cfg->cfg_table_size >> 8) & 0xff;
6790 ioarcb->cmd_pkt.cdb[8] = ioa_cfg->cfg_table_size & 0xff;
1da177e4 6791
3e7ebdfa 6792 ipr_init_ioadl(ipr_cmd, ioa_cfg->cfg_table_dma, ioa_cfg->cfg_table_size,
a32c055f 6793 IPR_IOADL_FLAGS_READ_LAST);
1da177e4
LT
6794
6795 ipr_cmd->job_step = ipr_init_res_table;
6796
6797 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
6798
6799 LEAVE;
6800 return IPR_RC_JOB_RETURN;
6801}
6802
6803/**
6804 * ipr_ioafp_inquiry - Send an Inquiry to the adapter.
6805 * @ipr_cmd: ipr command struct
6806 * @flags, @page, @dma_addr, @xfer_len: INQUIRY CDB fields and response buffer
6807 * This utility function sends an inquiry to the adapter.
6808 *
6809 * Return value:
6810 * none
6811 **/
6812static void ipr_ioafp_inquiry(struct ipr_cmnd *ipr_cmd, u8 flags, u8 page,
a32c055f 6813 dma_addr_t dma_addr, u8 xfer_len)
1da177e4
LT
6814{
6815 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
1da177e4
LT
6816
6817 ENTER;
6818 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_SCSICDB;
6819 ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
6820
6821 ioarcb->cmd_pkt.cdb[0] = INQUIRY;
6822 ioarcb->cmd_pkt.cdb[1] = flags;
6823 ioarcb->cmd_pkt.cdb[2] = page;
6824 ioarcb->cmd_pkt.cdb[4] = xfer_len;
6825
a32c055f 6826 ipr_init_ioadl(ipr_cmd, dma_addr, xfer_len, IPR_IOADL_FLAGS_READ_LAST);
1da177e4
LT
6827
6828 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
6829 LEAVE;
6830}
6831
62275040
BK
6832/**
6833 * ipr_inquiry_page_supported - Is the given inquiry page supported
6834 * @page0: inquiry page 0 buffer
6835 * @page: page code.
6836 *
6837 * This function determines if the specified inquiry page is supported.
6838 *
6839 * Return value:
6840 * 1 if page is supported / 0 if not
6841 **/
6842static int ipr_inquiry_page_supported(struct ipr_inquiry_page0 *page0, u8 page)
6843{
6844 int i;
6845
6846 for (i = 0; i < min_t(u8, page0->len, IPR_INQUIRY_PAGE0_ENTRIES); i++)
6847 if (page0->page[i] == page)
6848 return 1;
6849
6850 return 0;
6851}
6852
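Editor's note: the check above follows the standard INQUIRY VPD page 0 layout, which the ipr_inquiry_page0 fields map onto: the supported-page list length sits at byte 3 of the response and the page codes follow from byte 4. A raw-buffer version of the same test, purely as an illustration:

#include <stdint.h>
#include <stdio.h>

/* Scan a raw INQUIRY VPD page 0 response for a page code, mirroring
 * ipr_inquiry_page_supported(). */
static int page_supported(const uint8_t *page0, uint8_t page)
{
	for (int i = 0; i < page0[3]; i++)
		if (page0[4 + i] == page)
			return 1;
	return 0;
}

int main(void)
{
	/* periph, page code 0, reserved, list length 3, then the page codes */
	uint8_t page0[] = { 0, 0x00, 0, 3, 0x00, 0x03, 0xD0 };

	printf("page 0xD0 %ssupported\n", page_supported(page0, 0xD0) ? "" : "un");
	return 0;
}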
/**
 * ipr_ioafp_cap_inquiry - Send a Page 0xD0 Inquiry to the adapter.
 * @ipr_cmd: ipr command struct
 *
 * This function sends a Page 0xD0 inquiry to the adapter
 * to retrieve adapter capabilities.
 *
 * Return value:
 *	IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
 **/
static int ipr_ioafp_cap_inquiry(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
	struct ipr_inquiry_page0 *page0 = &ioa_cfg->vpd_cbs->page0_data;
	struct ipr_inquiry_cap *cap = &ioa_cfg->vpd_cbs->cap;

	ENTER;
	ipr_cmd->job_step = ipr_ioafp_query_ioa_cfg;
	memset(cap, 0, sizeof(*cap));

	if (ipr_inquiry_page_supported(page0, 0xD0)) {
		ipr_ioafp_inquiry(ipr_cmd, 1, 0xD0,
				  ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, cap),
				  sizeof(struct ipr_inquiry_cap));
		return IPR_RC_JOB_RETURN;
	}

	LEAVE;
	return IPR_RC_JOB_CONTINUE;
}

/**
 * ipr_ioafp_page3_inquiry - Send a Page 3 Inquiry to the adapter.
 * @ipr_cmd: ipr command struct
 *
 * This function sends a Page 3 inquiry to the adapter
 * to retrieve software VPD information.
 *
 * Return value:
 *	IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
 **/
static int ipr_ioafp_page3_inquiry(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;

	ENTER;

	ipr_cmd->job_step = ipr_ioafp_cap_inquiry;

	ipr_ioafp_inquiry(ipr_cmd, 1, 3,
			  ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, page3_data),
			  sizeof(struct ipr_inquiry_page3));

	LEAVE;
	return IPR_RC_JOB_RETURN;
}

/**
 * ipr_ioafp_page0_inquiry - Send a Page 0 Inquiry to the adapter.
 * @ipr_cmd: ipr command struct
 *
 * This function sends a Page 0 inquiry to the adapter
 * to retrieve supported inquiry pages.
 *
 * Return value:
 *	IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
 **/
static int ipr_ioafp_page0_inquiry(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
	char type[5];

	ENTER;

	/* Grab the type out of the VPD and store it away */
	memcpy(type, ioa_cfg->vpd_cbs->ioa_vpd.std_inq_data.vpids.product_id, 4);
	type[4] = '\0';
	ioa_cfg->type = simple_strtoul((char *)type, NULL, 16);

	ipr_cmd->job_step = ipr_ioafp_page3_inquiry;

	ipr_ioafp_inquiry(ipr_cmd, 1, 0,
			  ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, page0_data),
			  sizeof(struct ipr_inquiry_page0));

	LEAVE;
	return IPR_RC_JOB_RETURN;
}

/**
 * ipr_ioafp_std_inquiry - Send a Standard Inquiry to the adapter.
 * @ipr_cmd: ipr command struct
 *
 * This function sends a standard inquiry to the adapter.
 *
 * Return value:
 *	IPR_RC_JOB_RETURN
 **/
static int ipr_ioafp_std_inquiry(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;

	ENTER;
	ipr_cmd->job_step = ipr_ioafp_page0_inquiry;

	ipr_ioafp_inquiry(ipr_cmd, 0, 0,
			  ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, ioa_vpd),
			  sizeof(struct ipr_ioa_vpd));

	LEAVE;
	return IPR_RC_JOB_RETURN;
}

/**
 * ipr_ioafp_identify_hrrq - Send Identify Host RRQ.
 * @ipr_cmd: ipr command struct
 *
 * This function sends an Identify Host Request Response Queue
 * command to establish the HRRQ with the adapter.
 *
 * Return value:
 *	IPR_RC_JOB_RETURN
 **/
static int ipr_ioafp_identify_hrrq(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
	struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;

	ENTER;
	dev_info(&ioa_cfg->pdev->dev, "Starting IOA initialization sequence.\n");

	ioarcb->cmd_pkt.cdb[0] = IPR_ID_HOST_RR_Q;
	ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);

	ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
	if (ioa_cfg->sis64)
		ioarcb->cmd_pkt.cdb[1] = 0x1;
	ioarcb->cmd_pkt.cdb[2] =
		((u64) ioa_cfg->host_rrq_dma >> 24) & 0xff;
	ioarcb->cmd_pkt.cdb[3] =
		((u64) ioa_cfg->host_rrq_dma >> 16) & 0xff;
	ioarcb->cmd_pkt.cdb[4] =
		((u64) ioa_cfg->host_rrq_dma >> 8) & 0xff;
	ioarcb->cmd_pkt.cdb[5] =
		((u64) ioa_cfg->host_rrq_dma) & 0xff;
	ioarcb->cmd_pkt.cdb[7] =
		((sizeof(u32) * IPR_NUM_CMD_BLKS) >> 8) & 0xff;
	ioarcb->cmd_pkt.cdb[8] =
		(sizeof(u32) * IPR_NUM_CMD_BLKS) & 0xff;

	if (ioa_cfg->sis64) {
		ioarcb->cmd_pkt.cdb[10] =
			((u64) ioa_cfg->host_rrq_dma >> 56) & 0xff;
		ioarcb->cmd_pkt.cdb[11] =
			((u64) ioa_cfg->host_rrq_dma >> 48) & 0xff;
		ioarcb->cmd_pkt.cdb[12] =
			((u64) ioa_cfg->host_rrq_dma >> 40) & 0xff;
		ioarcb->cmd_pkt.cdb[13] =
			((u64) ioa_cfg->host_rrq_dma >> 32) & 0xff;
	}

	ipr_cmd->job_step = ipr_ioafp_std_inquiry;

	ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);

	LEAVE;
	return IPR_RC_JOB_RETURN;
}

/**
 * ipr_reset_timer_done - Adapter reset timer function
 * @ipr_cmd: ipr command struct
 *
 * Description: This function is used in adapter reset processing
 * for timing events. If the reset_cmd pointer in the IOA
 * config struct is not this adapter's, we are doing nested
 * resets and fail_all_ops will take care of freeing the
 * command block.
 *
 * Return value:
 *	none
 **/
static void ipr_reset_timer_done(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
	unsigned long lock_flags = 0;

	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);

	if (ioa_cfg->reset_cmd == ipr_cmd) {
		list_del(&ipr_cmd->queue);
		ipr_cmd->done(ipr_cmd);
	}

	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
}

/**
 * ipr_reset_start_timer - Start a timer for adapter reset job
 * @ipr_cmd: ipr command struct
 * @timeout: timeout value
 *
 * Description: This function is used in adapter reset processing
 * for timing events. If the reset_cmd pointer in the IOA
 * config struct is not this adapter's, we are doing nested
 * resets and fail_all_ops will take care of freeing the
 * command block.
 *
 * Return value:
 *	none
 **/
static void ipr_reset_start_timer(struct ipr_cmnd *ipr_cmd,
				  unsigned long timeout)
{
	list_add_tail(&ipr_cmd->queue, &ipr_cmd->ioa_cfg->pending_q);
	ipr_cmd->done = ipr_reset_ioa_job;

	ipr_cmd->timer.data = (unsigned long) ipr_cmd;
	ipr_cmd->timer.expires = jiffies + timeout;
	ipr_cmd->timer.function = (void (*)(unsigned long))ipr_reset_timer_done;
	add_timer(&ipr_cmd->timer);
}

/**
 * ipr_init_ioa_mem - Initialize ioa_cfg control block
 * @ioa_cfg: ioa cfg struct
 *
 * Return value:
 *	nothing
 **/
static void ipr_init_ioa_mem(struct ipr_ioa_cfg *ioa_cfg)
{
	memset(ioa_cfg->host_rrq, 0, sizeof(u32) * IPR_NUM_CMD_BLKS);

	/* Initialize Host RRQ pointers */
	ioa_cfg->hrrq_start = ioa_cfg->host_rrq;
	ioa_cfg->hrrq_end = &ioa_cfg->host_rrq[IPR_NUM_CMD_BLKS - 1];
	ioa_cfg->hrrq_curr = ioa_cfg->hrrq_start;
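	/* The toggle bit flips each time the circular HRRQ wraps; the
	 * interrupt handler compares it against the bit in each response
	 * word to tell fresh entries from stale ones left over from the
	 * previous pass through the queue. */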
	ioa_cfg->toggle_bit = 1;

	/* Zero out config table */
	memset(ioa_cfg->u.cfg_table, 0, ioa_cfg->cfg_table_size);
}

/**
 * ipr_reset_next_stage - Process IPL stage change based on feedback register.
 * @ipr_cmd: ipr command struct
 *
 * Return value:
 *	IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
 **/
static int ipr_reset_next_stage(struct ipr_cmnd *ipr_cmd)
{
	unsigned long stage, stage_time;
	u32 feedback;
	volatile u32 int_reg;
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
	u64 maskval = 0;

	feedback = readl(ioa_cfg->regs.init_feedback_reg);
	stage = feedback & IPR_IPL_INIT_STAGE_MASK;
	stage_time = feedback & IPR_IPL_INIT_STAGE_TIME_MASK;
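	/* The init feedback register packs two fields: the current IPL
	 * (boot) stage and the time the adapter expects that stage to
	 * take, which seeds the stage timer armed below. */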

	ipr_dbg("IPL stage = 0x%lx, IPL stage time = %ld\n", stage, stage_time);

	/* sanity check the stage_time value */
	if (stage_time < IPR_IPL_INIT_MIN_STAGE_TIME)
		stage_time = IPR_IPL_INIT_MIN_STAGE_TIME;
	else if (stage_time > IPR_LONG_OPERATIONAL_TIMEOUT)
		stage_time = IPR_LONG_OPERATIONAL_TIMEOUT;

	if (stage == IPR_IPL_INIT_STAGE_UNKNOWN) {
		writel(IPR_PCII_IPL_STAGE_CHANGE, ioa_cfg->regs.set_interrupt_mask_reg);
		int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
		stage_time = ioa_cfg->transop_timeout;
		ipr_cmd->job_step = ipr_ioafp_identify_hrrq;
	} else if (stage == IPR_IPL_INIT_STAGE_TRANSOP) {
		ipr_cmd->job_step = ipr_ioafp_identify_hrrq;
		maskval = IPR_PCII_IPL_STAGE_CHANGE;
		maskval = (maskval << 32) | IPR_PCII_IOA_TRANS_TO_OPER;
		writeq(maskval, ioa_cfg->regs.set_interrupt_mask_reg);
		int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
		return IPR_RC_JOB_CONTINUE;
	}

	ipr_cmd->timer.data = (unsigned long) ipr_cmd;
	ipr_cmd->timer.expires = jiffies + stage_time * HZ;
	ipr_cmd->timer.function = (void (*)(unsigned long))ipr_oper_timeout;
	ipr_cmd->done = ipr_reset_ioa_job;
	add_timer(&ipr_cmd->timer);
	list_add_tail(&ipr_cmd->queue, &ioa_cfg->pending_q);

	return IPR_RC_JOB_RETURN;
}

/**
 * ipr_reset_enable_ioa - Enable the IOA following a reset.
 * @ipr_cmd: ipr command struct
 *
 * This function reinitializes some control blocks and
 * enables destructive diagnostics on the adapter.
 *
 * Return value:
 *	IPR_RC_JOB_RETURN
 **/
static int ipr_reset_enable_ioa(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
	volatile u32 int_reg;

	ENTER;
	ipr_cmd->job_step = ipr_ioafp_identify_hrrq;
	ipr_init_ioa_mem(ioa_cfg);

	ioa_cfg->allow_interrupts = 1;
	int_reg = readl(ioa_cfg->regs.sense_interrupt_reg);

	if (int_reg & IPR_PCII_IOA_TRANS_TO_OPER) {
		writel((IPR_PCII_ERROR_INTERRUPTS | IPR_PCII_HRRQ_UPDATED),
		       ioa_cfg->regs.clr_interrupt_mask_reg32);
		int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
		return IPR_RC_JOB_CONTINUE;
	}

	/* Enable destructive diagnostics on IOA */
	writel(ioa_cfg->doorbell, ioa_cfg->regs.set_uproc_interrupt_reg32);

	writel(IPR_PCII_OPER_INTERRUPTS, ioa_cfg->regs.clr_interrupt_mask_reg32);
	if (ioa_cfg->sis64)
		writel(IPR_PCII_IPL_STAGE_CHANGE, ioa_cfg->regs.clr_interrupt_mask_reg);

	int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);

	dev_info(&ioa_cfg->pdev->dev, "Initializing IOA.\n");

	if (ioa_cfg->sis64) {
		ipr_cmd->job_step = ipr_reset_next_stage;
		return IPR_RC_JOB_CONTINUE;
	}

	ipr_cmd->timer.data = (unsigned long) ipr_cmd;
	ipr_cmd->timer.expires = jiffies + (ioa_cfg->transop_timeout * HZ);
	ipr_cmd->timer.function = (void (*)(unsigned long))ipr_oper_timeout;
	ipr_cmd->done = ipr_reset_ioa_job;
	add_timer(&ipr_cmd->timer);
	list_add_tail(&ipr_cmd->queue, &ioa_cfg->pending_q);

	LEAVE;
	return IPR_RC_JOB_RETURN;
}

/**
 * ipr_reset_wait_for_dump - Wait for a dump to timeout.
 * @ipr_cmd: ipr command struct
 *
 * This function is invoked when an adapter dump has run out
 * of processing time.
 *
 * Return value:
 *	IPR_RC_JOB_CONTINUE
 **/
static int ipr_reset_wait_for_dump(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;

	if (ioa_cfg->sdt_state == GET_DUMP)
		ioa_cfg->sdt_state = ABORT_DUMP;

	ipr_cmd->job_step = ipr_reset_alert;

	return IPR_RC_JOB_CONTINUE;
}

/**
 * ipr_unit_check_no_data - Log a unit check/no data error log
 * @ioa_cfg: ioa config struct
 *
 * Logs an error indicating the adapter unit checked, but for some
 * reason, we were unable to fetch the unit check buffer.
 *
 * Return value:
 *	nothing
 **/
static void ipr_unit_check_no_data(struct ipr_ioa_cfg *ioa_cfg)
{
	ioa_cfg->errors_logged++;
	dev_err(&ioa_cfg->pdev->dev, "IOA unit check with no data\n");
}

/**
 * ipr_get_unit_check_buffer - Get the unit check buffer from the IOA
 * @ioa_cfg: ioa config struct
 *
 * Fetches the unit check buffer from the adapter by clocking the data
 * through the mailbox register.
 *
 * Return value:
 *	nothing
 **/
static void ipr_get_unit_check_buffer(struct ipr_ioa_cfg *ioa_cfg)
{
	unsigned long mailbox;
	struct ipr_hostrcb *hostrcb;
	struct ipr_uc_sdt sdt;
	int rc, length;
	u32 ioasc;

	mailbox = readl(ioa_cfg->ioa_mailbox);

	if (!ioa_cfg->sis64 && !ipr_sdt_is_fmt2(mailbox)) {
		ipr_unit_check_no_data(ioa_cfg);
		return;
	}

	memset(&sdt, 0, sizeof(struct ipr_uc_sdt));
	rc = ipr_get_ldump_data_section(ioa_cfg, mailbox, (__be32 *) &sdt,
					(sizeof(struct ipr_uc_sdt)) / sizeof(__be32));

	if (rc || !(sdt.entry[0].flags & IPR_SDT_VALID_ENTRY) ||
	    ((be32_to_cpu(sdt.hdr.state) != IPR_FMT3_SDT_READY_TO_USE) &&
	     (be32_to_cpu(sdt.hdr.state) != IPR_FMT2_SDT_READY_TO_USE))) {
		ipr_unit_check_no_data(ioa_cfg);
		return;
	}

	/* Find length of the first sdt entry (UC buffer) */
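	/* In format 3 SDTs the end token is the entry length itself; in
	 * format 2 it is an end address, so the length is the masked
	 * difference between the end and start tokens. */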
	if (be32_to_cpu(sdt.hdr.state) == IPR_FMT3_SDT_READY_TO_USE)
		length = be32_to_cpu(sdt.entry[0].end_token);
	else
		length = (be32_to_cpu(sdt.entry[0].end_token) -
			  be32_to_cpu(sdt.entry[0].start_token)) &
			  IPR_FMT2_MBX_ADDR_MASK;

	hostrcb = list_entry(ioa_cfg->hostrcb_free_q.next,
			     struct ipr_hostrcb, queue);
	list_del(&hostrcb->queue);
	memset(&hostrcb->hcam, 0, sizeof(hostrcb->hcam));

	rc = ipr_get_ldump_data_section(ioa_cfg,
					be32_to_cpu(sdt.entry[0].start_token),
					(__be32 *)&hostrcb->hcam,
					min(length, (int)sizeof(hostrcb->hcam)) / sizeof(__be32));

	if (!rc) {
		ipr_handle_log_data(ioa_cfg, hostrcb);
		ioasc = be32_to_cpu(hostrcb->hcam.u.error.fd_ioasc);
		if (ioasc == IPR_IOASC_NR_IOA_RESET_REQUIRED &&
		    ioa_cfg->sdt_state == GET_DUMP)
			ioa_cfg->sdt_state = WAIT_FOR_DUMP;
	} else
		ipr_unit_check_no_data(ioa_cfg);

	list_add_tail(&hostrcb->queue, &ioa_cfg->hostrcb_free_q);
}

/**
 * ipr_reset_restore_cfg_space - Restore PCI config space.
 * @ipr_cmd: ipr command struct
 *
 * Description: This function restores the saved PCI config space of
 * the adapter, fails all outstanding ops back to the callers, and
 * fetches the dump/unit check if applicable to this reset.
 *
 * Return value:
 *	IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
 **/
static int ipr_reset_restore_cfg_space(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
	int rc;

	ENTER;
	ioa_cfg->pdev->state_saved = true;
	rc = pci_restore_state(ioa_cfg->pdev);

	if (rc != PCIBIOS_SUCCESSFUL) {
		ipr_cmd->ioasa.ioasc = cpu_to_be32(IPR_IOASC_PCI_ACCESS_ERROR);
		return IPR_RC_JOB_CONTINUE;
	}

	if (ipr_set_pcix_cmd_reg(ioa_cfg)) {
		ipr_cmd->ioasa.ioasc = cpu_to_be32(IPR_IOASC_PCI_ACCESS_ERROR);
		return IPR_RC_JOB_CONTINUE;
	}

	ipr_fail_all_ops(ioa_cfg);

	if (ioa_cfg->ioa_unit_checked) {
		ioa_cfg->ioa_unit_checked = 0;
		ipr_get_unit_check_buffer(ioa_cfg);
		ipr_cmd->job_step = ipr_reset_alert;
		ipr_reset_start_timer(ipr_cmd, 0);
		return IPR_RC_JOB_RETURN;
	}

	if (ioa_cfg->in_ioa_bringdown) {
		ipr_cmd->job_step = ipr_ioa_bringdown_done;
	} else {
		ipr_cmd->job_step = ipr_reset_enable_ioa;

		if (GET_DUMP == ioa_cfg->sdt_state) {
			ipr_reset_start_timer(ipr_cmd, IPR_DUMP_TIMEOUT);
			ipr_cmd->job_step = ipr_reset_wait_for_dump;
			schedule_work(&ioa_cfg->work_q);
			return IPR_RC_JOB_RETURN;
		}
	}

	LEAVE;
	return IPR_RC_JOB_CONTINUE;
}

/**
 * ipr_reset_bist_done - BIST has completed on the adapter.
 * @ipr_cmd: ipr command struct
 *
 * Description: Unblock config space and resume the reset process.
 *
 * Return value:
 *	IPR_RC_JOB_CONTINUE
 **/
static int ipr_reset_bist_done(struct ipr_cmnd *ipr_cmd)
{
	ENTER;
	pci_unblock_user_cfg_access(ipr_cmd->ioa_cfg->pdev);
	ipr_cmd->job_step = ipr_reset_restore_cfg_space;
	LEAVE;
	return IPR_RC_JOB_CONTINUE;
}

/**
 * ipr_reset_start_bist - Run BIST on the adapter.
 * @ipr_cmd: ipr command struct
 *
 * Description: This function runs BIST on the adapter, then delays 2 seconds.
 *
 * Return value:
 *	IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
 **/
static int ipr_reset_start_bist(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
	int rc;

	ENTER;
	pci_block_user_cfg_access(ioa_cfg->pdev);
	rc = pci_write_config_byte(ioa_cfg->pdev, PCI_BIST, PCI_BIST_START);

	if (rc != PCIBIOS_SUCCESSFUL) {
		pci_unblock_user_cfg_access(ipr_cmd->ioa_cfg->pdev);
		ipr_cmd->ioasa.ioasc = cpu_to_be32(IPR_IOASC_PCI_ACCESS_ERROR);
		rc = IPR_RC_JOB_CONTINUE;
	} else {
		ipr_cmd->job_step = ipr_reset_bist_done;
		ipr_reset_start_timer(ipr_cmd, IPR_WAIT_FOR_BIST_TIMEOUT);
		rc = IPR_RC_JOB_RETURN;
	}

	LEAVE;
	return rc;
}

/**
 * ipr_reset_slot_reset_done - Clear PCI reset to the adapter
 * @ipr_cmd: ipr command struct
 *
 * Description: This clears PCI reset to the adapter and delays two seconds.
 *
 * Return value:
 *	IPR_RC_JOB_RETURN
 **/
static int ipr_reset_slot_reset_done(struct ipr_cmnd *ipr_cmd)
{
	ENTER;
	pci_set_pcie_reset_state(ipr_cmd->ioa_cfg->pdev, pcie_deassert_reset);
	ipr_cmd->job_step = ipr_reset_bist_done;
	ipr_reset_start_timer(ipr_cmd, IPR_WAIT_FOR_BIST_TIMEOUT);
	LEAVE;
	return IPR_RC_JOB_RETURN;
}

/**
 * ipr_reset_slot_reset - Reset the PCI slot of the adapter.
 * @ipr_cmd: ipr command struct
 *
 * Description: This asserts PCI reset to the adapter.
 *
 * Return value:
 *	IPR_RC_JOB_RETURN
 **/
static int ipr_reset_slot_reset(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
	struct pci_dev *pdev = ioa_cfg->pdev;

	ENTER;
	pci_block_user_cfg_access(pdev);
	pci_set_pcie_reset_state(pdev, pcie_warm_reset);
	ipr_cmd->job_step = ipr_reset_slot_reset_done;
	ipr_reset_start_timer(ipr_cmd, IPR_PCI_RESET_TIMEOUT);
	LEAVE;
	return IPR_RC_JOB_RETURN;
}

/**
 * ipr_reset_allowed - Query whether or not IOA can be reset
 * @ioa_cfg: ioa config struct
 *
 * Return value:
 *	0 if reset not allowed / non-zero if reset is allowed
 **/
static int ipr_reset_allowed(struct ipr_ioa_cfg *ioa_cfg)
{
	volatile u32 temp_reg;

	temp_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
	return ((temp_reg & IPR_PCII_CRITICAL_OPERATION) == 0);
}

/**
 * ipr_reset_wait_to_start_bist - Wait for permission to reset IOA.
 * @ipr_cmd: ipr command struct
 *
 * Description: This function waits for adapter permission to run BIST,
 * then runs BIST. If the adapter does not give permission after a
 * reasonable time, we will reset the adapter anyway. The impact of
 * resetting the adapter without warning the adapter is the risk of
 * losing the persistent error log on the adapter. If the adapter is
 * reset while it is writing to the flash on the adapter, the flash
 * segment will have bad ECC and be zeroed.
 *
 * Return value:
 *	IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
 **/
static int ipr_reset_wait_to_start_bist(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
	int rc = IPR_RC_JOB_RETURN;

	if (!ipr_reset_allowed(ioa_cfg) && ipr_cmd->u.time_left) {
		ipr_cmd->u.time_left -= IPR_CHECK_FOR_RESET_TIMEOUT;
		ipr_reset_start_timer(ipr_cmd, IPR_CHECK_FOR_RESET_TIMEOUT);
	} else {
		ipr_cmd->job_step = ioa_cfg->reset;
		rc = IPR_RC_JOB_CONTINUE;
	}

	return rc;
}

/**
 * ipr_reset_alert - Alert the adapter of a pending reset
 * @ipr_cmd: ipr command struct
 *
 * Description: This function alerts the adapter that it will be reset.
 * If memory space is not currently enabled, proceed directly
 * to running BIST on the adapter. The timer must always be started
 * so we guarantee we do not run BIST from ipr_isr.
 *
 * Return value:
 *	IPR_RC_JOB_RETURN
 **/
static int ipr_reset_alert(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
	u16 cmd_reg;
	int rc;

	ENTER;
	rc = pci_read_config_word(ioa_cfg->pdev, PCI_COMMAND, &cmd_reg);

	if ((rc == PCIBIOS_SUCCESSFUL) && (cmd_reg & PCI_COMMAND_MEMORY)) {
		ipr_mask_and_clear_interrupts(ioa_cfg, ~0);
		writel(IPR_UPROCI_RESET_ALERT, ioa_cfg->regs.set_uproc_interrupt_reg32);
		ipr_cmd->job_step = ipr_reset_wait_to_start_bist;
	} else {
		ipr_cmd->job_step = ioa_cfg->reset;
	}

	ipr_cmd->u.time_left = IPR_WAIT_FOR_RESET_TIMEOUT;
	ipr_reset_start_timer(ipr_cmd, IPR_CHECK_FOR_RESET_TIMEOUT);

	LEAVE;
	return IPR_RC_JOB_RETURN;
}

/**
 * ipr_reset_ucode_download_done - Microcode download completion
 * @ipr_cmd: ipr command struct
 *
 * Description: This function unmaps the microcode download buffer.
 *
 * Return value:
 *	IPR_RC_JOB_CONTINUE
 **/
static int ipr_reset_ucode_download_done(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
	struct ipr_sglist *sglist = ioa_cfg->ucode_sglist;

	pci_unmap_sg(ioa_cfg->pdev, sglist->scatterlist,
		     sglist->num_sg, DMA_TO_DEVICE);

	ipr_cmd->job_step = ipr_reset_alert;
	return IPR_RC_JOB_CONTINUE;
}

/**
 * ipr_reset_ucode_download - Download microcode to the adapter
 * @ipr_cmd: ipr command struct
 *
 * Description: This function checks to see if there is microcode
 * to download to the adapter. If there is, a download is performed.
 *
 * Return value:
 *	IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
 **/
static int ipr_reset_ucode_download(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
	struct ipr_sglist *sglist = ioa_cfg->ucode_sglist;

	ENTER;
	ipr_cmd->job_step = ipr_reset_alert;

	if (!sglist)
		return IPR_RC_JOB_CONTINUE;

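	/* Build a SCSI WRITE BUFFER CDB: the mode byte selects "download
	 * microcode and save", and bytes 6-8 carry the 24-bit image
	 * length in big-endian order. */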
	ipr_cmd->ioarcb.res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
	ipr_cmd->ioarcb.cmd_pkt.request_type = IPR_RQTYPE_SCSICDB;
	ipr_cmd->ioarcb.cmd_pkt.cdb[0] = WRITE_BUFFER;
	ipr_cmd->ioarcb.cmd_pkt.cdb[1] = IPR_WR_BUF_DOWNLOAD_AND_SAVE;
	ipr_cmd->ioarcb.cmd_pkt.cdb[6] = (sglist->buffer_len & 0xff0000) >> 16;
	ipr_cmd->ioarcb.cmd_pkt.cdb[7] = (sglist->buffer_len & 0x00ff00) >> 8;
	ipr_cmd->ioarcb.cmd_pkt.cdb[8] = sglist->buffer_len & 0x0000ff;

	if (ioa_cfg->sis64)
		ipr_build_ucode_ioadl64(ipr_cmd, sglist);
	else
		ipr_build_ucode_ioadl(ipr_cmd, sglist);
	ipr_cmd->job_step = ipr_reset_ucode_download_done;

	ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout,
		   IPR_WRITE_BUFFER_TIMEOUT);

	LEAVE;
	return IPR_RC_JOB_RETURN;
}

/**
 * ipr_reset_shutdown_ioa - Shutdown the adapter
 * @ipr_cmd: ipr command struct
 *
 * Description: This function issues an adapter shutdown of the
 * specified type to the specified adapter as part of the
 * adapter reset job.
 *
 * Return value:
 *	IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
 **/
static int ipr_reset_shutdown_ioa(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
	enum ipr_shutdown_type shutdown_type = ipr_cmd->u.shutdown_type;
	unsigned long timeout;
	int rc = IPR_RC_JOB_CONTINUE;

	ENTER;
	if (shutdown_type != IPR_SHUTDOWN_NONE && !ioa_cfg->ioa_is_dead) {
		ipr_cmd->ioarcb.res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
		ipr_cmd->ioarcb.cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
		ipr_cmd->ioarcb.cmd_pkt.cdb[0] = IPR_IOA_SHUTDOWN;
		ipr_cmd->ioarcb.cmd_pkt.cdb[1] = shutdown_type;

		if (shutdown_type == IPR_SHUTDOWN_NORMAL)
			timeout = IPR_SHUTDOWN_TIMEOUT;
		else if (shutdown_type == IPR_SHUTDOWN_PREPARE_FOR_NORMAL)
			timeout = IPR_INTERNAL_TIMEOUT;
		else if (ioa_cfg->dual_raid && ipr_dual_ioa_raid)
			timeout = IPR_DUAL_IOA_ABBR_SHUTDOWN_TO;
		else
			timeout = IPR_ABBREV_SHUTDOWN_TIMEOUT;

		ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, timeout);

		rc = IPR_RC_JOB_RETURN;
		ipr_cmd->job_step = ipr_reset_ucode_download;
	} else
		ipr_cmd->job_step = ipr_reset_alert;

	LEAVE;
	return rc;
}

/**
 * ipr_reset_ioa_job - Adapter reset job
 * @ipr_cmd: ipr command struct
 *
 * Description: This function is the job router for the adapter reset job.
 *
 * Return value:
 *	none
 **/
static void ipr_reset_ioa_job(struct ipr_cmnd *ipr_cmd)
{
	u32 rc, ioasc;
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;

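	/* The reset is driven as a state machine: each job_step either
	 * finishes synchronously (IPR_RC_JOB_CONTINUE, so the loop runs
	 * the next step immediately) or arms an interrupt/timer and
	 * returns IPR_RC_JOB_RETURN, re-entering this router when the
	 * step eventually completes. */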
	do {
		ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);

		if (ioa_cfg->reset_cmd != ipr_cmd) {
			/*
			 * We are doing nested adapter resets and this is
			 * not the current reset job.
			 */
			list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
			return;
		}

		if (IPR_IOASC_SENSE_KEY(ioasc)) {
			rc = ipr_cmd->job_step_failed(ipr_cmd);
			if (rc == IPR_RC_JOB_RETURN)
				return;
		}

		ipr_reinit_ipr_cmnd(ipr_cmd);
		ipr_cmd->job_step_failed = ipr_reset_cmd_failed;
		rc = ipr_cmd->job_step(ipr_cmd);
	} while (rc == IPR_RC_JOB_CONTINUE);
}

/**
 * _ipr_initiate_ioa_reset - Initiate an adapter reset
 * @ioa_cfg: ioa config struct
 * @job_step: first job step of reset job
 * @shutdown_type: shutdown type
 *
 * Description: This function will initiate the reset of the given adapter
 * starting at the selected job step.
 * If the caller needs to wait on the completion of the reset,
 * the caller must sleep on the reset_wait_q.
 *
 * Return value:
 *	none
 **/
static void _ipr_initiate_ioa_reset(struct ipr_ioa_cfg *ioa_cfg,
				    int (*job_step) (struct ipr_cmnd *),
				    enum ipr_shutdown_type shutdown_type)
{
	struct ipr_cmnd *ipr_cmd;

	ioa_cfg->in_reset_reload = 1;
	ioa_cfg->allow_cmds = 0;
	scsi_block_requests(ioa_cfg->host);

	ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
	ioa_cfg->reset_cmd = ipr_cmd;
	ipr_cmd->job_step = job_step;
	ipr_cmd->u.shutdown_type = shutdown_type;

	ipr_reset_ioa_job(ipr_cmd);
}

/**
 * ipr_initiate_ioa_reset - Initiate an adapter reset
 * @ioa_cfg: ioa config struct
 * @shutdown_type: shutdown type
 *
 * Description: This function will initiate the reset of the given adapter.
 * If the caller needs to wait on the completion of the reset,
 * the caller must sleep on the reset_wait_q.
 *
 * Return value:
 *	none
 **/
static void ipr_initiate_ioa_reset(struct ipr_ioa_cfg *ioa_cfg,
				   enum ipr_shutdown_type shutdown_type)
{
	if (ioa_cfg->ioa_is_dead)
		return;

	if (ioa_cfg->in_reset_reload && ioa_cfg->sdt_state == GET_DUMP)
		ioa_cfg->sdt_state = ABORT_DUMP;

	if (ioa_cfg->reset_retries++ >= IPR_NUM_RESET_RELOAD_RETRIES) {
		dev_err(&ioa_cfg->pdev->dev,
			"IOA taken offline - error recovery failed\n");

		ioa_cfg->reset_retries = 0;
		ioa_cfg->ioa_is_dead = 1;

		if (ioa_cfg->in_ioa_bringdown) {
			ioa_cfg->reset_cmd = NULL;
			ioa_cfg->in_reset_reload = 0;
			ipr_fail_all_ops(ioa_cfg);
			wake_up_all(&ioa_cfg->reset_wait_q);

			spin_unlock_irq(ioa_cfg->host->host_lock);
			scsi_unblock_requests(ioa_cfg->host);
			spin_lock_irq(ioa_cfg->host->host_lock);
			return;
		} else {
			ioa_cfg->in_ioa_bringdown = 1;
			shutdown_type = IPR_SHUTDOWN_NONE;
		}
	}

	_ipr_initiate_ioa_reset(ioa_cfg, ipr_reset_shutdown_ioa,
				shutdown_type);
}

/**
 * ipr_reset_freeze - Hold off all I/O activity
 * @ipr_cmd: ipr command struct
 *
 * Description: If the PCI slot is frozen, hold off all I/O
 * activity; then, as soon as the slot is available again,
 * initiate an adapter reset.
 */
static int ipr_reset_freeze(struct ipr_cmnd *ipr_cmd)
{
	/* Disallow new interrupts, avoid loop */
	ipr_cmd->ioa_cfg->allow_interrupts = 0;
	list_add_tail(&ipr_cmd->queue, &ipr_cmd->ioa_cfg->pending_q);
	ipr_cmd->done = ipr_reset_ioa_job;
	return IPR_RC_JOB_RETURN;
}

/**
 * ipr_pci_frozen - Called when slot has experienced a PCI bus error.
 * @pdev: PCI device struct
 *
 * Description: This routine is called to tell us that the PCI bus
 * is down. Can't do anything here, except put the device driver
 * into a holding pattern, waiting for the PCI bus to come back.
 */
static void ipr_pci_frozen(struct pci_dev *pdev)
{
	unsigned long flags = 0;
	struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);

	spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
	_ipr_initiate_ioa_reset(ioa_cfg, ipr_reset_freeze, IPR_SHUTDOWN_NONE);
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
}

/**
 * ipr_pci_slot_reset - Called when PCI slot has been reset.
 * @pdev: PCI device struct
 *
 * Description: This routine is called by the pci error recovery
 * code after the PCI slot has been reset, just before we
 * should resume normal operations.
 */
static pci_ers_result_t ipr_pci_slot_reset(struct pci_dev *pdev)
{
	unsigned long flags = 0;
	struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);

	spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
	if (ioa_cfg->needs_warm_reset)
		ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
	else
		_ipr_initiate_ioa_reset(ioa_cfg, ipr_reset_restore_cfg_space,
					IPR_SHUTDOWN_NONE);
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
	return PCI_ERS_RESULT_RECOVERED;
}

/**
 * ipr_pci_perm_failure - Called when PCI slot is dead for good.
 * @pdev: PCI device struct
 *
 * Description: This routine is called when the PCI bus has
 * permanently failed.
 */
static void ipr_pci_perm_failure(struct pci_dev *pdev)
{
	unsigned long flags = 0;
	struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);

	spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
	if (ioa_cfg->sdt_state == WAIT_FOR_DUMP)
		ioa_cfg->sdt_state = ABORT_DUMP;
	ioa_cfg->reset_retries = IPR_NUM_RESET_RELOAD_RETRIES;
	ioa_cfg->in_ioa_bringdown = 1;
	ioa_cfg->allow_cmds = 0;
	ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
}

/**
 * ipr_pci_error_detected - Called when a PCI error is detected.
 * @pdev: PCI device struct
 * @state: PCI channel state
 *
 * Description: Called when a PCI error is detected.
 *
 * Return value:
 *	PCI_ERS_RESULT_NEED_RESET or PCI_ERS_RESULT_DISCONNECT
 */
static pci_ers_result_t ipr_pci_error_detected(struct pci_dev *pdev,
					       pci_channel_state_t state)
{
	switch (state) {
	case pci_channel_io_frozen:
		ipr_pci_frozen(pdev);
		return PCI_ERS_RESULT_NEED_RESET;
	case pci_channel_io_perm_failure:
		ipr_pci_perm_failure(pdev);
		return PCI_ERS_RESULT_DISCONNECT;
	default:
		break;
	}
	return PCI_ERS_RESULT_NEED_RESET;
}

/**
 * ipr_probe_ioa_part2 - Initializes IOAs found in ipr_probe_ioa(..)
 * @ioa_cfg: ioa cfg struct
 *
 * Description: This is the second phase of adapter initialization.
 * This function takes care of initializing the adapter to the point
 * where it can accept new commands.
 *
 * Return value:
 *	0 on success / -EIO on failure
 **/
static int __devinit ipr_probe_ioa_part2(struct ipr_ioa_cfg *ioa_cfg)
{
	int rc = 0;
	unsigned long host_lock_flags = 0;

	ENTER;
	spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);
	dev_dbg(&ioa_cfg->pdev->dev, "ioa_cfg adx: 0x%p\n", ioa_cfg);
	if (ioa_cfg->needs_hard_reset) {
		ioa_cfg->needs_hard_reset = 0;
		ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
	} else
		_ipr_initiate_ioa_reset(ioa_cfg, ipr_reset_enable_ioa,
					IPR_SHUTDOWN_NONE);

	spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);
	wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
	spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);

	if (ioa_cfg->ioa_is_dead) {
		rc = -EIO;
	} else if (ipr_invalid_adapter(ioa_cfg)) {
		if (!ipr_testmode)
			rc = -EIO;

		dev_err(&ioa_cfg->pdev->dev,
			"Adapter not supported in this hardware configuration.\n");
	}

	spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);

	LEAVE;
	return rc;
}

/**
 * ipr_free_cmd_blks - Frees command blocks allocated for an adapter
 * @ioa_cfg: ioa config struct
 *
 * Return value:
 *	none
 **/
static void ipr_free_cmd_blks(struct ipr_ioa_cfg *ioa_cfg)
{
	int i;

	for (i = 0; i < IPR_NUM_CMD_BLKS; i++) {
		if (ioa_cfg->ipr_cmnd_list[i])
			pci_pool_free(ioa_cfg->ipr_cmd_pool,
				      ioa_cfg->ipr_cmnd_list[i],
				      ioa_cfg->ipr_cmnd_list_dma[i]);

		ioa_cfg->ipr_cmnd_list[i] = NULL;
	}

	if (ioa_cfg->ipr_cmd_pool)
		pci_pool_destroy(ioa_cfg->ipr_cmd_pool);

	ioa_cfg->ipr_cmd_pool = NULL;
}

/**
 * ipr_free_mem - Frees memory allocated for an adapter
 * @ioa_cfg: ioa cfg struct
 *
 * Return value:
 *	nothing
 **/
static void ipr_free_mem(struct ipr_ioa_cfg *ioa_cfg)
{
	int i;

	kfree(ioa_cfg->res_entries);
	pci_free_consistent(ioa_cfg->pdev, sizeof(struct ipr_misc_cbs),
			    ioa_cfg->vpd_cbs, ioa_cfg->vpd_cbs_dma);
	ipr_free_cmd_blks(ioa_cfg);
	pci_free_consistent(ioa_cfg->pdev, sizeof(u32) * IPR_NUM_CMD_BLKS,
			    ioa_cfg->host_rrq, ioa_cfg->host_rrq_dma);
	pci_free_consistent(ioa_cfg->pdev, ioa_cfg->cfg_table_size,
			    ioa_cfg->u.cfg_table,
			    ioa_cfg->cfg_table_dma);

	for (i = 0; i < IPR_NUM_HCAMS; i++) {
		pci_free_consistent(ioa_cfg->pdev,
				    sizeof(struct ipr_hostrcb),
				    ioa_cfg->hostrcb[i],
				    ioa_cfg->hostrcb_dma[i]);
	}

	ipr_free_dump(ioa_cfg);
	kfree(ioa_cfg->trace);
}

/**
 * ipr_free_all_resources - Free all allocated resources for an adapter.
 * @ioa_cfg: ioa config struct
 *
 * This function frees all allocated resources for the
 * specified adapter.
 *
 * Return value:
 *	none
 **/
static void ipr_free_all_resources(struct ipr_ioa_cfg *ioa_cfg)
{
	struct pci_dev *pdev = ioa_cfg->pdev;

	ENTER;
	free_irq(pdev->irq, ioa_cfg);
	pci_disable_msi(pdev);
	iounmap(ioa_cfg->hdw_dma_regs);
	pci_release_regions(pdev);
	ipr_free_mem(ioa_cfg);
	scsi_host_put(ioa_cfg->host);
	pci_disable_device(pdev);
	LEAVE;
}

/**
 * ipr_alloc_cmd_blks - Allocate command blocks for an adapter
 * @ioa_cfg: ioa config struct
 *
 * Return value:
 *	0 on success / -ENOMEM on allocation failure
 **/
static int __devinit ipr_alloc_cmd_blks(struct ipr_ioa_cfg *ioa_cfg)
{
	struct ipr_cmnd *ipr_cmd;
	struct ipr_ioarcb *ioarcb;
	dma_addr_t dma_addr;
	int i;

	ioa_cfg->ipr_cmd_pool = pci_pool_create(IPR_NAME, ioa_cfg->pdev,
						sizeof(struct ipr_cmnd), 16, 0);

	if (!ioa_cfg->ipr_cmd_pool)
		return -ENOMEM;

	for (i = 0; i < IPR_NUM_CMD_BLKS; i++) {
		ipr_cmd = pci_pool_alloc(ioa_cfg->ipr_cmd_pool, GFP_KERNEL, &dma_addr);

		if (!ipr_cmd) {
			ipr_free_cmd_blks(ioa_cfg);
			return -ENOMEM;
		}

		memset(ipr_cmd, 0, sizeof(*ipr_cmd));
		ioa_cfg->ipr_cmnd_list[i] = ipr_cmd;
		ioa_cfg->ipr_cmnd_list_dma[i] = dma_addr;

		ioarcb = &ipr_cmd->ioarcb;
		ipr_cmd->dma_addr = dma_addr;
		if (ioa_cfg->sis64)
			ioarcb->a.ioarcb_host_pci_addr64 = cpu_to_be64(dma_addr);
		else
			ioarcb->a.ioarcb_host_pci_addr = cpu_to_be32(dma_addr);

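		/* The command index is shifted left two bits because the
		 * low bits of each host RRQ response word are reserved
		 * for per-entry status, such as the queue toggle bit
		 * (see ipr_init_ioa_mem). */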
		ioarcb->host_response_handle = cpu_to_be32(i << 2);
		if (ioa_cfg->sis64) {
			ioarcb->u.sis64_addr_data.data_ioadl_addr =
				cpu_to_be64(dma_addr + offsetof(struct ipr_cmnd, i.ioadl64));
			ioarcb->u.sis64_addr_data.ioasa_host_pci_addr =
				cpu_to_be64(dma_addr + offsetof(struct ipr_cmnd, ioasa));
		} else {
			ioarcb->write_ioadl_addr =
				cpu_to_be32(dma_addr + offsetof(struct ipr_cmnd, i.ioadl));
			ioarcb->read_ioadl_addr = ioarcb->write_ioadl_addr;
			ioarcb->ioasa_host_pci_addr =
				cpu_to_be32(dma_addr + offsetof(struct ipr_cmnd, ioasa));
		}
		ioarcb->ioasa_len = cpu_to_be16(sizeof(struct ipr_ioasa));
		ipr_cmd->cmd_index = i;
		ipr_cmd->ioa_cfg = ioa_cfg;
		ipr_cmd->sense_buffer_dma = dma_addr +
			offsetof(struct ipr_cmnd, sense_buffer);

		list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
	}

	return 0;
}

/**
 * ipr_alloc_mem - Allocate memory for an adapter
 * @ioa_cfg: ioa config struct
 *
 * Return value:
 *	0 on success / non-zero for error
 **/
static int __devinit ipr_alloc_mem(struct ipr_ioa_cfg *ioa_cfg)
{
	struct pci_dev *pdev = ioa_cfg->pdev;
	int i, rc = -ENOMEM;

	ENTER;
	ioa_cfg->res_entries = kzalloc(sizeof(struct ipr_resource_entry) *
				       ioa_cfg->max_devs_supported, GFP_KERNEL);

	if (!ioa_cfg->res_entries)
		goto out;

	if (ioa_cfg->sis64) {
		ioa_cfg->target_ids = kzalloc(sizeof(unsigned long) *
					      BITS_TO_LONGS(ioa_cfg->max_devs_supported), GFP_KERNEL);
		ioa_cfg->array_ids = kzalloc(sizeof(unsigned long) *
					     BITS_TO_LONGS(ioa_cfg->max_devs_supported), GFP_KERNEL);
		ioa_cfg->vset_ids = kzalloc(sizeof(unsigned long) *
					    BITS_TO_LONGS(ioa_cfg->max_devs_supported), GFP_KERNEL);
	}

	for (i = 0; i < ioa_cfg->max_devs_supported; i++) {
		list_add_tail(&ioa_cfg->res_entries[i].queue, &ioa_cfg->free_res_q);
		ioa_cfg->res_entries[i].ioa_cfg = ioa_cfg;
	}

	ioa_cfg->vpd_cbs = pci_alloc_consistent(ioa_cfg->pdev,
						sizeof(struct ipr_misc_cbs),
						&ioa_cfg->vpd_cbs_dma);

	if (!ioa_cfg->vpd_cbs)
		goto out_free_res_entries;

	if (ipr_alloc_cmd_blks(ioa_cfg))
		goto out_free_vpd_cbs;

	ioa_cfg->host_rrq = pci_alloc_consistent(ioa_cfg->pdev,
						 sizeof(u32) * IPR_NUM_CMD_BLKS,
						 &ioa_cfg->host_rrq_dma);

	if (!ioa_cfg->host_rrq)
		goto out_ipr_free_cmd_blocks;

	ioa_cfg->u.cfg_table = pci_alloc_consistent(ioa_cfg->pdev,
						    ioa_cfg->cfg_table_size,
						    &ioa_cfg->cfg_table_dma);

	if (!ioa_cfg->u.cfg_table)
		goto out_free_host_rrq;

	for (i = 0; i < IPR_NUM_HCAMS; i++) {
		ioa_cfg->hostrcb[i] = pci_alloc_consistent(ioa_cfg->pdev,
							   sizeof(struct ipr_hostrcb),
							   &ioa_cfg->hostrcb_dma[i]);

		if (!ioa_cfg->hostrcb[i])
			goto out_free_hostrcb_dma;

		ioa_cfg->hostrcb[i]->hostrcb_dma =
			ioa_cfg->hostrcb_dma[i] + offsetof(struct ipr_hostrcb, hcam);
		ioa_cfg->hostrcb[i]->ioa_cfg = ioa_cfg;
		list_add_tail(&ioa_cfg->hostrcb[i]->queue, &ioa_cfg->hostrcb_free_q);
	}

	ioa_cfg->trace = kzalloc(sizeof(struct ipr_trace_entry) *
				 IPR_NUM_TRACE_ENTRIES, GFP_KERNEL);

	if (!ioa_cfg->trace)
		goto out_free_hostrcb_dma;

	rc = 0;
out:
	LEAVE;
	return rc;

out_free_hostrcb_dma:
	while (i-- > 0) {
		pci_free_consistent(pdev, sizeof(struct ipr_hostrcb),
				    ioa_cfg->hostrcb[i],
				    ioa_cfg->hostrcb_dma[i]);
	}
	pci_free_consistent(pdev, ioa_cfg->cfg_table_size,
			    ioa_cfg->u.cfg_table,
			    ioa_cfg->cfg_table_dma);
out_free_host_rrq:
	pci_free_consistent(pdev, sizeof(u32) * IPR_NUM_CMD_BLKS,
			    ioa_cfg->host_rrq, ioa_cfg->host_rrq_dma);
out_ipr_free_cmd_blocks:
	ipr_free_cmd_blks(ioa_cfg);
out_free_vpd_cbs:
	pci_free_consistent(pdev, sizeof(struct ipr_misc_cbs),
			    ioa_cfg->vpd_cbs, ioa_cfg->vpd_cbs_dma);
out_free_res_entries:
	kfree(ioa_cfg->res_entries);
	goto out;
}

/**
 * ipr_initialize_bus_attr - Initialize SCSI bus attributes to default values
 * @ioa_cfg: ioa config struct
 *
 * Return value:
 *	none
 **/
static void __devinit ipr_initialize_bus_attr(struct ipr_ioa_cfg *ioa_cfg)
{
	int i;

	for (i = 0; i < IPR_MAX_NUM_BUSES; i++) {
		ioa_cfg->bus_attr[i].bus = i;
		ioa_cfg->bus_attr[i].qas_enabled = 0;
		ioa_cfg->bus_attr[i].bus_width = IPR_DEFAULT_BUS_WIDTH;
		if (ipr_max_speed < ARRAY_SIZE(ipr_max_bus_speeds))
			ioa_cfg->bus_attr[i].max_xfer_rate = ipr_max_bus_speeds[ipr_max_speed];
		else
			ioa_cfg->bus_attr[i].max_xfer_rate = IPR_U160_SCSI_RATE;
	}
}

/**
 * ipr_init_ioa_cfg - Initialize IOA config struct
 * @ioa_cfg: ioa config struct
 * @host: scsi host struct
 * @pdev: PCI dev struct
 *
 * Return value:
 *	none
 **/
static void __devinit ipr_init_ioa_cfg(struct ipr_ioa_cfg *ioa_cfg,
				       struct Scsi_Host *host, struct pci_dev *pdev)
{
	const struct ipr_interrupt_offsets *p;
	struct ipr_interrupts *t;
	void __iomem *base;

	ioa_cfg->host = host;
	ioa_cfg->pdev = pdev;
	ioa_cfg->log_level = ipr_log_level;
	ioa_cfg->doorbell = IPR_DOORBELL;
	sprintf(ioa_cfg->eye_catcher, IPR_EYECATCHER);
	sprintf(ioa_cfg->trace_start, IPR_TRACE_START_LABEL);
	sprintf(ioa_cfg->ipr_free_label, IPR_FREEQ_LABEL);
	sprintf(ioa_cfg->ipr_pending_label, IPR_PENDQ_LABEL);
	sprintf(ioa_cfg->cfg_table_start, IPR_CFG_TBL_START);
	sprintf(ioa_cfg->resource_table_label, IPR_RES_TABLE_LABEL);
	sprintf(ioa_cfg->ipr_hcam_label, IPR_HCAM_LABEL);
	sprintf(ioa_cfg->ipr_cmd_label, IPR_CMD_LABEL);

	INIT_LIST_HEAD(&ioa_cfg->free_q);
	INIT_LIST_HEAD(&ioa_cfg->pending_q);
	INIT_LIST_HEAD(&ioa_cfg->hostrcb_free_q);
	INIT_LIST_HEAD(&ioa_cfg->hostrcb_pending_q);
	INIT_LIST_HEAD(&ioa_cfg->free_res_q);
	INIT_LIST_HEAD(&ioa_cfg->used_res_q);
	INIT_WORK(&ioa_cfg->work_q, ipr_worker_thread);
	init_waitqueue_head(&ioa_cfg->reset_wait_q);
	init_waitqueue_head(&ioa_cfg->msi_wait_q);
	ioa_cfg->sdt_state = INACTIVE;

	ipr_initialize_bus_attr(ioa_cfg);
	ioa_cfg->max_devs_supported = ipr_max_devs;

	if (ioa_cfg->sis64) {
		host->max_id = IPR_MAX_SIS64_TARGETS_PER_BUS;
		host->max_lun = IPR_MAX_SIS64_LUNS_PER_TARGET;
		if (ipr_max_devs > IPR_MAX_SIS64_DEVS)
			ioa_cfg->max_devs_supported = IPR_MAX_SIS64_DEVS;
	} else {
		host->max_id = IPR_MAX_NUM_TARGETS_PER_BUS;
		host->max_lun = IPR_MAX_NUM_LUNS_PER_TARGET;
		if (ipr_max_devs > IPR_MAX_PHYSICAL_DEVS)
			ioa_cfg->max_devs_supported = IPR_MAX_PHYSICAL_DEVS;
	}
	host->max_channel = IPR_MAX_BUS_TO_SCAN;
	host->unique_id = host->host_no;
	host->max_cmd_len = IPR_MAX_CDB_LEN;
	pci_set_drvdata(pdev, ioa_cfg);

	p = &ioa_cfg->chip_cfg->regs;
	t = &ioa_cfg->regs;
	base = ioa_cfg->hdw_dma_regs;

	t->set_interrupt_mask_reg = base + p->set_interrupt_mask_reg;
	t->clr_interrupt_mask_reg = base + p->clr_interrupt_mask_reg;
	t->clr_interrupt_mask_reg32 = base + p->clr_interrupt_mask_reg32;
	t->sense_interrupt_mask_reg = base + p->sense_interrupt_mask_reg;
	t->sense_interrupt_mask_reg32 = base + p->sense_interrupt_mask_reg32;
	t->clr_interrupt_reg = base + p->clr_interrupt_reg;
	t->clr_interrupt_reg32 = base + p->clr_interrupt_reg32;
	t->sense_interrupt_reg = base + p->sense_interrupt_reg;
	t->sense_interrupt_reg32 = base + p->sense_interrupt_reg32;
	t->ioarrin_reg = base + p->ioarrin_reg;
	t->sense_uproc_interrupt_reg = base + p->sense_uproc_interrupt_reg;
	t->sense_uproc_interrupt_reg32 = base + p->sense_uproc_interrupt_reg32;
	t->set_uproc_interrupt_reg = base + p->set_uproc_interrupt_reg;
	t->set_uproc_interrupt_reg32 = base + p->set_uproc_interrupt_reg32;
	t->clr_uproc_interrupt_reg = base + p->clr_uproc_interrupt_reg;
	t->clr_uproc_interrupt_reg32 = base + p->clr_uproc_interrupt_reg32;

	if (ioa_cfg->sis64) {
		t->init_feedback_reg = base + p->init_feedback_reg;
		t->dump_addr_reg = base + p->dump_addr_reg;
		t->dump_data_reg = base + p->dump_data_reg;
	}
}

/**
 * ipr_get_chip_info - Find adapter chip information
 * @dev_id: PCI device id struct
 *
 * Return value:
 *	ptr to chip information on success / NULL on failure
 **/
static const struct ipr_chip_t * __devinit
ipr_get_chip_info(const struct pci_device_id *dev_id)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(ipr_chip); i++)
		if (ipr_chip[i].vendor == dev_id->vendor &&
		    ipr_chip[i].device == dev_id->device)
			return &ipr_chip[i];
	return NULL;
}

/**
 * ipr_test_intr - Handle the interrupt generated in ipr_test_msi().
 * @irq: interrupt number
 * @devp: pointer to ioa config struct
 *
 * Description: Simply set the msi_received flag to 1 indicating that
 * Message Signaled Interrupts are supported.
 *
 * Return value:
 *	IRQ_HANDLED
 **/
static irqreturn_t __devinit ipr_test_intr(int irq, void *devp)
{
	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)devp;
	unsigned long lock_flags = 0;
	irqreturn_t rc = IRQ_HANDLED;

	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);

	ioa_cfg->msi_received = 1;
	wake_up(&ioa_cfg->msi_wait_q);

	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
	return rc;
}

/**
 * ipr_test_msi - Test for Message Signaled Interrupt (MSI) support.
 * @ioa_cfg: ioa config struct
 * @pdev: PCI device struct
 *
 * Description: The return value from pci_enable_msi() cannot always be
 * trusted. This routine sets up and initiates a test interrupt to determine
 * if the interrupt is received via the ipr_test_intr() service routine.
 * If the test fails, the driver will fall back to LSI.
 *
 * Return value:
 *	0 on success / non-zero on failure
 **/
static int __devinit ipr_test_msi(struct ipr_ioa_cfg *ioa_cfg,
				  struct pci_dev *pdev)
{
	int rc;
	volatile u32 int_reg;
	unsigned long lock_flags = 0;

	ENTER;

	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	init_waitqueue_head(&ioa_cfg->msi_wait_q);
	ioa_cfg->msi_received = 0;
	ipr_mask_and_clear_interrupts(ioa_cfg, ~IPR_PCII_IOA_TRANS_TO_OPER);
	writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE, ioa_cfg->regs.clr_interrupt_mask_reg32);
	int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);

	rc = request_irq(pdev->irq, ipr_test_intr, 0, IPR_NAME, ioa_cfg);
	if (rc) {
		dev_err(&pdev->dev, "Can not assign irq %d\n", pdev->irq);
		return rc;
	} else if (ipr_debug)
		dev_info(&pdev->dev, "IRQ assigned: %d\n", pdev->irq);

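	/* Setting the IO debug acknowledge bit raises a test interrupt on
	 * these adapters; if MSI delivery works, it arrives through
	 * ipr_test_intr() and sets msi_received before the wait below
	 * times out. */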
	writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE, ioa_cfg->regs.sense_interrupt_reg32);
	int_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
	wait_event_timeout(ioa_cfg->msi_wait_q, ioa_cfg->msi_received, HZ);
	ipr_mask_and_clear_interrupts(ioa_cfg, ~IPR_PCII_IOA_TRANS_TO_OPER);

	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	if (!ioa_cfg->msi_received) {
		/* MSI test failed */
		dev_info(&pdev->dev, "MSI test failed. Falling back to LSI.\n");
		rc = -EOPNOTSUPP;
	} else if (ipr_debug)
		dev_info(&pdev->dev, "MSI test succeeded.\n");

	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);

	free_irq(pdev->irq, ioa_cfg);

	LEAVE;

	return rc;
}

/**
 * ipr_probe_ioa - Allocates memory and does first stage of initialization
 * @pdev: PCI device struct
 * @dev_id: PCI device id struct
 *
 * Return value:
 *	0 on success / non-zero on failure
 **/
static int __devinit ipr_probe_ioa(struct pci_dev *pdev,
				   const struct pci_device_id *dev_id)
{
	struct ipr_ioa_cfg *ioa_cfg;
	struct Scsi_Host *host;
	unsigned long ipr_regs_pci;
	void __iomem *ipr_regs;
	int rc = PCIBIOS_SUCCESSFUL;
	volatile u32 mask, uproc, interrupts;

	ENTER;

	if ((rc = pci_enable_device(pdev))) {
		dev_err(&pdev->dev, "Cannot enable adapter\n");
		goto out;
	}

	dev_info(&pdev->dev, "Found IOA with IRQ: %d\n", pdev->irq);

	host = scsi_host_alloc(&driver_template, sizeof(*ioa_cfg));

	if (!host) {
		dev_err(&pdev->dev, "call to scsi_host_alloc failed!\n");
		rc = -ENOMEM;
		goto out_disable;
	}

	ioa_cfg = (struct ipr_ioa_cfg *)host->hostdata;
	memset(ioa_cfg, 0, sizeof(struct ipr_ioa_cfg));
	ata_host_init(&ioa_cfg->ata_host, &pdev->dev,
		      sata_port_info.flags, &ipr_sata_ops);

	ioa_cfg->ipr_chip = ipr_get_chip_info(dev_id);

	if (!ioa_cfg->ipr_chip) {
		dev_err(&pdev->dev, "Unknown adapter chipset 0x%04X 0x%04X\n",
			dev_id->vendor, dev_id->device);
		goto out_scsi_host_put;
	}

	/* set SIS 32 or SIS 64 */
	ioa_cfg->sis64 = ioa_cfg->ipr_chip->sis_type == IPR_SIS64 ? 1 : 0;
	ioa_cfg->chip_cfg = ioa_cfg->ipr_chip->cfg;

	if (ipr_transop_timeout)
		ioa_cfg->transop_timeout = ipr_transop_timeout;
	else if (dev_id->driver_data & IPR_USE_LONG_TRANSOP_TIMEOUT)
		ioa_cfg->transop_timeout = IPR_LONG_OPERATIONAL_TIMEOUT;
	else
		ioa_cfg->transop_timeout = IPR_OPERATIONAL_TIMEOUT;

	ioa_cfg->revid = pdev->revision;

	ipr_regs_pci = pci_resource_start(pdev, 0);

	rc = pci_request_regions(pdev, IPR_NAME);
	if (rc < 0) {
		dev_err(&pdev->dev,
			"Couldn't register memory range of registers\n");
		goto out_scsi_host_put;
	}

	ipr_regs = pci_ioremap_bar(pdev, 0);

	if (!ipr_regs) {
		dev_err(&pdev->dev,
			"Couldn't map memory range of registers\n");
		rc = -ENOMEM;
		goto out_release_regions;
	}

	ioa_cfg->hdw_dma_regs = ipr_regs;
	ioa_cfg->hdw_dma_regs_pci = ipr_regs_pci;
	ioa_cfg->ioa_mailbox = ioa_cfg->chip_cfg->mailbox + ipr_regs;

	ipr_init_ioa_cfg(ioa_cfg, host, pdev);

	pci_set_master(pdev);

	if (ioa_cfg->sis64) {
		rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
		if (rc < 0) {
			dev_dbg(&pdev->dev, "Failed to set 64 bit PCI DMA mask\n");
			rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
		}

	} else
		rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));

	if (rc < 0) {
		dev_err(&pdev->dev, "Failed to set PCI DMA mask\n");
		goto cleanup_nomem;
	}

	rc = pci_write_config_byte(pdev, PCI_CACHE_LINE_SIZE,
				   ioa_cfg->chip_cfg->cache_line_size);

	if (rc != PCIBIOS_SUCCESSFUL) {
		dev_err(&pdev->dev, "Write of cache line size failed\n");
		rc = -EIO;
		goto cleanup_nomem;
	}

	/* Enable MSI style interrupts if they are supported. */
	if (ioa_cfg->ipr_chip->intr_type == IPR_USE_MSI && !pci_enable_msi(pdev)) {
		rc = ipr_test_msi(ioa_cfg, pdev);
		if (rc == -EOPNOTSUPP)
			pci_disable_msi(pdev);
		else if (rc)
			goto out_msi_disable;
		else
			dev_info(&pdev->dev, "MSI enabled with IRQ: %d\n", pdev->irq);
	} else if (ipr_debug)
		dev_info(&pdev->dev, "Cannot enable MSI.\n");

	/* Save away PCI config space for use following IOA reset */
	rc = pci_save_state(pdev);

	if (rc != PCIBIOS_SUCCESSFUL) {
		dev_err(&pdev->dev, "Failed to save PCI config space\n");
		rc = -EIO;
		goto cleanup_nomem;
	}

	if ((rc = ipr_save_pcix_cmd_reg(ioa_cfg)))
		goto cleanup_nomem;

	if ((rc = ipr_set_pcix_cmd_reg(ioa_cfg)))
		goto cleanup_nomem;

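	/* The config table is sized up front as one header plus one entry
	 * per supported device, using the wider 64-bit layouts on SIS-64
	 * adapters. */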
	if (ioa_cfg->sis64)
		ioa_cfg->cfg_table_size = (sizeof(struct ipr_config_table_hdr64)
					   + ((sizeof(struct ipr_config_table_entry64)
					       * ioa_cfg->max_devs_supported)));
	else
		ioa_cfg->cfg_table_size = (sizeof(struct ipr_config_table_hdr)
					   + ((sizeof(struct ipr_config_table_entry)
					       * ioa_cfg->max_devs_supported)));

	rc = ipr_alloc_mem(ioa_cfg);
	if (rc < 0) {
		dev_err(&pdev->dev,
			"Couldn't allocate enough memory for device driver!\n");
		goto cleanup_nomem;
	}

	/*
	 * If HRRQ updated interrupt is not masked, or reset alert is set,
	 * the card is in an unknown state and needs a hard reset
	 */
	mask = readl(ioa_cfg->regs.sense_interrupt_mask_reg32);
	interrupts = readl(ioa_cfg->regs.sense_interrupt_reg32);
	uproc = readl(ioa_cfg->regs.sense_uproc_interrupt_reg32);
	if ((mask & IPR_PCII_HRRQ_UPDATED) == 0 || (uproc & IPR_UPROCI_RESET_ALERT))
		ioa_cfg->needs_hard_reset = 1;
	if (interrupts & IPR_PCII_ERROR_INTERRUPTS)
		ioa_cfg->needs_hard_reset = 1;
	if (interrupts & IPR_PCII_IOA_UNIT_CHECKED)
		ioa_cfg->ioa_unit_checked = 1;

	ipr_mask_and_clear_interrupts(ioa_cfg, ~IPR_PCII_IOA_TRANS_TO_OPER);
	rc = request_irq(pdev->irq, ipr_isr,
			 ioa_cfg->msi_received ? 0 : IRQF_SHARED,
			 IPR_NAME, ioa_cfg);

	if (rc) {
		dev_err(&pdev->dev, "Couldn't register IRQ %d! rc=%d\n",
			pdev->irq, rc);
		goto cleanup_nolog;
	}

	if ((dev_id->driver_data & IPR_USE_PCI_WARM_RESET) ||
	    (dev_id->device == PCI_DEVICE_ID_IBM_OBSIDIAN_E && !ioa_cfg->revid)) {
		ioa_cfg->needs_warm_reset = 1;
		ioa_cfg->reset = ipr_reset_slot_reset;
	} else
		ioa_cfg->reset = ipr_reset_start_bist;

	spin_lock(&ipr_driver_lock);
	list_add_tail(&ioa_cfg->queue, &ipr_ioa_head);
	spin_unlock(&ipr_driver_lock);

	LEAVE;
out:
	return rc;

cleanup_nolog:
	ipr_free_mem(ioa_cfg);
cleanup_nomem:
	iounmap(ipr_regs);
out_msi_disable:
	pci_disable_msi(pdev);
out_release_regions:
	pci_release_regions(pdev);
out_scsi_host_put:
	scsi_host_put(host);
out_disable:
	pci_disable_device(pdev);
	goto out;
}

/**
 * ipr_scan_vsets - Scans for VSET devices
 * @ioa_cfg:	ioa config struct
 *
 * Description: Since VSET resources do not follow SAM (LUNs may be sparse,
 * with no LUN 0), we have to scan for these devices ourselves.
 *
 * Return value:
 * 	none
 **/
static void ipr_scan_vsets(struct ipr_ioa_cfg *ioa_cfg)
{
	int target, lun;

	for (target = 0; target < IPR_MAX_NUM_TARGETS_PER_BUS; target++)
		for (lun = 0; lun < IPR_MAX_NUM_VSET_LUNS_PER_TARGET; lun++)
			scsi_add_device(ioa_cfg->host, IPR_VSET_BUS, target, lun);
}
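
/*
 * (A scsi_add_device() call for a target/lun with no device behind it
 * simply fails the probe and is otherwise harmless, so brute-force
 * scanning every possible VSET target/lun combination is safe, just
 * slower than a REPORT LUNS based scan would be.)
 */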

/**
 * ipr_initiate_ioa_bringdown - Bring down an adapter
 * @ioa_cfg:		ioa config struct
 * @shutdown_type:	shutdown type
 *
 * Description: This function will initiate bringing down the adapter.
 * This consists of issuing an IOA shutdown to the adapter
 * to flush the cache, and running BIST.
 * If the caller needs to wait on the completion of the reset,
 * the caller must sleep on the reset_wait_q.
 *
 * Return value:
 * 	none
 **/
static void ipr_initiate_ioa_bringdown(struct ipr_ioa_cfg *ioa_cfg,
				       enum ipr_shutdown_type shutdown_type)
{
	ENTER;
	if (ioa_cfg->sdt_state == WAIT_FOR_DUMP)
		ioa_cfg->sdt_state = ABORT_DUMP;
	ioa_cfg->reset_retries = 0;
	ioa_cfg->in_ioa_bringdown = 1;
	ipr_initiate_ioa_reset(ioa_cfg, shutdown_type);
	LEAVE;
}
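
/*
 * Callers that must block until the bringdown completes follow the
 * pattern used by __ipr_remove() and ipr_shutdown() below: initiate the
 * bringdown with the host lock held, drop the lock, then
 *
 *	wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
 */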

/**
 * __ipr_remove - Remove a single adapter
 * @pdev:	pci device struct
 *
 * Adapter hot plug remove entry point.
 *
 * Return value:
 * 	none
 **/
static void __ipr_remove(struct pci_dev *pdev)
{
	unsigned long host_lock_flags = 0;
	struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
	ENTER;

	spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);
	while (ioa_cfg->in_reset_reload) {
		spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);
		wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
		spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);
	}

	ipr_initiate_ioa_bringdown(ioa_cfg, IPR_SHUTDOWN_NORMAL);

	spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);
	wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
	flush_scheduled_work();
	spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);

	spin_lock(&ipr_driver_lock);
	list_del(&ioa_cfg->queue);
	spin_unlock(&ipr_driver_lock);

	if (ioa_cfg->sdt_state == ABORT_DUMP)
		ioa_cfg->sdt_state = WAIT_FOR_DUMP;
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);

	ipr_free_all_resources(ioa_cfg);

	LEAVE;
}
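
/*
 * Note that flush_scheduled_work() above is deliberately called with the
 * host lock dropped: the ipr worker thread takes that same lock itself,
 * so flushing while holding it could deadlock.
 */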

/**
 * ipr_remove - IOA hot plug remove entry point
 * @pdev:	pci device struct
 *
 * Adapter hot plug remove entry point.
 *
 * Return value:
 * 	none
 **/
static void __devexit ipr_remove(struct pci_dev *pdev)
{
	struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);

	ENTER;

	ipr_remove_trace_file(&ioa_cfg->host->shost_dev.kobj,
			      &ipr_trace_attr);
	ipr_remove_dump_file(&ioa_cfg->host->shost_dev.kobj,
			     &ipr_dump_attr);
	scsi_remove_host(ioa_cfg->host);

	__ipr_remove(pdev);

	LEAVE;
}

/**
 * ipr_probe - Adapter hot plug add entry point
 * @pdev:	pci device struct
 * @dev_id:	pci device id entry that matched
 *
 * Return value:
 * 	0 on success / non-zero on failure
 **/
static int __devinit ipr_probe(struct pci_dev *pdev,
			       const struct pci_device_id *dev_id)
{
	struct ipr_ioa_cfg *ioa_cfg;
	int rc;

	rc = ipr_probe_ioa(pdev, dev_id);

	if (rc)
		return rc;

	ioa_cfg = pci_get_drvdata(pdev);
	rc = ipr_probe_ioa_part2(ioa_cfg);

	if (rc) {
		__ipr_remove(pdev);
		return rc;
	}

	rc = scsi_add_host(ioa_cfg->host, &pdev->dev);

	if (rc) {
		__ipr_remove(pdev);
		return rc;
	}

	rc = ipr_create_trace_file(&ioa_cfg->host->shost_dev.kobj,
				   &ipr_trace_attr);

	if (rc) {
		scsi_remove_host(ioa_cfg->host);
		__ipr_remove(pdev);
		return rc;
	}

	rc = ipr_create_dump_file(&ioa_cfg->host->shost_dev.kobj,
				  &ipr_dump_attr);

	if (rc) {
		ipr_remove_trace_file(&ioa_cfg->host->shost_dev.kobj,
				      &ipr_trace_attr);
		scsi_remove_host(ioa_cfg->host);
		__ipr_remove(pdev);
		return rc;
	}

	scsi_scan_host(ioa_cfg->host);
	ipr_scan_vsets(ioa_cfg);
	scsi_add_device(ioa_cfg->host, IPR_IOA_BUS, IPR_IOA_TARGET, IPR_IOA_LUN);
	ioa_cfg->allow_ml_add_del = 1;
	ioa_cfg->host->max_channel = IPR_VSET_BUS;
	schedule_work(&ioa_cfg->work_q);
	return 0;
}
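
/*
 * The error paths above unwind in strict reverse order of setup, ending
 * in __ipr_remove(), which undoes ipr_probe_ioa(); ipr_remove() mirrors
 * the same ordering for the hot-unplug case.
 */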

/**
 * ipr_shutdown - Shutdown handler.
 * @pdev:	pci device struct
 *
 * This function is invoked upon system shutdown/reboot. It will issue
 * an adapter shutdown to the adapter to flush the write cache.
 *
 * Return value:
 * 	none
 **/
static void ipr_shutdown(struct pci_dev *pdev)
{
	struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
	unsigned long lock_flags = 0;

	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	while (ioa_cfg->in_reset_reload) {
		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
		wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
		spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	}

	ipr_initiate_ioa_bringdown(ioa_cfg, IPR_SHUTDOWN_NORMAL);
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
	wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
}

static struct pci_device_id ipr_pci_table[] __devinitdata = {
	{ PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_5702, 0, 0, 0 },
	{ PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_5703, 0, 0, 0 },
	{ PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_573D, 0, 0, 0 },
	{ PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_573E, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_571B, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572E, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_571A, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_575B, 0, 0,
		IPR_USE_LONG_TRANSOP_TIMEOUT },
	{ PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_OBSIDIAN,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572A, 0, 0, 0 },
	{ PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_OBSIDIAN,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572B, 0, 0,
		IPR_USE_LONG_TRANSOP_TIMEOUT },
	{ PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_OBSIDIAN,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_575C, 0, 0,
		IPR_USE_LONG_TRANSOP_TIMEOUT },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572A, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572B, 0, 0,
		IPR_USE_LONG_TRANSOP_TIMEOUT },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_575C, 0, 0,
		IPR_USE_LONG_TRANSOP_TIMEOUT },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_574E, 0, 0,
		IPR_USE_LONG_TRANSOP_TIMEOUT },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_575D, 0, 0,
		IPR_USE_LONG_TRANSOP_TIMEOUT },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B3, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B7, 0, 0,
		IPR_USE_LONG_TRANSOP_TIMEOUT | IPR_USE_PCI_WARM_RESET },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_SNIPE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_2780, 0, 0, 0 },
	{ PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_SCAMP,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_571E, 0, 0, 0 },
	{ PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_SCAMP,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_571F, 0, 0,
		IPR_USE_LONG_TRANSOP_TIMEOUT },
	{ PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_SCAMP,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572F, 0, 0,
		IPR_USE_LONG_TRANSOP_TIMEOUT },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_SCAMP_E,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_574D, 0, 0,
		IPR_USE_LONG_TRANSOP_TIMEOUT },
	{ }
};
MODULE_DEVICE_TABLE(pci, ipr_pci_table);
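
/*
 * Each entry above fills struct pci_device_id positionally:
 * { vendor, device, subvendor, subdevice, class, class_mask, driver_data }.
 * The class/class_mask fields are unused here (0), and driver_data carries
 * the IPR_USE_* quirk flags checked during probe.
 */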

static struct pci_error_handlers ipr_err_handler = {
	.error_detected = ipr_pci_error_detected,
	.slot_reset = ipr_pci_slot_reset,
};

static struct pci_driver ipr_driver = {
	.name = IPR_NAME,
	.id_table = ipr_pci_table,
	.probe = ipr_probe,
	.remove = __devexit_p(ipr_remove),
	.shutdown = ipr_shutdown,
	.err_handler = &ipr_err_handler,
};

/**
 * ipr_halt_done - Shutdown prepare completion
 * @ipr_cmd:	ipr command struct
 *
 * Return value:
 * 	none
 **/
static void ipr_halt_done(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;

	list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
}

/**
 * ipr_halt - Issue shutdown prepare to all adapters
 * @nb:		notifier block
 * @event:	reboot notifier event
 * @buf:	unused
 *
 * Return value:
 * 	NOTIFY_OK on success / NOTIFY_DONE on failure
 **/
static int ipr_halt(struct notifier_block *nb, ulong event, void *buf)
{
	struct ipr_cmnd *ipr_cmd;
	struct ipr_ioa_cfg *ioa_cfg;
	unsigned long flags = 0;

	if (event != SYS_RESTART && event != SYS_HALT && event != SYS_POWER_OFF)
		return NOTIFY_DONE;

	spin_lock(&ipr_driver_lock);

	list_for_each_entry(ioa_cfg, &ipr_ioa_head, queue) {
		spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
		if (!ioa_cfg->allow_cmds) {
			spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
			continue;
		}

		ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
		ipr_cmd->ioarcb.res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
		ipr_cmd->ioarcb.cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
		ipr_cmd->ioarcb.cmd_pkt.cdb[0] = IPR_IOA_SHUTDOWN;
		ipr_cmd->ioarcb.cmd_pkt.cdb[1] = IPR_SHUTDOWN_PREPARE_FOR_NORMAL;

		ipr_do_req(ipr_cmd, ipr_halt_done, ipr_timeout, IPR_DEVICE_RESET_TIMEOUT);
		spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
	}
	spin_unlock(&ipr_driver_lock);

	return NOTIFY_OK;
}
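
/*
 * Registered on the reboot notifier chain (see ipr_init() below), this
 * runs before the PCI core invokes each adapter's ->shutdown() hook, so
 * every IOA gets an early "prepare for shutdown" hint and the final
 * cache flush in ipr_shutdown() has less work to do.
 */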

static struct notifier_block ipr_notifier = {
	.notifier_call = ipr_halt,
};

/**
 * ipr_init - Module entry point
 *
 * Return value:
 * 	0 on success / negative value on failure
 **/
static int __init ipr_init(void)
{
	ipr_info("IBM Power RAID SCSI Device Driver version: %s %s\n",
		 IPR_DRIVER_VERSION, IPR_DRIVER_DATE);

	register_reboot_notifier(&ipr_notifier);
	return pci_register_driver(&ipr_driver);
}

/**
 * ipr_exit - Module unload
 *
 * Module unload entry point.
 *
 * Return value:
 * 	none
 **/
static void __exit ipr_exit(void)
{
	unregister_reboot_notifier(&ipr_notifier);
	pci_unregister_driver(&ipr_driver);
}

module_init(ipr_init);
module_exit(ipr_exit);