]> git.proxmox.com Git - mirror_ubuntu-eoan-kernel.git/blame - drivers/scsi/ipr.c
ipr: Reboot speed improvements
[mirror_ubuntu-eoan-kernel.git] / drivers / scsi / ipr.c
CommitLineData
1da177e4
LT
1/*
2 * ipr.c -- driver for IBM Power Linux RAID adapters
3 *
4 * Written By: Brian King <brking@us.ibm.com>, IBM Corporation
5 *
6 * Copyright (C) 2003, 2004 IBM Corporation
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License as published by
10 * the Free Software Foundation; either version 2 of the License, or
11 * (at your option) any later version.
12 *
13 * This program is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 * GNU General Public License for more details.
17 *
18 * You should have received a copy of the GNU General Public License
19 * along with this program; if not, write to the Free Software
20 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
21 *
22 */
23
24/*
25 * Notes:
26 *
27 * This driver is used to control the following SCSI adapters:
28 *
29 * IBM iSeries: 5702, 5703, 2780, 5709, 570A, 570B
30 *
31 * IBM pSeries: PCI-X Dual Channel Ultra 320 SCSI RAID Adapter
32 * PCI-X Dual Channel Ultra 320 SCSI Adapter
33 * PCI-X Dual Channel Ultra 320 SCSI RAID Enablement Card
34 * Embedded SCSI adapter on p615 and p655 systems
35 *
36 * Supported Hardware Features:
37 * - Ultra 320 SCSI controller
38 * - PCI-X host interface
39 * - Embedded PowerPC RISC Processor and Hardware XOR DMA Engine
40 * - Non-Volatile Write Cache
41 * - Supports attachment of non-RAID disks, tape, and optical devices
42 * - RAID Levels 0, 5, 10
43 * - Hot spare
44 * - Background Parity Checking
45 * - Background Data Scrubbing
46 * - Ability to increase the capacity of an existing RAID 5 disk array
47 * by adding disks
48 *
49 * Driver Features:
50 * - Tagged command queuing
51 * - Adapter microcode download
52 * - PCI hot plug
53 * - SCSI device hot plug
54 *
55 */
56
1da177e4
LT
57#include <linux/fs.h>
58#include <linux/init.h>
59#include <linux/types.h>
60#include <linux/errno.h>
61#include <linux/kernel.h>
5a0e3ad6 62#include <linux/slab.h>
4d4dd706 63#include <linux/vmalloc.h>
1da177e4
LT
64#include <linux/ioport.h>
65#include <linux/delay.h>
66#include <linux/pci.h>
67#include <linux/wait.h>
68#include <linux/spinlock.h>
69#include <linux/sched.h>
70#include <linux/interrupt.h>
71#include <linux/blkdev.h>
72#include <linux/firmware.h>
73#include <linux/module.h>
74#include <linux/moduleparam.h>
35a39691 75#include <linux/libata.h>
0ce3a7e5 76#include <linux/hdreg.h>
f72919ec 77#include <linux/reboot.h>
3e7ebdfa 78#include <linux/stringify.h>
1da177e4
LT
79#include <asm/io.h>
80#include <asm/irq.h>
81#include <asm/processor.h>
82#include <scsi/scsi.h>
83#include <scsi/scsi_host.h>
84#include <scsi/scsi_tcq.h>
85#include <scsi/scsi_eh.h>
86#include <scsi/scsi_cmnd.h>
1da177e4
LT
87#include "ipr.h"
88
89/*
90 * Global Data
91 */
b7d68ca3 92static LIST_HEAD(ipr_ioa_head);
1da177e4
LT
93static unsigned int ipr_log_level = IPR_DEFAULT_LOG_LEVEL;
94static unsigned int ipr_max_speed = 1;
95static int ipr_testmode = 0;
96static unsigned int ipr_fastfail = 0;
5469cb5b 97static unsigned int ipr_transop_timeout = 0;
d3c74871 98static unsigned int ipr_debug = 0;
3e7ebdfa 99static unsigned int ipr_max_devs = IPR_DEFAULT_SIS64_DEVS;
ac09c349 100static unsigned int ipr_dual_ioa_raid = 1;
05a6538a 101static unsigned int ipr_number_of_msix = 2;
4fdd7c7a 102static unsigned int ipr_fast_reboot;
1da177e4
LT
103static DEFINE_SPINLOCK(ipr_driver_lock);
104
105/* This table describes the differences between DMA controller chips */
106static const struct ipr_chip_cfg_t ipr_chip_cfg[] = {
60e7486b 107 { /* Gemstone, Citrine, Obsidian, and Obsidian-E */
1da177e4 108 .mailbox = 0x0042C,
89aad428 109 .max_cmds = 100,
1da177e4 110 .cache_line_size = 0x20,
7dd21308 111 .clear_isr = 1,
b53d124a 112 .iopoll_weight = 0,
1da177e4
LT
113 {
114 .set_interrupt_mask_reg = 0x0022C,
115 .clr_interrupt_mask_reg = 0x00230,
214777ba 116 .clr_interrupt_mask_reg32 = 0x00230,
1da177e4 117 .sense_interrupt_mask_reg = 0x0022C,
214777ba 118 .sense_interrupt_mask_reg32 = 0x0022C,
1da177e4 119 .clr_interrupt_reg = 0x00228,
214777ba 120 .clr_interrupt_reg32 = 0x00228,
1da177e4 121 .sense_interrupt_reg = 0x00224,
214777ba 122 .sense_interrupt_reg32 = 0x00224,
1da177e4
LT
123 .ioarrin_reg = 0x00404,
124 .sense_uproc_interrupt_reg = 0x00214,
214777ba 125 .sense_uproc_interrupt_reg32 = 0x00214,
1da177e4 126 .set_uproc_interrupt_reg = 0x00214,
214777ba
WB
127 .set_uproc_interrupt_reg32 = 0x00214,
128 .clr_uproc_interrupt_reg = 0x00218,
129 .clr_uproc_interrupt_reg32 = 0x00218
1da177e4
LT
130 }
131 },
132 { /* Snipe and Scamp */
133 .mailbox = 0x0052C,
89aad428 134 .max_cmds = 100,
1da177e4 135 .cache_line_size = 0x20,
7dd21308 136 .clear_isr = 1,
b53d124a 137 .iopoll_weight = 0,
1da177e4
LT
138 {
139 .set_interrupt_mask_reg = 0x00288,
140 .clr_interrupt_mask_reg = 0x0028C,
214777ba 141 .clr_interrupt_mask_reg32 = 0x0028C,
1da177e4 142 .sense_interrupt_mask_reg = 0x00288,
214777ba 143 .sense_interrupt_mask_reg32 = 0x00288,
1da177e4 144 .clr_interrupt_reg = 0x00284,
214777ba 145 .clr_interrupt_reg32 = 0x00284,
1da177e4 146 .sense_interrupt_reg = 0x00280,
214777ba 147 .sense_interrupt_reg32 = 0x00280,
1da177e4
LT
148 .ioarrin_reg = 0x00504,
149 .sense_uproc_interrupt_reg = 0x00290,
214777ba 150 .sense_uproc_interrupt_reg32 = 0x00290,
1da177e4 151 .set_uproc_interrupt_reg = 0x00290,
214777ba
WB
152 .set_uproc_interrupt_reg32 = 0x00290,
153 .clr_uproc_interrupt_reg = 0x00294,
154 .clr_uproc_interrupt_reg32 = 0x00294
1da177e4
LT
155 }
156 },
a74c1639 157 { /* CRoC */
110def85 158 .mailbox = 0x00044,
89aad428 159 .max_cmds = 1000,
a74c1639 160 .cache_line_size = 0x20,
7dd21308 161 .clear_isr = 0,
b53d124a 162 .iopoll_weight = 64,
a74c1639
WB
163 {
164 .set_interrupt_mask_reg = 0x00010,
165 .clr_interrupt_mask_reg = 0x00018,
214777ba 166 .clr_interrupt_mask_reg32 = 0x0001C,
a74c1639 167 .sense_interrupt_mask_reg = 0x00010,
214777ba 168 .sense_interrupt_mask_reg32 = 0x00014,
a74c1639 169 .clr_interrupt_reg = 0x00008,
214777ba 170 .clr_interrupt_reg32 = 0x0000C,
a74c1639 171 .sense_interrupt_reg = 0x00000,
214777ba 172 .sense_interrupt_reg32 = 0x00004,
a74c1639
WB
173 .ioarrin_reg = 0x00070,
174 .sense_uproc_interrupt_reg = 0x00020,
214777ba 175 .sense_uproc_interrupt_reg32 = 0x00024,
a74c1639 176 .set_uproc_interrupt_reg = 0x00020,
214777ba 177 .set_uproc_interrupt_reg32 = 0x00024,
dcbad00e 178 .clr_uproc_interrupt_reg = 0x00028,
214777ba
WB
179 .clr_uproc_interrupt_reg32 = 0x0002C,
180 .init_feedback_reg = 0x0005C,
dcbad00e 181 .dump_addr_reg = 0x00064,
8701f185
WB
182 .dump_data_reg = 0x00068,
183 .endian_swap_reg = 0x00084
a74c1639
WB
184 }
185 },
1da177e4
LT
186};
187
188static const struct ipr_chip_t ipr_chip[] = {
cb237ef7
WB
189 { PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE, IPR_USE_LSI, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[0] },
190 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE, IPR_USE_LSI, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[0] },
191 { PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_OBSIDIAN, IPR_USE_LSI, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[0] },
192 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN, IPR_USE_LSI, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[0] },
193 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E, IPR_USE_MSI, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[0] },
194 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_SNIPE, IPR_USE_LSI, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[1] },
195 { PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_SCAMP, IPR_USE_LSI, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[1] },
196 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2, IPR_USE_MSI, IPR_SIS64, IPR_MMIO, &ipr_chip_cfg[2] },
cd9b3d04 197 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE, IPR_USE_MSI, IPR_SIS64, IPR_MMIO, &ipr_chip_cfg[2] }
1da177e4
LT
198};
199
203fa3fe 200static int ipr_max_bus_speeds[] = {
1da177e4
LT
201 IPR_80MBs_SCSI_RATE, IPR_U160_SCSI_RATE, IPR_U320_SCSI_RATE
202};
203
204MODULE_AUTHOR("Brian King <brking@us.ibm.com>");
205MODULE_DESCRIPTION("IBM Power RAID SCSI Adapter Driver");
206module_param_named(max_speed, ipr_max_speed, uint, 0);
207MODULE_PARM_DESC(max_speed, "Maximum bus speed (0-2). Default: 1=U160. Speeds: 0=80 MB/s, 1=U160, 2=U320");
208module_param_named(log_level, ipr_log_level, uint, 0);
209MODULE_PARM_DESC(log_level, "Set to 0 - 4 for increasing verbosity of device driver");
210module_param_named(testmode, ipr_testmode, int, 0);
211MODULE_PARM_DESC(testmode, "DANGEROUS!!! Allows unsupported configurations");
2cf22be0 212module_param_named(fastfail, ipr_fastfail, int, S_IRUGO | S_IWUSR);
1da177e4
LT
213MODULE_PARM_DESC(fastfail, "Reduce timeouts and retries");
214module_param_named(transop_timeout, ipr_transop_timeout, int, 0);
215MODULE_PARM_DESC(transop_timeout, "Time in seconds to wait for adapter to come operational (default: 300)");
2cf22be0 216module_param_named(debug, ipr_debug, int, S_IRUGO | S_IWUSR);
d3c74871 217MODULE_PARM_DESC(debug, "Enable device driver debugging logging. Set to 1 to enable. (default: 0)");
ac09c349
BK
218module_param_named(dual_ioa_raid, ipr_dual_ioa_raid, int, 0);
219MODULE_PARM_DESC(dual_ioa_raid, "Enable dual adapter RAID support. Set to 1 to enable. (default: 1)");
3e7ebdfa
WB
220module_param_named(max_devs, ipr_max_devs, int, 0);
221MODULE_PARM_DESC(max_devs, "Specify the maximum number of physical devices. "
222 "[Default=" __stringify(IPR_DEFAULT_SIS64_DEVS) "]");
05a6538a 223module_param_named(number_of_msix, ipr_number_of_msix, int, 0);
6634ff7c 224MODULE_PARM_DESC(number_of_msix, "Specify the number of MSIX interrupts to use on capable adapters (1 - 16). (default:2)");
4fdd7c7a
BK
225module_param_named(fast_reboot, ipr_fast_reboot, int, S_IRUGO | S_IWUSR);
226MODULE_PARM_DESC(fast_reboot, "Skip adapter shutdown during reboot. Set to 1 to enable. (default: 0)");
1da177e4
LT
227MODULE_LICENSE("GPL");
228MODULE_VERSION(IPR_DRIVER_VERSION);
229
1da177e4
LT
230/* A constant array of IOASCs/URCs/Error Messages */
231static const
232struct ipr_error_table_t ipr_error_table[] = {
933916f3 233 {0x00000000, 1, IPR_DEFAULT_LOG_LEVEL,
1da177e4
LT
234 "8155: An unknown error was received"},
235 {0x00330000, 0, 0,
236 "Soft underlength error"},
237 {0x005A0000, 0, 0,
238 "Command to be cancelled not found"},
239 {0x00808000, 0, 0,
240 "Qualified success"},
933916f3 241 {0x01080000, 1, IPR_DEFAULT_LOG_LEVEL,
1da177e4 242 "FFFE: Soft device bus error recovered by the IOA"},
933916f3 243 {0x01088100, 0, IPR_DEFAULT_LOG_LEVEL,
896bbd21 244 "4101: Soft device bus fabric error"},
5aa3a333
WB
245 {0x01100100, 0, IPR_DEFAULT_LOG_LEVEL,
246 "FFFC: Logical block guard error recovered by the device"},
247 {0x01100300, 0, IPR_DEFAULT_LOG_LEVEL,
248 "FFFC: Logical block reference tag error recovered by the device"},
249 {0x01108300, 0, IPR_DEFAULT_LOG_LEVEL,
250 "4171: Recovered scatter list tag / sequence number error"},
251 {0x01109000, 0, IPR_DEFAULT_LOG_LEVEL,
252 "FF3D: Recovered logical block CRC error on IOA to Host transfer"},
253 {0x01109200, 0, IPR_DEFAULT_LOG_LEVEL,
254 "4171: Recovered logical block sequence number error on IOA to Host transfer"},
255 {0x0110A000, 0, IPR_DEFAULT_LOG_LEVEL,
256 "FFFD: Recovered logical block reference tag error detected by the IOA"},
257 {0x0110A100, 0, IPR_DEFAULT_LOG_LEVEL,
258 "FFFD: Logical block guard error recovered by the IOA"},
933916f3 259 {0x01170600, 0, IPR_DEFAULT_LOG_LEVEL,
1da177e4 260 "FFF9: Device sector reassign successful"},
933916f3 261 {0x01170900, 0, IPR_DEFAULT_LOG_LEVEL,
1da177e4 262 "FFF7: Media error recovered by device rewrite procedures"},
933916f3 263 {0x01180200, 0, IPR_DEFAULT_LOG_LEVEL,
1da177e4 264 "7001: IOA sector reassignment successful"},
933916f3 265 {0x01180500, 0, IPR_DEFAULT_LOG_LEVEL,
1da177e4 266 "FFF9: Soft media error. Sector reassignment recommended"},
933916f3 267 {0x01180600, 0, IPR_DEFAULT_LOG_LEVEL,
1da177e4 268 "FFF7: Media error recovered by IOA rewrite procedures"},
933916f3 269 {0x01418000, 0, IPR_DEFAULT_LOG_LEVEL,
1da177e4 270 "FF3D: Soft PCI bus error recovered by the IOA"},
933916f3 271 {0x01440000, 1, IPR_DEFAULT_LOG_LEVEL,
1da177e4 272 "FFF6: Device hardware error recovered by the IOA"},
933916f3 273 {0x01448100, 0, IPR_DEFAULT_LOG_LEVEL,
1da177e4 274 "FFF6: Device hardware error recovered by the device"},
933916f3 275 {0x01448200, 1, IPR_DEFAULT_LOG_LEVEL,
1da177e4 276 "FF3D: Soft IOA error recovered by the IOA"},
933916f3 277 {0x01448300, 0, IPR_DEFAULT_LOG_LEVEL,
1da177e4 278 "FFFA: Undefined device response recovered by the IOA"},
933916f3 279 {0x014A0000, 1, IPR_DEFAULT_LOG_LEVEL,
1da177e4 280 "FFF6: Device bus error, message or command phase"},
933916f3 281 {0x014A8000, 0, IPR_DEFAULT_LOG_LEVEL,
35a39691 282 "FFFE: Task Management Function failed"},
933916f3 283 {0x015D0000, 0, IPR_DEFAULT_LOG_LEVEL,
1da177e4 284 "FFF6: Failure prediction threshold exceeded"},
933916f3 285 {0x015D9200, 0, IPR_DEFAULT_LOG_LEVEL,
1da177e4 286 "8009: Impending cache battery pack failure"},
ed7bd661 287 {0x02040100, 0, 0,
288 "Logical Unit in process of becoming ready"},
289 {0x02040200, 0, 0,
290 "Initializing command required"},
1da177e4
LT
291 {0x02040400, 0, 0,
292 "34FF: Disk device format in progress"},
ed7bd661 293 {0x02040C00, 0, 0,
294 "Logical unit not accessible, target port in unavailable state"},
65f56475
BK
295 {0x02048000, 0, IPR_DEFAULT_LOG_LEVEL,
296 "9070: IOA requested reset"},
1da177e4
LT
297 {0x023F0000, 0, 0,
298 "Synchronization required"},
ed7bd661 299 {0x02408500, 0, 0,
300 "IOA microcode download required"},
301 {0x02408600, 0, 0,
302 "Device bus connection is prohibited by host"},
1da177e4
LT
303 {0x024E0000, 0, 0,
304 "No ready, IOA shutdown"},
305 {0x025A0000, 0, 0,
306 "Not ready, IOA has been shutdown"},
933916f3 307 {0x02670100, 0, IPR_DEFAULT_LOG_LEVEL,
1da177e4
LT
308 "3020: Storage subsystem configuration error"},
309 {0x03110B00, 0, 0,
310 "FFF5: Medium error, data unreadable, recommend reassign"},
311 {0x03110C00, 0, 0,
312 "7000: Medium error, data unreadable, do not reassign"},
933916f3 313 {0x03310000, 0, IPR_DEFAULT_LOG_LEVEL,
1da177e4 314 "FFF3: Disk media format bad"},
933916f3 315 {0x04050000, 0, IPR_DEFAULT_LOG_LEVEL,
1da177e4 316 "3002: Addressed device failed to respond to selection"},
933916f3 317 {0x04080000, 1, IPR_DEFAULT_LOG_LEVEL,
1da177e4 318 "3100: Device bus error"},
933916f3 319 {0x04080100, 0, IPR_DEFAULT_LOG_LEVEL,
1da177e4
LT
320 "3109: IOA timed out a device command"},
321 {0x04088000, 0, 0,
322 "3120: SCSI bus is not operational"},
933916f3 323 {0x04088100, 0, IPR_DEFAULT_LOG_LEVEL,
896bbd21 324 "4100: Hard device bus fabric error"},
5aa3a333
WB
325 {0x04100100, 0, IPR_DEFAULT_LOG_LEVEL,
326 "310C: Logical block guard error detected by the device"},
327 {0x04100300, 0, IPR_DEFAULT_LOG_LEVEL,
328 "310C: Logical block reference tag error detected by the device"},
329 {0x04108300, 1, IPR_DEFAULT_LOG_LEVEL,
330 "4170: Scatter list tag / sequence number error"},
331 {0x04109000, 1, IPR_DEFAULT_LOG_LEVEL,
332 "8150: Logical block CRC error on IOA to Host transfer"},
333 {0x04109200, 1, IPR_DEFAULT_LOG_LEVEL,
334 "4170: Logical block sequence number error on IOA to Host transfer"},
335 {0x0410A000, 0, IPR_DEFAULT_LOG_LEVEL,
336 "310D: Logical block reference tag error detected by the IOA"},
337 {0x0410A100, 0, IPR_DEFAULT_LOG_LEVEL,
338 "310D: Logical block guard error detected by the IOA"},
933916f3 339 {0x04118000, 0, IPR_DEFAULT_LOG_LEVEL,
1da177e4 340 "9000: IOA reserved area data check"},
933916f3 341 {0x04118100, 0, IPR_DEFAULT_LOG_LEVEL,
1da177e4 342 "9001: IOA reserved area invalid data pattern"},
933916f3 343 {0x04118200, 0, IPR_DEFAULT_LOG_LEVEL,
1da177e4 344 "9002: IOA reserved area LRC error"},
5aa3a333
WB
345 {0x04118300, 1, IPR_DEFAULT_LOG_LEVEL,
346 "Hardware Error, IOA metadata access error"},
933916f3 347 {0x04320000, 0, IPR_DEFAULT_LOG_LEVEL,
1da177e4 348 "102E: Out of alternate sectors for disk storage"},
933916f3 349 {0x04330000, 1, IPR_DEFAULT_LOG_LEVEL,
1da177e4 350 "FFF4: Data transfer underlength error"},
933916f3 351 {0x04338000, 1, IPR_DEFAULT_LOG_LEVEL,
1da177e4 352 "FFF4: Data transfer overlength error"},
933916f3 353 {0x043E0100, 0, IPR_DEFAULT_LOG_LEVEL,
1da177e4 354 "3400: Logical unit failure"},
933916f3 355 {0x04408500, 0, IPR_DEFAULT_LOG_LEVEL,
1da177e4 356 "FFF4: Device microcode is corrupt"},
933916f3 357 {0x04418000, 1, IPR_DEFAULT_LOG_LEVEL,
1da177e4
LT
358 "8150: PCI bus error"},
359 {0x04430000, 1, 0,
360 "Unsupported device bus message received"},
933916f3 361 {0x04440000, 1, IPR_DEFAULT_LOG_LEVEL,
1da177e4 362 "FFF4: Disk device problem"},
933916f3 363 {0x04448200, 1, IPR_DEFAULT_LOG_LEVEL,
1da177e4 364 "8150: Permanent IOA failure"},
933916f3 365 {0x04448300, 0, IPR_DEFAULT_LOG_LEVEL,
1da177e4 366 "3010: Disk device returned wrong response to IOA"},
933916f3 367 {0x04448400, 0, IPR_DEFAULT_LOG_LEVEL,
1da177e4
LT
368 "8151: IOA microcode error"},
369 {0x04448500, 0, 0,
370 "Device bus status error"},
933916f3 371 {0x04448600, 0, IPR_DEFAULT_LOG_LEVEL,
1da177e4 372 "8157: IOA error requiring IOA reset to recover"},
35a39691
BK
373 {0x04448700, 0, 0,
374 "ATA device status error"},
1da177e4
LT
375 {0x04490000, 0, 0,
376 "Message reject received from the device"},
933916f3 377 {0x04449200, 0, IPR_DEFAULT_LOG_LEVEL,
1da177e4 378 "8008: A permanent cache battery pack failure occurred"},
933916f3 379 {0x0444A000, 0, IPR_DEFAULT_LOG_LEVEL,
1da177e4 380 "9090: Disk unit has been modified after the last known status"},
933916f3 381 {0x0444A200, 0, IPR_DEFAULT_LOG_LEVEL,
1da177e4 382 "9081: IOA detected device error"},
933916f3 383 {0x0444A300, 0, IPR_DEFAULT_LOG_LEVEL,
1da177e4 384 "9082: IOA detected device error"},
933916f3 385 {0x044A0000, 1, IPR_DEFAULT_LOG_LEVEL,
1da177e4 386 "3110: Device bus error, message or command phase"},
933916f3 387 {0x044A8000, 1, IPR_DEFAULT_LOG_LEVEL,
35a39691 388 "3110: SAS Command / Task Management Function failed"},
933916f3 389 {0x04670400, 0, IPR_DEFAULT_LOG_LEVEL,
1da177e4 390 "9091: Incorrect hardware configuration change has been detected"},
933916f3 391 {0x04678000, 0, IPR_DEFAULT_LOG_LEVEL,
b0df54bb 392 "9073: Invalid multi-adapter configuration"},
933916f3 393 {0x04678100, 0, IPR_DEFAULT_LOG_LEVEL,
896bbd21 394 "4010: Incorrect connection between cascaded expanders"},
933916f3 395 {0x04678200, 0, IPR_DEFAULT_LOG_LEVEL,
896bbd21 396 "4020: Connections exceed IOA design limits"},
933916f3 397 {0x04678300, 0, IPR_DEFAULT_LOG_LEVEL,
896bbd21 398 "4030: Incorrect multipath connection"},
933916f3 399 {0x04679000, 0, IPR_DEFAULT_LOG_LEVEL,
896bbd21 400 "4110: Unsupported enclosure function"},
ed7bd661 401 {0x04679800, 0, IPR_DEFAULT_LOG_LEVEL,
402 "4120: SAS cable VPD cannot be read"},
933916f3 403 {0x046E0000, 0, IPR_DEFAULT_LOG_LEVEL,
1da177e4
LT
404 "FFF4: Command to logical unit failed"},
405 {0x05240000, 1, 0,
406 "Illegal request, invalid request type or request packet"},
407 {0x05250000, 0, 0,
408 "Illegal request, invalid resource handle"},
b0df54bb
BK
409 {0x05258000, 0, 0,
410 "Illegal request, commands not allowed to this device"},
411 {0x05258100, 0, 0,
412 "Illegal request, command not allowed to a secondary adapter"},
5aa3a333
WB
413 {0x05258200, 0, 0,
414 "Illegal request, command not allowed to a non-optimized resource"},
1da177e4
LT
415 {0x05260000, 0, 0,
416 "Illegal request, invalid field in parameter list"},
417 {0x05260100, 0, 0,
418 "Illegal request, parameter not supported"},
419 {0x05260200, 0, 0,
420 "Illegal request, parameter value invalid"},
421 {0x052C0000, 0, 0,
422 "Illegal request, command sequence error"},
b0df54bb
BK
423 {0x052C8000, 1, 0,
424 "Illegal request, dual adapter support not enabled"},
ed7bd661 425 {0x052C8100, 1, 0,
426 "Illegal request, another cable connector was physically disabled"},
427 {0x054E8000, 1, 0,
428 "Illegal request, inconsistent group id/group count"},
933916f3 429 {0x06040500, 0, IPR_DEFAULT_LOG_LEVEL,
1da177e4 430 "9031: Array protection temporarily suspended, protection resuming"},
933916f3 431 {0x06040600, 0, IPR_DEFAULT_LOG_LEVEL,
1da177e4 432 "9040: Array protection temporarily suspended, protection resuming"},
ed7bd661 433 {0x060B0100, 0, IPR_DEFAULT_LOG_LEVEL,
434 "4080: IOA exceeded maximum operating temperature"},
435 {0x060B8000, 0, IPR_DEFAULT_LOG_LEVEL,
436 "4085: Service required"},
933916f3 437 {0x06288000, 0, IPR_DEFAULT_LOG_LEVEL,
896bbd21 438 "3140: Device bus not ready to ready transition"},
933916f3 439 {0x06290000, 0, IPR_DEFAULT_LOG_LEVEL,
1da177e4
LT
440 "FFFB: SCSI bus was reset"},
441 {0x06290500, 0, 0,
442 "FFFE: SCSI bus transition to single ended"},
443 {0x06290600, 0, 0,
444 "FFFE: SCSI bus transition to LVD"},
933916f3 445 {0x06298000, 0, IPR_DEFAULT_LOG_LEVEL,
1da177e4 446 "FFFB: SCSI bus was reset by another initiator"},
933916f3 447 {0x063F0300, 0, IPR_DEFAULT_LOG_LEVEL,
1da177e4 448 "3029: A device replacement has occurred"},
ed7bd661 449 {0x063F8300, 0, IPR_DEFAULT_LOG_LEVEL,
450 "4102: Device bus fabric performance degradation"},
933916f3 451 {0x064C8000, 0, IPR_DEFAULT_LOG_LEVEL,
1da177e4 452 "9051: IOA cache data exists for a missing or failed device"},
933916f3 453 {0x064C8100, 0, IPR_DEFAULT_LOG_LEVEL,
b0df54bb 454 "9055: Auxiliary cache IOA contains cache data needed by the primary IOA"},
933916f3 455 {0x06670100, 0, IPR_DEFAULT_LOG_LEVEL,
1da177e4 456 "9025: Disk unit is not supported at its physical location"},
933916f3 457 {0x06670600, 0, IPR_DEFAULT_LOG_LEVEL,
1da177e4 458 "3020: IOA detected a SCSI bus configuration error"},
933916f3 459 {0x06678000, 0, IPR_DEFAULT_LOG_LEVEL,
1da177e4 460 "3150: SCSI bus configuration error"},
933916f3 461 {0x06678100, 0, IPR_DEFAULT_LOG_LEVEL,
b0df54bb 462 "9074: Asymmetric advanced function disk configuration"},
933916f3 463 {0x06678300, 0, IPR_DEFAULT_LOG_LEVEL,
896bbd21 464 "4040: Incomplete multipath connection between IOA and enclosure"},
933916f3 465 {0x06678400, 0, IPR_DEFAULT_LOG_LEVEL,
896bbd21 466 "4041: Incomplete multipath connection between enclosure and device"},
933916f3 467 {0x06678500, 0, IPR_DEFAULT_LOG_LEVEL,
896bbd21 468 "9075: Incomplete multipath connection between IOA and remote IOA"},
933916f3 469 {0x06678600, 0, IPR_DEFAULT_LOG_LEVEL,
896bbd21 470 "9076: Configuration error, missing remote IOA"},
933916f3 471 {0x06679100, 0, IPR_DEFAULT_LOG_LEVEL,
896bbd21 472 "4050: Enclosure does not support a required multipath function"},
ed7bd661 473 {0x06679800, 0, IPR_DEFAULT_LOG_LEVEL,
474 "4121: Configuration error, required cable is missing"},
475 {0x06679900, 0, IPR_DEFAULT_LOG_LEVEL,
476 "4122: Cable is not plugged into the correct location on remote IOA"},
477 {0x06679A00, 0, IPR_DEFAULT_LOG_LEVEL,
478 "4123: Configuration error, invalid cable vital product data"},
479 {0x06679B00, 0, IPR_DEFAULT_LOG_LEVEL,
480 "4124: Configuration error, both cable ends are plugged into the same IOA"},
b75424fc
WB
481 {0x06690000, 0, IPR_DEFAULT_LOG_LEVEL,
482 "4070: Logically bad block written on device"},
933916f3 483 {0x06690200, 0, IPR_DEFAULT_LOG_LEVEL,
1da177e4 484 "9041: Array protection temporarily suspended"},
933916f3 485 {0x06698200, 0, IPR_DEFAULT_LOG_LEVEL,
1da177e4 486 "9042: Corrupt array parity detected on specified device"},
933916f3 487 {0x066B0200, 0, IPR_DEFAULT_LOG_LEVEL,
1da177e4 488 "9030: Array no longer protected due to missing or failed disk unit"},
933916f3 489 {0x066B8000, 0, IPR_DEFAULT_LOG_LEVEL,
b0df54bb 490 "9071: Link operational transition"},
933916f3 491 {0x066B8100, 0, IPR_DEFAULT_LOG_LEVEL,
b0df54bb 492 "9072: Link not operational transition"},
933916f3 493 {0x066B8200, 0, IPR_DEFAULT_LOG_LEVEL,
1da177e4 494 "9032: Array exposed but still protected"},
e435340c
BK
495 {0x066B8300, 0, IPR_DEFAULT_LOG_LEVEL + 1,
496 "70DD: Device forced failed by disrupt device command"},
933916f3 497 {0x066B9100, 0, IPR_DEFAULT_LOG_LEVEL,
896bbd21 498 "4061: Multipath redundancy level got better"},
933916f3 499 {0x066B9200, 0, IPR_DEFAULT_LOG_LEVEL,
896bbd21 500 "4060: Multipath redundancy level got worse"},
1da177e4
LT
501 {0x07270000, 0, 0,
502 "Failure due to other device"},
933916f3 503 {0x07278000, 0, IPR_DEFAULT_LOG_LEVEL,
1da177e4 504 "9008: IOA does not support functions expected by devices"},
933916f3 505 {0x07278100, 0, IPR_DEFAULT_LOG_LEVEL,
1da177e4 506 "9010: Cache data associated with attached devices cannot be found"},
933916f3 507 {0x07278200, 0, IPR_DEFAULT_LOG_LEVEL,
1da177e4 508 "9011: Cache data belongs to devices other than those attached"},
933916f3 509 {0x07278400, 0, IPR_DEFAULT_LOG_LEVEL,
1da177e4 510 "9020: Array missing 2 or more devices with only 1 device present"},
933916f3 511 {0x07278500, 0, IPR_DEFAULT_LOG_LEVEL,
1da177e4 512 "9021: Array missing 2 or more devices with 2 or more devices present"},
933916f3 513 {0x07278600, 0, IPR_DEFAULT_LOG_LEVEL,
1da177e4 514 "9022: Exposed array is missing a required device"},
933916f3 515 {0x07278700, 0, IPR_DEFAULT_LOG_LEVEL,
1da177e4 516 "9023: Array member(s) not at required physical locations"},
933916f3 517 {0x07278800, 0, IPR_DEFAULT_LOG_LEVEL,
1da177e4 518 "9024: Array not functional due to present hardware configuration"},
933916f3 519 {0x07278900, 0, IPR_DEFAULT_LOG_LEVEL,
1da177e4 520 "9026: Array not functional due to present hardware configuration"},
933916f3 521 {0x07278A00, 0, IPR_DEFAULT_LOG_LEVEL,
1da177e4 522 "9027: Array is missing a device and parity is out of sync"},
933916f3 523 {0x07278B00, 0, IPR_DEFAULT_LOG_LEVEL,
1da177e4 524 "9028: Maximum number of arrays already exist"},
933916f3 525 {0x07278C00, 0, IPR_DEFAULT_LOG_LEVEL,
1da177e4 526 "9050: Required cache data cannot be located for a disk unit"},
933916f3 527 {0x07278D00, 0, IPR_DEFAULT_LOG_LEVEL,
1da177e4 528 "9052: Cache data exists for a device that has been modified"},
933916f3 529 {0x07278F00, 0, IPR_DEFAULT_LOG_LEVEL,
1da177e4 530 "9054: IOA resources not available due to previous problems"},
933916f3 531 {0x07279100, 0, IPR_DEFAULT_LOG_LEVEL,
1da177e4 532 "9092: Disk unit requires initialization before use"},
933916f3 533 {0x07279200, 0, IPR_DEFAULT_LOG_LEVEL,
1da177e4 534 "9029: Incorrect hardware configuration change has been detected"},
933916f3 535 {0x07279600, 0, IPR_DEFAULT_LOG_LEVEL,
1da177e4 536 "9060: One or more disk pairs are missing from an array"},
933916f3 537 {0x07279700, 0, IPR_DEFAULT_LOG_LEVEL,
1da177e4 538 "9061: One or more disks are missing from an array"},
933916f3 539 {0x07279800, 0, IPR_DEFAULT_LOG_LEVEL,
1da177e4 540 "9062: One or more disks are missing from an array"},
933916f3 541 {0x07279900, 0, IPR_DEFAULT_LOG_LEVEL,
1da177e4 542 "9063: Maximum number of functional arrays has been exceeded"},
ed7bd661 543 {0x07279A00, 0, 0,
544 "Data protect, other volume set problem"},
1da177e4
LT
545 {0x0B260000, 0, 0,
546 "Aborted command, invalid descriptor"},
ed7bd661 547 {0x0B3F9000, 0, 0,
548 "Target operating conditions have changed, dual adapter takeover"},
549 {0x0B530200, 0, 0,
550 "Aborted command, medium removal prevented"},
1da177e4 551 {0x0B5A0000, 0, 0,
ed7bd661 552 "Command terminated by host"},
553 {0x0B5B8000, 0, 0,
554 "Aborted command, command terminated by host"}
1da177e4
LT
555};
556
557static const struct ipr_ses_table_entry ipr_ses_table[] = {
558 { "2104-DL1 ", "XXXXXXXXXXXXXXXX", 80 },
559 { "2104-TL1 ", "XXXXXXXXXXXXXXXX", 80 },
560 { "HSBP07M P U2SCSI", "XXXXXXXXXXXXXXXX", 80 }, /* Hidive 7 slot */
561 { "HSBP05M P U2SCSI", "XXXXXXXXXXXXXXXX", 80 }, /* Hidive 5 slot */
562 { "HSBP05M S U2SCSI", "XXXXXXXXXXXXXXXX", 80 }, /* Bowtie */
563 { "HSBP06E ASU2SCSI", "XXXXXXXXXXXXXXXX", 80 }, /* MartinFenning */
564 { "2104-DU3 ", "XXXXXXXXXXXXXXXX", 160 },
565 { "2104-TU3 ", "XXXXXXXXXXXXXXXX", 160 },
566 { "HSBP04C RSU2SCSI", "XXXXXXX*XXXXXXXX", 160 },
567 { "HSBP06E RSU2SCSI", "XXXXXXX*XXXXXXXX", 160 },
568 { "St V1S2 ", "XXXXXXXXXXXXXXXX", 160 },
569 { "HSBPD4M PU3SCSI", "XXXXXXX*XXXXXXXX", 160 },
570 { "VSBPD1H U3SCSI", "XXXXXXX*XXXXXXXX", 160 }
571};
572
573/*
574 * Function Prototypes
575 */
576static int ipr_reset_alert(struct ipr_cmnd *);
577static void ipr_process_ccn(struct ipr_cmnd *);
578static void ipr_process_error(struct ipr_cmnd *);
579static void ipr_reset_ioa_job(struct ipr_cmnd *);
580static void ipr_initiate_ioa_reset(struct ipr_ioa_cfg *,
581 enum ipr_shutdown_type);
582
583#ifdef CONFIG_SCSI_IPR_TRACE
584/**
585 * ipr_trc_hook - Add a trace entry to the driver trace
586 * @ipr_cmd: ipr command struct
587 * @type: trace type
588 * @add_data: additional data
589 *
590 * Return value:
591 * none
592 **/
593static void ipr_trc_hook(struct ipr_cmnd *ipr_cmd,
594 u8 type, u32 add_data)
595{
596 struct ipr_trace_entry *trace_entry;
597 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
598
56d6aa33 599 trace_entry = &ioa_cfg->trace[atomic_add_return
600 (1, &ioa_cfg->trace_index)%IPR_NUM_TRACE_ENTRIES];
1da177e4
LT
601 trace_entry->time = jiffies;
602 trace_entry->op_code = ipr_cmd->ioarcb.cmd_pkt.cdb[0];
603 trace_entry->type = type;
a32c055f
WB
604 if (ipr_cmd->ioa_cfg->sis64)
605 trace_entry->ata_op_code = ipr_cmd->i.ata_ioadl.regs.command;
606 else
607 trace_entry->ata_op_code = ipr_cmd->ioarcb.u.add_data.u.regs.command;
35a39691 608 trace_entry->cmd_index = ipr_cmd->cmd_index & 0xff;
1da177e4
LT
609 trace_entry->res_handle = ipr_cmd->ioarcb.res_handle;
610 trace_entry->u.add_data = add_data;
56d6aa33 611 wmb();
1da177e4
LT
612}
613#else
203fa3fe 614#define ipr_trc_hook(ipr_cmd, type, add_data) do { } while (0)
1da177e4
LT
615#endif
616
172cd6e1
BK
617/**
618 * ipr_lock_and_done - Acquire lock and complete command
619 * @ipr_cmd: ipr command struct
620 *
621 * Return value:
622 * none
623 **/
624static void ipr_lock_and_done(struct ipr_cmnd *ipr_cmd)
625{
626 unsigned long lock_flags;
627 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
628
629 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
630 ipr_cmd->done(ipr_cmd);
631 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
632}
633
1da177e4
LT
634/**
635 * ipr_reinit_ipr_cmnd - Re-initialize an IPR Cmnd block for reuse
636 * @ipr_cmd: ipr command struct
637 *
638 * Return value:
639 * none
640 **/
641static void ipr_reinit_ipr_cmnd(struct ipr_cmnd *ipr_cmd)
642{
643 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
96d21f00
WB
644 struct ipr_ioasa *ioasa = &ipr_cmd->s.ioasa;
645 struct ipr_ioasa64 *ioasa64 = &ipr_cmd->s.ioasa64;
a32c055f 646 dma_addr_t dma_addr = ipr_cmd->dma_addr;
05a6538a 647 int hrrq_id;
1da177e4 648
05a6538a 649 hrrq_id = ioarcb->cmd_pkt.hrrq_id;
1da177e4 650 memset(&ioarcb->cmd_pkt, 0, sizeof(struct ipr_cmd_pkt));
05a6538a 651 ioarcb->cmd_pkt.hrrq_id = hrrq_id;
a32c055f 652 ioarcb->data_transfer_length = 0;
1da177e4 653 ioarcb->read_data_transfer_length = 0;
a32c055f 654 ioarcb->ioadl_len = 0;
1da177e4 655 ioarcb->read_ioadl_len = 0;
a32c055f 656
96d21f00 657 if (ipr_cmd->ioa_cfg->sis64) {
a32c055f
WB
658 ioarcb->u.sis64_addr_data.data_ioadl_addr =
659 cpu_to_be64(dma_addr + offsetof(struct ipr_cmnd, i.ioadl64));
96d21f00
WB
660 ioasa64->u.gata.status = 0;
661 } else {
a32c055f
WB
662 ioarcb->write_ioadl_addr =
663 cpu_to_be32(dma_addr + offsetof(struct ipr_cmnd, i.ioadl));
664 ioarcb->read_ioadl_addr = ioarcb->write_ioadl_addr;
96d21f00 665 ioasa->u.gata.status = 0;
a32c055f
WB
666 }
667
96d21f00
WB
668 ioasa->hdr.ioasc = 0;
669 ioasa->hdr.residual_data_len = 0;
1da177e4 670 ipr_cmd->scsi_cmd = NULL;
35a39691 671 ipr_cmd->qc = NULL;
1da177e4
LT
672 ipr_cmd->sense_buffer[0] = 0;
673 ipr_cmd->dma_use_sg = 0;
674}
675
676/**
677 * ipr_init_ipr_cmnd - Initialize an IPR Cmnd block
678 * @ipr_cmd: ipr command struct
679 *
680 * Return value:
681 * none
682 **/
172cd6e1
BK
683static void ipr_init_ipr_cmnd(struct ipr_cmnd *ipr_cmd,
684 void (*fast_done) (struct ipr_cmnd *))
1da177e4
LT
685{
686 ipr_reinit_ipr_cmnd(ipr_cmd);
687 ipr_cmd->u.scratch = 0;
688 ipr_cmd->sibling = NULL;
6cdb0817 689 ipr_cmd->eh_comp = NULL;
172cd6e1 690 ipr_cmd->fast_done = fast_done;
1da177e4
LT
691 init_timer(&ipr_cmd->timer);
692}
693
694/**
00bfef2c 695 * __ipr_get_free_ipr_cmnd - Get a free IPR Cmnd block
1da177e4
LT
696 * @ioa_cfg: ioa config struct
697 *
698 * Return value:
699 * pointer to ipr command struct
700 **/
701static
05a6538a 702struct ipr_cmnd *__ipr_get_free_ipr_cmnd(struct ipr_hrr_queue *hrrq)
1da177e4 703{
05a6538a 704 struct ipr_cmnd *ipr_cmd = NULL;
705
706 if (likely(!list_empty(&hrrq->hrrq_free_q))) {
707 ipr_cmd = list_entry(hrrq->hrrq_free_q.next,
708 struct ipr_cmnd, queue);
709 list_del(&ipr_cmd->queue);
710 }
1da177e4 711
1da177e4
LT
712
713 return ipr_cmd;
714}
715
00bfef2c
BK
716/**
717 * ipr_get_free_ipr_cmnd - Get a free IPR Cmnd block and initialize it
718 * @ioa_cfg: ioa config struct
719 *
720 * Return value:
721 * pointer to ipr command struct
722 **/
723static
724struct ipr_cmnd *ipr_get_free_ipr_cmnd(struct ipr_ioa_cfg *ioa_cfg)
725{
05a6538a 726 struct ipr_cmnd *ipr_cmd =
727 __ipr_get_free_ipr_cmnd(&ioa_cfg->hrrq[IPR_INIT_HRRQ]);
172cd6e1 728 ipr_init_ipr_cmnd(ipr_cmd, ipr_lock_and_done);
00bfef2c
BK
729 return ipr_cmd;
730}
731
1da177e4
LT
732/**
733 * ipr_mask_and_clear_interrupts - Mask all and clear specified interrupts
734 * @ioa_cfg: ioa config struct
735 * @clr_ints: interrupts to clear
736 *
737 * This function masks all interrupts on the adapter, then clears the
738 * interrupts specified in the mask
739 *
740 * Return value:
741 * none
742 **/
743static void ipr_mask_and_clear_interrupts(struct ipr_ioa_cfg *ioa_cfg,
744 u32 clr_ints)
745{
746 volatile u32 int_reg;
56d6aa33 747 int i;
1da177e4
LT
748
749 /* Stop new interrupts */
56d6aa33 750 for (i = 0; i < ioa_cfg->hrrq_num; i++) {
751 spin_lock(&ioa_cfg->hrrq[i]._lock);
752 ioa_cfg->hrrq[i].allow_interrupts = 0;
753 spin_unlock(&ioa_cfg->hrrq[i]._lock);
754 }
755 wmb();
1da177e4
LT
756
757 /* Set interrupt mask to stop all new interrupts */
214777ba
WB
758 if (ioa_cfg->sis64)
759 writeq(~0, ioa_cfg->regs.set_interrupt_mask_reg);
760 else
761 writel(~0, ioa_cfg->regs.set_interrupt_mask_reg);
1da177e4
LT
762
763 /* Clear any pending interrupts */
214777ba
WB
764 if (ioa_cfg->sis64)
765 writel(~0, ioa_cfg->regs.clr_interrupt_reg);
766 writel(clr_ints, ioa_cfg->regs.clr_interrupt_reg32);
1da177e4
LT
767 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
768}
769
770/**
771 * ipr_save_pcix_cmd_reg - Save PCI-X command register
772 * @ioa_cfg: ioa config struct
773 *
774 * Return value:
775 * 0 on success / -EIO on failure
776 **/
777static int ipr_save_pcix_cmd_reg(struct ipr_ioa_cfg *ioa_cfg)
778{
779 int pcix_cmd_reg = pci_find_capability(ioa_cfg->pdev, PCI_CAP_ID_PCIX);
780
7dce0e1c
BK
781 if (pcix_cmd_reg == 0)
782 return 0;
1da177e4
LT
783
784 if (pci_read_config_word(ioa_cfg->pdev, pcix_cmd_reg + PCI_X_CMD,
785 &ioa_cfg->saved_pcix_cmd_reg) != PCIBIOS_SUCCESSFUL) {
786 dev_err(&ioa_cfg->pdev->dev, "Failed to save PCI-X command register\n");
787 return -EIO;
788 }
789
790 ioa_cfg->saved_pcix_cmd_reg |= PCI_X_CMD_DPERR_E | PCI_X_CMD_ERO;
791 return 0;
792}
793
794/**
795 * ipr_set_pcix_cmd_reg - Setup PCI-X command register
796 * @ioa_cfg: ioa config struct
797 *
798 * Return value:
799 * 0 on success / -EIO on failure
800 **/
801static int ipr_set_pcix_cmd_reg(struct ipr_ioa_cfg *ioa_cfg)
802{
803 int pcix_cmd_reg = pci_find_capability(ioa_cfg->pdev, PCI_CAP_ID_PCIX);
804
805 if (pcix_cmd_reg) {
806 if (pci_write_config_word(ioa_cfg->pdev, pcix_cmd_reg + PCI_X_CMD,
807 ioa_cfg->saved_pcix_cmd_reg) != PCIBIOS_SUCCESSFUL) {
808 dev_err(&ioa_cfg->pdev->dev, "Failed to setup PCI-X command register\n");
809 return -EIO;
810 }
1da177e4
LT
811 }
812
813 return 0;
814}
815
35a39691
BK
816/**
817 * ipr_sata_eh_done - done function for aborted SATA commands
818 * @ipr_cmd: ipr command struct
819 *
820 * This function is invoked for ops generated to SATA
821 * devices which are being aborted.
822 *
823 * Return value:
824 * none
825 **/
826static void ipr_sata_eh_done(struct ipr_cmnd *ipr_cmd)
827{
35a39691
BK
828 struct ata_queued_cmd *qc = ipr_cmd->qc;
829 struct ipr_sata_port *sata_port = qc->ap->private_data;
830
831 qc->err_mask |= AC_ERR_OTHER;
832 sata_port->ioasa.status |= ATA_BUSY;
05a6538a 833 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
35a39691
BK
834 ata_qc_complete(qc);
835}
836
1da177e4
LT
837/**
838 * ipr_scsi_eh_done - mid-layer done function for aborted ops
839 * @ipr_cmd: ipr command struct
840 *
841 * This function is invoked by the interrupt handler for
842 * ops generated by the SCSI mid-layer which are being aborted.
843 *
844 * Return value:
845 * none
846 **/
847static void ipr_scsi_eh_done(struct ipr_cmnd *ipr_cmd)
848{
1da177e4
LT
849 struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
850
851 scsi_cmd->result |= (DID_ERROR << 16);
852
63015bc9 853 scsi_dma_unmap(ipr_cmd->scsi_cmd);
1da177e4 854 scsi_cmd->scsi_done(scsi_cmd);
6cdb0817
BK
855 if (ipr_cmd->eh_comp)
856 complete(ipr_cmd->eh_comp);
05a6538a 857 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
1da177e4
LT
858}
859
860/**
861 * ipr_fail_all_ops - Fails all outstanding ops.
862 * @ioa_cfg: ioa config struct
863 *
864 * This function fails all outstanding ops.
865 *
866 * Return value:
867 * none
868 **/
869static void ipr_fail_all_ops(struct ipr_ioa_cfg *ioa_cfg)
870{
871 struct ipr_cmnd *ipr_cmd, *temp;
05a6538a 872 struct ipr_hrr_queue *hrrq;
1da177e4
LT
873
874 ENTER;
05a6538a 875 for_each_hrrq(hrrq, ioa_cfg) {
56d6aa33 876 spin_lock(&hrrq->_lock);
05a6538a 877 list_for_each_entry_safe(ipr_cmd,
878 temp, &hrrq->hrrq_pending_q, queue) {
879 list_del(&ipr_cmd->queue);
1da177e4 880
05a6538a 881 ipr_cmd->s.ioasa.hdr.ioasc =
882 cpu_to_be32(IPR_IOASC_IOA_WAS_RESET);
883 ipr_cmd->s.ioasa.hdr.ilid =
884 cpu_to_be32(IPR_DRIVER_ILID);
1da177e4 885
05a6538a 886 if (ipr_cmd->scsi_cmd)
887 ipr_cmd->done = ipr_scsi_eh_done;
888 else if (ipr_cmd->qc)
889 ipr_cmd->done = ipr_sata_eh_done;
1da177e4 890
05a6538a 891 ipr_trc_hook(ipr_cmd, IPR_TRACE_FINISH,
892 IPR_IOASC_IOA_WAS_RESET);
893 del_timer(&ipr_cmd->timer);
894 ipr_cmd->done(ipr_cmd);
895 }
56d6aa33 896 spin_unlock(&hrrq->_lock);
1da177e4 897 }
1da177e4
LT
898 LEAVE;
899}
900
a32c055f
WB
901/**
902 * ipr_send_command - Send driver initiated requests.
903 * @ipr_cmd: ipr command struct
904 *
905 * This function sends a command to the adapter using the correct write call.
906 * In the case of sis64, calculate the ioarcb size required. Then or in the
907 * appropriate bits.
908 *
909 * Return value:
910 * none
911 **/
912static void ipr_send_command(struct ipr_cmnd *ipr_cmd)
913{
914 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
915 dma_addr_t send_dma_addr = ipr_cmd->dma_addr;
916
917 if (ioa_cfg->sis64) {
918 /* The default size is 256 bytes */
919 send_dma_addr |= 0x1;
920
921 /* If the number of ioadls * size of ioadl > 128 bytes,
922 then use a 512 byte ioarcb */
923 if (ipr_cmd->dma_use_sg * sizeof(struct ipr_ioadl64_desc) > 128 )
924 send_dma_addr |= 0x4;
925 writeq(send_dma_addr, ioa_cfg->regs.ioarrin_reg);
926 } else
927 writel(send_dma_addr, ioa_cfg->regs.ioarrin_reg);
928}
929
1da177e4
LT
930/**
931 * ipr_do_req - Send driver initiated requests.
932 * @ipr_cmd: ipr command struct
933 * @done: done function
934 * @timeout_func: timeout function
935 * @timeout: timeout value
936 *
937 * This function sends the specified command to the adapter with the
938 * timeout given. The done function is invoked on command completion.
939 *
940 * Return value:
941 * none
942 **/
943static void ipr_do_req(struct ipr_cmnd *ipr_cmd,
944 void (*done) (struct ipr_cmnd *),
945 void (*timeout_func) (struct ipr_cmnd *), u32 timeout)
946{
05a6538a 947 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_pending_q);
1da177e4
LT
948
949 ipr_cmd->done = done;
950
951 ipr_cmd->timer.data = (unsigned long) ipr_cmd;
952 ipr_cmd->timer.expires = jiffies + timeout;
953 ipr_cmd->timer.function = (void (*)(unsigned long))timeout_func;
954
955 add_timer(&ipr_cmd->timer);
956
957 ipr_trc_hook(ipr_cmd, IPR_TRACE_START, 0);
958
a32c055f 959 ipr_send_command(ipr_cmd);
1da177e4
LT
960}
961
962/**
963 * ipr_internal_cmd_done - Op done function for an internally generated op.
964 * @ipr_cmd: ipr command struct
965 *
966 * This function is the op done function for an internally generated,
967 * blocking op. It simply wakes the sleeping thread.
968 *
969 * Return value:
970 * none
971 **/
972static void ipr_internal_cmd_done(struct ipr_cmnd *ipr_cmd)
973{
974 if (ipr_cmd->sibling)
975 ipr_cmd->sibling = NULL;
976 else
977 complete(&ipr_cmd->completion);
978}
979
a32c055f
WB
980/**
981 * ipr_init_ioadl - initialize the ioadl for the correct SIS type
982 * @ipr_cmd: ipr command struct
983 * @dma_addr: dma address
984 * @len: transfer length
985 * @flags: ioadl flag value
986 *
987 * This function initializes an ioadl in the case where there is only a single
988 * descriptor.
989 *
990 * Return value:
991 * nothing
992 **/
993static void ipr_init_ioadl(struct ipr_cmnd *ipr_cmd, dma_addr_t dma_addr,
994 u32 len, int flags)
995{
996 struct ipr_ioadl_desc *ioadl = ipr_cmd->i.ioadl;
997 struct ipr_ioadl64_desc *ioadl64 = ipr_cmd->i.ioadl64;
998
999 ipr_cmd->dma_use_sg = 1;
1000
1001 if (ipr_cmd->ioa_cfg->sis64) {
1002 ioadl64->flags = cpu_to_be32(flags);
1003 ioadl64->data_len = cpu_to_be32(len);
1004 ioadl64->address = cpu_to_be64(dma_addr);
1005
1006 ipr_cmd->ioarcb.ioadl_len =
1007 cpu_to_be32(sizeof(struct ipr_ioadl64_desc));
1008 ipr_cmd->ioarcb.data_transfer_length = cpu_to_be32(len);
1009 } else {
1010 ioadl->flags_and_data_len = cpu_to_be32(flags | len);
1011 ioadl->address = cpu_to_be32(dma_addr);
1012
1013 if (flags == IPR_IOADL_FLAGS_READ_LAST) {
1014 ipr_cmd->ioarcb.read_ioadl_len =
1015 cpu_to_be32(sizeof(struct ipr_ioadl_desc));
1016 ipr_cmd->ioarcb.read_data_transfer_length = cpu_to_be32(len);
1017 } else {
1018 ipr_cmd->ioarcb.ioadl_len =
1019 cpu_to_be32(sizeof(struct ipr_ioadl_desc));
1020 ipr_cmd->ioarcb.data_transfer_length = cpu_to_be32(len);
1021 }
1022 }
1023}
1024
1da177e4
LT
1025/**
1026 * ipr_send_blocking_cmd - Send command and sleep on its completion.
1027 * @ipr_cmd: ipr command struct
1028 * @timeout_func: function to invoke if command times out
1029 * @timeout: timeout
1030 *
1031 * Return value:
1032 * none
1033 **/
1034static void ipr_send_blocking_cmd(struct ipr_cmnd *ipr_cmd,
1035 void (*timeout_func) (struct ipr_cmnd *ipr_cmd),
1036 u32 timeout)
1037{
1038 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
1039
1040 init_completion(&ipr_cmd->completion);
1041 ipr_do_req(ipr_cmd, ipr_internal_cmd_done, timeout_func, timeout);
1042
1043 spin_unlock_irq(ioa_cfg->host->host_lock);
1044 wait_for_completion(&ipr_cmd->completion);
1045 spin_lock_irq(ioa_cfg->host->host_lock);
1046}
1047
05a6538a 1048static int ipr_get_hrrq_index(struct ipr_ioa_cfg *ioa_cfg)
1049{
1050 if (ioa_cfg->hrrq_num == 1)
56d6aa33 1051 return 0;
1052 else
1053 return (atomic_add_return(1, &ioa_cfg->hrrq_index) % (ioa_cfg->hrrq_num - 1)) + 1;
05a6538a 1054}
1055
1da177e4
LT
1056/**
1057 * ipr_send_hcam - Send an HCAM to the adapter.
1058 * @ioa_cfg: ioa config struct
1059 * @type: HCAM type
1060 * @hostrcb: hostrcb struct
1061 *
1062 * This function will send a Host Controlled Async command to the adapter.
1063 * If HCAMs are currently not allowed to be issued to the adapter, it will
1064 * place the hostrcb on the free queue.
1065 *
1066 * Return value:
1067 * none
1068 **/
1069static void ipr_send_hcam(struct ipr_ioa_cfg *ioa_cfg, u8 type,
1070 struct ipr_hostrcb *hostrcb)
1071{
1072 struct ipr_cmnd *ipr_cmd;
1073 struct ipr_ioarcb *ioarcb;
1074
56d6aa33 1075 if (ioa_cfg->hrrq[IPR_INIT_HRRQ].allow_cmds) {
1da177e4 1076 ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
05a6538a 1077 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_pending_q);
1da177e4
LT
1078 list_add_tail(&hostrcb->queue, &ioa_cfg->hostrcb_pending_q);
1079
1080 ipr_cmd->u.hostrcb = hostrcb;
1081 ioarcb = &ipr_cmd->ioarcb;
1082
1083 ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
1084 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_HCAM;
1085 ioarcb->cmd_pkt.cdb[0] = IPR_HOST_CONTROLLED_ASYNC;
1086 ioarcb->cmd_pkt.cdb[1] = type;
1087 ioarcb->cmd_pkt.cdb[7] = (sizeof(hostrcb->hcam) >> 8) & 0xff;
1088 ioarcb->cmd_pkt.cdb[8] = sizeof(hostrcb->hcam) & 0xff;
1089
a32c055f
WB
1090 ipr_init_ioadl(ipr_cmd, hostrcb->hostrcb_dma,
1091 sizeof(hostrcb->hcam), IPR_IOADL_FLAGS_READ_LAST);
1da177e4
LT
1092
1093 if (type == IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE)
1094 ipr_cmd->done = ipr_process_ccn;
1095 else
1096 ipr_cmd->done = ipr_process_error;
1097
1098 ipr_trc_hook(ipr_cmd, IPR_TRACE_START, IPR_IOA_RES_ADDR);
1099
a32c055f 1100 ipr_send_command(ipr_cmd);
1da177e4
LT
1101 } else {
1102 list_add_tail(&hostrcb->queue, &ioa_cfg->hostrcb_free_q);
1103 }
1104}
1105
3e7ebdfa
WB
1106/**
1107 * ipr_update_ata_class - Update the ata class in the resource entry
1108 * @res: resource entry struct
1109 * @proto: cfgte device bus protocol value
1110 *
1111 * Return value:
1112 * none
1113 **/
1114static void ipr_update_ata_class(struct ipr_resource_entry *res, unsigned int proto)
1115{
203fa3fe 1116 switch (proto) {
3e7ebdfa
WB
1117 case IPR_PROTO_SATA:
1118 case IPR_PROTO_SAS_STP:
1119 res->ata_class = ATA_DEV_ATA;
1120 break;
1121 case IPR_PROTO_SATA_ATAPI:
1122 case IPR_PROTO_SAS_STP_ATAPI:
1123 res->ata_class = ATA_DEV_ATAPI;
1124 break;
1125 default:
1126 res->ata_class = ATA_DEV_UNKNOWN;
1127 break;
1128 };
1129}
1130
1da177e4
LT
1131/**
1132 * ipr_init_res_entry - Initialize a resource entry struct.
1133 * @res: resource entry struct
3e7ebdfa 1134 * @cfgtew: config table entry wrapper struct
1da177e4
LT
1135 *
1136 * Return value:
1137 * none
1138 **/
3e7ebdfa
WB
1139static void ipr_init_res_entry(struct ipr_resource_entry *res,
1140 struct ipr_config_table_entry_wrapper *cfgtew)
1da177e4 1141{
3e7ebdfa
WB
1142 int found = 0;
1143 unsigned int proto;
1144 struct ipr_ioa_cfg *ioa_cfg = res->ioa_cfg;
1145 struct ipr_resource_entry *gscsi_res = NULL;
1146
ee0a90fa 1147 res->needs_sync_complete = 0;
1da177e4
LT
1148 res->in_erp = 0;
1149 res->add_to_ml = 0;
1150 res->del_from_ml = 0;
1151 res->resetting_device = 0;
0b1f8d44 1152 res->reset_occurred = 0;
1da177e4 1153 res->sdev = NULL;
35a39691 1154 res->sata_port = NULL;
3e7ebdfa
WB
1155
1156 if (ioa_cfg->sis64) {
1157 proto = cfgtew->u.cfgte64->proto;
1158 res->res_flags = cfgtew->u.cfgte64->res_flags;
1159 res->qmodel = IPR_QUEUEING_MODEL64(res);
438b0331 1160 res->type = cfgtew->u.cfgte64->res_type;
3e7ebdfa
WB
1161
1162 memcpy(res->res_path, &cfgtew->u.cfgte64->res_path,
1163 sizeof(res->res_path));
1164
1165 res->bus = 0;
0cb992ed
WB
1166 memcpy(&res->dev_lun.scsi_lun, &cfgtew->u.cfgte64->lun,
1167 sizeof(res->dev_lun.scsi_lun));
3e7ebdfa
WB
1168 res->lun = scsilun_to_int(&res->dev_lun);
1169
1170 if (res->type == IPR_RES_TYPE_GENERIC_SCSI) {
1171 list_for_each_entry(gscsi_res, &ioa_cfg->used_res_q, queue) {
1172 if (gscsi_res->dev_id == cfgtew->u.cfgte64->dev_id) {
1173 found = 1;
1174 res->target = gscsi_res->target;
1175 break;
1176 }
1177 }
1178 if (!found) {
1179 res->target = find_first_zero_bit(ioa_cfg->target_ids,
1180 ioa_cfg->max_devs_supported);
1181 set_bit(res->target, ioa_cfg->target_ids);
1182 }
3e7ebdfa
WB
1183 } else if (res->type == IPR_RES_TYPE_IOAFP) {
1184 res->bus = IPR_IOAFP_VIRTUAL_BUS;
1185 res->target = 0;
1186 } else if (res->type == IPR_RES_TYPE_ARRAY) {
1187 res->bus = IPR_ARRAY_VIRTUAL_BUS;
1188 res->target = find_first_zero_bit(ioa_cfg->array_ids,
1189 ioa_cfg->max_devs_supported);
1190 set_bit(res->target, ioa_cfg->array_ids);
1191 } else if (res->type == IPR_RES_TYPE_VOLUME_SET) {
1192 res->bus = IPR_VSET_VIRTUAL_BUS;
1193 res->target = find_first_zero_bit(ioa_cfg->vset_ids,
1194 ioa_cfg->max_devs_supported);
1195 set_bit(res->target, ioa_cfg->vset_ids);
1196 } else {
1197 res->target = find_first_zero_bit(ioa_cfg->target_ids,
1198 ioa_cfg->max_devs_supported);
1199 set_bit(res->target, ioa_cfg->target_ids);
1200 }
1201 } else {
1202 proto = cfgtew->u.cfgte->proto;
1203 res->qmodel = IPR_QUEUEING_MODEL(res);
1204 res->flags = cfgtew->u.cfgte->flags;
1205 if (res->flags & IPR_IS_IOA_RESOURCE)
1206 res->type = IPR_RES_TYPE_IOAFP;
1207 else
1208 res->type = cfgtew->u.cfgte->rsvd_subtype & 0x0f;
1209
1210 res->bus = cfgtew->u.cfgte->res_addr.bus;
1211 res->target = cfgtew->u.cfgte->res_addr.target;
1212 res->lun = cfgtew->u.cfgte->res_addr.lun;
46d74563 1213 res->lun_wwn = get_unaligned_be64(cfgtew->u.cfgte->lun_wwn);
3e7ebdfa
WB
1214 }
1215
1216 ipr_update_ata_class(res, proto);
1217}
1218
1219/**
1220 * ipr_is_same_device - Determine if two devices are the same.
1221 * @res: resource entry struct
1222 * @cfgtew: config table entry wrapper struct
1223 *
1224 * Return value:
1225 * 1 if the devices are the same / 0 otherwise
1226 **/
1227static int ipr_is_same_device(struct ipr_resource_entry *res,
1228 struct ipr_config_table_entry_wrapper *cfgtew)
1229{
1230 if (res->ioa_cfg->sis64) {
1231 if (!memcmp(&res->dev_id, &cfgtew->u.cfgte64->dev_id,
1232 sizeof(cfgtew->u.cfgte64->dev_id)) &&
0cb992ed 1233 !memcmp(&res->dev_lun.scsi_lun, &cfgtew->u.cfgte64->lun,
3e7ebdfa
WB
1234 sizeof(cfgtew->u.cfgte64->lun))) {
1235 return 1;
1236 }
1237 } else {
1238 if (res->bus == cfgtew->u.cfgte->res_addr.bus &&
1239 res->target == cfgtew->u.cfgte->res_addr.target &&
1240 res->lun == cfgtew->u.cfgte->res_addr.lun)
1241 return 1;
1242 }
1243
1244 return 0;
1245}
1246
1247/**
b3b3b407 1248 * __ipr_format_res_path - Format the resource path for printing.
3e7ebdfa
WB
1249 * @res_path: resource path
1250 * @buf: buffer
b3b3b407 1251 * @len: length of buffer provided
3e7ebdfa
WB
1252 *
1253 * Return value:
1254 * pointer to buffer
1255 **/
b3b3b407 1256static char *__ipr_format_res_path(u8 *res_path, char *buffer, int len)
3e7ebdfa
WB
1257{
1258 int i;
5adcbeb3 1259 char *p = buffer;
3e7ebdfa 1260
46d74563 1261 *p = '\0';
5adcbeb3
WB
1262 p += snprintf(p, buffer + len - p, "%02X", res_path[0]);
1263 for (i = 1; res_path[i] != 0xff && ((i * 3) < len); i++)
1264 p += snprintf(p, buffer + len - p, "-%02X", res_path[i]);
3e7ebdfa
WB
1265
1266 return buffer;
1267}
1268
b3b3b407
BK
1269/**
1270 * ipr_format_res_path - Format the resource path for printing.
1271 * @ioa_cfg: ioa config struct
1272 * @res_path: resource path
1273 * @buf: buffer
1274 * @len: length of buffer provided
1275 *
1276 * Return value:
1277 * pointer to buffer
1278 **/
1279static char *ipr_format_res_path(struct ipr_ioa_cfg *ioa_cfg,
1280 u8 *res_path, char *buffer, int len)
1281{
1282 char *p = buffer;
1283
1284 *p = '\0';
1285 p += snprintf(p, buffer + len - p, "%d/", ioa_cfg->host->host_no);
1286 __ipr_format_res_path(res_path, p, len - (buffer - p));
1287 return buffer;
1288}
1289
3e7ebdfa
WB
1290/**
1291 * ipr_update_res_entry - Update the resource entry.
1292 * @res: resource entry struct
1293 * @cfgtew: config table entry wrapper struct
1294 *
1295 * Return value:
1296 * none
1297 **/
1298static void ipr_update_res_entry(struct ipr_resource_entry *res,
1299 struct ipr_config_table_entry_wrapper *cfgtew)
1300{
1301 char buffer[IPR_MAX_RES_PATH_LENGTH];
1302 unsigned int proto;
1303 int new_path = 0;
1304
1305 if (res->ioa_cfg->sis64) {
1306 res->flags = cfgtew->u.cfgte64->flags;
1307 res->res_flags = cfgtew->u.cfgte64->res_flags;
75576bb9 1308 res->type = cfgtew->u.cfgte64->res_type;
3e7ebdfa
WB
1309
1310 memcpy(&res->std_inq_data, &cfgtew->u.cfgte64->std_inq_data,
1311 sizeof(struct ipr_std_inq_data));
1312
1313 res->qmodel = IPR_QUEUEING_MODEL64(res);
1314 proto = cfgtew->u.cfgte64->proto;
1315 res->res_handle = cfgtew->u.cfgte64->res_handle;
1316 res->dev_id = cfgtew->u.cfgte64->dev_id;
1317
1318 memcpy(&res->dev_lun.scsi_lun, &cfgtew->u.cfgte64->lun,
1319 sizeof(res->dev_lun.scsi_lun));
1320
1321 if (memcmp(res->res_path, &cfgtew->u.cfgte64->res_path,
1322 sizeof(res->res_path))) {
1323 memcpy(res->res_path, &cfgtew->u.cfgte64->res_path,
1324 sizeof(res->res_path));
1325 new_path = 1;
1326 }
1327
1328 if (res->sdev && new_path)
1329 sdev_printk(KERN_INFO, res->sdev, "Resource path: %s\n",
b3b3b407
BK
1330 ipr_format_res_path(res->ioa_cfg,
1331 res->res_path, buffer, sizeof(buffer)));
3e7ebdfa
WB
1332 } else {
1333 res->flags = cfgtew->u.cfgte->flags;
1334 if (res->flags & IPR_IS_IOA_RESOURCE)
1335 res->type = IPR_RES_TYPE_IOAFP;
1336 else
1337 res->type = cfgtew->u.cfgte->rsvd_subtype & 0x0f;
1338
1339 memcpy(&res->std_inq_data, &cfgtew->u.cfgte->std_inq_data,
1340 sizeof(struct ipr_std_inq_data));
1341
1342 res->qmodel = IPR_QUEUEING_MODEL(res);
1343 proto = cfgtew->u.cfgte->proto;
1344 res->res_handle = cfgtew->u.cfgte->res_handle;
1345 }
1346
1347 ipr_update_ata_class(res, proto);
1348}
1349
1350/**
1351 * ipr_clear_res_target - Clear the bit in the bit map representing the target
1352 * for the resource.
1353 * @res: resource entry struct
1354 * @cfgtew: config table entry wrapper struct
1355 *
1356 * Return value:
1357 * none
1358 **/
1359static void ipr_clear_res_target(struct ipr_resource_entry *res)
1360{
1361 struct ipr_resource_entry *gscsi_res = NULL;
1362 struct ipr_ioa_cfg *ioa_cfg = res->ioa_cfg;
1363
1364 if (!ioa_cfg->sis64)
1365 return;
1366
1367 if (res->bus == IPR_ARRAY_VIRTUAL_BUS)
1368 clear_bit(res->target, ioa_cfg->array_ids);
1369 else if (res->bus == IPR_VSET_VIRTUAL_BUS)
1370 clear_bit(res->target, ioa_cfg->vset_ids);
1371 else if (res->bus == 0 && res->type == IPR_RES_TYPE_GENERIC_SCSI) {
1372 list_for_each_entry(gscsi_res, &ioa_cfg->used_res_q, queue)
1373 if (gscsi_res->dev_id == res->dev_id && gscsi_res != res)
1374 return;
1375 clear_bit(res->target, ioa_cfg->target_ids);
1376
1377 } else if (res->bus == 0)
1378 clear_bit(res->target, ioa_cfg->target_ids);
1da177e4
LT
1379}
1380
1381/**
1382 * ipr_handle_config_change - Handle a config change from the adapter
1383 * @ioa_cfg: ioa config struct
1384 * @hostrcb: hostrcb
1385 *
1386 * Return value:
1387 * none
1388 **/
1389static void ipr_handle_config_change(struct ipr_ioa_cfg *ioa_cfg,
3e7ebdfa 1390 struct ipr_hostrcb *hostrcb)
1da177e4
LT
1391{
1392 struct ipr_resource_entry *res = NULL;
3e7ebdfa
WB
1393 struct ipr_config_table_entry_wrapper cfgtew;
1394 __be32 cc_res_handle;
1395
1da177e4
LT
1396 u32 is_ndn = 1;
1397
3e7ebdfa
WB
1398 if (ioa_cfg->sis64) {
1399 cfgtew.u.cfgte64 = &hostrcb->hcam.u.ccn.u.cfgte64;
1400 cc_res_handle = cfgtew.u.cfgte64->res_handle;
1401 } else {
1402 cfgtew.u.cfgte = &hostrcb->hcam.u.ccn.u.cfgte;
1403 cc_res_handle = cfgtew.u.cfgte->res_handle;
1404 }
1da177e4
LT
1405
1406 list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
3e7ebdfa 1407 if (res->res_handle == cc_res_handle) {
1da177e4
LT
1408 is_ndn = 0;
1409 break;
1410 }
1411 }
1412
1413 if (is_ndn) {
1414 if (list_empty(&ioa_cfg->free_res_q)) {
1415 ipr_send_hcam(ioa_cfg,
1416 IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE,
1417 hostrcb);
1418 return;
1419 }
1420
1421 res = list_entry(ioa_cfg->free_res_q.next,
1422 struct ipr_resource_entry, queue);
1423
1424 list_del(&res->queue);
3e7ebdfa 1425 ipr_init_res_entry(res, &cfgtew);
1da177e4
LT
1426 list_add_tail(&res->queue, &ioa_cfg->used_res_q);
1427 }
1428
3e7ebdfa 1429 ipr_update_res_entry(res, &cfgtew);
1da177e4
LT
1430
1431 if (hostrcb->hcam.notify_type == IPR_HOST_RCB_NOTIF_TYPE_REM_ENTRY) {
1432 if (res->sdev) {
1da177e4 1433 res->del_from_ml = 1;
3e7ebdfa 1434 res->res_handle = IPR_INVALID_RES_HANDLE;
f688f96d 1435 schedule_work(&ioa_cfg->work_q);
3e7ebdfa
WB
1436 } else {
1437 ipr_clear_res_target(res);
1da177e4 1438 list_move_tail(&res->queue, &ioa_cfg->free_res_q);
3e7ebdfa 1439 }
5767a1c4 1440 } else if (!res->sdev || res->del_from_ml) {
1da177e4 1441 res->add_to_ml = 1;
f688f96d 1442 schedule_work(&ioa_cfg->work_q);
1da177e4
LT
1443 }
1444
1445 ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE, hostrcb);
1446}
1447
1448/**
1449 * ipr_process_ccn - Op done function for a CCN.
1450 * @ipr_cmd: ipr command struct
1451 *
1452 * This function is the op done function for a configuration
1453 * change notification host controlled async from the adapter.
1454 *
1455 * Return value:
1456 * none
1457 **/
1458static void ipr_process_ccn(struct ipr_cmnd *ipr_cmd)
1459{
1460 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
1461 struct ipr_hostrcb *hostrcb = ipr_cmd->u.hostrcb;
96d21f00 1462 u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
1da177e4
LT
1463
1464 list_del(&hostrcb->queue);
05a6538a 1465 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
1da177e4
LT
1466
1467 if (ioasc) {
4fdd7c7a
BK
1468 if (ioasc != IPR_IOASC_IOA_WAS_RESET &&
1469 ioasc != IPR_IOASC_ABORTED_CMD_TERM_BY_HOST)
1da177e4
LT
1470 dev_err(&ioa_cfg->pdev->dev,
1471 "Host RCB failed with IOASC: 0x%08X\n", ioasc);
1472
1473 ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE, hostrcb);
1474 } else {
1475 ipr_handle_config_change(ioa_cfg, hostrcb);
1476 }
1477}
1478
8cf093e2
BK
1479/**
1480 * strip_and_pad_whitespace - Strip and pad trailing whitespace.
1481 * @i: index into buffer
1482 * @buf: string to modify
1483 *
1484 * This function will strip all trailing whitespace, pad the end
1485 * of the string with a single space, and NULL terminate the string.
1486 *
1487 * Return value:
1488 * new length of string
1489 **/
1490static int strip_and_pad_whitespace(int i, char *buf)
1491{
1492 while (i && buf[i] == ' ')
1493 i--;
1494 buf[i+1] = ' ';
1495 buf[i+2] = '\0';
1496 return i + 2;
1497}
1498
1499/**
1500 * ipr_log_vpd_compact - Log the passed extended VPD compactly.
1501 * @prefix: string to print at start of printk
1502 * @hostrcb: hostrcb pointer
1503 * @vpd: vendor/product id/sn struct
1504 *
1505 * Return value:
1506 * none
1507 **/
1508static void ipr_log_vpd_compact(char *prefix, struct ipr_hostrcb *hostrcb,
1509 struct ipr_vpd *vpd)
1510{
1511 char buffer[IPR_VENDOR_ID_LEN + IPR_PROD_ID_LEN + IPR_SERIAL_NUM_LEN + 3];
1512 int i = 0;
1513
1514 memcpy(buffer, vpd->vpids.vendor_id, IPR_VENDOR_ID_LEN);
1515 i = strip_and_pad_whitespace(IPR_VENDOR_ID_LEN - 1, buffer);
1516
1517 memcpy(&buffer[i], vpd->vpids.product_id, IPR_PROD_ID_LEN);
1518 i = strip_and_pad_whitespace(i + IPR_PROD_ID_LEN - 1, buffer);
1519
1520 memcpy(&buffer[i], vpd->sn, IPR_SERIAL_NUM_LEN);
1521 buffer[IPR_SERIAL_NUM_LEN + i] = '\0';
1522
1523 ipr_hcam_err(hostrcb, "%s VPID/SN: %s\n", prefix, buffer);
1524}
1525
1da177e4
LT
1526/**
1527 * ipr_log_vpd - Log the passed VPD to the error log.
cfc32139 1528 * @vpd: vendor/product id/sn struct
1da177e4
LT
1529 *
1530 * Return value:
1531 * none
1532 **/
cfc32139 1533static void ipr_log_vpd(struct ipr_vpd *vpd)
1da177e4
LT
1534{
1535 char buffer[IPR_VENDOR_ID_LEN + IPR_PROD_ID_LEN
1536 + IPR_SERIAL_NUM_LEN];
1537
cfc32139
BK
1538 memcpy(buffer, vpd->vpids.vendor_id, IPR_VENDOR_ID_LEN);
1539 memcpy(buffer + IPR_VENDOR_ID_LEN, vpd->vpids.product_id,
1da177e4
LT
1540 IPR_PROD_ID_LEN);
1541 buffer[IPR_VENDOR_ID_LEN + IPR_PROD_ID_LEN] = '\0';
1542 ipr_err("Vendor/Product ID: %s\n", buffer);
1543
cfc32139 1544 memcpy(buffer, vpd->sn, IPR_SERIAL_NUM_LEN);
1da177e4
LT
1545 buffer[IPR_SERIAL_NUM_LEN] = '\0';
1546 ipr_err(" Serial Number: %s\n", buffer);
1547}
1548
8cf093e2
BK
1549/**
1550 * ipr_log_ext_vpd_compact - Log the passed extended VPD compactly.
1551 * @prefix: string to print at start of printk
1552 * @hostrcb: hostrcb pointer
1553 * @vpd: vendor/product id/sn/wwn struct
1554 *
1555 * Return value:
1556 * none
1557 **/
1558static void ipr_log_ext_vpd_compact(char *prefix, struct ipr_hostrcb *hostrcb,
1559 struct ipr_ext_vpd *vpd)
1560{
1561 ipr_log_vpd_compact(prefix, hostrcb, &vpd->vpd);
1562 ipr_hcam_err(hostrcb, "%s WWN: %08X%08X\n", prefix,
1563 be32_to_cpu(vpd->wwid[0]), be32_to_cpu(vpd->wwid[1]));
1564}
1565
ee0f05b8
BK
1566/**
1567 * ipr_log_ext_vpd - Log the passed extended VPD to the error log.
1568 * @vpd: vendor/product id/sn/wwn struct
1569 *
1570 * Return value:
1571 * none
1572 **/
1573static void ipr_log_ext_vpd(struct ipr_ext_vpd *vpd)
1574{
1575 ipr_log_vpd(&vpd->vpd);
1576 ipr_err(" WWN: %08X%08X\n", be32_to_cpu(vpd->wwid[0]),
1577 be32_to_cpu(vpd->wwid[1]));
1578}
1579
1580/**
1581 * ipr_log_enhanced_cache_error - Log a cache error.
1582 * @ioa_cfg: ioa config struct
1583 * @hostrcb: hostrcb struct
1584 *
1585 * Return value:
1586 * none
1587 **/
1588static void ipr_log_enhanced_cache_error(struct ipr_ioa_cfg *ioa_cfg,
1589 struct ipr_hostrcb *hostrcb)
1590{
4565e370
WB
1591 struct ipr_hostrcb_type_12_error *error;
1592
1593 if (ioa_cfg->sis64)
1594 error = &hostrcb->hcam.u.error64.u.type_12_error;
1595 else
1596 error = &hostrcb->hcam.u.error.u.type_12_error;
ee0f05b8
BK
1597
1598 ipr_err("-----Current Configuration-----\n");
1599 ipr_err("Cache Directory Card Information:\n");
1600 ipr_log_ext_vpd(&error->ioa_vpd);
1601 ipr_err("Adapter Card Information:\n");
1602 ipr_log_ext_vpd(&error->cfc_vpd);
1603
1604 ipr_err("-----Expected Configuration-----\n");
1605 ipr_err("Cache Directory Card Information:\n");
1606 ipr_log_ext_vpd(&error->ioa_last_attached_to_cfc_vpd);
1607 ipr_err("Adapter Card Information:\n");
1608 ipr_log_ext_vpd(&error->cfc_last_attached_to_ioa_vpd);
1609
1610 ipr_err("Additional IOA Data: %08X %08X %08X\n",
1611 be32_to_cpu(error->ioa_data[0]),
1612 be32_to_cpu(error->ioa_data[1]),
1613 be32_to_cpu(error->ioa_data[2]));
1614}
1615
1da177e4
LT
1616/**
1617 * ipr_log_cache_error - Log a cache error.
1618 * @ioa_cfg: ioa config struct
1619 * @hostrcb: hostrcb struct
1620 *
1621 * Return value:
1622 * none
1623 **/
1624static void ipr_log_cache_error(struct ipr_ioa_cfg *ioa_cfg,
1625 struct ipr_hostrcb *hostrcb)
1626{
1627 struct ipr_hostrcb_type_02_error *error =
1628 &hostrcb->hcam.u.error.u.type_02_error;
1629
1630 ipr_err("-----Current Configuration-----\n");
1631 ipr_err("Cache Directory Card Information:\n");
cfc32139 1632 ipr_log_vpd(&error->ioa_vpd);
1da177e4 1633 ipr_err("Adapter Card Information:\n");
cfc32139 1634 ipr_log_vpd(&error->cfc_vpd);
1da177e4
LT
1635
1636 ipr_err("-----Expected Configuration-----\n");
1637 ipr_err("Cache Directory Card Information:\n");
cfc32139 1638 ipr_log_vpd(&error->ioa_last_attached_to_cfc_vpd);
1da177e4 1639 ipr_err("Adapter Card Information:\n");
cfc32139 1640 ipr_log_vpd(&error->cfc_last_attached_to_ioa_vpd);
1da177e4
LT
1641
1642 ipr_err("Additional IOA Data: %08X %08X %08X\n",
1643 be32_to_cpu(error->ioa_data[0]),
1644 be32_to_cpu(error->ioa_data[1]),
1645 be32_to_cpu(error->ioa_data[2]));
1646}
1647
ee0f05b8
BK
1648/**
1649 * ipr_log_enhanced_config_error - Log a configuration error.
1650 * @ioa_cfg: ioa config struct
1651 * @hostrcb: hostrcb struct
1652 *
1653 * Return value:
1654 * none
1655 **/
1656static void ipr_log_enhanced_config_error(struct ipr_ioa_cfg *ioa_cfg,
1657 struct ipr_hostrcb *hostrcb)
1658{
1659 int errors_logged, i;
1660 struct ipr_hostrcb_device_data_entry_enhanced *dev_entry;
1661 struct ipr_hostrcb_type_13_error *error;
1662
1663 error = &hostrcb->hcam.u.error.u.type_13_error;
1664 errors_logged = be32_to_cpu(error->errors_logged);
1665
1666 ipr_err("Device Errors Detected/Logged: %d/%d\n",
1667 be32_to_cpu(error->errors_detected), errors_logged);
1668
1669 dev_entry = error->dev;
1670
1671 for (i = 0; i < errors_logged; i++, dev_entry++) {
1672 ipr_err_separator;
1673
1674 ipr_phys_res_err(ioa_cfg, dev_entry->dev_res_addr, "Device %d", i + 1);
1675 ipr_log_ext_vpd(&dev_entry->vpd);
1676
1677 ipr_err("-----New Device Information-----\n");
1678 ipr_log_ext_vpd(&dev_entry->new_vpd);
1679
1680 ipr_err("Cache Directory Card Information:\n");
1681 ipr_log_ext_vpd(&dev_entry->ioa_last_with_dev_vpd);
1682
1683 ipr_err("Adapter Card Information:\n");
1684 ipr_log_ext_vpd(&dev_entry->cfc_last_with_dev_vpd);
1685 }
1686}
1687
4565e370
WB
1688/**
1689 * ipr_log_sis64_config_error - Log a device error.
1690 * @ioa_cfg: ioa config struct
1691 * @hostrcb: hostrcb struct
1692 *
1693 * Return value:
1694 * none
1695 **/
1696static void ipr_log_sis64_config_error(struct ipr_ioa_cfg *ioa_cfg,
1697 struct ipr_hostrcb *hostrcb)
1698{
1699 int errors_logged, i;
1700 struct ipr_hostrcb64_device_data_entry_enhanced *dev_entry;
1701 struct ipr_hostrcb_type_23_error *error;
1702 char buffer[IPR_MAX_RES_PATH_LENGTH];
1703
1704 error = &hostrcb->hcam.u.error64.u.type_23_error;
1705 errors_logged = be32_to_cpu(error->errors_logged);
1706
1707 ipr_err("Device Errors Detected/Logged: %d/%d\n",
1708 be32_to_cpu(error->errors_detected), errors_logged);
1709
1710 dev_entry = error->dev;
1711
1712 for (i = 0; i < errors_logged; i++, dev_entry++) {
1713 ipr_err_separator;
1714
1715 ipr_err("Device %d : %s", i + 1,
b3b3b407
BK
1716 __ipr_format_res_path(dev_entry->res_path,
1717 buffer, sizeof(buffer)));
4565e370
WB
1718 ipr_log_ext_vpd(&dev_entry->vpd);
1719
1720 ipr_err("-----New Device Information-----\n");
1721 ipr_log_ext_vpd(&dev_entry->new_vpd);
1722
1723 ipr_err("Cache Directory Card Information:\n");
1724 ipr_log_ext_vpd(&dev_entry->ioa_last_with_dev_vpd);
1725
1726 ipr_err("Adapter Card Information:\n");
1727 ipr_log_ext_vpd(&dev_entry->cfc_last_with_dev_vpd);
1728 }
1729}
1730
1da177e4
LT
1731/**
1732 * ipr_log_config_error - Log a configuration error.
1733 * @ioa_cfg: ioa config struct
1734 * @hostrcb: hostrcb struct
1735 *
1736 * Return value:
1737 * none
1738 **/
1739static void ipr_log_config_error(struct ipr_ioa_cfg *ioa_cfg,
1740 struct ipr_hostrcb *hostrcb)
1741{
1742 int errors_logged, i;
1743 struct ipr_hostrcb_device_data_entry *dev_entry;
1744 struct ipr_hostrcb_type_03_error *error;
1745
1746 error = &hostrcb->hcam.u.error.u.type_03_error;
1747 errors_logged = be32_to_cpu(error->errors_logged);
1748
1749 ipr_err("Device Errors Detected/Logged: %d/%d\n",
1750 be32_to_cpu(error->errors_detected), errors_logged);
1751
cfc32139 1752 dev_entry = error->dev;
1da177e4
LT
1753
1754 for (i = 0; i < errors_logged; i++, dev_entry++) {
1755 ipr_err_separator;
1756
fa15b1f6 1757 ipr_phys_res_err(ioa_cfg, dev_entry->dev_res_addr, "Device %d", i + 1);
cfc32139 1758 ipr_log_vpd(&dev_entry->vpd);
1da177e4
LT
1759
1760 ipr_err("-----New Device Information-----\n");
cfc32139 1761 ipr_log_vpd(&dev_entry->new_vpd);
1da177e4
LT
1762
1763 ipr_err("Cache Directory Card Information:\n");
cfc32139 1764 ipr_log_vpd(&dev_entry->ioa_last_with_dev_vpd);
1da177e4
LT
1765
1766 ipr_err("Adapter Card Information:\n");
cfc32139 1767 ipr_log_vpd(&dev_entry->cfc_last_with_dev_vpd);
1da177e4
LT
1768
1769 ipr_err("Additional IOA Data: %08X %08X %08X %08X %08X\n",
1770 be32_to_cpu(dev_entry->ioa_data[0]),
1771 be32_to_cpu(dev_entry->ioa_data[1]),
1772 be32_to_cpu(dev_entry->ioa_data[2]),
1773 be32_to_cpu(dev_entry->ioa_data[3]),
1774 be32_to_cpu(dev_entry->ioa_data[4]));
1775 }
1776}
1777
ee0f05b8
BK
1778/**
1779 * ipr_log_enhanced_array_error - Log an array configuration error.
1780 * @ioa_cfg: ioa config struct
1781 * @hostrcb: hostrcb struct
1782 *
1783 * Return value:
1784 * none
1785 **/
1786static void ipr_log_enhanced_array_error(struct ipr_ioa_cfg *ioa_cfg,
1787 struct ipr_hostrcb *hostrcb)
1788{
1789 int i, num_entries;
1790 struct ipr_hostrcb_type_14_error *error;
1791 struct ipr_hostrcb_array_data_entry_enhanced *array_entry;
1792 const u8 zero_sn[IPR_SERIAL_NUM_LEN] = { [0 ... IPR_SERIAL_NUM_LEN-1] = '0' };
1793
1794 error = &hostrcb->hcam.u.error.u.type_14_error;
1795
1796 ipr_err_separator;
1797
1798 ipr_err("RAID %s Array Configuration: %d:%d:%d:%d\n",
1799 error->protection_level,
1800 ioa_cfg->host->host_no,
1801 error->last_func_vset_res_addr.bus,
1802 error->last_func_vset_res_addr.target,
1803 error->last_func_vset_res_addr.lun);
1804
1805 ipr_err_separator;
1806
1807 array_entry = error->array_member;
1808 num_entries = min_t(u32, be32_to_cpu(error->num_entries),
7262026f 1809 ARRAY_SIZE(error->array_member));
ee0f05b8
BK
1810
1811 for (i = 0; i < num_entries; i++, array_entry++) {
1812 if (!memcmp(array_entry->vpd.vpd.sn, zero_sn, IPR_SERIAL_NUM_LEN))
1813 continue;
1814
1815 if (be32_to_cpu(error->exposed_mode_adn) == i)
1816 ipr_err("Exposed Array Member %d:\n", i);
1817 else
1818 ipr_err("Array Member %d:\n", i);
1819
1820 ipr_log_ext_vpd(&array_entry->vpd);
1821 ipr_phys_res_err(ioa_cfg, array_entry->dev_res_addr, "Current Location");
1822 ipr_phys_res_err(ioa_cfg, array_entry->expected_dev_res_addr,
1823 "Expected Location");
1824
1825 ipr_err_separator;
1826 }
1827}
1828
1da177e4
LT
1829/**
1830 * ipr_log_array_error - Log an array configuration error.
1831 * @ioa_cfg: ioa config struct
1832 * @hostrcb: hostrcb struct
1833 *
1834 * Return value:
1835 * none
1836 **/
1837static void ipr_log_array_error(struct ipr_ioa_cfg *ioa_cfg,
1838 struct ipr_hostrcb *hostrcb)
1839{
1840 int i;
1841 struct ipr_hostrcb_type_04_error *error;
1842 struct ipr_hostrcb_array_data_entry *array_entry;
1843 const u8 zero_sn[IPR_SERIAL_NUM_LEN] = { [0 ... IPR_SERIAL_NUM_LEN-1] = '0' };
1844
1845 error = &hostrcb->hcam.u.error.u.type_04_error;
1846
1847 ipr_err_separator;
1848
1849 ipr_err("RAID %s Array Configuration: %d:%d:%d:%d\n",
1850 error->protection_level,
1851 ioa_cfg->host->host_no,
1852 error->last_func_vset_res_addr.bus,
1853 error->last_func_vset_res_addr.target,
1854 error->last_func_vset_res_addr.lun);
1855
1856 ipr_err_separator;
1857
1858 array_entry = error->array_member;
1859
1860 for (i = 0; i < 18; i++) {
cfc32139 1861 if (!memcmp(array_entry->vpd.sn, zero_sn, IPR_SERIAL_NUM_LEN))
1da177e4
LT
1862 continue;
1863
fa15b1f6 1864 if (be32_to_cpu(error->exposed_mode_adn) == i)
1da177e4 1865 ipr_err("Exposed Array Member %d:\n", i);
fa15b1f6 1866 else
1da177e4 1867 ipr_err("Array Member %d:\n", i);
1da177e4 1868
cfc32139 1869 ipr_log_vpd(&array_entry->vpd);
1da177e4 1870
fa15b1f6
BK
1871 ipr_phys_res_err(ioa_cfg, array_entry->dev_res_addr, "Current Location");
1872 ipr_phys_res_err(ioa_cfg, array_entry->expected_dev_res_addr,
1873 "Expected Location");
1da177e4
LT
1874
1875 ipr_err_separator;
1876
1877 if (i == 9)
1878 array_entry = error->array_member2;
1879 else
1880 array_entry++;
1881 }
1882}
1883
1884/**
b0df54bb 1885 * ipr_log_hex_data - Log additional hex IOA error data.
ac719aba 1886 * @ioa_cfg: ioa config struct
b0df54bb
BK
1887 * @data: IOA error data
1888 * @len: data length
1da177e4
LT
1889 *
1890 * Return value:
1891 * none
1892 **/
ac719aba 1893static void ipr_log_hex_data(struct ipr_ioa_cfg *ioa_cfg, u32 *data, int len)
1da177e4
LT
1894{
1895 int i;
1da177e4 1896
b0df54bb 1897 if (len == 0)
1da177e4
LT
1898 return;
1899
ac719aba
BK
1900 if (ioa_cfg->log_level <= IPR_DEFAULT_LOG_LEVEL)
1901 len = min_t(int, len, IPR_DEFAULT_MAX_ERROR_DUMP);
1902
b0df54bb 1903 for (i = 0; i < len / 4; i += 4) {
1da177e4 1904 ipr_err("%08X: %08X %08X %08X %08X\n", i*4,
b0df54bb
BK
1905 be32_to_cpu(data[i]),
1906 be32_to_cpu(data[i+1]),
1907 be32_to_cpu(data[i+2]),
1908 be32_to_cpu(data[i+3]));
1da177e4
LT
1909 }
1910}
1911
ee0f05b8
BK
1912/**
1913 * ipr_log_enhanced_dual_ioa_error - Log an enhanced dual adapter error.
1914 * @ioa_cfg: ioa config struct
1915 * @hostrcb: hostrcb struct
1916 *
1917 * Return value:
1918 * none
1919 **/
1920static void ipr_log_enhanced_dual_ioa_error(struct ipr_ioa_cfg *ioa_cfg,
1921 struct ipr_hostrcb *hostrcb)
1922{
1923 struct ipr_hostrcb_type_17_error *error;
1924
4565e370
WB
1925 if (ioa_cfg->sis64)
1926 error = &hostrcb->hcam.u.error64.u.type_17_error;
1927 else
1928 error = &hostrcb->hcam.u.error.u.type_17_error;
1929
ee0f05b8 1930 error->failure_reason[sizeof(error->failure_reason) - 1] = '\0';
ca54cb8c 1931 strim(error->failure_reason);
ee0f05b8 1932
8cf093e2
BK
1933 ipr_hcam_err(hostrcb, "%s [PRC: %08X]\n", error->failure_reason,
1934 be32_to_cpu(hostrcb->hcam.u.error.prc));
1935 ipr_log_ext_vpd_compact("Remote IOA", hostrcb, &error->vpd);
ac719aba 1936 ipr_log_hex_data(ioa_cfg, error->data,
ee0f05b8
BK
1937 be32_to_cpu(hostrcb->hcam.length) -
1938 (offsetof(struct ipr_hostrcb_error, u) +
1939 offsetof(struct ipr_hostrcb_type_17_error, data)));
1940}
1941
b0df54bb
BK
1942/**
1943 * ipr_log_dual_ioa_error - Log a dual adapter error.
1944 * @ioa_cfg: ioa config struct
1945 * @hostrcb: hostrcb struct
1946 *
1947 * Return value:
1948 * none
1949 **/
1950static void ipr_log_dual_ioa_error(struct ipr_ioa_cfg *ioa_cfg,
1951 struct ipr_hostrcb *hostrcb)
1952{
1953 struct ipr_hostrcb_type_07_error *error;
1954
1955 error = &hostrcb->hcam.u.error.u.type_07_error;
1956 error->failure_reason[sizeof(error->failure_reason) - 1] = '\0';
ca54cb8c 1957 strim(error->failure_reason);
b0df54bb 1958
8cf093e2
BK
1959 ipr_hcam_err(hostrcb, "%s [PRC: %08X]\n", error->failure_reason,
1960 be32_to_cpu(hostrcb->hcam.u.error.prc));
1961 ipr_log_vpd_compact("Remote IOA", hostrcb, &error->vpd);
ac719aba 1962 ipr_log_hex_data(ioa_cfg, error->data,
b0df54bb
BK
1963 be32_to_cpu(hostrcb->hcam.length) -
1964 (offsetof(struct ipr_hostrcb_error, u) +
1965 offsetof(struct ipr_hostrcb_type_07_error, data)));
1966}
1967
49dc6a18
BK
1968static const struct {
1969 u8 active;
1970 char *desc;
1971} path_active_desc[] = {
1972 { IPR_PATH_NO_INFO, "Path" },
1973 { IPR_PATH_ACTIVE, "Active path" },
1974 { IPR_PATH_NOT_ACTIVE, "Inactive path" }
1975};
1976
1977static const struct {
1978 u8 state;
1979 char *desc;
1980} path_state_desc[] = {
1981 { IPR_PATH_STATE_NO_INFO, "has no path state information available" },
1982 { IPR_PATH_HEALTHY, "is healthy" },
1983 { IPR_PATH_DEGRADED, "is degraded" },
1984 { IPR_PATH_FAILED, "is failed" }
1985};
1986
1987/**
1988 * ipr_log_fabric_path - Log a fabric path error
1989 * @hostrcb: hostrcb struct
1990 * @fabric: fabric descriptor
1991 *
1992 * Return value:
1993 * none
1994 **/
1995static void ipr_log_fabric_path(struct ipr_hostrcb *hostrcb,
1996 struct ipr_hostrcb_fabric_desc *fabric)
1997{
1998 int i, j;
1999 u8 path_state = fabric->path_state;
2000 u8 active = path_state & IPR_PATH_ACTIVE_MASK;
2001 u8 state = path_state & IPR_PATH_STATE_MASK;
2002
2003 for (i = 0; i < ARRAY_SIZE(path_active_desc); i++) {
2004 if (path_active_desc[i].active != active)
2005 continue;
2006
2007 for (j = 0; j < ARRAY_SIZE(path_state_desc); j++) {
2008 if (path_state_desc[j].state != state)
2009 continue;
2010
2011 if (fabric->cascaded_expander == 0xff && fabric->phy == 0xff) {
2012 ipr_hcam_err(hostrcb, "%s %s: IOA Port=%d\n",
2013 path_active_desc[i].desc, path_state_desc[j].desc,
2014 fabric->ioa_port);
2015 } else if (fabric->cascaded_expander == 0xff) {
2016 ipr_hcam_err(hostrcb, "%s %s: IOA Port=%d, Phy=%d\n",
2017 path_active_desc[i].desc, path_state_desc[j].desc,
2018 fabric->ioa_port, fabric->phy);
2019 } else if (fabric->phy == 0xff) {
2020 ipr_hcam_err(hostrcb, "%s %s: IOA Port=%d, Cascade=%d\n",
2021 path_active_desc[i].desc, path_state_desc[j].desc,
2022 fabric->ioa_port, fabric->cascaded_expander);
2023 } else {
2024 ipr_hcam_err(hostrcb, "%s %s: IOA Port=%d, Cascade=%d, Phy=%d\n",
2025 path_active_desc[i].desc, path_state_desc[j].desc,
2026 fabric->ioa_port, fabric->cascaded_expander, fabric->phy);
2027 }
2028 return;
2029 }
2030 }
2031
2032 ipr_err("Path state=%02X IOA Port=%d Cascade=%d Phy=%d\n", path_state,
2033 fabric->ioa_port, fabric->cascaded_expander, fabric->phy);
2034}
2035
4565e370
WB
2036/**
2037 * ipr_log64_fabric_path - Log a fabric path error
2038 * @hostrcb: hostrcb struct
2039 * @fabric: fabric descriptor
2040 *
2041 * Return value:
2042 * none
2043 **/
2044static void ipr_log64_fabric_path(struct ipr_hostrcb *hostrcb,
2045 struct ipr_hostrcb64_fabric_desc *fabric)
2046{
2047 int i, j;
2048 u8 path_state = fabric->path_state;
2049 u8 active = path_state & IPR_PATH_ACTIVE_MASK;
2050 u8 state = path_state & IPR_PATH_STATE_MASK;
2051 char buffer[IPR_MAX_RES_PATH_LENGTH];
2052
2053 for (i = 0; i < ARRAY_SIZE(path_active_desc); i++) {
2054 if (path_active_desc[i].active != active)
2055 continue;
2056
2057 for (j = 0; j < ARRAY_SIZE(path_state_desc); j++) {
2058 if (path_state_desc[j].state != state)
2059 continue;
2060
2061 ipr_hcam_err(hostrcb, "%s %s: Resource Path=%s\n",
2062 path_active_desc[i].desc, path_state_desc[j].desc,
b3b3b407
BK
2063 ipr_format_res_path(hostrcb->ioa_cfg,
2064 fabric->res_path,
2065 buffer, sizeof(buffer)));
4565e370
WB
2066 return;
2067 }
2068 }
2069
2070 ipr_err("Path state=%02X Resource Path=%s\n", path_state,
b3b3b407
BK
2071 ipr_format_res_path(hostrcb->ioa_cfg, fabric->res_path,
2072 buffer, sizeof(buffer)));
4565e370
WB
2073}
2074
49dc6a18
BK
2075static const struct {
2076 u8 type;
2077 char *desc;
2078} path_type_desc[] = {
2079 { IPR_PATH_CFG_IOA_PORT, "IOA port" },
2080 { IPR_PATH_CFG_EXP_PORT, "Expander port" },
2081 { IPR_PATH_CFG_DEVICE_PORT, "Device port" },
2082 { IPR_PATH_CFG_DEVICE_LUN, "Device LUN" }
2083};
2084
2085static const struct {
2086 u8 status;
2087 char *desc;
2088} path_status_desc[] = {
2089 { IPR_PATH_CFG_NO_PROB, "Functional" },
2090 { IPR_PATH_CFG_DEGRADED, "Degraded" },
2091 { IPR_PATH_CFG_FAILED, "Failed" },
2092 { IPR_PATH_CFG_SUSPECT, "Suspect" },
2093 { IPR_PATH_NOT_DETECTED, "Missing" },
2094 { IPR_PATH_INCORRECT_CONN, "Incorrectly connected" }
2095};
2096
2097static const char *link_rate[] = {
2098 "unknown",
2099 "disabled",
2100 "phy reset problem",
2101 "spinup hold",
2102 "port selector",
2103 "unknown",
2104 "unknown",
2105 "unknown",
2106 "1.5Gbps",
2107 "3.0Gbps",
2108 "unknown",
2109 "unknown",
2110 "unknown",
2111 "unknown",
2112 "unknown",
2113 "unknown"
2114};
2115
2116/**
2117 * ipr_log_path_elem - Log a fabric path element.
2118 * @hostrcb: hostrcb struct
2119 * @cfg: fabric path element struct
2120 *
2121 * Return value:
2122 * none
2123 **/
2124static void ipr_log_path_elem(struct ipr_hostrcb *hostrcb,
2125 struct ipr_hostrcb_config_element *cfg)
2126{
2127 int i, j;
2128 u8 type = cfg->type_status & IPR_PATH_CFG_TYPE_MASK;
2129 u8 status = cfg->type_status & IPR_PATH_CFG_STATUS_MASK;
2130
2131 if (type == IPR_PATH_CFG_NOT_EXIST)
2132 return;
2133
2134 for (i = 0; i < ARRAY_SIZE(path_type_desc); i++) {
2135 if (path_type_desc[i].type != type)
2136 continue;
2137
2138 for (j = 0; j < ARRAY_SIZE(path_status_desc); j++) {
2139 if (path_status_desc[j].status != status)
2140 continue;
2141
2142 if (type == IPR_PATH_CFG_IOA_PORT) {
2143 ipr_hcam_err(hostrcb, "%s %s: Phy=%d, Link rate=%s, WWN=%08X%08X\n",
2144 path_status_desc[j].desc, path_type_desc[i].desc,
2145 cfg->phy, link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
2146 be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
2147 } else {
2148 if (cfg->cascaded_expander == 0xff && cfg->phy == 0xff) {
2149 ipr_hcam_err(hostrcb, "%s %s: Link rate=%s, WWN=%08X%08X\n",
2150 path_status_desc[j].desc, path_type_desc[i].desc,
2151 link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
2152 be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
2153 } else if (cfg->cascaded_expander == 0xff) {
2154 ipr_hcam_err(hostrcb, "%s %s: Phy=%d, Link rate=%s, "
2155 "WWN=%08X%08X\n", path_status_desc[j].desc,
2156 path_type_desc[i].desc, cfg->phy,
2157 link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
2158 be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
2159 } else if (cfg->phy == 0xff) {
2160 ipr_hcam_err(hostrcb, "%s %s: Cascade=%d, Link rate=%s, "
2161 "WWN=%08X%08X\n", path_status_desc[j].desc,
2162 path_type_desc[i].desc, cfg->cascaded_expander,
2163 link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
2164 be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
2165 } else {
2166 ipr_hcam_err(hostrcb, "%s %s: Cascade=%d, Phy=%d, Link rate=%s "
2167 "WWN=%08X%08X\n", path_status_desc[j].desc,
2168 path_type_desc[i].desc, cfg->cascaded_expander, cfg->phy,
2169 link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
2170 be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
2171 }
2172 }
2173 return;
2174 }
2175 }
2176
2177 ipr_hcam_err(hostrcb, "Path element=%02X: Cascade=%d Phy=%d Link rate=%s "
2178 "WWN=%08X%08X\n", cfg->type_status, cfg->cascaded_expander, cfg->phy,
2179 link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
2180 be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
2181}
2182
4565e370
WB
2183/**
2184 * ipr_log64_path_elem - Log a fabric path element.
2185 * @hostrcb: hostrcb struct
2186 * @cfg: fabric path element struct
2187 *
2188 * Return value:
2189 * none
2190 **/
2191static void ipr_log64_path_elem(struct ipr_hostrcb *hostrcb,
2192 struct ipr_hostrcb64_config_element *cfg)
2193{
2194 int i, j;
2195 u8 desc_id = cfg->descriptor_id & IPR_DESCRIPTOR_MASK;
2196 u8 type = cfg->type_status & IPR_PATH_CFG_TYPE_MASK;
2197 u8 status = cfg->type_status & IPR_PATH_CFG_STATUS_MASK;
2198 char buffer[IPR_MAX_RES_PATH_LENGTH];
2199
2200 if (type == IPR_PATH_CFG_NOT_EXIST || desc_id != IPR_DESCRIPTOR_SIS64)
2201 return;
2202
2203 for (i = 0; i < ARRAY_SIZE(path_type_desc); i++) {
2204 if (path_type_desc[i].type != type)
2205 continue;
2206
2207 for (j = 0; j < ARRAY_SIZE(path_status_desc); j++) {
2208 if (path_status_desc[j].status != status)
2209 continue;
2210
2211 ipr_hcam_err(hostrcb, "%s %s: Resource Path=%s, Link rate=%s, WWN=%08X%08X\n",
2212 path_status_desc[j].desc, path_type_desc[i].desc,
b3b3b407
BK
2213 ipr_format_res_path(hostrcb->ioa_cfg,
2214 cfg->res_path, buffer, sizeof(buffer)),
2215 link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
2216 be32_to_cpu(cfg->wwid[0]),
2217 be32_to_cpu(cfg->wwid[1]));
4565e370
WB
2218 return;
2219 }
2220 }
2221 ipr_hcam_err(hostrcb, "Path element=%02X: Resource Path=%s, Link rate=%s "
2222 "WWN=%08X%08X\n", cfg->type_status,
b3b3b407
BK
2223 ipr_format_res_path(hostrcb->ioa_cfg,
2224 cfg->res_path, buffer, sizeof(buffer)),
2225 link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
2226 be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
4565e370
WB
2227}
2228
49dc6a18
BK
2229/**
2230 * ipr_log_fabric_error - Log a fabric error.
2231 * @ioa_cfg: ioa config struct
2232 * @hostrcb: hostrcb struct
2233 *
2234 * Return value:
2235 * none
2236 **/
2237static void ipr_log_fabric_error(struct ipr_ioa_cfg *ioa_cfg,
2238 struct ipr_hostrcb *hostrcb)
2239{
2240 struct ipr_hostrcb_type_20_error *error;
2241 struct ipr_hostrcb_fabric_desc *fabric;
2242 struct ipr_hostrcb_config_element *cfg;
2243 int i, add_len;
2244
2245 error = &hostrcb->hcam.u.error.u.type_20_error;
2246 error->failure_reason[sizeof(error->failure_reason) - 1] = '\0';
2247 ipr_hcam_err(hostrcb, "%s\n", error->failure_reason);
2248
2249 add_len = be32_to_cpu(hostrcb->hcam.length) -
2250 (offsetof(struct ipr_hostrcb_error, u) +
2251 offsetof(struct ipr_hostrcb_type_20_error, desc));
2252
2253 for (i = 0, fabric = error->desc; i < error->num_entries; i++) {
2254 ipr_log_fabric_path(hostrcb, fabric);
2255 for_each_fabric_cfg(fabric, cfg)
2256 ipr_log_path_elem(hostrcb, cfg);
2257
2258 add_len -= be16_to_cpu(fabric->length);
2259 fabric = (struct ipr_hostrcb_fabric_desc *)
2260 ((unsigned long)fabric + be16_to_cpu(fabric->length));
2261 }
2262
ac719aba 2263 ipr_log_hex_data(ioa_cfg, (u32 *)fabric, add_len);
49dc6a18
BK
2264}
2265
4565e370
WB
2266/**
2267 * ipr_log_sis64_array_error - Log a sis64 array error.
2268 * @ioa_cfg: ioa config struct
2269 * @hostrcb: hostrcb struct
2270 *
2271 * Return value:
2272 * none
2273 **/
2274static void ipr_log_sis64_array_error(struct ipr_ioa_cfg *ioa_cfg,
2275 struct ipr_hostrcb *hostrcb)
2276{
2277 int i, num_entries;
2278 struct ipr_hostrcb_type_24_error *error;
2279 struct ipr_hostrcb64_array_data_entry *array_entry;
2280 char buffer[IPR_MAX_RES_PATH_LENGTH];
2281 const u8 zero_sn[IPR_SERIAL_NUM_LEN] = { [0 ... IPR_SERIAL_NUM_LEN-1] = '0' };
2282
2283 error = &hostrcb->hcam.u.error64.u.type_24_error;
2284
2285 ipr_err_separator;
2286
2287 ipr_err("RAID %s Array Configuration: %s\n",
2288 error->protection_level,
b3b3b407
BK
2289 ipr_format_res_path(ioa_cfg, error->last_res_path,
2290 buffer, sizeof(buffer)));
4565e370
WB
2291
2292 ipr_err_separator;
2293
2294 array_entry = error->array_member;
7262026f
WB
2295 num_entries = min_t(u32, error->num_entries,
2296 ARRAY_SIZE(error->array_member));
4565e370
WB
2297
2298 for (i = 0; i < num_entries; i++, array_entry++) {
2299
2300 if (!memcmp(array_entry->vpd.vpd.sn, zero_sn, IPR_SERIAL_NUM_LEN))
2301 continue;
2302
2303 if (error->exposed_mode_adn == i)
2304 ipr_err("Exposed Array Member %d:\n", i);
2305 else
2306 ipr_err("Array Member %d:\n", i);
2307
2308 ipr_err("Array Member %d:\n", i);
2309 ipr_log_ext_vpd(&array_entry->vpd);
7262026f 2310 ipr_err("Current Location: %s\n",
b3b3b407
BK
2311 ipr_format_res_path(ioa_cfg, array_entry->res_path,
2312 buffer, sizeof(buffer)));
7262026f 2313 ipr_err("Expected Location: %s\n",
b3b3b407
BK
2314 ipr_format_res_path(ioa_cfg,
2315 array_entry->expected_res_path,
2316 buffer, sizeof(buffer)));
4565e370
WB
2317
2318 ipr_err_separator;
2319 }
2320}
2321
2322/**
2323 * ipr_log_sis64_fabric_error - Log a sis64 fabric error.
2324 * @ioa_cfg: ioa config struct
2325 * @hostrcb: hostrcb struct
2326 *
2327 * Return value:
2328 * none
2329 **/
2330static void ipr_log_sis64_fabric_error(struct ipr_ioa_cfg *ioa_cfg,
2331 struct ipr_hostrcb *hostrcb)
2332{
2333 struct ipr_hostrcb_type_30_error *error;
2334 struct ipr_hostrcb64_fabric_desc *fabric;
2335 struct ipr_hostrcb64_config_element *cfg;
2336 int i, add_len;
2337
2338 error = &hostrcb->hcam.u.error64.u.type_30_error;
2339
2340 error->failure_reason[sizeof(error->failure_reason) - 1] = '\0';
2341 ipr_hcam_err(hostrcb, "%s\n", error->failure_reason);
2342
2343 add_len = be32_to_cpu(hostrcb->hcam.length) -
2344 (offsetof(struct ipr_hostrcb64_error, u) +
2345 offsetof(struct ipr_hostrcb_type_30_error, desc));
2346
2347 for (i = 0, fabric = error->desc; i < error->num_entries; i++) {
2348 ipr_log64_fabric_path(hostrcb, fabric);
2349 for_each_fabric_cfg(fabric, cfg)
2350 ipr_log64_path_elem(hostrcb, cfg);
2351
2352 add_len -= be16_to_cpu(fabric->length);
2353 fabric = (struct ipr_hostrcb64_fabric_desc *)
2354 ((unsigned long)fabric + be16_to_cpu(fabric->length));
2355 }
2356
2357 ipr_log_hex_data(ioa_cfg, (u32 *)fabric, add_len);
2358}
2359
b0df54bb
BK
2360/**
2361 * ipr_log_generic_error - Log an adapter error.
2362 * @ioa_cfg: ioa config struct
2363 * @hostrcb: hostrcb struct
2364 *
2365 * Return value:
2366 * none
2367 **/
2368static void ipr_log_generic_error(struct ipr_ioa_cfg *ioa_cfg,
2369 struct ipr_hostrcb *hostrcb)
2370{
ac719aba 2371 ipr_log_hex_data(ioa_cfg, hostrcb->hcam.u.raw.data,
b0df54bb
BK
2372 be32_to_cpu(hostrcb->hcam.length));
2373}
2374
169b9ec8
WX
2375/**
2376 * ipr_log_sis64_device_error - Log a cache error.
2377 * @ioa_cfg: ioa config struct
2378 * @hostrcb: hostrcb struct
2379 *
2380 * Return value:
2381 * none
2382 **/
2383static void ipr_log_sis64_device_error(struct ipr_ioa_cfg *ioa_cfg,
2384 struct ipr_hostrcb *hostrcb)
2385{
2386 struct ipr_hostrcb_type_21_error *error;
2387 char buffer[IPR_MAX_RES_PATH_LENGTH];
2388
2389 error = &hostrcb->hcam.u.error64.u.type_21_error;
2390
2391 ipr_err("-----Failing Device Information-----\n");
2392 ipr_err("World Wide Unique ID: %08X%08X%08X%08X\n",
2393 be32_to_cpu(error->wwn[0]), be32_to_cpu(error->wwn[1]),
2394 be32_to_cpu(error->wwn[2]), be32_to_cpu(error->wwn[3]));
2395 ipr_err("Device Resource Path: %s\n",
2396 __ipr_format_res_path(error->res_path,
2397 buffer, sizeof(buffer)));
2398 error->primary_problem_desc[sizeof(error->primary_problem_desc) - 1] = '\0';
2399 error->second_problem_desc[sizeof(error->second_problem_desc) - 1] = '\0';
2400 ipr_err("Primary Problem Description: %s\n", error->primary_problem_desc);
2401 ipr_err("Secondary Problem Description: %s\n", error->second_problem_desc);
2402 ipr_err("SCSI Sense Data:\n");
2403 ipr_log_hex_data(ioa_cfg, error->sense_data, sizeof(error->sense_data));
2404 ipr_err("SCSI Command Descriptor Block: \n");
2405 ipr_log_hex_data(ioa_cfg, error->cdb, sizeof(error->cdb));
2406
2407 ipr_err("Additional IOA Data:\n");
2408 ipr_log_hex_data(ioa_cfg, error->ioa_data, be32_to_cpu(error->length_of_error));
2409}
2410
1da177e4
LT
2411/**
2412 * ipr_get_error - Find the specfied IOASC in the ipr_error_table.
2413 * @ioasc: IOASC
2414 *
2415 * This function will return the index of into the ipr_error_table
2416 * for the specified IOASC. If the IOASC is not in the table,
2417 * 0 will be returned, which points to the entry used for unknown errors.
2418 *
2419 * Return value:
2420 * index into the ipr_error_table
2421 **/
2422static u32 ipr_get_error(u32 ioasc)
2423{
2424 int i;
2425
2426 for (i = 0; i < ARRAY_SIZE(ipr_error_table); i++)
35a39691 2427 if (ipr_error_table[i].ioasc == (ioasc & IPR_IOASC_IOASC_MASK))
1da177e4
LT
2428 return i;
2429
2430 return 0;
2431}
2432
2433/**
2434 * ipr_handle_log_data - Log an adapter error.
2435 * @ioa_cfg: ioa config struct
2436 * @hostrcb: hostrcb struct
2437 *
2438 * This function logs an adapter error to the system.
2439 *
2440 * Return value:
2441 * none
2442 **/
2443static void ipr_handle_log_data(struct ipr_ioa_cfg *ioa_cfg,
2444 struct ipr_hostrcb *hostrcb)
2445{
2446 u32 ioasc;
2447 int error_index;
3185ea63 2448 struct ipr_hostrcb_type_21_error *error;
1da177e4
LT
2449
2450 if (hostrcb->hcam.notify_type != IPR_HOST_RCB_NOTIF_TYPE_ERROR_LOG_ENTRY)
2451 return;
2452
2453 if (hostrcb->hcam.notifications_lost == IPR_HOST_RCB_NOTIFICATIONS_LOST)
2454 dev_err(&ioa_cfg->pdev->dev, "Error notifications lost\n");
2455
4565e370
WB
2456 if (ioa_cfg->sis64)
2457 ioasc = be32_to_cpu(hostrcb->hcam.u.error64.fd_ioasc);
2458 else
2459 ioasc = be32_to_cpu(hostrcb->hcam.u.error.fd_ioasc);
1da177e4 2460
4565e370
WB
2461 if (!ioa_cfg->sis64 && (ioasc == IPR_IOASC_BUS_WAS_RESET ||
2462 ioasc == IPR_IOASC_BUS_WAS_RESET_BY_OTHER)) {
1da177e4
LT
2463 /* Tell the midlayer we had a bus reset so it will handle the UA properly */
2464 scsi_report_bus_reset(ioa_cfg->host,
4565e370 2465 hostrcb->hcam.u.error.fd_res_addr.bus);
1da177e4
LT
2466 }
2467
2468 error_index = ipr_get_error(ioasc);
2469
2470 if (!ipr_error_table[error_index].log_hcam)
2471 return;
2472
3185ea63 2473 if (ioasc == IPR_IOASC_HW_CMD_FAILED &&
2474 hostrcb->hcam.overlay_id == IPR_HOST_RCB_OVERLAY_ID_21) {
2475 error = &hostrcb->hcam.u.error64.u.type_21_error;
2476
2477 if (((be32_to_cpu(error->sense_data[0]) & 0x0000ff00) >> 8) == ILLEGAL_REQUEST &&
2478 ioa_cfg->log_level <= IPR_DEFAULT_LOG_LEVEL)
2479 return;
2480 }
2481
49dc6a18 2482 ipr_hcam_err(hostrcb, "%s\n", ipr_error_table[error_index].error);
1da177e4
LT
2483
2484 /* Set indication we have logged an error */
2485 ioa_cfg->errors_logged++;
2486
933916f3 2487 if (ioa_cfg->log_level < ipr_error_table[error_index].log_hcam)
1da177e4 2488 return;
cf852037
BK
2489 if (be32_to_cpu(hostrcb->hcam.length) > sizeof(hostrcb->hcam.u.raw))
2490 hostrcb->hcam.length = cpu_to_be32(sizeof(hostrcb->hcam.u.raw));
1da177e4
LT
2491
2492 switch (hostrcb->hcam.overlay_id) {
1da177e4
LT
2493 case IPR_HOST_RCB_OVERLAY_ID_2:
2494 ipr_log_cache_error(ioa_cfg, hostrcb);
2495 break;
2496 case IPR_HOST_RCB_OVERLAY_ID_3:
2497 ipr_log_config_error(ioa_cfg, hostrcb);
2498 break;
2499 case IPR_HOST_RCB_OVERLAY_ID_4:
2500 case IPR_HOST_RCB_OVERLAY_ID_6:
2501 ipr_log_array_error(ioa_cfg, hostrcb);
2502 break;
b0df54bb
BK
2503 case IPR_HOST_RCB_OVERLAY_ID_7:
2504 ipr_log_dual_ioa_error(ioa_cfg, hostrcb);
2505 break;
ee0f05b8
BK
2506 case IPR_HOST_RCB_OVERLAY_ID_12:
2507 ipr_log_enhanced_cache_error(ioa_cfg, hostrcb);
2508 break;
2509 case IPR_HOST_RCB_OVERLAY_ID_13:
2510 ipr_log_enhanced_config_error(ioa_cfg, hostrcb);
2511 break;
2512 case IPR_HOST_RCB_OVERLAY_ID_14:
2513 case IPR_HOST_RCB_OVERLAY_ID_16:
2514 ipr_log_enhanced_array_error(ioa_cfg, hostrcb);
2515 break;
2516 case IPR_HOST_RCB_OVERLAY_ID_17:
2517 ipr_log_enhanced_dual_ioa_error(ioa_cfg, hostrcb);
2518 break;
49dc6a18
BK
2519 case IPR_HOST_RCB_OVERLAY_ID_20:
2520 ipr_log_fabric_error(ioa_cfg, hostrcb);
2521 break;
169b9ec8
WX
2522 case IPR_HOST_RCB_OVERLAY_ID_21:
2523 ipr_log_sis64_device_error(ioa_cfg, hostrcb);
2524 break;
4565e370
WB
2525 case IPR_HOST_RCB_OVERLAY_ID_23:
2526 ipr_log_sis64_config_error(ioa_cfg, hostrcb);
2527 break;
2528 case IPR_HOST_RCB_OVERLAY_ID_24:
2529 case IPR_HOST_RCB_OVERLAY_ID_26:
2530 ipr_log_sis64_array_error(ioa_cfg, hostrcb);
2531 break;
2532 case IPR_HOST_RCB_OVERLAY_ID_30:
2533 ipr_log_sis64_fabric_error(ioa_cfg, hostrcb);
2534 break;
cf852037 2535 case IPR_HOST_RCB_OVERLAY_ID_1:
1da177e4 2536 case IPR_HOST_RCB_OVERLAY_ID_DEFAULT:
1da177e4 2537 default:
a9cfca96 2538 ipr_log_generic_error(ioa_cfg, hostrcb);
1da177e4
LT
2539 break;
2540 }
2541}
2542
2543/**
2544 * ipr_process_error - Op done function for an adapter error log.
2545 * @ipr_cmd: ipr command struct
2546 *
2547 * This function is the op done function for an error log host
2548 * controlled async from the adapter. It will log the error and
2549 * send the HCAM back to the adapter.
2550 *
2551 * Return value:
2552 * none
2553 **/
2554static void ipr_process_error(struct ipr_cmnd *ipr_cmd)
2555{
2556 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
2557 struct ipr_hostrcb *hostrcb = ipr_cmd->u.hostrcb;
96d21f00 2558 u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
4565e370
WB
2559 u32 fd_ioasc;
2560
2561 if (ioa_cfg->sis64)
2562 fd_ioasc = be32_to_cpu(hostrcb->hcam.u.error64.fd_ioasc);
2563 else
2564 fd_ioasc = be32_to_cpu(hostrcb->hcam.u.error.fd_ioasc);
1da177e4
LT
2565
2566 list_del(&hostrcb->queue);
05a6538a 2567 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
1da177e4
LT
2568
2569 if (!ioasc) {
2570 ipr_handle_log_data(ioa_cfg, hostrcb);
65f56475
BK
2571 if (fd_ioasc == IPR_IOASC_NR_IOA_RESET_REQUIRED)
2572 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_ABBREV);
4fdd7c7a
BK
2573 } else if (ioasc != IPR_IOASC_IOA_WAS_RESET &&
2574 ioasc != IPR_IOASC_ABORTED_CMD_TERM_BY_HOST) {
1da177e4
LT
2575 dev_err(&ioa_cfg->pdev->dev,
2576 "Host RCB failed with IOASC: 0x%08X\n", ioasc);
2577 }
2578
2579 ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_LOG_DATA, hostrcb);
2580}
2581
2582/**
2583 * ipr_timeout - An internally generated op has timed out.
2584 * @ipr_cmd: ipr command struct
2585 *
2586 * This function blocks host requests and initiates an
2587 * adapter reset.
2588 *
2589 * Return value:
2590 * none
2591 **/
2592static void ipr_timeout(struct ipr_cmnd *ipr_cmd)
2593{
2594 unsigned long lock_flags = 0;
2595 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
2596
2597 ENTER;
2598 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2599
2600 ioa_cfg->errors_logged++;
2601 dev_err(&ioa_cfg->pdev->dev,
2602 "Adapter being reset due to command timeout.\n");
2603
2604 if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
2605 ioa_cfg->sdt_state = GET_DUMP;
2606
2607 if (!ioa_cfg->in_reset_reload || ioa_cfg->reset_cmd == ipr_cmd)
2608 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
2609
2610 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2611 LEAVE;
2612}
2613
2614/**
2615 * ipr_oper_timeout - Adapter timed out transitioning to operational
2616 * @ipr_cmd: ipr command struct
2617 *
2618 * This function blocks host requests and initiates an
2619 * adapter reset.
2620 *
2621 * Return value:
2622 * none
2623 **/
2624static void ipr_oper_timeout(struct ipr_cmnd *ipr_cmd)
2625{
2626 unsigned long lock_flags = 0;
2627 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
2628
2629 ENTER;
2630 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2631
2632 ioa_cfg->errors_logged++;
2633 dev_err(&ioa_cfg->pdev->dev,
2634 "Adapter timed out transitioning to operational.\n");
2635
2636 if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
2637 ioa_cfg->sdt_state = GET_DUMP;
2638
2639 if (!ioa_cfg->in_reset_reload || ioa_cfg->reset_cmd == ipr_cmd) {
2640 if (ipr_fastfail)
2641 ioa_cfg->reset_retries += IPR_NUM_RESET_RELOAD_RETRIES;
2642 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
2643 }
2644
2645 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2646 LEAVE;
2647}
2648
1da177e4
LT
2649/**
2650 * ipr_find_ses_entry - Find matching SES in SES table
2651 * @res: resource entry struct of SES
2652 *
2653 * Return value:
2654 * pointer to SES table entry / NULL on failure
2655 **/
2656static const struct ipr_ses_table_entry *
2657ipr_find_ses_entry(struct ipr_resource_entry *res)
2658{
2659 int i, j, matches;
3e7ebdfa 2660 struct ipr_std_inq_vpids *vpids;
1da177e4
LT
2661 const struct ipr_ses_table_entry *ste = ipr_ses_table;
2662
2663 for (i = 0; i < ARRAY_SIZE(ipr_ses_table); i++, ste++) {
2664 for (j = 0, matches = 0; j < IPR_PROD_ID_LEN; j++) {
2665 if (ste->compare_product_id_byte[j] == 'X') {
3e7ebdfa
WB
2666 vpids = &res->std_inq_data.vpids;
2667 if (vpids->product_id[j] == ste->product_id[j])
1da177e4
LT
2668 matches++;
2669 else
2670 break;
2671 } else
2672 matches++;
2673 }
2674
2675 if (matches == IPR_PROD_ID_LEN)
2676 return ste;
2677 }
2678
2679 return NULL;
2680}
2681
2682/**
2683 * ipr_get_max_scsi_speed - Determine max SCSI speed for a given bus
2684 * @ioa_cfg: ioa config struct
2685 * @bus: SCSI bus
2686 * @bus_width: bus width
2687 *
2688 * Return value:
2689 * SCSI bus speed in units of 100KHz, 1600 is 160 MHz
2690 * For a 2-byte wide SCSI bus, the maximum transfer speed is
2691 * twice the maximum transfer rate (e.g. for a wide enabled bus,
2692 * max 160MHz = max 320MB/sec).
2693 **/
2694static u32 ipr_get_max_scsi_speed(struct ipr_ioa_cfg *ioa_cfg, u8 bus, u8 bus_width)
2695{
2696 struct ipr_resource_entry *res;
2697 const struct ipr_ses_table_entry *ste;
2698 u32 max_xfer_rate = IPR_MAX_SCSI_RATE(bus_width);
2699
2700 /* Loop through each config table entry in the config table buffer */
2701 list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
3e7ebdfa 2702 if (!(IPR_IS_SES_DEVICE(res->std_inq_data)))
1da177e4
LT
2703 continue;
2704
3e7ebdfa 2705 if (bus != res->bus)
1da177e4
LT
2706 continue;
2707
2708 if (!(ste = ipr_find_ses_entry(res)))
2709 continue;
2710
2711 max_xfer_rate = (ste->max_bus_speed_limit * 10) / (bus_width / 8);
2712 }
2713
2714 return max_xfer_rate;
2715}
2716
2717/**
2718 * ipr_wait_iodbg_ack - Wait for an IODEBUG ACK from the IOA
2719 * @ioa_cfg: ioa config struct
2720 * @max_delay: max delay in micro-seconds to wait
2721 *
2722 * Waits for an IODEBUG ACK from the IOA, doing busy looping.
2723 *
2724 * Return value:
2725 * 0 on success / other on failure
2726 **/
2727static int ipr_wait_iodbg_ack(struct ipr_ioa_cfg *ioa_cfg, int max_delay)
2728{
2729 volatile u32 pcii_reg;
2730 int delay = 1;
2731
2732 /* Read interrupt reg until IOA signals IO Debug Acknowledge */
2733 while (delay < max_delay) {
2734 pcii_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
2735
2736 if (pcii_reg & IPR_PCII_IO_DEBUG_ACKNOWLEDGE)
2737 return 0;
2738
2739 /* udelay cannot be used if delay is more than a few milliseconds */
2740 if ((delay / 1000) > MAX_UDELAY_MS)
2741 mdelay(delay / 1000);
2742 else
2743 udelay(delay);
2744
2745 delay += delay;
2746 }
2747 return -EIO;
2748}
2749
dcbad00e
WB
2750/**
2751 * ipr_get_sis64_dump_data_section - Dump IOA memory
2752 * @ioa_cfg: ioa config struct
2753 * @start_addr: adapter address to dump
2754 * @dest: destination kernel buffer
2755 * @length_in_words: length to dump in 4 byte words
2756 *
2757 * Return value:
2758 * 0 on success
2759 **/
2760static int ipr_get_sis64_dump_data_section(struct ipr_ioa_cfg *ioa_cfg,
2761 u32 start_addr,
2762 __be32 *dest, u32 length_in_words)
2763{
2764 int i;
2765
2766 for (i = 0; i < length_in_words; i++) {
2767 writel(start_addr+(i*4), ioa_cfg->regs.dump_addr_reg);
2768 *dest = cpu_to_be32(readl(ioa_cfg->regs.dump_data_reg));
2769 dest++;
2770 }
2771
2772 return 0;
2773}
2774
1da177e4
LT
2775/**
2776 * ipr_get_ldump_data_section - Dump IOA memory
2777 * @ioa_cfg: ioa config struct
2778 * @start_addr: adapter address to dump
2779 * @dest: destination kernel buffer
2780 * @length_in_words: length to dump in 4 byte words
2781 *
2782 * Return value:
2783 * 0 on success / -EIO on failure
2784 **/
2785static int ipr_get_ldump_data_section(struct ipr_ioa_cfg *ioa_cfg,
2786 u32 start_addr,
2787 __be32 *dest, u32 length_in_words)
2788{
2789 volatile u32 temp_pcii_reg;
2790 int i, delay = 0;
2791
dcbad00e
WB
2792 if (ioa_cfg->sis64)
2793 return ipr_get_sis64_dump_data_section(ioa_cfg, start_addr,
2794 dest, length_in_words);
2795
1da177e4
LT
2796 /* Write IOA interrupt reg starting LDUMP state */
2797 writel((IPR_UPROCI_RESET_ALERT | IPR_UPROCI_IO_DEBUG_ALERT),
214777ba 2798 ioa_cfg->regs.set_uproc_interrupt_reg32);
1da177e4
LT
2799
2800 /* Wait for IO debug acknowledge */
2801 if (ipr_wait_iodbg_ack(ioa_cfg,
2802 IPR_LDUMP_MAX_LONG_ACK_DELAY_IN_USEC)) {
2803 dev_err(&ioa_cfg->pdev->dev,
2804 "IOA dump long data transfer timeout\n");
2805 return -EIO;
2806 }
2807
2808 /* Signal LDUMP interlocked - clear IO debug ack */
2809 writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE,
2810 ioa_cfg->regs.clr_interrupt_reg);
2811
2812 /* Write Mailbox with starting address */
2813 writel(start_addr, ioa_cfg->ioa_mailbox);
2814
2815 /* Signal address valid - clear IOA Reset alert */
2816 writel(IPR_UPROCI_RESET_ALERT,
214777ba 2817 ioa_cfg->regs.clr_uproc_interrupt_reg32);
1da177e4
LT
2818
2819 for (i = 0; i < length_in_words; i++) {
2820 /* Wait for IO debug acknowledge */
2821 if (ipr_wait_iodbg_ack(ioa_cfg,
2822 IPR_LDUMP_MAX_SHORT_ACK_DELAY_IN_USEC)) {
2823 dev_err(&ioa_cfg->pdev->dev,
2824 "IOA dump short data transfer timeout\n");
2825 return -EIO;
2826 }
2827
2828 /* Read data from mailbox and increment destination pointer */
2829 *dest = cpu_to_be32(readl(ioa_cfg->ioa_mailbox));
2830 dest++;
2831
2832 /* For all but the last word of data, signal data received */
2833 if (i < (length_in_words - 1)) {
2834 /* Signal dump data received - Clear IO debug Ack */
2835 writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE,
2836 ioa_cfg->regs.clr_interrupt_reg);
2837 }
2838 }
2839
2840 /* Signal end of block transfer. Set reset alert then clear IO debug ack */
2841 writel(IPR_UPROCI_RESET_ALERT,
214777ba 2842 ioa_cfg->regs.set_uproc_interrupt_reg32);
1da177e4
LT
2843
2844 writel(IPR_UPROCI_IO_DEBUG_ALERT,
214777ba 2845 ioa_cfg->regs.clr_uproc_interrupt_reg32);
1da177e4
LT
2846
2847 /* Signal dump data received - Clear IO debug Ack */
2848 writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE,
2849 ioa_cfg->regs.clr_interrupt_reg);
2850
2851 /* Wait for IOA to signal LDUMP exit - IOA reset alert will be cleared */
2852 while (delay < IPR_LDUMP_MAX_SHORT_ACK_DELAY_IN_USEC) {
2853 temp_pcii_reg =
214777ba 2854 readl(ioa_cfg->regs.sense_uproc_interrupt_reg32);
1da177e4
LT
2855
2856 if (!(temp_pcii_reg & IPR_UPROCI_RESET_ALERT))
2857 return 0;
2858
2859 udelay(10);
2860 delay += 10;
2861 }
2862
2863 return 0;
2864}
2865
2866#ifdef CONFIG_SCSI_IPR_DUMP
2867/**
2868 * ipr_sdt_copy - Copy Smart Dump Table to kernel buffer
2869 * @ioa_cfg: ioa config struct
2870 * @pci_address: adapter address
2871 * @length: length of data to copy
2872 *
2873 * Copy data from PCI adapter to kernel buffer.
2874 * Note: length MUST be a 4 byte multiple
2875 * Return value:
2876 * 0 on success / other on failure
2877 **/
2878static int ipr_sdt_copy(struct ipr_ioa_cfg *ioa_cfg,
2879 unsigned long pci_address, u32 length)
2880{
2881 int bytes_copied = 0;
4d4dd706 2882 int cur_len, rc, rem_len, rem_page_len, max_dump_size;
1da177e4
LT
2883 __be32 *page;
2884 unsigned long lock_flags = 0;
2885 struct ipr_ioa_dump *ioa_dump = &ioa_cfg->dump->ioa_dump;
2886
4d4dd706
KSS
2887 if (ioa_cfg->sis64)
2888 max_dump_size = IPR_FMT3_MAX_IOA_DUMP_SIZE;
2889 else
2890 max_dump_size = IPR_FMT2_MAX_IOA_DUMP_SIZE;
2891
1da177e4 2892 while (bytes_copied < length &&
4d4dd706 2893 (ioa_dump->hdr.len + bytes_copied) < max_dump_size) {
1da177e4
LT
2894 if (ioa_dump->page_offset >= PAGE_SIZE ||
2895 ioa_dump->page_offset == 0) {
2896 page = (__be32 *)__get_free_page(GFP_ATOMIC);
2897
2898 if (!page) {
2899 ipr_trace;
2900 return bytes_copied;
2901 }
2902
2903 ioa_dump->page_offset = 0;
2904 ioa_dump->ioa_data[ioa_dump->next_page_index] = page;
2905 ioa_dump->next_page_index++;
2906 } else
2907 page = ioa_dump->ioa_data[ioa_dump->next_page_index - 1];
2908
2909 rem_len = length - bytes_copied;
2910 rem_page_len = PAGE_SIZE - ioa_dump->page_offset;
2911 cur_len = min(rem_len, rem_page_len);
2912
2913 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2914 if (ioa_cfg->sdt_state == ABORT_DUMP) {
2915 rc = -EIO;
2916 } else {
2917 rc = ipr_get_ldump_data_section(ioa_cfg,
2918 pci_address + bytes_copied,
2919 &page[ioa_dump->page_offset / 4],
2920 (cur_len / sizeof(u32)));
2921 }
2922 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2923
2924 if (!rc) {
2925 ioa_dump->page_offset += cur_len;
2926 bytes_copied += cur_len;
2927 } else {
2928 ipr_trace;
2929 break;
2930 }
2931 schedule();
2932 }
2933
2934 return bytes_copied;
2935}
2936
2937/**
2938 * ipr_init_dump_entry_hdr - Initialize a dump entry header.
2939 * @hdr: dump entry header struct
2940 *
2941 * Return value:
2942 * nothing
2943 **/
2944static void ipr_init_dump_entry_hdr(struct ipr_dump_entry_header *hdr)
2945{
2946 hdr->eye_catcher = IPR_DUMP_EYE_CATCHER;
2947 hdr->num_elems = 1;
2948 hdr->offset = sizeof(*hdr);
2949 hdr->status = IPR_DUMP_STATUS_SUCCESS;
2950}
2951
2952/**
2953 * ipr_dump_ioa_type_data - Fill in the adapter type in the dump.
2954 * @ioa_cfg: ioa config struct
2955 * @driver_dump: driver dump struct
2956 *
2957 * Return value:
2958 * nothing
2959 **/
2960static void ipr_dump_ioa_type_data(struct ipr_ioa_cfg *ioa_cfg,
2961 struct ipr_driver_dump *driver_dump)
2962{
2963 struct ipr_inquiry_page3 *ucode_vpd = &ioa_cfg->vpd_cbs->page3_data;
2964
2965 ipr_init_dump_entry_hdr(&driver_dump->ioa_type_entry.hdr);
2966 driver_dump->ioa_type_entry.hdr.len =
2967 sizeof(struct ipr_dump_ioa_type_entry) -
2968 sizeof(struct ipr_dump_entry_header);
2969 driver_dump->ioa_type_entry.hdr.data_type = IPR_DUMP_DATA_TYPE_BINARY;
2970 driver_dump->ioa_type_entry.hdr.id = IPR_DUMP_DRIVER_TYPE_ID;
2971 driver_dump->ioa_type_entry.type = ioa_cfg->type;
2972 driver_dump->ioa_type_entry.fw_version = (ucode_vpd->major_release << 24) |
2973 (ucode_vpd->card_type << 16) | (ucode_vpd->minor_release[0] << 8) |
2974 ucode_vpd->minor_release[1];
2975 driver_dump->hdr.num_entries++;
2976}
2977
2978/**
2979 * ipr_dump_version_data - Fill in the driver version in the dump.
2980 * @ioa_cfg: ioa config struct
2981 * @driver_dump: driver dump struct
2982 *
2983 * Return value:
2984 * nothing
2985 **/
2986static void ipr_dump_version_data(struct ipr_ioa_cfg *ioa_cfg,
2987 struct ipr_driver_dump *driver_dump)
2988{
2989 ipr_init_dump_entry_hdr(&driver_dump->version_entry.hdr);
2990 driver_dump->version_entry.hdr.len =
2991 sizeof(struct ipr_dump_version_entry) -
2992 sizeof(struct ipr_dump_entry_header);
2993 driver_dump->version_entry.hdr.data_type = IPR_DUMP_DATA_TYPE_ASCII;
2994 driver_dump->version_entry.hdr.id = IPR_DUMP_DRIVER_VERSION_ID;
2995 strcpy(driver_dump->version_entry.version, IPR_DRIVER_VERSION);
2996 driver_dump->hdr.num_entries++;
2997}
2998
2999/**
3000 * ipr_dump_trace_data - Fill in the IOA trace in the dump.
3001 * @ioa_cfg: ioa config struct
3002 * @driver_dump: driver dump struct
3003 *
3004 * Return value:
3005 * nothing
3006 **/
3007static void ipr_dump_trace_data(struct ipr_ioa_cfg *ioa_cfg,
3008 struct ipr_driver_dump *driver_dump)
3009{
3010 ipr_init_dump_entry_hdr(&driver_dump->trace_entry.hdr);
3011 driver_dump->trace_entry.hdr.len =
3012 sizeof(struct ipr_dump_trace_entry) -
3013 sizeof(struct ipr_dump_entry_header);
3014 driver_dump->trace_entry.hdr.data_type = IPR_DUMP_DATA_TYPE_BINARY;
3015 driver_dump->trace_entry.hdr.id = IPR_DUMP_TRACE_ID;
3016 memcpy(driver_dump->trace_entry.trace, ioa_cfg->trace, IPR_TRACE_SIZE);
3017 driver_dump->hdr.num_entries++;
3018}
3019
3020/**
3021 * ipr_dump_location_data - Fill in the IOA location in the dump.
3022 * @ioa_cfg: ioa config struct
3023 * @driver_dump: driver dump struct
3024 *
3025 * Return value:
3026 * nothing
3027 **/
3028static void ipr_dump_location_data(struct ipr_ioa_cfg *ioa_cfg,
3029 struct ipr_driver_dump *driver_dump)
3030{
3031 ipr_init_dump_entry_hdr(&driver_dump->location_entry.hdr);
3032 driver_dump->location_entry.hdr.len =
3033 sizeof(struct ipr_dump_location_entry) -
3034 sizeof(struct ipr_dump_entry_header);
3035 driver_dump->location_entry.hdr.data_type = IPR_DUMP_DATA_TYPE_ASCII;
3036 driver_dump->location_entry.hdr.id = IPR_DUMP_LOCATION_ID;
71610f55 3037 strcpy(driver_dump->location_entry.location, dev_name(&ioa_cfg->pdev->dev));
1da177e4
LT
3038 driver_dump->hdr.num_entries++;
3039}
3040
3041/**
3042 * ipr_get_ioa_dump - Perform a dump of the driver and adapter.
3043 * @ioa_cfg: ioa config struct
3044 * @dump: dump struct
3045 *
3046 * Return value:
3047 * nothing
3048 **/
3049static void ipr_get_ioa_dump(struct ipr_ioa_cfg *ioa_cfg, struct ipr_dump *dump)
3050{
3051 unsigned long start_addr, sdt_word;
3052 unsigned long lock_flags = 0;
3053 struct ipr_driver_dump *driver_dump = &dump->driver_dump;
3054 struct ipr_ioa_dump *ioa_dump = &dump->ioa_dump;
4d4dd706
KSS
3055 u32 num_entries, max_num_entries, start_off, end_off;
3056 u32 max_dump_size, bytes_to_copy, bytes_copied, rc;
1da177e4 3057 struct ipr_sdt *sdt;
dcbad00e 3058 int valid = 1;
1da177e4
LT
3059 int i;
3060
3061 ENTER;
3062
3063 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3064
41e9a696 3065 if (ioa_cfg->sdt_state != READ_DUMP) {
1da177e4
LT
3066 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3067 return;
3068 }
3069
110def85
WB
3070 if (ioa_cfg->sis64) {
3071 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3072 ssleep(IPR_DUMP_DELAY_SECONDS);
3073 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3074 }
3075
1da177e4
LT
3076 start_addr = readl(ioa_cfg->ioa_mailbox);
3077
dcbad00e 3078 if (!ioa_cfg->sis64 && !ipr_sdt_is_fmt2(start_addr)) {
1da177e4
LT
3079 dev_err(&ioa_cfg->pdev->dev,
3080 "Invalid dump table format: %lx\n", start_addr);
3081 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3082 return;
3083 }
3084
3085 dev_err(&ioa_cfg->pdev->dev, "Dump of IOA initiated\n");
3086
3087 driver_dump->hdr.eye_catcher = IPR_DUMP_EYE_CATCHER;
3088
3089 /* Initialize the overall dump header */
3090 driver_dump->hdr.len = sizeof(struct ipr_driver_dump);
3091 driver_dump->hdr.num_entries = 1;
3092 driver_dump->hdr.first_entry_offset = sizeof(struct ipr_dump_header);
3093 driver_dump->hdr.status = IPR_DUMP_STATUS_SUCCESS;
3094 driver_dump->hdr.os = IPR_DUMP_OS_LINUX;
3095 driver_dump->hdr.driver_name = IPR_DUMP_DRIVER_NAME;
3096
3097 ipr_dump_version_data(ioa_cfg, driver_dump);
3098 ipr_dump_location_data(ioa_cfg, driver_dump);
3099 ipr_dump_ioa_type_data(ioa_cfg, driver_dump);
3100 ipr_dump_trace_data(ioa_cfg, driver_dump);
3101
3102 /* Update dump_header */
3103 driver_dump->hdr.len += sizeof(struct ipr_dump_entry_header);
3104
3105 /* IOA Dump entry */
3106 ipr_init_dump_entry_hdr(&ioa_dump->hdr);
1da177e4
LT
3107 ioa_dump->hdr.len = 0;
3108 ioa_dump->hdr.data_type = IPR_DUMP_DATA_TYPE_BINARY;
3109 ioa_dump->hdr.id = IPR_DUMP_IOA_DUMP_ID;
3110
3111 /* First entries in sdt are actually a list of dump addresses and
3112 lengths to gather the real dump data. sdt represents the pointer
3113 to the ioa generated dump table. Dump data will be extracted based
3114 on entries in this table */
3115 sdt = &ioa_dump->sdt;
3116
4d4dd706
KSS
3117 if (ioa_cfg->sis64) {
3118 max_num_entries = IPR_FMT3_NUM_SDT_ENTRIES;
3119 max_dump_size = IPR_FMT3_MAX_IOA_DUMP_SIZE;
3120 } else {
3121 max_num_entries = IPR_FMT2_NUM_SDT_ENTRIES;
3122 max_dump_size = IPR_FMT2_MAX_IOA_DUMP_SIZE;
3123 }
3124
3125 bytes_to_copy = offsetof(struct ipr_sdt, entry) +
3126 (max_num_entries * sizeof(struct ipr_sdt_entry));
1da177e4 3127 rc = ipr_get_ldump_data_section(ioa_cfg, start_addr, (__be32 *)sdt,
4d4dd706 3128 bytes_to_copy / sizeof(__be32));
1da177e4
LT
3129
3130 /* Smart Dump table is ready to use and the first entry is valid */
dcbad00e
WB
3131 if (rc || ((be32_to_cpu(sdt->hdr.state) != IPR_FMT3_SDT_READY_TO_USE) &&
3132 (be32_to_cpu(sdt->hdr.state) != IPR_FMT2_SDT_READY_TO_USE))) {
1da177e4
LT
3133 dev_err(&ioa_cfg->pdev->dev,
3134 "Dump of IOA failed. Dump table not valid: %d, %X.\n",
3135 rc, be32_to_cpu(sdt->hdr.state));
3136 driver_dump->hdr.status = IPR_DUMP_STATUS_FAILED;
3137 ioa_cfg->sdt_state = DUMP_OBTAINED;
3138 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3139 return;
3140 }
3141
3142 num_entries = be32_to_cpu(sdt->hdr.num_entries_used);
3143
4d4dd706
KSS
3144 if (num_entries > max_num_entries)
3145 num_entries = max_num_entries;
3146
3147 /* Update dump length to the actual data to be copied */
3148 dump->driver_dump.hdr.len += sizeof(struct ipr_sdt_header);
3149 if (ioa_cfg->sis64)
3150 dump->driver_dump.hdr.len += num_entries * sizeof(struct ipr_sdt_entry);
3151 else
3152 dump->driver_dump.hdr.len += max_num_entries * sizeof(struct ipr_sdt_entry);
1da177e4
LT
3153
3154 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3155
3156 for (i = 0; i < num_entries; i++) {
4d4dd706 3157 if (ioa_dump->hdr.len > max_dump_size) {
1da177e4
LT
3158 driver_dump->hdr.status = IPR_DUMP_STATUS_QUAL_SUCCESS;
3159 break;
3160 }
3161
3162 if (sdt->entry[i].flags & IPR_SDT_VALID_ENTRY) {
dcbad00e
WB
3163 sdt_word = be32_to_cpu(sdt->entry[i].start_token);
3164 if (ioa_cfg->sis64)
3165 bytes_to_copy = be32_to_cpu(sdt->entry[i].end_token);
3166 else {
3167 start_off = sdt_word & IPR_FMT2_MBX_ADDR_MASK;
3168 end_off = be32_to_cpu(sdt->entry[i].end_token);
3169
3170 if (ipr_sdt_is_fmt2(sdt_word) && sdt_word)
3171 bytes_to_copy = end_off - start_off;
3172 else
3173 valid = 0;
3174 }
3175 if (valid) {
4d4dd706 3176 if (bytes_to_copy > max_dump_size) {
1da177e4
LT
3177 sdt->entry[i].flags &= ~IPR_SDT_VALID_ENTRY;
3178 continue;
3179 }
3180
3181 /* Copy data from adapter to driver buffers */
3182 bytes_copied = ipr_sdt_copy(ioa_cfg, sdt_word,
3183 bytes_to_copy);
3184
3185 ioa_dump->hdr.len += bytes_copied;
3186
3187 if (bytes_copied != bytes_to_copy) {
3188 driver_dump->hdr.status = IPR_DUMP_STATUS_QUAL_SUCCESS;
3189 break;
3190 }
3191 }
3192 }
3193 }
3194
3195 dev_err(&ioa_cfg->pdev->dev, "Dump of IOA completed.\n");
3196
3197 /* Update dump_header */
3198 driver_dump->hdr.len += ioa_dump->hdr.len;
3199 wmb();
3200 ioa_cfg->sdt_state = DUMP_OBTAINED;
3201 LEAVE;
3202}
3203
3204#else
203fa3fe 3205#define ipr_get_ioa_dump(ioa_cfg, dump) do { } while (0)
1da177e4
LT
3206#endif
3207
3208/**
3209 * ipr_release_dump - Free adapter dump memory
3210 * @kref: kref struct
3211 *
3212 * Return value:
3213 * nothing
3214 **/
3215static void ipr_release_dump(struct kref *kref)
3216{
203fa3fe 3217 struct ipr_dump *dump = container_of(kref, struct ipr_dump, kref);
1da177e4
LT
3218 struct ipr_ioa_cfg *ioa_cfg = dump->ioa_cfg;
3219 unsigned long lock_flags = 0;
3220 int i;
3221
3222 ENTER;
3223 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3224 ioa_cfg->dump = NULL;
3225 ioa_cfg->sdt_state = INACTIVE;
3226 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3227
3228 for (i = 0; i < dump->ioa_dump.next_page_index; i++)
3229 free_page((unsigned long) dump->ioa_dump.ioa_data[i]);
3230
4d4dd706 3231 vfree(dump->ioa_dump.ioa_data);
1da177e4
LT
3232 kfree(dump);
3233 LEAVE;
3234}
3235
3236/**
3237 * ipr_worker_thread - Worker thread
c4028958 3238 * @work: ioa config struct
1da177e4
LT
3239 *
3240 * Called at task level from a work thread. This function takes care
3241 * of adding and removing device from the mid-layer as configuration
3242 * changes are detected by the adapter.
3243 *
3244 * Return value:
3245 * nothing
3246 **/
c4028958 3247static void ipr_worker_thread(struct work_struct *work)
1da177e4
LT
3248{
3249 unsigned long lock_flags;
3250 struct ipr_resource_entry *res;
3251 struct scsi_device *sdev;
3252 struct ipr_dump *dump;
c4028958
DH
3253 struct ipr_ioa_cfg *ioa_cfg =
3254 container_of(work, struct ipr_ioa_cfg, work_q);
1da177e4
LT
3255 u8 bus, target, lun;
3256 int did_work;
3257
3258 ENTER;
3259 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3260
41e9a696 3261 if (ioa_cfg->sdt_state == READ_DUMP) {
1da177e4
LT
3262 dump = ioa_cfg->dump;
3263 if (!dump) {
3264 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3265 return;
3266 }
3267 kref_get(&dump->kref);
3268 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3269 ipr_get_ioa_dump(ioa_cfg, dump);
3270 kref_put(&dump->kref, ipr_release_dump);
3271
3272 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4c647e90 3273 if (ioa_cfg->sdt_state == DUMP_OBTAINED && !ioa_cfg->dump_timeout)
1da177e4
LT
3274 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
3275 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3276 return;
3277 }
3278
3279restart:
3280 do {
3281 did_work = 0;
f688f96d 3282 if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].allow_cmds) {
1da177e4
LT
3283 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3284 return;
3285 }
3286
3287 list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
3288 if (res->del_from_ml && res->sdev) {
3289 did_work = 1;
3290 sdev = res->sdev;
3291 if (!scsi_device_get(sdev)) {
5767a1c4
KSS
3292 if (!res->add_to_ml)
3293 list_move_tail(&res->queue, &ioa_cfg->free_res_q);
3294 else
3295 res->del_from_ml = 0;
1da177e4
LT
3296 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3297 scsi_remove_device(sdev);
3298 scsi_device_put(sdev);
3299 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3300 }
3301 break;
3302 }
3303 }
203fa3fe 3304 } while (did_work);
1da177e4
LT
3305
3306 list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
3307 if (res->add_to_ml) {
3e7ebdfa
WB
3308 bus = res->bus;
3309 target = res->target;
3310 lun = res->lun;
1121b794 3311 res->add_to_ml = 0;
1da177e4
LT
3312 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3313 scsi_add_device(ioa_cfg->host, bus, target, lun);
3314 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3315 goto restart;
3316 }
3317 }
3318
f688f96d 3319 ioa_cfg->scan_done = 1;
1da177e4 3320 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
ee959b00 3321 kobject_uevent(&ioa_cfg->host->shost_dev.kobj, KOBJ_CHANGE);
1da177e4
LT
3322 LEAVE;
3323}
3324
3325#ifdef CONFIG_SCSI_IPR_TRACE
3326/**
3327 * ipr_read_trace - Dump the adapter trace
2c3c8bea 3328 * @filp: open sysfs file
1da177e4 3329 * @kobj: kobject struct
91a69029 3330 * @bin_attr: bin_attribute struct
1da177e4
LT
3331 * @buf: buffer
3332 * @off: offset
3333 * @count: buffer size
3334 *
3335 * Return value:
3336 * number of bytes printed to buffer
3337 **/
2c3c8bea 3338static ssize_t ipr_read_trace(struct file *filp, struct kobject *kobj,
91a69029
ZR
3339 struct bin_attribute *bin_attr,
3340 char *buf, loff_t off, size_t count)
1da177e4 3341{
ee959b00
TJ
3342 struct device *dev = container_of(kobj, struct device, kobj);
3343 struct Scsi_Host *shost = class_to_shost(dev);
1da177e4
LT
3344 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3345 unsigned long lock_flags = 0;
d777aaf3 3346 ssize_t ret;
1da177e4
LT
3347
3348 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
d777aaf3
AM
3349 ret = memory_read_from_buffer(buf, count, &off, ioa_cfg->trace,
3350 IPR_TRACE_SIZE);
1da177e4 3351 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
d777aaf3
AM
3352
3353 return ret;
1da177e4
LT
3354}
3355
3356static struct bin_attribute ipr_trace_attr = {
3357 .attr = {
3358 .name = "trace",
3359 .mode = S_IRUGO,
3360 },
3361 .size = 0,
3362 .read = ipr_read_trace,
3363};
3364#endif
3365
3366/**
3367 * ipr_show_fw_version - Show the firmware version
ee959b00
TJ
3368 * @dev: class device struct
3369 * @buf: buffer
1da177e4
LT
3370 *
3371 * Return value:
3372 * number of bytes printed to buffer
3373 **/
ee959b00
TJ
3374static ssize_t ipr_show_fw_version(struct device *dev,
3375 struct device_attribute *attr, char *buf)
1da177e4 3376{
ee959b00 3377 struct Scsi_Host *shost = class_to_shost(dev);
1da177e4
LT
3378 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3379 struct ipr_inquiry_page3 *ucode_vpd = &ioa_cfg->vpd_cbs->page3_data;
3380 unsigned long lock_flags = 0;
3381 int len;
3382
3383 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3384 len = snprintf(buf, PAGE_SIZE, "%02X%02X%02X%02X\n",
3385 ucode_vpd->major_release, ucode_vpd->card_type,
3386 ucode_vpd->minor_release[0],
3387 ucode_vpd->minor_release[1]);
3388 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3389 return len;
3390}
3391
ee959b00 3392static struct device_attribute ipr_fw_version_attr = {
1da177e4
LT
3393 .attr = {
3394 .name = "fw_version",
3395 .mode = S_IRUGO,
3396 },
3397 .show = ipr_show_fw_version,
3398};
3399
3400/**
3401 * ipr_show_log_level - Show the adapter's error logging level
ee959b00
TJ
3402 * @dev: class device struct
3403 * @buf: buffer
1da177e4
LT
3404 *
3405 * Return value:
3406 * number of bytes printed to buffer
3407 **/
ee959b00
TJ
3408static ssize_t ipr_show_log_level(struct device *dev,
3409 struct device_attribute *attr, char *buf)
1da177e4 3410{
ee959b00 3411 struct Scsi_Host *shost = class_to_shost(dev);
1da177e4
LT
3412 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3413 unsigned long lock_flags = 0;
3414 int len;
3415
3416 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3417 len = snprintf(buf, PAGE_SIZE, "%d\n", ioa_cfg->log_level);
3418 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3419 return len;
3420}
3421
3422/**
3423 * ipr_store_log_level - Change the adapter's error logging level
ee959b00
TJ
3424 * @dev: class device struct
3425 * @buf: buffer
1da177e4
LT
3426 *
3427 * Return value:
3428 * number of bytes printed to buffer
3429 **/
ee959b00 3430static ssize_t ipr_store_log_level(struct device *dev,
203fa3fe 3431 struct device_attribute *attr,
1da177e4
LT
3432 const char *buf, size_t count)
3433{
ee959b00 3434 struct Scsi_Host *shost = class_to_shost(dev);
1da177e4
LT
3435 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3436 unsigned long lock_flags = 0;
3437
3438 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3439 ioa_cfg->log_level = simple_strtoul(buf, NULL, 10);
3440 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3441 return strlen(buf);
3442}
3443
ee959b00 3444static struct device_attribute ipr_log_level_attr = {
1da177e4
LT
3445 .attr = {
3446 .name = "log_level",
3447 .mode = S_IRUGO | S_IWUSR,
3448 },
3449 .show = ipr_show_log_level,
3450 .store = ipr_store_log_level
3451};
3452
3453/**
3454 * ipr_store_diagnostics - IOA Diagnostics interface
ee959b00
TJ
3455 * @dev: device struct
3456 * @buf: buffer
3457 * @count: buffer size
1da177e4
LT
3458 *
3459 * This function will reset the adapter and wait a reasonable
3460 * amount of time for any errors that the adapter might log.
3461 *
3462 * Return value:
3463 * count on success / other on failure
3464 **/
ee959b00
TJ
3465static ssize_t ipr_store_diagnostics(struct device *dev,
3466 struct device_attribute *attr,
1da177e4
LT
3467 const char *buf, size_t count)
3468{
ee959b00 3469 struct Scsi_Host *shost = class_to_shost(dev);
1da177e4
LT
3470 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3471 unsigned long lock_flags = 0;
3472 int rc = count;
3473
3474 if (!capable(CAP_SYS_ADMIN))
3475 return -EACCES;
3476
1da177e4 3477 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
203fa3fe 3478 while (ioa_cfg->in_reset_reload) {
970ea294
BK
3479 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3480 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
3481 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3482 }
3483
1da177e4
LT
3484 ioa_cfg->errors_logged = 0;
3485 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NORMAL);
3486
3487 if (ioa_cfg->in_reset_reload) {
3488 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3489 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
3490
3491 /* Wait for a second for any errors to be logged */
3492 msleep(1000);
3493 } else {
3494 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3495 return -EIO;
3496 }
3497
3498 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3499 if (ioa_cfg->in_reset_reload || ioa_cfg->errors_logged)
3500 rc = -EIO;
3501 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3502
3503 return rc;
3504}
3505
ee959b00 3506static struct device_attribute ipr_diagnostics_attr = {
1da177e4
LT
3507 .attr = {
3508 .name = "run_diagnostics",
3509 .mode = S_IWUSR,
3510 },
3511 .store = ipr_store_diagnostics
3512};
3513
f37eb54b
BK
3514/**
3515 * ipr_show_adapter_state - Show the adapter's state
ee959b00
TJ
3516 * @class_dev: device struct
3517 * @buf: buffer
f37eb54b
BK
3518 *
3519 * Return value:
3520 * number of bytes printed to buffer
3521 **/
ee959b00
TJ
3522static ssize_t ipr_show_adapter_state(struct device *dev,
3523 struct device_attribute *attr, char *buf)
f37eb54b 3524{
ee959b00 3525 struct Scsi_Host *shost = class_to_shost(dev);
f37eb54b
BK
3526 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3527 unsigned long lock_flags = 0;
3528 int len;
3529
3530 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
56d6aa33 3531 if (ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead)
f37eb54b
BK
3532 len = snprintf(buf, PAGE_SIZE, "offline\n");
3533 else
3534 len = snprintf(buf, PAGE_SIZE, "online\n");
3535 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3536 return len;
3537}
3538
3539/**
3540 * ipr_store_adapter_state - Change adapter state
ee959b00
TJ
3541 * @dev: device struct
3542 * @buf: buffer
3543 * @count: buffer size
f37eb54b
BK
3544 *
3545 * This function will change the adapter's state.
3546 *
3547 * Return value:
3548 * count on success / other on failure
3549 **/
ee959b00
TJ
3550static ssize_t ipr_store_adapter_state(struct device *dev,
3551 struct device_attribute *attr,
f37eb54b
BK
3552 const char *buf, size_t count)
3553{
ee959b00 3554 struct Scsi_Host *shost = class_to_shost(dev);
f37eb54b
BK
3555 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3556 unsigned long lock_flags;
56d6aa33 3557 int result = count, i;
f37eb54b
BK
3558
3559 if (!capable(CAP_SYS_ADMIN))
3560 return -EACCES;
3561
3562 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
56d6aa33 3563 if (ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead &&
3564 !strncmp(buf, "online", 6)) {
3565 for (i = 0; i < ioa_cfg->hrrq_num; i++) {
3566 spin_lock(&ioa_cfg->hrrq[i]._lock);
3567 ioa_cfg->hrrq[i].ioa_is_dead = 0;
3568 spin_unlock(&ioa_cfg->hrrq[i]._lock);
3569 }
3570 wmb();
f37eb54b
BK
3571 ioa_cfg->reset_retries = 0;
3572 ioa_cfg->in_ioa_bringdown = 0;
3573 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
3574 }
3575 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3576 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
3577
3578 return result;
3579}
3580
ee959b00 3581static struct device_attribute ipr_ioa_state_attr = {
f37eb54b 3582 .attr = {
49dd0961 3583 .name = "online_state",
f37eb54b
BK
3584 .mode = S_IRUGO | S_IWUSR,
3585 },
3586 .show = ipr_show_adapter_state,
3587 .store = ipr_store_adapter_state
3588};
3589
1da177e4
LT
3590/**
3591 * ipr_store_reset_adapter - Reset the adapter
ee959b00
TJ
3592 * @dev: device struct
3593 * @buf: buffer
3594 * @count: buffer size
1da177e4
LT
3595 *
3596 * This function will reset the adapter.
3597 *
3598 * Return value:
3599 * count on success / other on failure
3600 **/
ee959b00
TJ
3601static ssize_t ipr_store_reset_adapter(struct device *dev,
3602 struct device_attribute *attr,
1da177e4
LT
3603 const char *buf, size_t count)
3604{
ee959b00 3605 struct Scsi_Host *shost = class_to_shost(dev);
1da177e4
LT
3606 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3607 unsigned long lock_flags;
3608 int result = count;
3609
3610 if (!capable(CAP_SYS_ADMIN))
3611 return -EACCES;
3612
3613 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3614 if (!ioa_cfg->in_reset_reload)
3615 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NORMAL);
3616 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3617 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
3618
3619 return result;
3620}
3621
ee959b00 3622static struct device_attribute ipr_ioa_reset_attr = {
1da177e4
LT
3623 .attr = {
3624 .name = "reset_host",
3625 .mode = S_IWUSR,
3626 },
3627 .store = ipr_store_reset_adapter
3628};
3629
b53d124a 3630static int ipr_iopoll(struct blk_iopoll *iop, int budget);
3631 /**
3632 * ipr_show_iopoll_weight - Show ipr polling mode
3633 * @dev: class device struct
3634 * @buf: buffer
3635 *
3636 * Return value:
3637 * number of bytes printed to buffer
3638 **/
3639static ssize_t ipr_show_iopoll_weight(struct device *dev,
3640 struct device_attribute *attr, char *buf)
3641{
3642 struct Scsi_Host *shost = class_to_shost(dev);
3643 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3644 unsigned long lock_flags = 0;
3645 int len;
3646
3647 spin_lock_irqsave(shost->host_lock, lock_flags);
3648 len = snprintf(buf, PAGE_SIZE, "%d\n", ioa_cfg->iopoll_weight);
3649 spin_unlock_irqrestore(shost->host_lock, lock_flags);
3650
3651 return len;
3652}
3653
3654/**
3655 * ipr_store_iopoll_weight - Change the adapter's polling mode
3656 * @dev: class device struct
3657 * @buf: buffer
3658 *
3659 * Return value:
3660 * number of bytes printed to buffer
3661 **/
3662static ssize_t ipr_store_iopoll_weight(struct device *dev,
3663 struct device_attribute *attr,
3664 const char *buf, size_t count)
3665{
3666 struct Scsi_Host *shost = class_to_shost(dev);
3667 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3668 unsigned long user_iopoll_weight;
3669 unsigned long lock_flags = 0;
3670 int i;
3671
3672 if (!ioa_cfg->sis64) {
3673 dev_info(&ioa_cfg->pdev->dev, "blk-iopoll not supported on this adapter\n");
3674 return -EINVAL;
3675 }
3676 if (kstrtoul(buf, 10, &user_iopoll_weight))
3677 return -EINVAL;
3678
3679 if (user_iopoll_weight > 256) {
3680 dev_info(&ioa_cfg->pdev->dev, "Invalid blk-iopoll weight. It must be less than 256\n");
3681 return -EINVAL;
3682 }
3683
3684 if (user_iopoll_weight == ioa_cfg->iopoll_weight) {
3685 dev_info(&ioa_cfg->pdev->dev, "Current blk-iopoll weight has the same weight\n");
3686 return strlen(buf);
3687 }
3688
89f8b33c 3689 if (ioa_cfg->iopoll_weight && ioa_cfg->sis64 && ioa_cfg->nvectors > 1) {
b53d124a 3690 for (i = 1; i < ioa_cfg->hrrq_num; i++)
3691 blk_iopoll_disable(&ioa_cfg->hrrq[i].iopoll);
3692 }
3693
3694 spin_lock_irqsave(shost->host_lock, lock_flags);
3695 ioa_cfg->iopoll_weight = user_iopoll_weight;
89f8b33c 3696 if (ioa_cfg->iopoll_weight && ioa_cfg->sis64 && ioa_cfg->nvectors > 1) {
b53d124a 3697 for (i = 1; i < ioa_cfg->hrrq_num; i++) {
3698 blk_iopoll_init(&ioa_cfg->hrrq[i].iopoll,
3699 ioa_cfg->iopoll_weight, ipr_iopoll);
3700 blk_iopoll_enable(&ioa_cfg->hrrq[i].iopoll);
3701 }
3702 }
3703 spin_unlock_irqrestore(shost->host_lock, lock_flags);
3704
3705 return strlen(buf);
3706}
3707
3708static struct device_attribute ipr_iopoll_weight_attr = {
3709 .attr = {
3710 .name = "iopoll_weight",
3711 .mode = S_IRUGO | S_IWUSR,
3712 },
3713 .show = ipr_show_iopoll_weight,
3714 .store = ipr_store_iopoll_weight
3715};
3716
1da177e4
LT
3717/**
3718 * ipr_alloc_ucode_buffer - Allocates a microcode download buffer
3719 * @buf_len: buffer length
3720 *
3721 * Allocates a DMA'able buffer in chunks and assembles a scatter/gather
3722 * list to use for microcode download
3723 *
3724 * Return value:
3725 * pointer to sglist / NULL on failure
3726 **/
3727static struct ipr_sglist *ipr_alloc_ucode_buffer(int buf_len)
3728{
3729 int sg_size, order, bsize_elem, num_elem, i, j;
3730 struct ipr_sglist *sglist;
3731 struct scatterlist *scatterlist;
3732 struct page *page;
3733
3734 /* Get the minimum size per scatter/gather element */
3735 sg_size = buf_len / (IPR_MAX_SGLIST - 1);
3736
3737 /* Get the actual size per element */
3738 order = get_order(sg_size);
3739
3740 /* Determine the actual number of bytes per element */
3741 bsize_elem = PAGE_SIZE * (1 << order);
3742
3743 /* Determine the actual number of sg entries needed */
3744 if (buf_len % bsize_elem)
3745 num_elem = (buf_len / bsize_elem) + 1;
3746 else
3747 num_elem = buf_len / bsize_elem;
3748
3749 /* Allocate a scatter/gather list for the DMA */
0bc42e35 3750 sglist = kzalloc(sizeof(struct ipr_sglist) +
1da177e4
LT
3751 (sizeof(struct scatterlist) * (num_elem - 1)),
3752 GFP_KERNEL);
3753
3754 if (sglist == NULL) {
3755 ipr_trace;
3756 return NULL;
3757 }
3758
1da177e4 3759 scatterlist = sglist->scatterlist;
45711f1a 3760 sg_init_table(scatterlist, num_elem);
1da177e4
LT
3761
3762 sglist->order = order;
3763 sglist->num_sg = num_elem;
3764
3765 /* Allocate a bunch of sg elements */
3766 for (i = 0; i < num_elem; i++) {
3767 page = alloc_pages(GFP_KERNEL, order);
3768 if (!page) {
3769 ipr_trace;
3770
3771 /* Free up what we already allocated */
3772 for (j = i - 1; j >= 0; j--)
45711f1a 3773 __free_pages(sg_page(&scatterlist[j]), order);
1da177e4
LT
3774 kfree(sglist);
3775 return NULL;
3776 }
3777
642f1490 3778 sg_set_page(&scatterlist[i], page, 0, 0);
1da177e4
LT
3779 }
3780
3781 return sglist;
3782}
3783
3784/**
3785 * ipr_free_ucode_buffer - Frees a microcode download buffer
3786 * @p_dnld: scatter/gather list pointer
3787 *
3788 * Free a DMA'able ucode download buffer previously allocated with
3789 * ipr_alloc_ucode_buffer
3790 *
3791 * Return value:
3792 * nothing
3793 **/
3794static void ipr_free_ucode_buffer(struct ipr_sglist *sglist)
3795{
3796 int i;
3797
3798 for (i = 0; i < sglist->num_sg; i++)
45711f1a 3799 __free_pages(sg_page(&sglist->scatterlist[i]), sglist->order);
1da177e4
LT
3800
3801 kfree(sglist);
3802}
3803
3804/**
3805 * ipr_copy_ucode_buffer - Copy user buffer to kernel buffer
3806 * @sglist: scatter/gather list pointer
3807 * @buffer: buffer pointer
3808 * @len: buffer length
3809 *
3810 * Copy a microcode image from a user buffer into a buffer allocated by
3811 * ipr_alloc_ucode_buffer
3812 *
3813 * Return value:
3814 * 0 on success / other on failure
3815 **/
3816static int ipr_copy_ucode_buffer(struct ipr_sglist *sglist,
3817 u8 *buffer, u32 len)
3818{
3819 int bsize_elem, i, result = 0;
3820 struct scatterlist *scatterlist;
3821 void *kaddr;
3822
3823 /* Determine the actual number of bytes per element */
3824 bsize_elem = PAGE_SIZE * (1 << sglist->order);
3825
3826 scatterlist = sglist->scatterlist;
3827
3828 for (i = 0; i < (len / bsize_elem); i++, buffer += bsize_elem) {
45711f1a
JA
3829 struct page *page = sg_page(&scatterlist[i]);
3830
3831 kaddr = kmap(page);
1da177e4 3832 memcpy(kaddr, buffer, bsize_elem);
45711f1a 3833 kunmap(page);
1da177e4
LT
3834
3835 scatterlist[i].length = bsize_elem;
3836
3837 if (result != 0) {
3838 ipr_trace;
3839 return result;
3840 }
3841 }
3842
3843 if (len % bsize_elem) {
45711f1a
JA
3844 struct page *page = sg_page(&scatterlist[i]);
3845
3846 kaddr = kmap(page);
1da177e4 3847 memcpy(kaddr, buffer, len % bsize_elem);
45711f1a 3848 kunmap(page);
1da177e4
LT
3849
3850 scatterlist[i].length = len % bsize_elem;
3851 }
3852
3853 sglist->buffer_len = len;
3854 return result;
3855}
3856
a32c055f
WB
3857/**
3858 * ipr_build_ucode_ioadl64 - Build a microcode download IOADL
3859 * @ipr_cmd: ipr command struct
3860 * @sglist: scatter/gather list
3861 *
3862 * Builds a microcode download IOA data list (IOADL).
3863 *
3864 **/
3865static void ipr_build_ucode_ioadl64(struct ipr_cmnd *ipr_cmd,
3866 struct ipr_sglist *sglist)
3867{
3868 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
3869 struct ipr_ioadl64_desc *ioadl64 = ipr_cmd->i.ioadl64;
3870 struct scatterlist *scatterlist = sglist->scatterlist;
3871 int i;
3872
3873 ipr_cmd->dma_use_sg = sglist->num_dma_sg;
3874 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
3875 ioarcb->data_transfer_length = cpu_to_be32(sglist->buffer_len);
3876
3877 ioarcb->ioadl_len =
3878 cpu_to_be32(sizeof(struct ipr_ioadl64_desc) * ipr_cmd->dma_use_sg);
3879 for (i = 0; i < ipr_cmd->dma_use_sg; i++) {
3880 ioadl64[i].flags = cpu_to_be32(IPR_IOADL_FLAGS_WRITE);
3881 ioadl64[i].data_len = cpu_to_be32(sg_dma_len(&scatterlist[i]));
3882 ioadl64[i].address = cpu_to_be64(sg_dma_address(&scatterlist[i]));
3883 }
3884
3885 ioadl64[i-1].flags |= cpu_to_be32(IPR_IOADL_FLAGS_LAST);
3886}
3887
1da177e4 3888/**
12baa420 3889 * ipr_build_ucode_ioadl - Build a microcode download IOADL
1da177e4
LT
3890 * @ipr_cmd: ipr command struct
3891 * @sglist: scatter/gather list
1da177e4 3892 *
12baa420 3893 * Builds a microcode download IOA data list (IOADL).
1da177e4 3894 *
1da177e4 3895 **/
12baa420
BK
3896static void ipr_build_ucode_ioadl(struct ipr_cmnd *ipr_cmd,
3897 struct ipr_sglist *sglist)
1da177e4 3898{
1da177e4 3899 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
a32c055f 3900 struct ipr_ioadl_desc *ioadl = ipr_cmd->i.ioadl;
1da177e4
LT
3901 struct scatterlist *scatterlist = sglist->scatterlist;
3902 int i;
3903
12baa420 3904 ipr_cmd->dma_use_sg = sglist->num_dma_sg;
1da177e4 3905 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
a32c055f
WB
3906 ioarcb->data_transfer_length = cpu_to_be32(sglist->buffer_len);
3907
3908 ioarcb->ioadl_len =
1da177e4
LT
3909 cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);
3910
3911 for (i = 0; i < ipr_cmd->dma_use_sg; i++) {
3912 ioadl[i].flags_and_data_len =
3913 cpu_to_be32(IPR_IOADL_FLAGS_WRITE | sg_dma_len(&scatterlist[i]));
3914 ioadl[i].address =
3915 cpu_to_be32(sg_dma_address(&scatterlist[i]));
3916 }
3917
12baa420
BK
3918 ioadl[i-1].flags_and_data_len |=
3919 cpu_to_be32(IPR_IOADL_FLAGS_LAST);
3920}
3921
3922/**
3923 * ipr_update_ioa_ucode - Update IOA's microcode
3924 * @ioa_cfg: ioa config struct
3925 * @sglist: scatter/gather list
3926 *
3927 * Initiate an adapter reset to update the IOA's microcode
3928 *
3929 * Return value:
3930 * 0 on success / -EIO on failure
3931 **/
3932static int ipr_update_ioa_ucode(struct ipr_ioa_cfg *ioa_cfg,
3933 struct ipr_sglist *sglist)
3934{
3935 unsigned long lock_flags;
3936
3937 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
203fa3fe 3938 while (ioa_cfg->in_reset_reload) {
970ea294
BK
3939 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3940 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
3941 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3942 }
12baa420
BK
3943
3944 if (ioa_cfg->ucode_sglist) {
3945 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3946 dev_err(&ioa_cfg->pdev->dev,
3947 "Microcode download already in progress\n");
3948 return -EIO;
1da177e4 3949 }
12baa420 3950
d73341bf
AB
3951 sglist->num_dma_sg = dma_map_sg(&ioa_cfg->pdev->dev,
3952 sglist->scatterlist, sglist->num_sg,
3953 DMA_TO_DEVICE);
12baa420
BK
3954
3955 if (!sglist->num_dma_sg) {
3956 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3957 dev_err(&ioa_cfg->pdev->dev,
3958 "Failed to map microcode download buffer!\n");
1da177e4
LT
3959 return -EIO;
3960 }
3961
12baa420
BK
3962 ioa_cfg->ucode_sglist = sglist;
3963 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NORMAL);
3964 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3965 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
3966
3967 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3968 ioa_cfg->ucode_sglist = NULL;
3969 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
1da177e4
LT
3970 return 0;
3971}
3972
3973/**
3974 * ipr_store_update_fw - Update the firmware on the adapter
ee959b00
TJ
3975 * @class_dev: device struct
3976 * @buf: buffer
3977 * @count: buffer size
1da177e4
LT
3978 *
3979 * This function will update the firmware on the adapter.
3980 *
3981 * Return value:
3982 * count on success / other on failure
3983 **/
ee959b00
TJ
3984static ssize_t ipr_store_update_fw(struct device *dev,
3985 struct device_attribute *attr,
3986 const char *buf, size_t count)
1da177e4 3987{
ee959b00 3988 struct Scsi_Host *shost = class_to_shost(dev);
1da177e4
LT
3989 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3990 struct ipr_ucode_image_header *image_hdr;
3991 const struct firmware *fw_entry;
3992 struct ipr_sglist *sglist;
1da177e4
LT
3993 char fname[100];
3994 char *src;
3995 int len, result, dnld_size;
3996
3997 if (!capable(CAP_SYS_ADMIN))
3998 return -EACCES;
3999
4000 len = snprintf(fname, 99, "%s", buf);
4001 fname[len-1] = '\0';
4002
203fa3fe 4003 if (request_firmware(&fw_entry, fname, &ioa_cfg->pdev->dev)) {
1da177e4
LT
4004 dev_err(&ioa_cfg->pdev->dev, "Firmware file %s not found\n", fname);
4005 return -EIO;
4006 }
4007
4008 image_hdr = (struct ipr_ucode_image_header *)fw_entry->data;
4009
1da177e4
LT
4010 src = (u8 *)image_hdr + be32_to_cpu(image_hdr->header_length);
4011 dnld_size = fw_entry->size - be32_to_cpu(image_hdr->header_length);
4012 sglist = ipr_alloc_ucode_buffer(dnld_size);
4013
4014 if (!sglist) {
4015 dev_err(&ioa_cfg->pdev->dev, "Microcode buffer allocation failed\n");
4016 release_firmware(fw_entry);
4017 return -ENOMEM;
4018 }
4019
4020 result = ipr_copy_ucode_buffer(sglist, src, dnld_size);
4021
4022 if (result) {
4023 dev_err(&ioa_cfg->pdev->dev,
4024 "Microcode buffer copy to DMA buffer failed\n");
12baa420 4025 goto out;
1da177e4
LT
4026 }
4027
14ed9cc7
WB
4028 ipr_info("Updating microcode, please be patient. This may take up to 30 minutes.\n");
4029
12baa420 4030 result = ipr_update_ioa_ucode(ioa_cfg, sglist);
1da177e4 4031
12baa420
BK
4032 if (!result)
4033 result = count;
4034out:
1da177e4
LT
4035 ipr_free_ucode_buffer(sglist);
4036 release_firmware(fw_entry);
12baa420 4037 return result;
1da177e4
LT
4038}
4039
ee959b00 4040static struct device_attribute ipr_update_fw_attr = {
1da177e4
LT
4041 .attr = {
4042 .name = "update_fw",
4043 .mode = S_IWUSR,
4044 },
4045 .store = ipr_store_update_fw
4046};
4047
75576bb9
WB
4048/**
4049 * ipr_show_fw_type - Show the adapter's firmware type.
4050 * @dev: class device struct
4051 * @buf: buffer
4052 *
4053 * Return value:
4054 * number of bytes printed to buffer
4055 **/
4056static ssize_t ipr_show_fw_type(struct device *dev,
4057 struct device_attribute *attr, char *buf)
4058{
4059 struct Scsi_Host *shost = class_to_shost(dev);
4060 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
4061 unsigned long lock_flags = 0;
4062 int len;
4063
4064 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4065 len = snprintf(buf, PAGE_SIZE, "%d\n", ioa_cfg->sis64);
4066 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4067 return len;
4068}
4069
4070static struct device_attribute ipr_ioa_fw_type_attr = {
4071 .attr = {
4072 .name = "fw_type",
4073 .mode = S_IRUGO,
4074 },
4075 .show = ipr_show_fw_type
4076};
4077
ee959b00 4078static struct device_attribute *ipr_ioa_attrs[] = {
1da177e4
LT
4079 &ipr_fw_version_attr,
4080 &ipr_log_level_attr,
4081 &ipr_diagnostics_attr,
f37eb54b 4082 &ipr_ioa_state_attr,
1da177e4
LT
4083 &ipr_ioa_reset_attr,
4084 &ipr_update_fw_attr,
75576bb9 4085 &ipr_ioa_fw_type_attr,
b53d124a 4086 &ipr_iopoll_weight_attr,
1da177e4
LT
4087 NULL,
4088};
4089
4090#ifdef CONFIG_SCSI_IPR_DUMP
4091/**
4092 * ipr_read_dump - Dump the adapter
2c3c8bea 4093 * @filp: open sysfs file
1da177e4 4094 * @kobj: kobject struct
91a69029 4095 * @bin_attr: bin_attribute struct
1da177e4
LT
4096 * @buf: buffer
4097 * @off: offset
4098 * @count: buffer size
4099 *
4100 * Return value:
4101 * number of bytes printed to buffer
4102 **/
2c3c8bea 4103static ssize_t ipr_read_dump(struct file *filp, struct kobject *kobj,
91a69029
ZR
4104 struct bin_attribute *bin_attr,
4105 char *buf, loff_t off, size_t count)
1da177e4 4106{
ee959b00 4107 struct device *cdev = container_of(kobj, struct device, kobj);
1da177e4
LT
4108 struct Scsi_Host *shost = class_to_shost(cdev);
4109 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
4110 struct ipr_dump *dump;
4111 unsigned long lock_flags = 0;
4112 char *src;
4d4dd706 4113 int len, sdt_end;
1da177e4
LT
4114 size_t rc = count;
4115
4116 if (!capable(CAP_SYS_ADMIN))
4117 return -EACCES;
4118
4119 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4120 dump = ioa_cfg->dump;
4121
4122 if (ioa_cfg->sdt_state != DUMP_OBTAINED || !dump) {
4123 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4124 return 0;
4125 }
4126 kref_get(&dump->kref);
4127 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4128
4129 if (off > dump->driver_dump.hdr.len) {
4130 kref_put(&dump->kref, ipr_release_dump);
4131 return 0;
4132 }
4133
4134 if (off + count > dump->driver_dump.hdr.len) {
4135 count = dump->driver_dump.hdr.len - off;
4136 rc = count;
4137 }
4138
4139 if (count && off < sizeof(dump->driver_dump)) {
4140 if (off + count > sizeof(dump->driver_dump))
4141 len = sizeof(dump->driver_dump) - off;
4142 else
4143 len = count;
4144 src = (u8 *)&dump->driver_dump + off;
4145 memcpy(buf, src, len);
4146 buf += len;
4147 off += len;
4148 count -= len;
4149 }
4150
4151 off -= sizeof(dump->driver_dump);
4152
4d4dd706
KSS
4153 if (ioa_cfg->sis64)
4154 sdt_end = offsetof(struct ipr_ioa_dump, sdt.entry) +
4155 (be32_to_cpu(dump->ioa_dump.sdt.hdr.num_entries_used) *
4156 sizeof(struct ipr_sdt_entry));
4157 else
4158 sdt_end = offsetof(struct ipr_ioa_dump, sdt.entry) +
4159 (IPR_FMT2_NUM_SDT_ENTRIES * sizeof(struct ipr_sdt_entry));
4160
4161 if (count && off < sdt_end) {
4162 if (off + count > sdt_end)
4163 len = sdt_end - off;
1da177e4
LT
4164 else
4165 len = count;
4166 src = (u8 *)&dump->ioa_dump + off;
4167 memcpy(buf, src, len);
4168 buf += len;
4169 off += len;
4170 count -= len;
4171 }
4172
4d4dd706 4173 off -= sdt_end;
1da177e4
LT
4174
4175 while (count) {
4176 if ((off & PAGE_MASK) != ((off + count) & PAGE_MASK))
4177 len = PAGE_ALIGN(off) - off;
4178 else
4179 len = count;
4180 src = (u8 *)dump->ioa_dump.ioa_data[(off & PAGE_MASK) >> PAGE_SHIFT];
4181 src += off & ~PAGE_MASK;
4182 memcpy(buf, src, len);
4183 buf += len;
4184 off += len;
4185 count -= len;
4186 }
4187
4188 kref_put(&dump->kref, ipr_release_dump);
4189 return rc;
4190}
4191
4192/**
4193 * ipr_alloc_dump - Prepare for adapter dump
4194 * @ioa_cfg: ioa config struct
4195 *
4196 * Return value:
4197 * 0 on success / other on failure
4198 **/
4199static int ipr_alloc_dump(struct ipr_ioa_cfg *ioa_cfg)
4200{
4201 struct ipr_dump *dump;
4d4dd706 4202 __be32 **ioa_data;
1da177e4
LT
4203 unsigned long lock_flags = 0;
4204
0bc42e35 4205 dump = kzalloc(sizeof(struct ipr_dump), GFP_KERNEL);
1da177e4
LT
4206
4207 if (!dump) {
4208 ipr_err("Dump memory allocation failed\n");
4209 return -ENOMEM;
4210 }
4211
4d4dd706
KSS
4212 if (ioa_cfg->sis64)
4213 ioa_data = vmalloc(IPR_FMT3_MAX_NUM_DUMP_PAGES * sizeof(__be32 *));
4214 else
4215 ioa_data = vmalloc(IPR_FMT2_MAX_NUM_DUMP_PAGES * sizeof(__be32 *));
4216
4217 if (!ioa_data) {
4218 ipr_err("Dump memory allocation failed\n");
4219 kfree(dump);
4220 return -ENOMEM;
4221 }
4222
4223 dump->ioa_dump.ioa_data = ioa_data;
4224
1da177e4
LT
4225 kref_init(&dump->kref);
4226 dump->ioa_cfg = ioa_cfg;
4227
4228 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4229
4230 if (INACTIVE != ioa_cfg->sdt_state) {
4231 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4d4dd706 4232 vfree(dump->ioa_dump.ioa_data);
1da177e4
LT
4233 kfree(dump);
4234 return 0;
4235 }
4236
4237 ioa_cfg->dump = dump;
4238 ioa_cfg->sdt_state = WAIT_FOR_DUMP;
56d6aa33 4239 if (ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead && !ioa_cfg->dump_taken) {
1da177e4
LT
4240 ioa_cfg->dump_taken = 1;
4241 schedule_work(&ioa_cfg->work_q);
4242 }
4243 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4244
1da177e4
LT
4245 return 0;
4246}
4247
4248/**
4249 * ipr_free_dump - Free adapter dump memory
4250 * @ioa_cfg: ioa config struct
4251 *
4252 * Return value:
4253 * 0 on success / other on failure
4254 **/
4255static int ipr_free_dump(struct ipr_ioa_cfg *ioa_cfg)
4256{
4257 struct ipr_dump *dump;
4258 unsigned long lock_flags = 0;
4259
4260 ENTER;
4261
4262 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4263 dump = ioa_cfg->dump;
4264 if (!dump) {
4265 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4266 return 0;
4267 }
4268
4269 ioa_cfg->dump = NULL;
4270 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4271
4272 kref_put(&dump->kref, ipr_release_dump);
4273
4274 LEAVE;
4275 return 0;
4276}
4277
4278/**
4279 * ipr_write_dump - Setup dump state of adapter
2c3c8bea 4280 * @filp: open sysfs file
1da177e4 4281 * @kobj: kobject struct
91a69029 4282 * @bin_attr: bin_attribute struct
1da177e4
LT
4283 * @buf: buffer
4284 * @off: offset
4285 * @count: buffer size
4286 *
4287 * Return value:
4288 * number of bytes printed to buffer
4289 **/
2c3c8bea 4290static ssize_t ipr_write_dump(struct file *filp, struct kobject *kobj,
91a69029
ZR
4291 struct bin_attribute *bin_attr,
4292 char *buf, loff_t off, size_t count)
1da177e4 4293{
ee959b00 4294 struct device *cdev = container_of(kobj, struct device, kobj);
1da177e4
LT
4295 struct Scsi_Host *shost = class_to_shost(cdev);
4296 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
4297 int rc;
4298
4299 if (!capable(CAP_SYS_ADMIN))
4300 return -EACCES;
4301
4302 if (buf[0] == '1')
4303 rc = ipr_alloc_dump(ioa_cfg);
4304 else if (buf[0] == '0')
4305 rc = ipr_free_dump(ioa_cfg);
4306 else
4307 return -EINVAL;
4308
4309 if (rc)
4310 return rc;
4311 else
4312 return count;
4313}
4314
4315static struct bin_attribute ipr_dump_attr = {
4316 .attr = {
4317 .name = "dump",
4318 .mode = S_IRUSR | S_IWUSR,
4319 },
4320 .size = 0,
4321 .read = ipr_read_dump,
4322 .write = ipr_write_dump
4323};
4324#else
4325static int ipr_free_dump(struct ipr_ioa_cfg *ioa_cfg) { return 0; };
4326#endif
4327
4328/**
4329 * ipr_change_queue_depth - Change the device's queue depth
4330 * @sdev: scsi device struct
4331 * @qdepth: depth to set
e881a172 4332 * @reason: calling context
1da177e4
LT
4333 *
4334 * Return value:
4335 * actual depth set
4336 **/
db5ed4df 4337static int ipr_change_queue_depth(struct scsi_device *sdev, int qdepth)
1da177e4 4338{
35a39691
BK
4339 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
4340 struct ipr_resource_entry *res;
4341 unsigned long lock_flags = 0;
4342
4343 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4344 res = (struct ipr_resource_entry *)sdev->hostdata;
4345
4346 if (res && ipr_is_gata(res) && qdepth > IPR_MAX_CMD_PER_ATA_LUN)
4347 qdepth = IPR_MAX_CMD_PER_ATA_LUN;
4348 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4349
db5ed4df 4350 scsi_change_queue_depth(sdev, qdepth);
1da177e4
LT
4351 return sdev->queue_depth;
4352}
4353
1da177e4
LT
4354/**
4355 * ipr_show_adapter_handle - Show the adapter's resource handle for this device
4356 * @dev: device struct
46d74563 4357 * @attr: device attribute structure
1da177e4
LT
4358 * @buf: buffer
4359 *
4360 * Return value:
4361 * number of bytes printed to buffer
4362 **/
10523b3b 4363static ssize_t ipr_show_adapter_handle(struct device *dev, struct device_attribute *attr, char *buf)
1da177e4
LT
4364{
4365 struct scsi_device *sdev = to_scsi_device(dev);
4366 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
4367 struct ipr_resource_entry *res;
4368 unsigned long lock_flags = 0;
4369 ssize_t len = -ENXIO;
4370
4371 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4372 res = (struct ipr_resource_entry *)sdev->hostdata;
4373 if (res)
3e7ebdfa 4374 len = snprintf(buf, PAGE_SIZE, "%08X\n", res->res_handle);
1da177e4
LT
4375 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4376 return len;
4377}
4378
4379static struct device_attribute ipr_adapter_handle_attr = {
4380 .attr = {
4381 .name = "adapter_handle",
4382 .mode = S_IRUSR,
4383 },
4384 .show = ipr_show_adapter_handle
4385};
4386
3e7ebdfa 4387/**
5adcbeb3
WB
4388 * ipr_show_resource_path - Show the resource path or the resource address for
4389 * this device.
3e7ebdfa 4390 * @dev: device struct
46d74563 4391 * @attr: device attribute structure
3e7ebdfa
WB
4392 * @buf: buffer
4393 *
4394 * Return value:
4395 * number of bytes printed to buffer
4396 **/
4397static ssize_t ipr_show_resource_path(struct device *dev, struct device_attribute *attr, char *buf)
4398{
4399 struct scsi_device *sdev = to_scsi_device(dev);
4400 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
4401 struct ipr_resource_entry *res;
4402 unsigned long lock_flags = 0;
4403 ssize_t len = -ENXIO;
4404 char buffer[IPR_MAX_RES_PATH_LENGTH];
4405
4406 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4407 res = (struct ipr_resource_entry *)sdev->hostdata;
5adcbeb3 4408 if (res && ioa_cfg->sis64)
3e7ebdfa 4409 len = snprintf(buf, PAGE_SIZE, "%s\n",
b3b3b407
BK
4410 __ipr_format_res_path(res->res_path, buffer,
4411 sizeof(buffer)));
5adcbeb3
WB
4412 else if (res)
4413 len = snprintf(buf, PAGE_SIZE, "%d:%d:%d:%d\n", ioa_cfg->host->host_no,
4414 res->bus, res->target, res->lun);
4415
3e7ebdfa
WB
4416 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4417 return len;
4418}
4419
4420static struct device_attribute ipr_resource_path_attr = {
4421 .attr = {
4422 .name = "resource_path",
75576bb9 4423 .mode = S_IRUGO,
3e7ebdfa
WB
4424 },
4425 .show = ipr_show_resource_path
4426};
4427
46d74563
WB
4428/**
4429 * ipr_show_device_id - Show the device_id for this device.
4430 * @dev: device struct
4431 * @attr: device attribute structure
4432 * @buf: buffer
4433 *
4434 * Return value:
4435 * number of bytes printed to buffer
4436 **/
4437static ssize_t ipr_show_device_id(struct device *dev, struct device_attribute *attr, char *buf)
4438{
4439 struct scsi_device *sdev = to_scsi_device(dev);
4440 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
4441 struct ipr_resource_entry *res;
4442 unsigned long lock_flags = 0;
4443 ssize_t len = -ENXIO;
4444
4445 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4446 res = (struct ipr_resource_entry *)sdev->hostdata;
4447 if (res && ioa_cfg->sis64)
4448 len = snprintf(buf, PAGE_SIZE, "0x%llx\n", res->dev_id);
4449 else if (res)
4450 len = snprintf(buf, PAGE_SIZE, "0x%llx\n", res->lun_wwn);
4451
4452 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4453 return len;
4454}
4455
4456static struct device_attribute ipr_device_id_attr = {
4457 .attr = {
4458 .name = "device_id",
4459 .mode = S_IRUGO,
4460 },
4461 .show = ipr_show_device_id
4462};
4463
75576bb9
WB
4464/**
4465 * ipr_show_resource_type - Show the resource type for this device.
4466 * @dev: device struct
46d74563 4467 * @attr: device attribute structure
75576bb9
WB
4468 * @buf: buffer
4469 *
4470 * Return value:
4471 * number of bytes printed to buffer
4472 **/
4473static ssize_t ipr_show_resource_type(struct device *dev, struct device_attribute *attr, char *buf)
4474{
4475 struct scsi_device *sdev = to_scsi_device(dev);
4476 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
4477 struct ipr_resource_entry *res;
4478 unsigned long lock_flags = 0;
4479 ssize_t len = -ENXIO;
4480
4481 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4482 res = (struct ipr_resource_entry *)sdev->hostdata;
4483
4484 if (res)
4485 len = snprintf(buf, PAGE_SIZE, "%x\n", res->type);
4486
4487 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4488 return len;
4489}
4490
4491static struct device_attribute ipr_resource_type_attr = {
4492 .attr = {
4493 .name = "resource_type",
4494 .mode = S_IRUGO,
4495 },
4496 .show = ipr_show_resource_type
4497};
4498
1da177e4
LT
4499static struct device_attribute *ipr_dev_attrs[] = {
4500 &ipr_adapter_handle_attr,
3e7ebdfa 4501 &ipr_resource_path_attr,
46d74563 4502 &ipr_device_id_attr,
75576bb9 4503 &ipr_resource_type_attr,
1da177e4
LT
4504 NULL,
4505};
4506
4507/**
4508 * ipr_biosparam - Return the HSC mapping
4509 * @sdev: scsi device struct
4510 * @block_device: block device pointer
4511 * @capacity: capacity of the device
4512 * @parm: Array containing returned HSC values.
4513 *
4514 * This function generates the HSC parms that fdisk uses.
4515 * We want to make sure we return something that places partitions
4516 * on 4k boundaries for best performance with the IOA.
4517 *
4518 * Return value:
4519 * 0 on success
4520 **/
4521static int ipr_biosparam(struct scsi_device *sdev,
4522 struct block_device *block_device,
4523 sector_t capacity, int *parm)
4524{
4525 int heads, sectors;
4526 sector_t cylinders;
4527
4528 heads = 128;
4529 sectors = 32;
4530
4531 cylinders = capacity;
4532 sector_div(cylinders, (128 * 32));
4533
4534 /* return result */
4535 parm[0] = heads;
4536 parm[1] = sectors;
4537 parm[2] = cylinders;
4538
4539 return 0;
4540}
4541
35a39691
BK
4542/**
4543 * ipr_find_starget - Find target based on bus/target.
4544 * @starget: scsi target struct
4545 *
4546 * Return value:
4547 * resource entry pointer if found / NULL if not found
4548 **/
4549static struct ipr_resource_entry *ipr_find_starget(struct scsi_target *starget)
4550{
4551 struct Scsi_Host *shost = dev_to_shost(&starget->dev);
4552 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) shost->hostdata;
4553 struct ipr_resource_entry *res;
4554
4555 list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
3e7ebdfa 4556 if ((res->bus == starget->channel) &&
0ee1d714 4557 (res->target == starget->id)) {
35a39691
BK
4558 return res;
4559 }
4560 }
4561
4562 return NULL;
4563}
4564
4565static struct ata_port_info sata_port_info;
4566
4567/**
4568 * ipr_target_alloc - Prepare for commands to a SCSI target
4569 * @starget: scsi target struct
4570 *
4571 * If the device is a SATA device, this function allocates an
4572 * ATA port with libata, else it does nothing.
4573 *
4574 * Return value:
4575 * 0 on success / non-0 on failure
4576 **/
4577static int ipr_target_alloc(struct scsi_target *starget)
4578{
4579 struct Scsi_Host *shost = dev_to_shost(&starget->dev);
4580 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) shost->hostdata;
4581 struct ipr_sata_port *sata_port;
4582 struct ata_port *ap;
4583 struct ipr_resource_entry *res;
4584 unsigned long lock_flags;
4585
4586 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4587 res = ipr_find_starget(starget);
4588 starget->hostdata = NULL;
4589
4590 if (res && ipr_is_gata(res)) {
4591 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4592 sata_port = kzalloc(sizeof(*sata_port), GFP_KERNEL);
4593 if (!sata_port)
4594 return -ENOMEM;
4595
4596 ap = ata_sas_port_alloc(&ioa_cfg->ata_host, &sata_port_info, shost);
4597 if (ap) {
4598 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4599 sata_port->ioa_cfg = ioa_cfg;
4600 sata_port->ap = ap;
4601 sata_port->res = res;
4602
4603 res->sata_port = sata_port;
4604 ap->private_data = sata_port;
4605 starget->hostdata = sata_port;
4606 } else {
4607 kfree(sata_port);
4608 return -ENOMEM;
4609 }
4610 }
4611 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4612
4613 return 0;
4614}
4615
4616/**
4617 * ipr_target_destroy - Destroy a SCSI target
4618 * @starget: scsi target struct
4619 *
4620 * If the device was a SATA device, this function frees the libata
4621 * ATA port, else it does nothing.
4622 *
4623 **/
4624static void ipr_target_destroy(struct scsi_target *starget)
4625{
4626 struct ipr_sata_port *sata_port = starget->hostdata;
3e7ebdfa
WB
4627 struct Scsi_Host *shost = dev_to_shost(&starget->dev);
4628 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) shost->hostdata;
4629
4630 if (ioa_cfg->sis64) {
0ee1d714
BK
4631 if (!ipr_find_starget(starget)) {
4632 if (starget->channel == IPR_ARRAY_VIRTUAL_BUS)
4633 clear_bit(starget->id, ioa_cfg->array_ids);
4634 else if (starget->channel == IPR_VSET_VIRTUAL_BUS)
4635 clear_bit(starget->id, ioa_cfg->vset_ids);
4636 else if (starget->channel == 0)
4637 clear_bit(starget->id, ioa_cfg->target_ids);
4638 }
3e7ebdfa 4639 }
35a39691
BK
4640
4641 if (sata_port) {
4642 starget->hostdata = NULL;
4643 ata_sas_port_destroy(sata_port->ap);
4644 kfree(sata_port);
4645 }
4646}
4647
4648/**
4649 * ipr_find_sdev - Find device based on bus/target/lun.
4650 * @sdev: scsi device struct
4651 *
4652 * Return value:
4653 * resource entry pointer if found / NULL if not found
4654 **/
4655static struct ipr_resource_entry *ipr_find_sdev(struct scsi_device *sdev)
4656{
4657 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) sdev->host->hostdata;
4658 struct ipr_resource_entry *res;
4659
4660 list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
3e7ebdfa
WB
4661 if ((res->bus == sdev->channel) &&
4662 (res->target == sdev->id) &&
4663 (res->lun == sdev->lun))
35a39691
BK
4664 return res;
4665 }
4666
4667 return NULL;
4668}
4669
1da177e4
LT
4670/**
4671 * ipr_slave_destroy - Unconfigure a SCSI device
4672 * @sdev: scsi device struct
4673 *
4674 * Return value:
4675 * nothing
4676 **/
4677static void ipr_slave_destroy(struct scsi_device *sdev)
4678{
4679 struct ipr_resource_entry *res;
4680 struct ipr_ioa_cfg *ioa_cfg;
4681 unsigned long lock_flags = 0;
4682
4683 ioa_cfg = (struct ipr_ioa_cfg *) sdev->host->hostdata;
4684
4685 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4686 res = (struct ipr_resource_entry *) sdev->hostdata;
4687 if (res) {
35a39691 4688 if (res->sata_port)
3e4ec344 4689 res->sata_port->ap->link.device[0].class = ATA_DEV_NONE;
1da177e4
LT
4690 sdev->hostdata = NULL;
4691 res->sdev = NULL;
35a39691 4692 res->sata_port = NULL;
1da177e4
LT
4693 }
4694 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4695}
4696
4697/**
4698 * ipr_slave_configure - Configure a SCSI device
4699 * @sdev: scsi device struct
4700 *
4701 * This function configures the specified scsi device.
4702 *
4703 * Return value:
4704 * 0 on success
4705 **/
4706static int ipr_slave_configure(struct scsi_device *sdev)
4707{
4708 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) sdev->host->hostdata;
4709 struct ipr_resource_entry *res;
dd406ef8 4710 struct ata_port *ap = NULL;
1da177e4 4711 unsigned long lock_flags = 0;
3e7ebdfa 4712 char buffer[IPR_MAX_RES_PATH_LENGTH];
1da177e4
LT
4713
4714 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4715 res = sdev->hostdata;
4716 if (res) {
4717 if (ipr_is_af_dasd_device(res))
4718 sdev->type = TYPE_RAID;
0726ce26 4719 if (ipr_is_af_dasd_device(res) || ipr_is_ioa_resource(res)) {
1da177e4 4720 sdev->scsi_level = 4;
0726ce26
BK
4721 sdev->no_uld_attach = 1;
4722 }
1da177e4 4723 if (ipr_is_vset_device(res)) {
60654e25 4724 sdev->scsi_level = SCSI_SPC_3;
242f9dcb
JA
4725 blk_queue_rq_timeout(sdev->request_queue,
4726 IPR_VSET_RW_TIMEOUT);
086fa5ff 4727 blk_queue_max_hw_sectors(sdev->request_queue, IPR_VSET_MAX_SECTORS);
1da177e4 4728 }
dd406ef8
BK
4729 if (ipr_is_gata(res) && res->sata_port)
4730 ap = res->sata_port->ap;
4731 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4732
4733 if (ap) {
db5ed4df 4734 scsi_change_queue_depth(sdev, IPR_MAX_CMD_PER_ATA_LUN);
dd406ef8 4735 ata_sas_slave_configure(sdev, ap);
c8b09f6f
CH
4736 }
4737
3e7ebdfa
WB
4738 if (ioa_cfg->sis64)
4739 sdev_printk(KERN_INFO, sdev, "Resource path: %s\n",
b3b3b407
BK
4740 ipr_format_res_path(ioa_cfg,
4741 res->res_path, buffer, sizeof(buffer)));
dd406ef8 4742 return 0;
1da177e4
LT
4743 }
4744 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4745 return 0;
4746}
4747
35a39691
BK
4748/**
4749 * ipr_ata_slave_alloc - Prepare for commands to a SATA device
4750 * @sdev: scsi device struct
4751 *
4752 * This function initializes an ATA port so that future commands
4753 * sent through queuecommand will work.
4754 *
4755 * Return value:
4756 * 0 on success
4757 **/
4758static int ipr_ata_slave_alloc(struct scsi_device *sdev)
4759{
4760 struct ipr_sata_port *sata_port = NULL;
4761 int rc = -ENXIO;
4762
4763 ENTER;
4764 if (sdev->sdev_target)
4765 sata_port = sdev->sdev_target->hostdata;
b2024459 4766 if (sata_port) {
35a39691 4767 rc = ata_sas_port_init(sata_port->ap);
b2024459
DW
4768 if (rc == 0)
4769 rc = ata_sas_sync_probe(sata_port->ap);
4770 }
4771
35a39691
BK
4772 if (rc)
4773 ipr_slave_destroy(sdev);
4774
4775 LEAVE;
4776 return rc;
4777}
4778
1da177e4
LT
4779/**
4780 * ipr_slave_alloc - Prepare for commands to a device.
4781 * @sdev: scsi device struct
4782 *
4783 * This function saves a pointer to the resource entry
4784 * in the scsi device struct if the device exists. We
4785 * can then use this pointer in ipr_queuecommand when
4786 * handling new commands.
4787 *
4788 * Return value:
692aebfc 4789 * 0 on success / -ENXIO if device does not exist
1da177e4
LT
4790 **/
4791static int ipr_slave_alloc(struct scsi_device *sdev)
4792{
4793 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) sdev->host->hostdata;
4794 struct ipr_resource_entry *res;
4795 unsigned long lock_flags;
692aebfc 4796 int rc = -ENXIO;
1da177e4
LT
4797
4798 sdev->hostdata = NULL;
4799
4800 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4801
35a39691
BK
4802 res = ipr_find_sdev(sdev);
4803 if (res) {
4804 res->sdev = sdev;
4805 res->add_to_ml = 0;
4806 res->in_erp = 0;
4807 sdev->hostdata = res;
4808 if (!ipr_is_naca_model(res))
4809 res->needs_sync_complete = 1;
4810 rc = 0;
4811 if (ipr_is_gata(res)) {
4812 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4813 return ipr_ata_slave_alloc(sdev);
1da177e4
LT
4814 }
4815 }
4816
4817 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4818
692aebfc 4819 return rc;
1da177e4
LT
4820}
4821
6cdb0817
BK
4822/**
4823 * ipr_match_lun - Match function for specified LUN
4824 * @ipr_cmd: ipr command struct
4825 * @device: device to match (sdev)
4826 *
4827 * Returns:
4828 * 1 if command matches sdev / 0 if command does not match sdev
4829 **/
4830static int ipr_match_lun(struct ipr_cmnd *ipr_cmd, void *device)
4831{
4832 if (ipr_cmd->scsi_cmd && ipr_cmd->scsi_cmd->device == device)
4833 return 1;
4834 return 0;
4835}
4836
4837/**
4838 * ipr_wait_for_ops - Wait for matching commands to complete
4839 * @ipr_cmd: ipr command struct
4840 * @device: device to match (sdev)
4841 * @match: match function to use
4842 *
4843 * Returns:
4844 * SUCCESS / FAILED
4845 **/
4846static int ipr_wait_for_ops(struct ipr_ioa_cfg *ioa_cfg, void *device,
4847 int (*match)(struct ipr_cmnd *, void *))
4848{
4849 struct ipr_cmnd *ipr_cmd;
4850 int wait;
4851 unsigned long flags;
4852 struct ipr_hrr_queue *hrrq;
4853 signed long timeout = IPR_ABORT_TASK_TIMEOUT;
4854 DECLARE_COMPLETION_ONSTACK(comp);
4855
4856 ENTER;
4857 do {
4858 wait = 0;
4859
4860 for_each_hrrq(hrrq, ioa_cfg) {
4861 spin_lock_irqsave(hrrq->lock, flags);
4862 list_for_each_entry(ipr_cmd, &hrrq->hrrq_pending_q, queue) {
4863 if (match(ipr_cmd, device)) {
4864 ipr_cmd->eh_comp = &comp;
4865 wait++;
4866 }
4867 }
4868 spin_unlock_irqrestore(hrrq->lock, flags);
4869 }
4870
4871 if (wait) {
4872 timeout = wait_for_completion_timeout(&comp, timeout);
4873
4874 if (!timeout) {
4875 wait = 0;
4876
4877 for_each_hrrq(hrrq, ioa_cfg) {
4878 spin_lock_irqsave(hrrq->lock, flags);
4879 list_for_each_entry(ipr_cmd, &hrrq->hrrq_pending_q, queue) {
4880 if (match(ipr_cmd, device)) {
4881 ipr_cmd->eh_comp = NULL;
4882 wait++;
4883 }
4884 }
4885 spin_unlock_irqrestore(hrrq->lock, flags);
4886 }
4887
4888 if (wait)
4889 dev_err(&ioa_cfg->pdev->dev, "Timed out waiting for aborted commands\n");
4890 LEAVE;
4891 return wait ? FAILED : SUCCESS;
4892 }
4893 }
4894 } while (wait);
4895
4896 LEAVE;
4897 return SUCCESS;
4898}
4899
70233ac5 4900static int ipr_eh_host_reset(struct scsi_cmnd *cmd)
1da177e4
LT
4901{
4902 struct ipr_ioa_cfg *ioa_cfg;
70233ac5 4903 unsigned long lock_flags = 0;
4904 int rc = SUCCESS;
1da177e4
LT
4905
4906 ENTER;
70233ac5 4907 ioa_cfg = (struct ipr_ioa_cfg *) cmd->device->host->hostdata;
4908 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
1da177e4 4909
96b04db9 4910 if (!ioa_cfg->in_reset_reload && !ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead) {
70233ac5 4911 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_ABBREV);
a92fa25c
KSS
4912 dev_err(&ioa_cfg->pdev->dev,
4913 "Adapter being reset as a result of error recovery.\n");
1da177e4 4914
a92fa25c
KSS
4915 if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
4916 ioa_cfg->sdt_state = GET_DUMP;
4917 }
1da177e4 4918
70233ac5 4919 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4920 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
4921 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
df0ae249 4922
70233ac5 4923 /* If we got hit with a host reset while we were already resetting
4924 the adapter for some reason, and the reset failed. */
4925 if (ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead) {
4926 ipr_trace;
4927 rc = FAILED;
4928 }
df0ae249 4929
70233ac5 4930 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4931 LEAVE;
df0ae249
JG
4932 return rc;
4933}
4934
c6513096
BK
4935/**
4936 * ipr_device_reset - Reset the device
4937 * @ioa_cfg: ioa config struct
4938 * @res: resource entry struct
4939 *
4940 * This function issues a device reset to the affected device.
4941 * If the device is a SCSI device, a LUN reset will be sent
4942 * to the device first. If that does not work, a target reset
35a39691
BK
4943 * will be sent. If the device is a SATA device, a PHY reset will
4944 * be sent.
c6513096
BK
4945 *
4946 * Return value:
4947 * 0 on success / non-zero on failure
4948 **/
4949static int ipr_device_reset(struct ipr_ioa_cfg *ioa_cfg,
4950 struct ipr_resource_entry *res)
4951{
4952 struct ipr_cmnd *ipr_cmd;
4953 struct ipr_ioarcb *ioarcb;
4954 struct ipr_cmd_pkt *cmd_pkt;
35a39691 4955 struct ipr_ioarcb_ata_regs *regs;
c6513096
BK
4956 u32 ioasc;
4957
4958 ENTER;
4959 ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
4960 ioarcb = &ipr_cmd->ioarcb;
4961 cmd_pkt = &ioarcb->cmd_pkt;
a32c055f
WB
4962
4963 if (ipr_cmd->ioa_cfg->sis64) {
4964 regs = &ipr_cmd->i.ata_ioadl.regs;
4965 ioarcb->add_cmd_parms_offset = cpu_to_be16(sizeof(*ioarcb));
4966 } else
4967 regs = &ioarcb->u.add_data.u.regs;
c6513096 4968
3e7ebdfa 4969 ioarcb->res_handle = res->res_handle;
c6513096
BK
4970 cmd_pkt->request_type = IPR_RQTYPE_IOACMD;
4971 cmd_pkt->cdb[0] = IPR_RESET_DEVICE;
35a39691
BK
4972 if (ipr_is_gata(res)) {
4973 cmd_pkt->cdb[2] = IPR_ATA_PHY_RESET;
a32c055f 4974 ioarcb->add_cmd_parms_len = cpu_to_be16(sizeof(regs->flags));
35a39691
BK
4975 regs->flags |= IPR_ATA_FLAG_STATUS_ON_GOOD_COMPLETION;
4976 }
c6513096
BK
4977
4978 ipr_send_blocking_cmd(ipr_cmd, ipr_timeout, IPR_DEVICE_RESET_TIMEOUT);
96d21f00 4979 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
05a6538a 4980 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
96d21f00
WB
4981 if (ipr_is_gata(res) && res->sata_port && ioasc != IPR_IOASC_IOA_WAS_RESET) {
4982 if (ipr_cmd->ioa_cfg->sis64)
4983 memcpy(&res->sata_port->ioasa, &ipr_cmd->s.ioasa64.u.gata,
4984 sizeof(struct ipr_ioasa_gata));
4985 else
4986 memcpy(&res->sata_port->ioasa, &ipr_cmd->s.ioasa.u.gata,
4987 sizeof(struct ipr_ioasa_gata));
4988 }
c6513096
BK
4989
4990 LEAVE;
203fa3fe 4991 return IPR_IOASC_SENSE_KEY(ioasc) ? -EIO : 0;
c6513096
BK
4992}
4993
35a39691
BK
4994/**
4995 * ipr_sata_reset - Reset the SATA port
cc0680a5 4996 * @link: SATA link to reset
35a39691
BK
4997 * @classes: class of the attached device
4998 *
cc0680a5 4999 * This function issues a SATA phy reset to the affected ATA link.
35a39691
BK
5000 *
5001 * Return value:
5002 * 0 on success / non-zero on failure
5003 **/
cc0680a5 5004static int ipr_sata_reset(struct ata_link *link, unsigned int *classes,
120bda35 5005 unsigned long deadline)
35a39691 5006{
cc0680a5 5007 struct ipr_sata_port *sata_port = link->ap->private_data;
35a39691
BK
5008 struct ipr_ioa_cfg *ioa_cfg = sata_port->ioa_cfg;
5009 struct ipr_resource_entry *res;
5010 unsigned long lock_flags = 0;
5011 int rc = -ENXIO;
5012
5013 ENTER;
5014 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
203fa3fe 5015 while (ioa_cfg->in_reset_reload) {
73d98ff0
BK
5016 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
5017 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
5018 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
5019 }
5020
35a39691
BK
5021 res = sata_port->res;
5022 if (res) {
5023 rc = ipr_device_reset(ioa_cfg, res);
3e7ebdfa 5024 *classes = res->ata_class;
35a39691
BK
5025 }
5026
5027 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
5028 LEAVE;
5029 return rc;
5030}
5031
1da177e4
LT
5032/**
5033 * ipr_eh_dev_reset - Reset the device
5034 * @scsi_cmd: scsi command struct
5035 *
5036 * This function issues a device reset to the affected device.
5037 * A LUN reset will be sent to the device first. If that does
5038 * not work, a target reset will be sent.
5039 *
5040 * Return value:
5041 * SUCCESS / FAILED
5042 **/
203fa3fe 5043static int __ipr_eh_dev_reset(struct scsi_cmnd *scsi_cmd)
1da177e4
LT
5044{
5045 struct ipr_cmnd *ipr_cmd;
5046 struct ipr_ioa_cfg *ioa_cfg;
5047 struct ipr_resource_entry *res;
35a39691
BK
5048 struct ata_port *ap;
5049 int rc = 0;
05a6538a 5050 struct ipr_hrr_queue *hrrq;
1da177e4
LT
5051
5052 ENTER;
5053 ioa_cfg = (struct ipr_ioa_cfg *) scsi_cmd->device->host->hostdata;
5054 res = scsi_cmd->device->hostdata;
5055
eeb88307 5056 if (!res)
1da177e4
LT
5057 return FAILED;
5058
5059 /*
5060 * If we are currently going through reset/reload, return failed. This will force the
5061 * mid-layer to call ipr_eh_host_reset, which will then go to sleep and wait for the
5062 * reset to complete
5063 */
5064 if (ioa_cfg->in_reset_reload)
5065 return FAILED;
56d6aa33 5066 if (ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead)
1da177e4
LT
5067 return FAILED;
5068
05a6538a 5069 for_each_hrrq(hrrq, ioa_cfg) {
56d6aa33 5070 spin_lock(&hrrq->_lock);
05a6538a 5071 list_for_each_entry(ipr_cmd, &hrrq->hrrq_pending_q, queue) {
5072 if (ipr_cmd->ioarcb.res_handle == res->res_handle) {
5073 if (ipr_cmd->scsi_cmd)
5074 ipr_cmd->done = ipr_scsi_eh_done;
5075 if (ipr_cmd->qc)
5076 ipr_cmd->done = ipr_sata_eh_done;
5077 if (ipr_cmd->qc &&
5078 !(ipr_cmd->qc->flags & ATA_QCFLAG_FAILED)) {
5079 ipr_cmd->qc->err_mask |= AC_ERR_TIMEOUT;
5080 ipr_cmd->qc->flags |= ATA_QCFLAG_FAILED;
5081 }
7402ecef 5082 }
1da177e4 5083 }
56d6aa33 5084 spin_unlock(&hrrq->_lock);
1da177e4 5085 }
1da177e4 5086 res->resetting_device = 1;
fb3ed3cb 5087 scmd_printk(KERN_ERR, scsi_cmd, "Resetting device\n");
35a39691
BK
5088
5089 if (ipr_is_gata(res) && res->sata_port) {
5090 ap = res->sata_port->ap;
5091 spin_unlock_irq(scsi_cmd->device->host->host_lock);
a1efdaba 5092 ata_std_error_handler(ap);
35a39691 5093 spin_lock_irq(scsi_cmd->device->host->host_lock);
5af23d26 5094
05a6538a 5095 for_each_hrrq(hrrq, ioa_cfg) {
56d6aa33 5096 spin_lock(&hrrq->_lock);
05a6538a 5097 list_for_each_entry(ipr_cmd,
5098 &hrrq->hrrq_pending_q, queue) {
5099 if (ipr_cmd->ioarcb.res_handle ==
5100 res->res_handle) {
5101 rc = -EIO;
5102 break;
5103 }
5af23d26 5104 }
56d6aa33 5105 spin_unlock(&hrrq->_lock);
5af23d26 5106 }
35a39691
BK
5107 } else
5108 rc = ipr_device_reset(ioa_cfg, res);
1da177e4 5109 res->resetting_device = 0;
0b1f8d44 5110 res->reset_occurred = 1;
1da177e4 5111
1da177e4 5112 LEAVE;
203fa3fe 5113 return rc ? FAILED : SUCCESS;
1da177e4
LT
5114}
5115
203fa3fe 5116static int ipr_eh_dev_reset(struct scsi_cmnd *cmd)
94d0e7b8
JG
5117{
5118 int rc;
6cdb0817
BK
5119 struct ipr_ioa_cfg *ioa_cfg;
5120
5121 ioa_cfg = (struct ipr_ioa_cfg *) cmd->device->host->hostdata;
94d0e7b8
JG
5122
5123 spin_lock_irq(cmd->device->host->host_lock);
5124 rc = __ipr_eh_dev_reset(cmd);
5125 spin_unlock_irq(cmd->device->host->host_lock);
5126
6cdb0817
BK
5127 if (rc == SUCCESS)
5128 rc = ipr_wait_for_ops(ioa_cfg, cmd->device, ipr_match_lun);
5129
94d0e7b8
JG
5130 return rc;
5131}
5132
1da177e4
LT
5133/**
5134 * ipr_bus_reset_done - Op done function for bus reset.
5135 * @ipr_cmd: ipr command struct
5136 *
5137 * This function is the op done function for a bus reset
5138 *
5139 * Return value:
5140 * none
5141 **/
5142static void ipr_bus_reset_done(struct ipr_cmnd *ipr_cmd)
5143{
5144 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
5145 struct ipr_resource_entry *res;
5146
5147 ENTER;
3e7ebdfa
WB
5148 if (!ioa_cfg->sis64)
5149 list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
5150 if (res->res_handle == ipr_cmd->ioarcb.res_handle) {
5151 scsi_report_bus_reset(ioa_cfg->host, res->bus);
5152 break;
5153 }
1da177e4 5154 }
1da177e4
LT
5155
5156 /*
5157 * If abort has not completed, indicate the reset has, else call the
5158 * abort's done function to wake the sleeping eh thread
5159 */
5160 if (ipr_cmd->sibling->sibling)
5161 ipr_cmd->sibling->sibling = NULL;
5162 else
5163 ipr_cmd->sibling->done(ipr_cmd->sibling);
5164
05a6538a 5165 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
1da177e4
LT
5166 LEAVE;
5167}
5168
5169/**
5170 * ipr_abort_timeout - An abort task has timed out
5171 * @ipr_cmd: ipr command struct
5172 *
5173 * This function handles when an abort task times out. If this
5174 * happens we issue a bus reset since we have resources tied
5175 * up that must be freed before returning to the midlayer.
5176 *
5177 * Return value:
5178 * none
5179 **/
5180static void ipr_abort_timeout(struct ipr_cmnd *ipr_cmd)
5181{
5182 struct ipr_cmnd *reset_cmd;
5183 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
5184 struct ipr_cmd_pkt *cmd_pkt;
5185 unsigned long lock_flags = 0;
5186
5187 ENTER;
5188 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
5189 if (ipr_cmd->completion.done || ioa_cfg->in_reset_reload) {
5190 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
5191 return;
5192 }
5193
fb3ed3cb 5194 sdev_printk(KERN_ERR, ipr_cmd->u.sdev, "Abort timed out. Resetting bus.\n");
1da177e4
LT
5195 reset_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
5196 ipr_cmd->sibling = reset_cmd;
5197 reset_cmd->sibling = ipr_cmd;
5198 reset_cmd->ioarcb.res_handle = ipr_cmd->ioarcb.res_handle;
5199 cmd_pkt = &reset_cmd->ioarcb.cmd_pkt;
5200 cmd_pkt->request_type = IPR_RQTYPE_IOACMD;
5201 cmd_pkt->cdb[0] = IPR_RESET_DEVICE;
5202 cmd_pkt->cdb[2] = IPR_RESET_TYPE_SELECT | IPR_BUS_RESET;
5203
5204 ipr_do_req(reset_cmd, ipr_bus_reset_done, ipr_timeout, IPR_DEVICE_RESET_TIMEOUT);
5205 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
5206 LEAVE;
5207}
5208
5209/**
5210 * ipr_cancel_op - Cancel specified op
5211 * @scsi_cmd: scsi command struct
5212 *
5213 * This function cancels specified op.
5214 *
5215 * Return value:
5216 * SUCCESS / FAILED
5217 **/
203fa3fe 5218static int ipr_cancel_op(struct scsi_cmnd *scsi_cmd)
1da177e4
LT
5219{
5220 struct ipr_cmnd *ipr_cmd;
5221 struct ipr_ioa_cfg *ioa_cfg;
5222 struct ipr_resource_entry *res;
5223 struct ipr_cmd_pkt *cmd_pkt;
a92fa25c 5224 u32 ioasc, int_reg;
1da177e4 5225 int op_found = 0;
05a6538a 5226 struct ipr_hrr_queue *hrrq;
1da177e4
LT
5227
5228 ENTER;
5229 ioa_cfg = (struct ipr_ioa_cfg *)scsi_cmd->device->host->hostdata;
5230 res = scsi_cmd->device->hostdata;
5231
8fa728a2
JG
5232 /* If we are currently going through reset/reload, return failed.
5233 * This will force the mid-layer to call ipr_eh_host_reset,
5234 * which will then go to sleep and wait for the reset to complete
5235 */
56d6aa33 5236 if (ioa_cfg->in_reset_reload ||
5237 ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead)
8fa728a2 5238 return FAILED;
a92fa25c
KSS
5239 if (!res)
5240 return FAILED;
5241
5242 /*
5243 * If we are aborting a timed out op, chances are that the timeout was caused
5244 * by a still not detected EEH error. In such cases, reading a register will
5245 * trigger the EEH recovery infrastructure.
5246 */
5247 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
5248
5249 if (!ipr_is_gscsi(res))
1da177e4
LT
5250 return FAILED;
5251
05a6538a 5252 for_each_hrrq(hrrq, ioa_cfg) {
56d6aa33 5253 spin_lock(&hrrq->_lock);
05a6538a 5254 list_for_each_entry(ipr_cmd, &hrrq->hrrq_pending_q, queue) {
5255 if (ipr_cmd->scsi_cmd == scsi_cmd) {
5256 ipr_cmd->done = ipr_scsi_eh_done;
5257 op_found = 1;
5258 break;
5259 }
1da177e4 5260 }
56d6aa33 5261 spin_unlock(&hrrq->_lock);
1da177e4
LT
5262 }
5263
5264 if (!op_found)
5265 return SUCCESS;
5266
5267 ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
3e7ebdfa 5268 ipr_cmd->ioarcb.res_handle = res->res_handle;
1da177e4
LT
5269 cmd_pkt = &ipr_cmd->ioarcb.cmd_pkt;
5270 cmd_pkt->request_type = IPR_RQTYPE_IOACMD;
5271 cmd_pkt->cdb[0] = IPR_CANCEL_ALL_REQUESTS;
5272 ipr_cmd->u.sdev = scsi_cmd->device;
5273
fb3ed3cb
BK
5274 scmd_printk(KERN_ERR, scsi_cmd, "Aborting command: %02X\n",
5275 scsi_cmd->cmnd[0]);
1da177e4 5276 ipr_send_blocking_cmd(ipr_cmd, ipr_abort_timeout, IPR_CANCEL_ALL_TIMEOUT);
96d21f00 5277 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
1da177e4
LT
5278
5279 /*
5280 * If the abort task timed out and we sent a bus reset, we will get
5281 * one the following responses to the abort
5282 */
5283 if (ioasc == IPR_IOASC_BUS_WAS_RESET || ioasc == IPR_IOASC_SYNC_REQUIRED) {
5284 ioasc = 0;
5285 ipr_trace;
5286 }
5287
c4ee22a3 5288 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
ee0a90fa
BK
5289 if (!ipr_is_naca_model(res))
5290 res->needs_sync_complete = 1;
1da177e4
LT
5291
5292 LEAVE;
203fa3fe 5293 return IPR_IOASC_SENSE_KEY(ioasc) ? FAILED : SUCCESS;
1da177e4
LT
5294}
5295
5296/**
5297 * ipr_eh_abort - Abort a single op
5298 * @scsi_cmd: scsi command struct
5299 *
5300 * Return value:
f688f96d
BK
5301 * 0 if scan in progress / 1 if scan is complete
5302 **/
5303static int ipr_scan_finished(struct Scsi_Host *shost, unsigned long elapsed_time)
5304{
5305 unsigned long lock_flags;
5306 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) shost->hostdata;
5307 int rc = 0;
5308
5309 spin_lock_irqsave(shost->host_lock, lock_flags);
5310 if (ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead || ioa_cfg->scan_done)
5311 rc = 1;
5312 if ((elapsed_time/HZ) > (ioa_cfg->transop_timeout * 2))
5313 rc = 1;
5314 spin_unlock_irqrestore(shost->host_lock, lock_flags);
5315 return rc;
5316}
5317
5318/**
5319 * ipr_eh_host_reset - Reset the host adapter
5320 * @scsi_cmd: scsi command struct
5321 *
5322 * Return value:
1da177e4
LT
5323 * SUCCESS / FAILED
5324 **/
203fa3fe 5325static int ipr_eh_abort(struct scsi_cmnd *scsi_cmd)
1da177e4 5326{
8fa728a2
JG
5327 unsigned long flags;
5328 int rc;
6cdb0817 5329 struct ipr_ioa_cfg *ioa_cfg;
1da177e4
LT
5330
5331 ENTER;
1da177e4 5332
6cdb0817
BK
5333 ioa_cfg = (struct ipr_ioa_cfg *) scsi_cmd->device->host->hostdata;
5334
8fa728a2
JG
5335 spin_lock_irqsave(scsi_cmd->device->host->host_lock, flags);
5336 rc = ipr_cancel_op(scsi_cmd);
5337 spin_unlock_irqrestore(scsi_cmd->device->host->host_lock, flags);
1da177e4 5338
6cdb0817
BK
5339 if (rc == SUCCESS)
5340 rc = ipr_wait_for_ops(ioa_cfg, scsi_cmd->device, ipr_match_lun);
1da177e4 5341 LEAVE;
8fa728a2 5342 return rc;
1da177e4
LT
5343}
5344
5345/**
5346 * ipr_handle_other_interrupt - Handle "other" interrupts
5347 * @ioa_cfg: ioa config struct
634651fa 5348 * @int_reg: interrupt register
1da177e4
LT
5349 *
5350 * Return value:
5351 * IRQ_NONE / IRQ_HANDLED
5352 **/
634651fa 5353static irqreturn_t ipr_handle_other_interrupt(struct ipr_ioa_cfg *ioa_cfg,
630ad831 5354 u32 int_reg)
1da177e4
LT
5355{
5356 irqreturn_t rc = IRQ_HANDLED;
7dacb64f 5357 u32 int_mask_reg;
56d6aa33 5358
7dacb64f
WB
5359 int_mask_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg32);
5360 int_reg &= ~int_mask_reg;
5361
5362 /* If an interrupt on the adapter did not occur, ignore it.
5363 * Or in the case of SIS 64, check for a stage change interrupt.
5364 */
5365 if ((int_reg & IPR_PCII_OPER_INTERRUPTS) == 0) {
5366 if (ioa_cfg->sis64) {
5367 int_mask_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
5368 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg) & ~int_mask_reg;
5369 if (int_reg & IPR_PCII_IPL_STAGE_CHANGE) {
5370
5371 /* clear stage change */
5372 writel(IPR_PCII_IPL_STAGE_CHANGE, ioa_cfg->regs.clr_interrupt_reg);
5373 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg) & ~int_mask_reg;
5374 list_del(&ioa_cfg->reset_cmd->queue);
5375 del_timer(&ioa_cfg->reset_cmd->timer);
5376 ipr_reset_ioa_job(ioa_cfg->reset_cmd);
5377 return IRQ_HANDLED;
5378 }
5379 }
5380
5381 return IRQ_NONE;
5382 }
1da177e4
LT
5383
5384 if (int_reg & IPR_PCII_IOA_TRANS_TO_OPER) {
5385 /* Mask the interrupt */
5386 writel(IPR_PCII_IOA_TRANS_TO_OPER, ioa_cfg->regs.set_interrupt_mask_reg);
1da177e4
LT
5387 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
5388
5389 list_del(&ioa_cfg->reset_cmd->queue);
5390 del_timer(&ioa_cfg->reset_cmd->timer);
5391 ipr_reset_ioa_job(ioa_cfg->reset_cmd);
7dacb64f 5392 } else if ((int_reg & IPR_PCII_HRRQ_UPDATED) == int_reg) {
7dd21308
BK
5393 if (ioa_cfg->clear_isr) {
5394 if (ipr_debug && printk_ratelimit())
5395 dev_err(&ioa_cfg->pdev->dev,
5396 "Spurious interrupt detected. 0x%08X\n", int_reg);
5397 writel(IPR_PCII_HRRQ_UPDATED, ioa_cfg->regs.clr_interrupt_reg32);
5398 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg32);
5399 return IRQ_NONE;
5400 }
1da177e4
LT
5401 } else {
5402 if (int_reg & IPR_PCII_IOA_UNIT_CHECKED)
5403 ioa_cfg->ioa_unit_checked = 1;
05a6538a 5404 else if (int_reg & IPR_PCII_NO_HOST_RRQ)
5405 dev_err(&ioa_cfg->pdev->dev,
5406 "No Host RRQ. 0x%08X\n", int_reg);
1da177e4
LT
5407 else
5408 dev_err(&ioa_cfg->pdev->dev,
5409 "Permanent IOA failure. 0x%08X\n", int_reg);
5410
5411 if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
5412 ioa_cfg->sdt_state = GET_DUMP;
5413
5414 ipr_mask_and_clear_interrupts(ioa_cfg, ~0);
5415 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
5416 }
56d6aa33 5417
1da177e4
LT
5418 return rc;
5419}
5420
3feeb89d
WB
5421/**
5422 * ipr_isr_eh - Interrupt service routine error handler
5423 * @ioa_cfg: ioa config struct
5424 * @msg: message to log
5425 *
5426 * Return value:
5427 * none
5428 **/
05a6538a 5429static void ipr_isr_eh(struct ipr_ioa_cfg *ioa_cfg, char *msg, u16 number)
3feeb89d
WB
5430{
5431 ioa_cfg->errors_logged++;
05a6538a 5432 dev_err(&ioa_cfg->pdev->dev, "%s %d\n", msg, number);
3feeb89d
WB
5433
5434 if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
5435 ioa_cfg->sdt_state = GET_DUMP;
5436
5437 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
5438}
5439
b53d124a 5440static int ipr_process_hrrq(struct ipr_hrr_queue *hrr_queue, int budget,
05a6538a 5441 struct list_head *doneq)
5442{
5443 u32 ioasc;
5444 u16 cmd_index;
5445 struct ipr_cmnd *ipr_cmd;
5446 struct ipr_ioa_cfg *ioa_cfg = hrr_queue->ioa_cfg;
5447 int num_hrrq = 0;
5448
5449 /* If interrupts are disabled, ignore the interrupt */
56d6aa33 5450 if (!hrr_queue->allow_interrupts)
05a6538a 5451 return 0;
5452
5453 while ((be32_to_cpu(*hrr_queue->hrrq_curr) & IPR_HRRQ_TOGGLE_BIT) ==
5454 hrr_queue->toggle_bit) {
5455
5456 cmd_index = (be32_to_cpu(*hrr_queue->hrrq_curr) &
5457 IPR_HRRQ_REQ_RESP_HANDLE_MASK) >>
5458 IPR_HRRQ_REQ_RESP_HANDLE_SHIFT;
5459
5460 if (unlikely(cmd_index > hrr_queue->max_cmd_id ||
5461 cmd_index < hrr_queue->min_cmd_id)) {
5462 ipr_isr_eh(ioa_cfg,
5463 "Invalid response handle from IOA: ",
5464 cmd_index);
5465 break;
5466 }
5467
5468 ipr_cmd = ioa_cfg->ipr_cmnd_list[cmd_index];
5469 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
5470
5471 ipr_trc_hook(ipr_cmd, IPR_TRACE_FINISH, ioasc);
5472
5473 list_move_tail(&ipr_cmd->queue, doneq);
5474
5475 if (hrr_queue->hrrq_curr < hrr_queue->hrrq_end) {
5476 hrr_queue->hrrq_curr++;
5477 } else {
5478 hrr_queue->hrrq_curr = hrr_queue->hrrq_start;
5479 hrr_queue->toggle_bit ^= 1u;
5480 }
5481 num_hrrq++;
b53d124a 5482 if (budget > 0 && num_hrrq >= budget)
5483 break;
05a6538a 5484 }
b53d124a 5485
05a6538a 5486 return num_hrrq;
5487}
b53d124a 5488
5489static int ipr_iopoll(struct blk_iopoll *iop, int budget)
5490{
5491 struct ipr_ioa_cfg *ioa_cfg;
5492 struct ipr_hrr_queue *hrrq;
5493 struct ipr_cmnd *ipr_cmd, *temp;
5494 unsigned long hrrq_flags;
5495 int completed_ops;
5496 LIST_HEAD(doneq);
5497
5498 hrrq = container_of(iop, struct ipr_hrr_queue, iopoll);
5499 ioa_cfg = hrrq->ioa_cfg;
5500
5501 spin_lock_irqsave(hrrq->lock, hrrq_flags);
5502 completed_ops = ipr_process_hrrq(hrrq, budget, &doneq);
5503
5504 if (completed_ops < budget)
5505 blk_iopoll_complete(iop);
5506 spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
5507
5508 list_for_each_entry_safe(ipr_cmd, temp, &doneq, queue) {
5509 list_del(&ipr_cmd->queue);
5510 del_timer(&ipr_cmd->timer);
5511 ipr_cmd->fast_done(ipr_cmd);
5512 }
5513
5514 return completed_ops;
5515}
5516
1da177e4
LT
5517/**
5518 * ipr_isr - Interrupt service routine
5519 * @irq: irq number
5520 * @devp: pointer to ioa config struct
1da177e4
LT
5521 *
5522 * Return value:
5523 * IRQ_NONE / IRQ_HANDLED
5524 **/
7d12e780 5525static irqreturn_t ipr_isr(int irq, void *devp)
1da177e4 5526{
05a6538a 5527 struct ipr_hrr_queue *hrrq = (struct ipr_hrr_queue *)devp;
5528 struct ipr_ioa_cfg *ioa_cfg = hrrq->ioa_cfg;
56d6aa33 5529 unsigned long hrrq_flags = 0;
7dacb64f 5530 u32 int_reg = 0;
3feeb89d 5531 int num_hrrq = 0;
7dacb64f 5532 int irq_none = 0;
172cd6e1 5533 struct ipr_cmnd *ipr_cmd, *temp;
1da177e4 5534 irqreturn_t rc = IRQ_NONE;
172cd6e1 5535 LIST_HEAD(doneq);
1da177e4 5536
56d6aa33 5537 spin_lock_irqsave(hrrq->lock, hrrq_flags);
1da177e4 5538 /* If interrupts are disabled, ignore the interrupt */
56d6aa33 5539 if (!hrrq->allow_interrupts) {
5540 spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
1da177e4
LT
5541 return IRQ_NONE;
5542 }
5543
1da177e4 5544 while (1) {
b53d124a 5545 if (ipr_process_hrrq(hrrq, -1, &doneq)) {
5546 rc = IRQ_HANDLED;
1da177e4 5547
b53d124a 5548 if (!ioa_cfg->clear_isr)
5549 break;
7dd21308 5550
1da177e4 5551 /* Clear the PCI interrupt */
a5442ba4 5552 num_hrrq = 0;
3feeb89d 5553 do {
b53d124a 5554 writel(IPR_PCII_HRRQ_UPDATED,
5555 ioa_cfg->regs.clr_interrupt_reg32);
7dacb64f 5556 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg32);
3feeb89d 5557 } while (int_reg & IPR_PCII_HRRQ_UPDATED &&
b53d124a 5558 num_hrrq++ < IPR_MAX_HRRQ_RETRIES);
3feeb89d 5559
7dacb64f
WB
5560 } else if (rc == IRQ_NONE && irq_none == 0) {
5561 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg32);
5562 irq_none++;
a5442ba4
WB
5563 } else if (num_hrrq == IPR_MAX_HRRQ_RETRIES &&
5564 int_reg & IPR_PCII_HRRQ_UPDATED) {
b53d124a 5565 ipr_isr_eh(ioa_cfg,
5566 "Error clearing HRRQ: ", num_hrrq);
172cd6e1 5567 rc = IRQ_HANDLED;
b53d124a 5568 break;
1da177e4
LT
5569 } else
5570 break;
5571 }
5572
5573 if (unlikely(rc == IRQ_NONE))
634651fa 5574 rc = ipr_handle_other_interrupt(ioa_cfg, int_reg);
1da177e4 5575
56d6aa33 5576 spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
172cd6e1
BK
5577 list_for_each_entry_safe(ipr_cmd, temp, &doneq, queue) {
5578 list_del(&ipr_cmd->queue);
5579 del_timer(&ipr_cmd->timer);
5580 ipr_cmd->fast_done(ipr_cmd);
5581 }
05a6538a 5582 return rc;
5583}
5584
5585/**
5586 * ipr_isr_mhrrq - Interrupt service routine
5587 * @irq: irq number
5588 * @devp: pointer to ioa config struct
5589 *
5590 * Return value:
5591 * IRQ_NONE / IRQ_HANDLED
5592 **/
5593static irqreturn_t ipr_isr_mhrrq(int irq, void *devp)
5594{
5595 struct ipr_hrr_queue *hrrq = (struct ipr_hrr_queue *)devp;
b53d124a 5596 struct ipr_ioa_cfg *ioa_cfg = hrrq->ioa_cfg;
56d6aa33 5597 unsigned long hrrq_flags = 0;
05a6538a 5598 struct ipr_cmnd *ipr_cmd, *temp;
5599 irqreturn_t rc = IRQ_NONE;
5600 LIST_HEAD(doneq);
172cd6e1 5601
56d6aa33 5602 spin_lock_irqsave(hrrq->lock, hrrq_flags);
05a6538a 5603
5604 /* If interrupts are disabled, ignore the interrupt */
56d6aa33 5605 if (!hrrq->allow_interrupts) {
5606 spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
05a6538a 5607 return IRQ_NONE;
5608 }
5609
89f8b33c 5610 if (ioa_cfg->iopoll_weight && ioa_cfg->sis64 && ioa_cfg->nvectors > 1) {
b53d124a 5611 if ((be32_to_cpu(*hrrq->hrrq_curr) & IPR_HRRQ_TOGGLE_BIT) ==
5612 hrrq->toggle_bit) {
5613 if (!blk_iopoll_sched_prep(&hrrq->iopoll))
5614 blk_iopoll_sched(&hrrq->iopoll);
5615 spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
5616 return IRQ_HANDLED;
5617 }
5618 } else {
5619 if ((be32_to_cpu(*hrrq->hrrq_curr) & IPR_HRRQ_TOGGLE_BIT) ==
5620 hrrq->toggle_bit)
05a6538a 5621
b53d124a 5622 if (ipr_process_hrrq(hrrq, -1, &doneq))
5623 rc = IRQ_HANDLED;
5624 }
05a6538a 5625
56d6aa33 5626 spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
05a6538a 5627
5628 list_for_each_entry_safe(ipr_cmd, temp, &doneq, queue) {
5629 list_del(&ipr_cmd->queue);
5630 del_timer(&ipr_cmd->timer);
5631 ipr_cmd->fast_done(ipr_cmd);
5632 }
1da177e4
LT
5633 return rc;
5634}
5635
a32c055f
WB
5636/**
5637 * ipr_build_ioadl64 - Build a scatter/gather list and map the buffer
5638 * @ioa_cfg: ioa config struct
5639 * @ipr_cmd: ipr command struct
5640 *
5641 * Return value:
5642 * 0 on success / -1 on failure
5643 **/
5644static int ipr_build_ioadl64(struct ipr_ioa_cfg *ioa_cfg,
5645 struct ipr_cmnd *ipr_cmd)
5646{
5647 int i, nseg;
5648 struct scatterlist *sg;
5649 u32 length;
5650 u32 ioadl_flags = 0;
5651 struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
5652 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
5653 struct ipr_ioadl64_desc *ioadl64 = ipr_cmd->i.ioadl64;
5654
5655 length = scsi_bufflen(scsi_cmd);
5656 if (!length)
5657 return 0;
5658
5659 nseg = scsi_dma_map(scsi_cmd);
5660 if (nseg < 0) {
51f52a47 5661 if (printk_ratelimit())
d73341bf 5662 dev_err(&ioa_cfg->pdev->dev, "scsi_dma_map failed!\n");
a32c055f
WB
5663 return -1;
5664 }
5665
5666 ipr_cmd->dma_use_sg = nseg;
5667
438b0331 5668 ioarcb->data_transfer_length = cpu_to_be32(length);
b8803b1c
WB
5669 ioarcb->ioadl_len =
5670 cpu_to_be32(sizeof(struct ipr_ioadl64_desc) * ipr_cmd->dma_use_sg);
438b0331 5671
a32c055f
WB
5672 if (scsi_cmd->sc_data_direction == DMA_TO_DEVICE) {
5673 ioadl_flags = IPR_IOADL_FLAGS_WRITE;
5674 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
5675 } else if (scsi_cmd->sc_data_direction == DMA_FROM_DEVICE)
5676 ioadl_flags = IPR_IOADL_FLAGS_READ;
5677
5678 scsi_for_each_sg(scsi_cmd, sg, ipr_cmd->dma_use_sg, i) {
5679 ioadl64[i].flags = cpu_to_be32(ioadl_flags);
5680 ioadl64[i].data_len = cpu_to_be32(sg_dma_len(sg));
5681 ioadl64[i].address = cpu_to_be64(sg_dma_address(sg));
5682 }
5683
5684 ioadl64[i-1].flags |= cpu_to_be32(IPR_IOADL_FLAGS_LAST);
5685 return 0;
5686}
5687
1da177e4
LT
5688/**
5689 * ipr_build_ioadl - Build a scatter/gather list and map the buffer
5690 * @ioa_cfg: ioa config struct
5691 * @ipr_cmd: ipr command struct
5692 *
5693 * Return value:
5694 * 0 on success / -1 on failure
5695 **/
5696static int ipr_build_ioadl(struct ipr_ioa_cfg *ioa_cfg,
5697 struct ipr_cmnd *ipr_cmd)
5698{
63015bc9
FT
5699 int i, nseg;
5700 struct scatterlist *sg;
1da177e4
LT
5701 u32 length;
5702 u32 ioadl_flags = 0;
5703 struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
5704 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
a32c055f 5705 struct ipr_ioadl_desc *ioadl = ipr_cmd->i.ioadl;
1da177e4 5706
63015bc9
FT
5707 length = scsi_bufflen(scsi_cmd);
5708 if (!length)
1da177e4
LT
5709 return 0;
5710
63015bc9
FT
5711 nseg = scsi_dma_map(scsi_cmd);
5712 if (nseg < 0) {
d73341bf 5713 dev_err(&ioa_cfg->pdev->dev, "scsi_dma_map failed!\n");
63015bc9
FT
5714 return -1;
5715 }
51b1c7e1 5716
63015bc9
FT
5717 ipr_cmd->dma_use_sg = nseg;
5718
5719 if (scsi_cmd->sc_data_direction == DMA_TO_DEVICE) {
5720 ioadl_flags = IPR_IOADL_FLAGS_WRITE;
5721 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
a32c055f
WB
5722 ioarcb->data_transfer_length = cpu_to_be32(length);
5723 ioarcb->ioadl_len =
63015bc9
FT
5724 cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);
5725 } else if (scsi_cmd->sc_data_direction == DMA_FROM_DEVICE) {
5726 ioadl_flags = IPR_IOADL_FLAGS_READ;
5727 ioarcb->read_data_transfer_length = cpu_to_be32(length);
5728 ioarcb->read_ioadl_len =
5729 cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);
5730 }
1da177e4 5731
a32c055f
WB
5732 if (ipr_cmd->dma_use_sg <= ARRAY_SIZE(ioarcb->u.add_data.u.ioadl)) {
5733 ioadl = ioarcb->u.add_data.u.ioadl;
5734 ioarcb->write_ioadl_addr = cpu_to_be32((ipr_cmd->dma_addr) +
5735 offsetof(struct ipr_ioarcb, u.add_data));
63015bc9
FT
5736 ioarcb->read_ioadl_addr = ioarcb->write_ioadl_addr;
5737 }
1da177e4 5738
63015bc9
FT
5739 scsi_for_each_sg(scsi_cmd, sg, ipr_cmd->dma_use_sg, i) {
5740 ioadl[i].flags_and_data_len =
5741 cpu_to_be32(ioadl_flags | sg_dma_len(sg));
5742 ioadl[i].address = cpu_to_be32(sg_dma_address(sg));
1da177e4
LT
5743 }
5744
63015bc9
FT
5745 ioadl[i-1].flags_and_data_len |= cpu_to_be32(IPR_IOADL_FLAGS_LAST);
5746 return 0;
1da177e4
LT
5747}
5748
1da177e4
LT
5749/**
5750 * ipr_erp_done - Process completion of ERP for a device
5751 * @ipr_cmd: ipr command struct
5752 *
5753 * This function copies the sense buffer into the scsi_cmd
5754 * struct and pushes the scsi_done function.
5755 *
5756 * Return value:
5757 * nothing
5758 **/
5759static void ipr_erp_done(struct ipr_cmnd *ipr_cmd)
5760{
5761 struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
5762 struct ipr_resource_entry *res = scsi_cmd->device->hostdata;
96d21f00 5763 u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
1da177e4
LT
5764
5765 if (IPR_IOASC_SENSE_KEY(ioasc) > 0) {
5766 scsi_cmd->result |= (DID_ERROR << 16);
fb3ed3cb
BK
5767 scmd_printk(KERN_ERR, scsi_cmd,
5768 "Request Sense failed with IOASC: 0x%08X\n", ioasc);
1da177e4
LT
5769 } else {
5770 memcpy(scsi_cmd->sense_buffer, ipr_cmd->sense_buffer,
5771 SCSI_SENSE_BUFFERSIZE);
5772 }
5773
5774 if (res) {
ee0a90fa
BK
5775 if (!ipr_is_naca_model(res))
5776 res->needs_sync_complete = 1;
1da177e4
LT
5777 res->in_erp = 0;
5778 }
63015bc9 5779 scsi_dma_unmap(ipr_cmd->scsi_cmd);
05a6538a 5780 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
1da177e4
LT
5781 scsi_cmd->scsi_done(scsi_cmd);
5782}
5783
5784/**
5785 * ipr_reinit_ipr_cmnd_for_erp - Re-initialize a cmnd block to be used for ERP
5786 * @ipr_cmd: ipr command struct
5787 *
5788 * Return value:
5789 * none
5790 **/
5791static void ipr_reinit_ipr_cmnd_for_erp(struct ipr_cmnd *ipr_cmd)
5792{
51b1c7e1 5793 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
96d21f00 5794 struct ipr_ioasa *ioasa = &ipr_cmd->s.ioasa;
a32c055f 5795 dma_addr_t dma_addr = ipr_cmd->dma_addr;
1da177e4
LT
5796
5797 memset(&ioarcb->cmd_pkt, 0, sizeof(struct ipr_cmd_pkt));
a32c055f 5798 ioarcb->data_transfer_length = 0;
1da177e4 5799 ioarcb->read_data_transfer_length = 0;
a32c055f 5800 ioarcb->ioadl_len = 0;
1da177e4 5801 ioarcb->read_ioadl_len = 0;
96d21f00
WB
5802 ioasa->hdr.ioasc = 0;
5803 ioasa->hdr.residual_data_len = 0;
a32c055f
WB
5804
5805 if (ipr_cmd->ioa_cfg->sis64)
5806 ioarcb->u.sis64_addr_data.data_ioadl_addr =
5807 cpu_to_be64(dma_addr + offsetof(struct ipr_cmnd, i.ioadl64));
5808 else {
5809 ioarcb->write_ioadl_addr =
5810 cpu_to_be32(dma_addr + offsetof(struct ipr_cmnd, i.ioadl));
5811 ioarcb->read_ioadl_addr = ioarcb->write_ioadl_addr;
5812 }
1da177e4
LT
5813}
5814
5815/**
5816 * ipr_erp_request_sense - Send request sense to a device
5817 * @ipr_cmd: ipr command struct
5818 *
5819 * This function sends a request sense to a device as a result
5820 * of a check condition.
5821 *
5822 * Return value:
5823 * nothing
5824 **/
5825static void ipr_erp_request_sense(struct ipr_cmnd *ipr_cmd)
5826{
5827 struct ipr_cmd_pkt *cmd_pkt = &ipr_cmd->ioarcb.cmd_pkt;
96d21f00 5828 u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
1da177e4
LT
5829
5830 if (IPR_IOASC_SENSE_KEY(ioasc) > 0) {
5831 ipr_erp_done(ipr_cmd);
5832 return;
5833 }
5834
5835 ipr_reinit_ipr_cmnd_for_erp(ipr_cmd);
5836
5837 cmd_pkt->request_type = IPR_RQTYPE_SCSICDB;
5838 cmd_pkt->cdb[0] = REQUEST_SENSE;
5839 cmd_pkt->cdb[4] = SCSI_SENSE_BUFFERSIZE;
5840 cmd_pkt->flags_hi |= IPR_FLAGS_HI_SYNC_OVERRIDE;
5841 cmd_pkt->flags_hi |= IPR_FLAGS_HI_NO_ULEN_CHK;
5842 cmd_pkt->timeout = cpu_to_be16(IPR_REQUEST_SENSE_TIMEOUT / HZ);
5843
a32c055f
WB
5844 ipr_init_ioadl(ipr_cmd, ipr_cmd->sense_buffer_dma,
5845 SCSI_SENSE_BUFFERSIZE, IPR_IOADL_FLAGS_READ_LAST);
1da177e4
LT
5846
5847 ipr_do_req(ipr_cmd, ipr_erp_done, ipr_timeout,
5848 IPR_REQUEST_SENSE_TIMEOUT * 2);
5849}
5850
5851/**
5852 * ipr_erp_cancel_all - Send cancel all to a device
5853 * @ipr_cmd: ipr command struct
5854 *
5855 * This function sends a cancel all to a device to clear the
5856 * queue. If we are running TCQ on the device, QERR is set to 1,
5857 * which means all outstanding ops have been dropped on the floor.
5858 * Cancel all will return them to us.
5859 *
5860 * Return value:
5861 * nothing
5862 **/
5863static void ipr_erp_cancel_all(struct ipr_cmnd *ipr_cmd)
5864{
5865 struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
5866 struct ipr_resource_entry *res = scsi_cmd->device->hostdata;
5867 struct ipr_cmd_pkt *cmd_pkt;
5868
5869 res->in_erp = 1;
5870
5871 ipr_reinit_ipr_cmnd_for_erp(ipr_cmd);
5872
17ea0126 5873 if (!scsi_cmd->device->simple_tags) {
1da177e4
LT
5874 ipr_erp_request_sense(ipr_cmd);
5875 return;
5876 }
5877
5878 cmd_pkt = &ipr_cmd->ioarcb.cmd_pkt;
5879 cmd_pkt->request_type = IPR_RQTYPE_IOACMD;
5880 cmd_pkt->cdb[0] = IPR_CANCEL_ALL_REQUESTS;
5881
5882 ipr_do_req(ipr_cmd, ipr_erp_request_sense, ipr_timeout,
5883 IPR_CANCEL_ALL_TIMEOUT);
5884}
5885
5886/**
5887 * ipr_dump_ioasa - Dump contents of IOASA
5888 * @ioa_cfg: ioa config struct
5889 * @ipr_cmd: ipr command struct
fe964d0a 5890 * @res: resource entry struct
1da177e4
LT
5891 *
5892 * This function is invoked by the interrupt handler when ops
5893 * fail. It will log the IOASA if appropriate. Only called
5894 * for GPDD ops.
5895 *
5896 * Return value:
5897 * none
5898 **/
5899static void ipr_dump_ioasa(struct ipr_ioa_cfg *ioa_cfg,
fe964d0a 5900 struct ipr_cmnd *ipr_cmd, struct ipr_resource_entry *res)
1da177e4
LT
5901{
5902 int i;
5903 u16 data_len;
b0692dd4 5904 u32 ioasc, fd_ioasc;
96d21f00 5905 struct ipr_ioasa *ioasa = &ipr_cmd->s.ioasa;
1da177e4
LT
5906 __be32 *ioasa_data = (__be32 *)ioasa;
5907 int error_index;
5908
96d21f00
WB
5909 ioasc = be32_to_cpu(ioasa->hdr.ioasc) & IPR_IOASC_IOASC_MASK;
5910 fd_ioasc = be32_to_cpu(ioasa->hdr.fd_ioasc) & IPR_IOASC_IOASC_MASK;
1da177e4
LT
5911
5912 if (0 == ioasc)
5913 return;
5914
5915 if (ioa_cfg->log_level < IPR_DEFAULT_LOG_LEVEL)
5916 return;
5917
b0692dd4
BK
5918 if (ioasc == IPR_IOASC_BUS_WAS_RESET && fd_ioasc)
5919 error_index = ipr_get_error(fd_ioasc);
5920 else
5921 error_index = ipr_get_error(ioasc);
1da177e4
LT
5922
5923 if (ioa_cfg->log_level < IPR_MAX_LOG_LEVEL) {
5924 /* Don't log an error if the IOA already logged one */
96d21f00 5925 if (ioasa->hdr.ilid != 0)
1da177e4
LT
5926 return;
5927
cc9bd5d4
BK
5928 if (!ipr_is_gscsi(res))
5929 return;
5930
1da177e4
LT
5931 if (ipr_error_table[error_index].log_ioasa == 0)
5932 return;
5933 }
5934
fe964d0a 5935 ipr_res_err(ioa_cfg, res, "%s\n", ipr_error_table[error_index].error);
1da177e4 5936
96d21f00
WB
5937 data_len = be16_to_cpu(ioasa->hdr.ret_stat_len);
5938 if (ioa_cfg->sis64 && sizeof(struct ipr_ioasa64) < data_len)
5939 data_len = sizeof(struct ipr_ioasa64);
5940 else if (!ioa_cfg->sis64 && sizeof(struct ipr_ioasa) < data_len)
1da177e4 5941 data_len = sizeof(struct ipr_ioasa);
1da177e4
LT
5942
5943 ipr_err("IOASA Dump:\n");
5944
5945 for (i = 0; i < data_len / 4; i += 4) {
5946 ipr_err("%08X: %08X %08X %08X %08X\n", i*4,
5947 be32_to_cpu(ioasa_data[i]),
5948 be32_to_cpu(ioasa_data[i+1]),
5949 be32_to_cpu(ioasa_data[i+2]),
5950 be32_to_cpu(ioasa_data[i+3]));
5951 }
5952}
5953
5954/**
5955 * ipr_gen_sense - Generate SCSI sense data from an IOASA
5956 * @ioasa: IOASA
5957 * @sense_buf: sense data buffer
5958 *
5959 * Return value:
5960 * none
5961 **/
5962static void ipr_gen_sense(struct ipr_cmnd *ipr_cmd)
5963{
5964 u32 failing_lba;
5965 u8 *sense_buf = ipr_cmd->scsi_cmd->sense_buffer;
5966 struct ipr_resource_entry *res = ipr_cmd->scsi_cmd->device->hostdata;
96d21f00
WB
5967 struct ipr_ioasa *ioasa = &ipr_cmd->s.ioasa;
5968 u32 ioasc = be32_to_cpu(ioasa->hdr.ioasc);
1da177e4
LT
5969
5970 memset(sense_buf, 0, SCSI_SENSE_BUFFERSIZE);
5971
5972 if (ioasc >= IPR_FIRST_DRIVER_IOASC)
5973 return;
5974
5975 ipr_cmd->scsi_cmd->result = SAM_STAT_CHECK_CONDITION;
5976
5977 if (ipr_is_vset_device(res) &&
5978 ioasc == IPR_IOASC_MED_DO_NOT_REALLOC &&
5979 ioasa->u.vset.failing_lba_hi != 0) {
5980 sense_buf[0] = 0x72;
5981 sense_buf[1] = IPR_IOASC_SENSE_KEY(ioasc);
5982 sense_buf[2] = IPR_IOASC_SENSE_CODE(ioasc);
5983 sense_buf[3] = IPR_IOASC_SENSE_QUAL(ioasc);
5984
5985 sense_buf[7] = 12;
5986 sense_buf[8] = 0;
5987 sense_buf[9] = 0x0A;
5988 sense_buf[10] = 0x80;
5989
5990 failing_lba = be32_to_cpu(ioasa->u.vset.failing_lba_hi);
5991
5992 sense_buf[12] = (failing_lba & 0xff000000) >> 24;
5993 sense_buf[13] = (failing_lba & 0x00ff0000) >> 16;
5994 sense_buf[14] = (failing_lba & 0x0000ff00) >> 8;
5995 sense_buf[15] = failing_lba & 0x000000ff;
5996
5997 failing_lba = be32_to_cpu(ioasa->u.vset.failing_lba_lo);
5998
5999 sense_buf[16] = (failing_lba & 0xff000000) >> 24;
6000 sense_buf[17] = (failing_lba & 0x00ff0000) >> 16;
6001 sense_buf[18] = (failing_lba & 0x0000ff00) >> 8;
6002 sense_buf[19] = failing_lba & 0x000000ff;
6003 } else {
6004 sense_buf[0] = 0x70;
6005 sense_buf[2] = IPR_IOASC_SENSE_KEY(ioasc);
6006 sense_buf[12] = IPR_IOASC_SENSE_CODE(ioasc);
6007 sense_buf[13] = IPR_IOASC_SENSE_QUAL(ioasc);
6008
6009 /* Illegal request */
6010 if ((IPR_IOASC_SENSE_KEY(ioasc) == 0x05) &&
96d21f00 6011 (be32_to_cpu(ioasa->hdr.ioasc_specific) & IPR_FIELD_POINTER_VALID)) {
1da177e4
LT
6012 sense_buf[7] = 10; /* additional length */
6013
6014 /* IOARCB was in error */
6015 if (IPR_IOASC_SENSE_CODE(ioasc) == 0x24)
6016 sense_buf[15] = 0xC0;
6017 else /* Parameter data was invalid */
6018 sense_buf[15] = 0x80;
6019
6020 sense_buf[16] =
6021 ((IPR_FIELD_POINTER_MASK &
96d21f00 6022 be32_to_cpu(ioasa->hdr.ioasc_specific)) >> 8) & 0xff;
1da177e4
LT
6023 sense_buf[17] =
6024 (IPR_FIELD_POINTER_MASK &
96d21f00 6025 be32_to_cpu(ioasa->hdr.ioasc_specific)) & 0xff;
1da177e4
LT
6026 } else {
6027 if (ioasc == IPR_IOASC_MED_DO_NOT_REALLOC) {
6028 if (ipr_is_vset_device(res))
6029 failing_lba = be32_to_cpu(ioasa->u.vset.failing_lba_lo);
6030 else
6031 failing_lba = be32_to_cpu(ioasa->u.dasd.failing_lba);
6032
6033 sense_buf[0] |= 0x80; /* Or in the Valid bit */
6034 sense_buf[3] = (failing_lba & 0xff000000) >> 24;
6035 sense_buf[4] = (failing_lba & 0x00ff0000) >> 16;
6036 sense_buf[5] = (failing_lba & 0x0000ff00) >> 8;
6037 sense_buf[6] = failing_lba & 0x000000ff;
6038 }
6039
6040 sense_buf[7] = 6; /* additional length */
6041 }
6042 }
6043}
6044
ee0a90fa
BK
6045/**
6046 * ipr_get_autosense - Copy autosense data to sense buffer
6047 * @ipr_cmd: ipr command struct
6048 *
6049 * This function copies the autosense buffer to the buffer
6050 * in the scsi_cmd, if there is autosense available.
6051 *
6052 * Return value:
6053 * 1 if autosense was available / 0 if not
6054 **/
6055static int ipr_get_autosense(struct ipr_cmnd *ipr_cmd)
6056{
96d21f00
WB
6057 struct ipr_ioasa *ioasa = &ipr_cmd->s.ioasa;
6058 struct ipr_ioasa64 *ioasa64 = &ipr_cmd->s.ioasa64;
ee0a90fa 6059
96d21f00 6060 if ((be32_to_cpu(ioasa->hdr.ioasc_specific) & IPR_AUTOSENSE_VALID) == 0)
ee0a90fa
BK
6061 return 0;
6062
96d21f00
WB
6063 if (ipr_cmd->ioa_cfg->sis64)
6064 memcpy(ipr_cmd->scsi_cmd->sense_buffer, ioasa64->auto_sense.data,
6065 min_t(u16, be16_to_cpu(ioasa64->auto_sense.auto_sense_len),
6066 SCSI_SENSE_BUFFERSIZE));
6067 else
6068 memcpy(ipr_cmd->scsi_cmd->sense_buffer, ioasa->auto_sense.data,
6069 min_t(u16, be16_to_cpu(ioasa->auto_sense.auto_sense_len),
6070 SCSI_SENSE_BUFFERSIZE));
ee0a90fa
BK
6071 return 1;
6072}
6073
1da177e4
LT
6074/**
6075 * ipr_erp_start - Process an error response for a SCSI op
6076 * @ioa_cfg: ioa config struct
6077 * @ipr_cmd: ipr command struct
6078 *
6079 * This function determines whether or not to initiate ERP
6080 * on the affected device.
6081 *
6082 * Return value:
6083 * nothing
6084 **/
6085static void ipr_erp_start(struct ipr_ioa_cfg *ioa_cfg,
6086 struct ipr_cmnd *ipr_cmd)
6087{
6088 struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
6089 struct ipr_resource_entry *res = scsi_cmd->device->hostdata;
96d21f00 6090 u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
8a048994 6091 u32 masked_ioasc = ioasc & IPR_IOASC_IOASC_MASK;
1da177e4
LT
6092
6093 if (!res) {
6094 ipr_scsi_eh_done(ipr_cmd);
6095 return;
6096 }
6097
8a048994 6098 if (!ipr_is_gscsi(res) && masked_ioasc != IPR_IOASC_HW_DEV_BUS_STATUS)
1da177e4
LT
6099 ipr_gen_sense(ipr_cmd);
6100
cc9bd5d4
BK
6101 ipr_dump_ioasa(ioa_cfg, ipr_cmd, res);
6102
8a048994 6103 switch (masked_ioasc) {
1da177e4 6104 case IPR_IOASC_ABORTED_CMD_TERM_BY_HOST:
ee0a90fa
BK
6105 if (ipr_is_naca_model(res))
6106 scsi_cmd->result |= (DID_ABORT << 16);
6107 else
6108 scsi_cmd->result |= (DID_IMM_RETRY << 16);
1da177e4
LT
6109 break;
6110 case IPR_IOASC_IR_RESOURCE_HANDLE:
b0df54bb 6111 case IPR_IOASC_IR_NO_CMDS_TO_2ND_IOA:
1da177e4
LT
6112 scsi_cmd->result |= (DID_NO_CONNECT << 16);
6113 break;
6114 case IPR_IOASC_HW_SEL_TIMEOUT:
6115 scsi_cmd->result |= (DID_NO_CONNECT << 16);
ee0a90fa
BK
6116 if (!ipr_is_naca_model(res))
6117 res->needs_sync_complete = 1;
1da177e4
LT
6118 break;
6119 case IPR_IOASC_SYNC_REQUIRED:
6120 if (!res->in_erp)
6121 res->needs_sync_complete = 1;
6122 scsi_cmd->result |= (DID_IMM_RETRY << 16);
6123 break;
6124 case IPR_IOASC_MED_DO_NOT_REALLOC: /* prevent retries */
b0df54bb 6125 case IPR_IOASA_IR_DUAL_IOA_DISABLED:
1da177e4
LT
6126 scsi_cmd->result |= (DID_PASSTHROUGH << 16);
6127 break;
6128 case IPR_IOASC_BUS_WAS_RESET:
6129 case IPR_IOASC_BUS_WAS_RESET_BY_OTHER:
6130 /*
6131 * Report the bus reset and ask for a retry. The device
6132 * will give CC/UA the next command.
6133 */
6134 if (!res->resetting_device)
6135 scsi_report_bus_reset(ioa_cfg->host, scsi_cmd->device->channel);
6136 scsi_cmd->result |= (DID_ERROR << 16);
ee0a90fa
BK
6137 if (!ipr_is_naca_model(res))
6138 res->needs_sync_complete = 1;
1da177e4
LT
6139 break;
6140 case IPR_IOASC_HW_DEV_BUS_STATUS:
6141 scsi_cmd->result |= IPR_IOASC_SENSE_STATUS(ioasc);
6142 if (IPR_IOASC_SENSE_STATUS(ioasc) == SAM_STAT_CHECK_CONDITION) {
ee0a90fa
BK
6143 if (!ipr_get_autosense(ipr_cmd)) {
6144 if (!ipr_is_naca_model(res)) {
6145 ipr_erp_cancel_all(ipr_cmd);
6146 return;
6147 }
6148 }
1da177e4 6149 }
ee0a90fa
BK
6150 if (!ipr_is_naca_model(res))
6151 res->needs_sync_complete = 1;
1da177e4
LT
6152 break;
6153 case IPR_IOASC_NR_INIT_CMD_REQUIRED:
6154 break;
6155 default:
5b7304fb
BK
6156 if (IPR_IOASC_SENSE_KEY(ioasc) > RECOVERED_ERROR)
6157 scsi_cmd->result |= (DID_ERROR << 16);
ee0a90fa 6158 if (!ipr_is_vset_device(res) && !ipr_is_naca_model(res))
1da177e4
LT
6159 res->needs_sync_complete = 1;
6160 break;
6161 }
6162
63015bc9 6163 scsi_dma_unmap(ipr_cmd->scsi_cmd);
05a6538a 6164 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
1da177e4
LT
6165 scsi_cmd->scsi_done(scsi_cmd);
6166}
6167
6168/**
6169 * ipr_scsi_done - mid-layer done function
6170 * @ipr_cmd: ipr command struct
6171 *
6172 * This function is invoked by the interrupt handler for
6173 * ops generated by the SCSI mid-layer
6174 *
6175 * Return value:
6176 * none
6177 **/
6178static void ipr_scsi_done(struct ipr_cmnd *ipr_cmd)
6179{
6180 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6181 struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
96d21f00 6182 u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
05a6538a 6183 unsigned long hrrq_flags;
1da177e4 6184
96d21f00 6185 scsi_set_resid(scsi_cmd, be32_to_cpu(ipr_cmd->s.ioasa.hdr.residual_data_len));
1da177e4
LT
6186
6187 if (likely(IPR_IOASC_SENSE_KEY(ioasc) == 0)) {
172cd6e1
BK
6188 scsi_dma_unmap(scsi_cmd);
6189
56d6aa33 6190 spin_lock_irqsave(ipr_cmd->hrrq->lock, hrrq_flags);
05a6538a 6191 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
1da177e4 6192 scsi_cmd->scsi_done(scsi_cmd);
56d6aa33 6193 spin_unlock_irqrestore(ipr_cmd->hrrq->lock, hrrq_flags);
172cd6e1 6194 } else {
56d6aa33 6195 spin_lock_irqsave(ipr_cmd->hrrq->lock, hrrq_flags);
1da177e4 6196 ipr_erp_start(ioa_cfg, ipr_cmd);
56d6aa33 6197 spin_unlock_irqrestore(ipr_cmd->hrrq->lock, hrrq_flags);
172cd6e1 6198 }
1da177e4
LT
6199}
6200
1da177e4
LT
6201/**
6202 * ipr_queuecommand - Queue a mid-layer request
00bfef2c 6203 * @shost: scsi host struct
1da177e4 6204 * @scsi_cmd: scsi command struct
1da177e4
LT
6205 *
6206 * This function queues a request generated by the mid-layer.
6207 *
6208 * Return value:
6209 * 0 on success
6210 * SCSI_MLQUEUE_DEVICE_BUSY if device is busy
6211 * SCSI_MLQUEUE_HOST_BUSY if host is busy
6212 **/
00bfef2c
BK
6213static int ipr_queuecommand(struct Scsi_Host *shost,
6214 struct scsi_cmnd *scsi_cmd)
1da177e4
LT
6215{
6216 struct ipr_ioa_cfg *ioa_cfg;
6217 struct ipr_resource_entry *res;
6218 struct ipr_ioarcb *ioarcb;
6219 struct ipr_cmnd *ipr_cmd;
56d6aa33 6220 unsigned long hrrq_flags, lock_flags;
d12f1576 6221 int rc;
05a6538a 6222 struct ipr_hrr_queue *hrrq;
6223 int hrrq_id;
1da177e4 6224
00bfef2c
BK
6225 ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
6226
1da177e4 6227 scsi_cmd->result = (DID_OK << 16);
00bfef2c 6228 res = scsi_cmd->device->hostdata;
56d6aa33 6229
6230 if (ipr_is_gata(res) && res->sata_port) {
6231 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
6232 rc = ata_sas_queuecmd(scsi_cmd, res->sata_port->ap);
6233 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
6234 return rc;
6235 }
6236
05a6538a 6237 hrrq_id = ipr_get_hrrq_index(ioa_cfg);
6238 hrrq = &ioa_cfg->hrrq[hrrq_id];
1da177e4 6239
56d6aa33 6240 spin_lock_irqsave(hrrq->lock, hrrq_flags);
1da177e4
LT
6241 /*
6242 * We are currently blocking all devices due to a host reset
6243 * We have told the host to stop giving us new requests, but
6244 * ERP ops don't count. FIXME
6245 */
bfae7820 6246 if (unlikely(!hrrq->allow_cmds && !hrrq->ioa_is_dead && !hrrq->removing_ioa)) {
56d6aa33 6247 spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
1da177e4 6248 return SCSI_MLQUEUE_HOST_BUSY;
00bfef2c 6249 }
1da177e4
LT
6250
6251 /*
6252 * FIXME - Create scsi_set_host_offline interface
6253 * and the ioa_is_dead check can be removed
6254 */
bfae7820 6255 if (unlikely(hrrq->ioa_is_dead || hrrq->removing_ioa || !res)) {
56d6aa33 6256 spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
00bfef2c 6257 goto err_nodev;
1da177e4
LT
6258 }
6259
05a6538a 6260 ipr_cmd = __ipr_get_free_ipr_cmnd(hrrq);
6261 if (ipr_cmd == NULL) {
56d6aa33 6262 spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
05a6538a 6263 return SCSI_MLQUEUE_HOST_BUSY;
6264 }
56d6aa33 6265 spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
35a39691 6266
172cd6e1 6267 ipr_init_ipr_cmnd(ipr_cmd, ipr_scsi_done);
1da177e4 6268 ioarcb = &ipr_cmd->ioarcb;
1da177e4
LT
6269
6270 memcpy(ioarcb->cmd_pkt.cdb, scsi_cmd->cmnd, scsi_cmd->cmd_len);
6271 ipr_cmd->scsi_cmd = scsi_cmd;
172cd6e1 6272 ipr_cmd->done = ipr_scsi_eh_done;
1da177e4
LT
6273
6274 if (ipr_is_gscsi(res) || ipr_is_vset_device(res)) {
6275 if (scsi_cmd->underflow == 0)
6276 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_NO_ULEN_CHK;
6277
1da177e4 6278 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_NO_LINK_DESC;
0b1f8d44
WX
6279 if (ipr_is_gscsi(res) && res->reset_occurred) {
6280 res->reset_occurred = 0;
ab6c10b1 6281 ioarcb->cmd_pkt.flags_lo |= IPR_FLAGS_LO_DELAY_AFTER_RST;
0b1f8d44 6282 }
1da177e4 6283 ioarcb->cmd_pkt.flags_lo |= IPR_FLAGS_LO_ALIGNED_BFR;
50668633
CH
6284 if (scsi_cmd->flags & SCMD_TAGGED)
6285 ioarcb->cmd_pkt.flags_lo |= IPR_FLAGS_LO_SIMPLE_TASK;
6286 else
6287 ioarcb->cmd_pkt.flags_lo |= IPR_FLAGS_LO_UNTAGGED_TASK;
1da177e4
LT
6288 }
6289
6290 if (scsi_cmd->cmnd[0] >= 0xC0 &&
05a6538a 6291 (!ipr_is_gscsi(res) || scsi_cmd->cmnd[0] == IPR_QUERY_RSRC_STATE)) {
1da177e4 6292 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
05a6538a 6293 }
1da177e4 6294
d12f1576
DC
6295 if (ioa_cfg->sis64)
6296 rc = ipr_build_ioadl64(ioa_cfg, ipr_cmd);
6297 else
6298 rc = ipr_build_ioadl(ioa_cfg, ipr_cmd);
1da177e4 6299
56d6aa33 6300 spin_lock_irqsave(hrrq->lock, hrrq_flags);
6301 if (unlikely(rc || (!hrrq->allow_cmds && !hrrq->ioa_is_dead))) {
05a6538a 6302 list_add_tail(&ipr_cmd->queue, &hrrq->hrrq_free_q);
56d6aa33 6303 spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
00bfef2c
BK
6304 if (!rc)
6305 scsi_dma_unmap(scsi_cmd);
a5fb407e 6306 return SCSI_MLQUEUE_HOST_BUSY;
1da177e4
LT
6307 }
6308
56d6aa33 6309 if (unlikely(hrrq->ioa_is_dead)) {
05a6538a 6310 list_add_tail(&ipr_cmd->queue, &hrrq->hrrq_free_q);
56d6aa33 6311 spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
00bfef2c
BK
6312 scsi_dma_unmap(scsi_cmd);
6313 goto err_nodev;
6314 }
6315
6316 ioarcb->res_handle = res->res_handle;
6317 if (res->needs_sync_complete) {
6318 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_SYNC_COMPLETE;
6319 res->needs_sync_complete = 0;
6320 }
05a6538a 6321 list_add_tail(&ipr_cmd->queue, &hrrq->hrrq_pending_q);
00bfef2c 6322 ipr_trc_hook(ipr_cmd, IPR_TRACE_START, IPR_GET_RES_PHYS_LOC(res));
a5fb407e 6323 ipr_send_command(ipr_cmd);
56d6aa33 6324 spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
1da177e4 6325 return 0;
1da177e4 6326
00bfef2c 6327err_nodev:
56d6aa33 6328 spin_lock_irqsave(hrrq->lock, hrrq_flags);
00bfef2c
BK
6329 memset(scsi_cmd->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);
6330 scsi_cmd->result = (DID_NO_CONNECT << 16);
6331 scsi_cmd->scsi_done(scsi_cmd);
56d6aa33 6332 spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
00bfef2c
BK
6333 return 0;
6334}
f281233d 6335
35a39691
BK
6336/**
6337 * ipr_ioctl - IOCTL handler
6338 * @sdev: scsi device struct
6339 * @cmd: IOCTL cmd
6340 * @arg: IOCTL arg
6341 *
6342 * Return value:
6343 * 0 on success / other on failure
6344 **/
bd705f2d 6345static int ipr_ioctl(struct scsi_device *sdev, int cmd, void __user *arg)
35a39691
BK
6346{
6347 struct ipr_resource_entry *res;
6348
6349 res = (struct ipr_resource_entry *)sdev->hostdata;
0ce3a7e5
BK
6350 if (res && ipr_is_gata(res)) {
6351 if (cmd == HDIO_GET_IDENTITY)
6352 return -ENOTTY;
94be9a58 6353 return ata_sas_scsi_ioctl(res->sata_port->ap, sdev, cmd, arg);
0ce3a7e5 6354 }
35a39691
BK
6355
6356 return -EINVAL;
6357}
6358
1da177e4
LT
6359/**
6360 * ipr_info - Get information about the card/driver
6361 * @scsi_host: scsi host struct
6362 *
6363 * Return value:
6364 * pointer to buffer with description string
6365 **/
203fa3fe 6366static const char *ipr_ioa_info(struct Scsi_Host *host)
1da177e4
LT
6367{
6368 static char buffer[512];
6369 struct ipr_ioa_cfg *ioa_cfg;
6370 unsigned long lock_flags = 0;
6371
6372 ioa_cfg = (struct ipr_ioa_cfg *) host->hostdata;
6373
6374 spin_lock_irqsave(host->host_lock, lock_flags);
6375 sprintf(buffer, "IBM %X Storage Adapter", ioa_cfg->type);
6376 spin_unlock_irqrestore(host->host_lock, lock_flags);
6377
6378 return buffer;
6379}
6380
6381static struct scsi_host_template driver_template = {
6382 .module = THIS_MODULE,
6383 .name = "IPR",
6384 .info = ipr_ioa_info,
35a39691 6385 .ioctl = ipr_ioctl,
1da177e4
LT
6386 .queuecommand = ipr_queuecommand,
6387 .eh_abort_handler = ipr_eh_abort,
6388 .eh_device_reset_handler = ipr_eh_dev_reset,
6389 .eh_host_reset_handler = ipr_eh_host_reset,
6390 .slave_alloc = ipr_slave_alloc,
6391 .slave_configure = ipr_slave_configure,
6392 .slave_destroy = ipr_slave_destroy,
f688f96d 6393 .scan_finished = ipr_scan_finished,
35a39691
BK
6394 .target_alloc = ipr_target_alloc,
6395 .target_destroy = ipr_target_destroy,
1da177e4 6396 .change_queue_depth = ipr_change_queue_depth,
1da177e4
LT
6397 .bios_param = ipr_biosparam,
6398 .can_queue = IPR_MAX_COMMANDS,
6399 .this_id = -1,
6400 .sg_tablesize = IPR_MAX_SGLIST,
6401 .max_sectors = IPR_IOA_MAX_SECTORS,
6402 .cmd_per_lun = IPR_MAX_CMD_PER_LUN,
6403 .use_clustering = ENABLE_CLUSTERING,
6404 .shost_attrs = ipr_ioa_attrs,
6405 .sdev_attrs = ipr_dev_attrs,
54b2b50c
MP
6406 .proc_name = IPR_NAME,
6407 .no_write_same = 1,
2ecb204d 6408 .use_blk_tags = 1,
1da177e4
LT
6409};
6410
35a39691
BK
6411/**
6412 * ipr_ata_phy_reset - libata phy_reset handler
6413 * @ap: ata port to reset
6414 *
6415 **/
6416static void ipr_ata_phy_reset(struct ata_port *ap)
6417{
6418 unsigned long flags;
6419 struct ipr_sata_port *sata_port = ap->private_data;
6420 struct ipr_resource_entry *res = sata_port->res;
6421 struct ipr_ioa_cfg *ioa_cfg = sata_port->ioa_cfg;
6422 int rc;
6423
6424 ENTER;
6425 spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
203fa3fe 6426 while (ioa_cfg->in_reset_reload) {
35a39691
BK
6427 spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
6428 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
6429 spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
6430 }
6431
56d6aa33 6432 if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].allow_cmds)
35a39691
BK
6433 goto out_unlock;
6434
6435 rc = ipr_device_reset(ioa_cfg, res);
6436
6437 if (rc) {
3e4ec344 6438 ap->link.device[0].class = ATA_DEV_NONE;
35a39691
BK
6439 goto out_unlock;
6440 }
6441
3e7ebdfa
WB
6442 ap->link.device[0].class = res->ata_class;
6443 if (ap->link.device[0].class == ATA_DEV_UNKNOWN)
3e4ec344 6444 ap->link.device[0].class = ATA_DEV_NONE;
35a39691
BK
6445
6446out_unlock:
6447 spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
6448 LEAVE;
6449}
6450
6451/**
6452 * ipr_ata_post_internal - Cleanup after an internal command
6453 * @qc: ATA queued command
6454 *
6455 * Return value:
6456 * none
6457 **/
6458static void ipr_ata_post_internal(struct ata_queued_cmd *qc)
6459{
6460 struct ipr_sata_port *sata_port = qc->ap->private_data;
6461 struct ipr_ioa_cfg *ioa_cfg = sata_port->ioa_cfg;
6462 struct ipr_cmnd *ipr_cmd;
05a6538a 6463 struct ipr_hrr_queue *hrrq;
35a39691
BK
6464 unsigned long flags;
6465
6466 spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
203fa3fe 6467 while (ioa_cfg->in_reset_reload) {
73d98ff0
BK
6468 spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
6469 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
6470 spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
6471 }
6472
05a6538a 6473 for_each_hrrq(hrrq, ioa_cfg) {
56d6aa33 6474 spin_lock(&hrrq->_lock);
05a6538a 6475 list_for_each_entry(ipr_cmd, &hrrq->hrrq_pending_q, queue) {
6476 if (ipr_cmd->qc == qc) {
6477 ipr_device_reset(ioa_cfg, sata_port->res);
6478 break;
6479 }
35a39691 6480 }
56d6aa33 6481 spin_unlock(&hrrq->_lock);
35a39691
BK
6482 }
6483 spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
6484}
6485
35a39691
BK
6486/**
6487 * ipr_copy_sata_tf - Copy a SATA taskfile to an IOA data structure
6488 * @regs: destination
6489 * @tf: source ATA taskfile
6490 *
6491 * Return value:
6492 * none
6493 **/
6494static void ipr_copy_sata_tf(struct ipr_ioarcb_ata_regs *regs,
6495 struct ata_taskfile *tf)
6496{
6497 regs->feature = tf->feature;
6498 regs->nsect = tf->nsect;
6499 regs->lbal = tf->lbal;
6500 regs->lbam = tf->lbam;
6501 regs->lbah = tf->lbah;
6502 regs->device = tf->device;
6503 regs->command = tf->command;
6504 regs->hob_feature = tf->hob_feature;
6505 regs->hob_nsect = tf->hob_nsect;
6506 regs->hob_lbal = tf->hob_lbal;
6507 regs->hob_lbam = tf->hob_lbam;
6508 regs->hob_lbah = tf->hob_lbah;
6509 regs->ctl = tf->ctl;
6510}
6511
6512/**
6513 * ipr_sata_done - done function for SATA commands
6514 * @ipr_cmd: ipr command struct
6515 *
6516 * This function is invoked by the interrupt handler for
6517 * ops generated by the SCSI mid-layer to SATA devices
6518 *
6519 * Return value:
6520 * none
6521 **/
6522static void ipr_sata_done(struct ipr_cmnd *ipr_cmd)
6523{
6524 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6525 struct ata_queued_cmd *qc = ipr_cmd->qc;
6526 struct ipr_sata_port *sata_port = qc->ap->private_data;
6527 struct ipr_resource_entry *res = sata_port->res;
96d21f00 6528 u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
35a39691 6529
56d6aa33 6530 spin_lock(&ipr_cmd->hrrq->_lock);
96d21f00
WB
6531 if (ipr_cmd->ioa_cfg->sis64)
6532 memcpy(&sata_port->ioasa, &ipr_cmd->s.ioasa64.u.gata,
6533 sizeof(struct ipr_ioasa_gata));
6534 else
6535 memcpy(&sata_port->ioasa, &ipr_cmd->s.ioasa.u.gata,
6536 sizeof(struct ipr_ioasa_gata));
35a39691
BK
6537 ipr_dump_ioasa(ioa_cfg, ipr_cmd, res);
6538
96d21f00 6539 if (be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc_specific) & IPR_ATA_DEVICE_WAS_RESET)
3e7ebdfa 6540 scsi_report_device_reset(ioa_cfg->host, res->bus, res->target);
35a39691
BK
6541
6542 if (IPR_IOASC_SENSE_KEY(ioasc) > RECOVERED_ERROR)
96d21f00 6543 qc->err_mask |= __ac_err_mask(sata_port->ioasa.status);
35a39691 6544 else
96d21f00 6545 qc->err_mask |= ac_err_mask(sata_port->ioasa.status);
05a6538a 6546 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
56d6aa33 6547 spin_unlock(&ipr_cmd->hrrq->_lock);
35a39691
BK
6548 ata_qc_complete(qc);
6549}
6550
a32c055f
WB
6551/**
6552 * ipr_build_ata_ioadl64 - Build an ATA scatter/gather list
6553 * @ipr_cmd: ipr command struct
6554 * @qc: ATA queued command
6555 *
6556 **/
6557static void ipr_build_ata_ioadl64(struct ipr_cmnd *ipr_cmd,
6558 struct ata_queued_cmd *qc)
6559{
6560 u32 ioadl_flags = 0;
6561 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
1ac7c26d 6562 struct ipr_ioadl64_desc *ioadl64 = ipr_cmd->i.ata_ioadl.ioadl64;
a32c055f
WB
6563 struct ipr_ioadl64_desc *last_ioadl64 = NULL;
6564 int len = qc->nbytes;
6565 struct scatterlist *sg;
6566 unsigned int si;
6567 dma_addr_t dma_addr = ipr_cmd->dma_addr;
6568
6569 if (len == 0)
6570 return;
6571
6572 if (qc->dma_dir == DMA_TO_DEVICE) {
6573 ioadl_flags = IPR_IOADL_FLAGS_WRITE;
6574 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
6575 } else if (qc->dma_dir == DMA_FROM_DEVICE)
6576 ioadl_flags = IPR_IOADL_FLAGS_READ;
6577
6578 ioarcb->data_transfer_length = cpu_to_be32(len);
6579 ioarcb->ioadl_len =
6580 cpu_to_be32(sizeof(struct ipr_ioadl64_desc) * ipr_cmd->dma_use_sg);
6581 ioarcb->u.sis64_addr_data.data_ioadl_addr =
1ac7c26d 6582 cpu_to_be64(dma_addr + offsetof(struct ipr_cmnd, i.ata_ioadl.ioadl64));
a32c055f
WB
6583
6584 for_each_sg(qc->sg, sg, qc->n_elem, si) {
6585 ioadl64->flags = cpu_to_be32(ioadl_flags);
6586 ioadl64->data_len = cpu_to_be32(sg_dma_len(sg));
6587 ioadl64->address = cpu_to_be64(sg_dma_address(sg));
6588
6589 last_ioadl64 = ioadl64;
6590 ioadl64++;
6591 }
6592
6593 if (likely(last_ioadl64))
6594 last_ioadl64->flags |= cpu_to_be32(IPR_IOADL_FLAGS_LAST);
6595}
6596
35a39691
BK
6597/**
6598 * ipr_build_ata_ioadl - Build an ATA scatter/gather list
6599 * @ipr_cmd: ipr command struct
6600 * @qc: ATA queued command
6601 *
6602 **/
6603static void ipr_build_ata_ioadl(struct ipr_cmnd *ipr_cmd,
6604 struct ata_queued_cmd *qc)
6605{
6606 u32 ioadl_flags = 0;
6607 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
a32c055f 6608 struct ipr_ioadl_desc *ioadl = ipr_cmd->i.ioadl;
3be6cbd7 6609 struct ipr_ioadl_desc *last_ioadl = NULL;
dde20207 6610 int len = qc->nbytes;
35a39691 6611 struct scatterlist *sg;
ff2aeb1e 6612 unsigned int si;
35a39691
BK
6613
6614 if (len == 0)
6615 return;
6616
6617 if (qc->dma_dir == DMA_TO_DEVICE) {
6618 ioadl_flags = IPR_IOADL_FLAGS_WRITE;
6619 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
a32c055f
WB
6620 ioarcb->data_transfer_length = cpu_to_be32(len);
6621 ioarcb->ioadl_len =
35a39691
BK
6622 cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);
6623 } else if (qc->dma_dir == DMA_FROM_DEVICE) {
6624 ioadl_flags = IPR_IOADL_FLAGS_READ;
6625 ioarcb->read_data_transfer_length = cpu_to_be32(len);
6626 ioarcb->read_ioadl_len =
6627 cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);
6628 }
6629
ff2aeb1e 6630 for_each_sg(qc->sg, sg, qc->n_elem, si) {
35a39691
BK
6631 ioadl->flags_and_data_len = cpu_to_be32(ioadl_flags | sg_dma_len(sg));
6632 ioadl->address = cpu_to_be32(sg_dma_address(sg));
3be6cbd7
JG
6633
6634 last_ioadl = ioadl;
6635 ioadl++;
35a39691 6636 }
3be6cbd7
JG
6637
6638 if (likely(last_ioadl))
6639 last_ioadl->flags_and_data_len |= cpu_to_be32(IPR_IOADL_FLAGS_LAST);
35a39691
BK
6640}
6641
56d6aa33 6642/**
6643 * ipr_qc_defer - Get a free ipr_cmd
6644 * @qc: queued command
6645 *
6646 * Return value:
6647 * 0 if success
6648 **/
6649static int ipr_qc_defer(struct ata_queued_cmd *qc)
6650{
6651 struct ata_port *ap = qc->ap;
6652 struct ipr_sata_port *sata_port = ap->private_data;
6653 struct ipr_ioa_cfg *ioa_cfg = sata_port->ioa_cfg;
6654 struct ipr_cmnd *ipr_cmd;
6655 struct ipr_hrr_queue *hrrq;
6656 int hrrq_id;
6657
6658 hrrq_id = ipr_get_hrrq_index(ioa_cfg);
6659 hrrq = &ioa_cfg->hrrq[hrrq_id];
6660
6661 qc->lldd_task = NULL;
6662 spin_lock(&hrrq->_lock);
6663 if (unlikely(hrrq->ioa_is_dead)) {
6664 spin_unlock(&hrrq->_lock);
6665 return 0;
6666 }
6667
6668 if (unlikely(!hrrq->allow_cmds)) {
6669 spin_unlock(&hrrq->_lock);
6670 return ATA_DEFER_LINK;
6671 }
6672
6673 ipr_cmd = __ipr_get_free_ipr_cmnd(hrrq);
6674 if (ipr_cmd == NULL) {
6675 spin_unlock(&hrrq->_lock);
6676 return ATA_DEFER_LINK;
6677 }
6678
6679 qc->lldd_task = ipr_cmd;
6680 spin_unlock(&hrrq->_lock);
6681 return 0;
6682}
6683
35a39691
BK
6684/**
6685 * ipr_qc_issue - Issue a SATA qc to a device
6686 * @qc: queued command
6687 *
6688 * Return value:
6689 * 0 if success
6690 **/
6691static unsigned int ipr_qc_issue(struct ata_queued_cmd *qc)
6692{
6693 struct ata_port *ap = qc->ap;
6694 struct ipr_sata_port *sata_port = ap->private_data;
6695 struct ipr_resource_entry *res = sata_port->res;
6696 struct ipr_ioa_cfg *ioa_cfg = sata_port->ioa_cfg;
6697 struct ipr_cmnd *ipr_cmd;
6698 struct ipr_ioarcb *ioarcb;
6699 struct ipr_ioarcb_ata_regs *regs;
6700
56d6aa33 6701 if (qc->lldd_task == NULL)
6702 ipr_qc_defer(qc);
6703
6704 ipr_cmd = qc->lldd_task;
6705 if (ipr_cmd == NULL)
0feeed82 6706 return AC_ERR_SYSTEM;
35a39691 6707
56d6aa33 6708 qc->lldd_task = NULL;
6709 spin_lock(&ipr_cmd->hrrq->_lock);
6710 if (unlikely(!ipr_cmd->hrrq->allow_cmds ||
6711 ipr_cmd->hrrq->ioa_is_dead)) {
6712 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
6713 spin_unlock(&ipr_cmd->hrrq->_lock);
6714 return AC_ERR_SYSTEM;
6715 }
6716
05a6538a 6717 ipr_init_ipr_cmnd(ipr_cmd, ipr_lock_and_done);
35a39691 6718 ioarcb = &ipr_cmd->ioarcb;
35a39691 6719
a32c055f
WB
6720 if (ioa_cfg->sis64) {
6721 regs = &ipr_cmd->i.ata_ioadl.regs;
6722 ioarcb->add_cmd_parms_offset = cpu_to_be16(sizeof(*ioarcb));
6723 } else
6724 regs = &ioarcb->u.add_data.u.regs;
6725
6726 memset(regs, 0, sizeof(*regs));
6727 ioarcb->add_cmd_parms_len = cpu_to_be16(sizeof(*regs));
35a39691 6728
56d6aa33 6729 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_pending_q);
35a39691
BK
6730 ipr_cmd->qc = qc;
6731 ipr_cmd->done = ipr_sata_done;
3e7ebdfa 6732 ipr_cmd->ioarcb.res_handle = res->res_handle;
35a39691
BK
6733 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_ATA_PASSTHRU;
6734 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_NO_LINK_DESC;
6735 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_NO_ULEN_CHK;
dde20207 6736 ipr_cmd->dma_use_sg = qc->n_elem;
35a39691 6737
a32c055f
WB
6738 if (ioa_cfg->sis64)
6739 ipr_build_ata_ioadl64(ipr_cmd, qc);
6740 else
6741 ipr_build_ata_ioadl(ipr_cmd, qc);
6742
35a39691
BK
6743 regs->flags |= IPR_ATA_FLAG_STATUS_ON_GOOD_COMPLETION;
6744 ipr_copy_sata_tf(regs, &qc->tf);
6745 memcpy(ioarcb->cmd_pkt.cdb, qc->cdb, IPR_MAX_CDB_LEN);
3e7ebdfa 6746 ipr_trc_hook(ipr_cmd, IPR_TRACE_START, IPR_GET_RES_PHYS_LOC(res));
35a39691
BK
6747
6748 switch (qc->tf.protocol) {
6749 case ATA_PROT_NODATA:
6750 case ATA_PROT_PIO:
6751 break;
6752
6753 case ATA_PROT_DMA:
6754 regs->flags |= IPR_ATA_FLAG_XFER_TYPE_DMA;
6755 break;
6756
0dc36888
TH
6757 case ATAPI_PROT_PIO:
6758 case ATAPI_PROT_NODATA:
35a39691
BK
6759 regs->flags |= IPR_ATA_FLAG_PACKET_CMD;
6760 break;
6761
0dc36888 6762 case ATAPI_PROT_DMA:
35a39691
BK
6763 regs->flags |= IPR_ATA_FLAG_PACKET_CMD;
6764 regs->flags |= IPR_ATA_FLAG_XFER_TYPE_DMA;
6765 break;
6766
6767 default:
6768 WARN_ON(1);
56d6aa33 6769 spin_unlock(&ipr_cmd->hrrq->_lock);
0feeed82 6770 return AC_ERR_INVALID;
35a39691
BK
6771 }
6772
a32c055f 6773 ipr_send_command(ipr_cmd);
56d6aa33 6774 spin_unlock(&ipr_cmd->hrrq->_lock);
a32c055f 6775
35a39691
BK
6776 return 0;
6777}
6778
4c9bf4e7
TH
6779/**
6780 * ipr_qc_fill_rtf - Read result TF
6781 * @qc: ATA queued command
6782 *
6783 * Return value:
6784 * true
6785 **/
6786static bool ipr_qc_fill_rtf(struct ata_queued_cmd *qc)
6787{
6788 struct ipr_sata_port *sata_port = qc->ap->private_data;
6789 struct ipr_ioasa_gata *g = &sata_port->ioasa;
6790 struct ata_taskfile *tf = &qc->result_tf;
6791
6792 tf->feature = g->error;
6793 tf->nsect = g->nsect;
6794 tf->lbal = g->lbal;
6795 tf->lbam = g->lbam;
6796 tf->lbah = g->lbah;
6797 tf->device = g->device;
6798 tf->command = g->status;
6799 tf->hob_nsect = g->hob_nsect;
6800 tf->hob_lbal = g->hob_lbal;
6801 tf->hob_lbam = g->hob_lbam;
6802 tf->hob_lbah = g->hob_lbah;
4c9bf4e7
TH
6803
6804 return true;
6805}
6806
35a39691 6807static struct ata_port_operations ipr_sata_ops = {
35a39691 6808 .phy_reset = ipr_ata_phy_reset,
a1efdaba 6809 .hardreset = ipr_sata_reset,
35a39691 6810 .post_internal_cmd = ipr_ata_post_internal,
35a39691 6811 .qc_prep = ata_noop_qc_prep,
56d6aa33 6812 .qc_defer = ipr_qc_defer,
35a39691 6813 .qc_issue = ipr_qc_issue,
4c9bf4e7 6814 .qc_fill_rtf = ipr_qc_fill_rtf,
35a39691
BK
6815 .port_start = ata_sas_port_start,
6816 .port_stop = ata_sas_port_stop
6817};
6818
6819static struct ata_port_info sata_port_info = {
9cbe056f 6820 .flags = ATA_FLAG_SATA | ATA_FLAG_PIO_DMA,
0f2e0330
SS
6821 .pio_mask = ATA_PIO4_ONLY,
6822 .mwdma_mask = ATA_MWDMA2,
6823 .udma_mask = ATA_UDMA6,
35a39691
BK
6824 .port_ops = &ipr_sata_ops
6825};
6826
1da177e4
LT
6827#ifdef CONFIG_PPC_PSERIES
6828static const u16 ipr_blocked_processors[] = {
d3dbeef6
ME
6829 PVR_NORTHSTAR,
6830 PVR_PULSAR,
6831 PVR_POWER4,
6832 PVR_ICESTAR,
6833 PVR_SSTAR,
6834 PVR_POWER4p,
6835 PVR_630,
6836 PVR_630p
1da177e4
LT
6837};
6838
6839/**
6840 * ipr_invalid_adapter - Determine if this adapter is supported on this hardware
6841 * @ioa_cfg: ioa cfg struct
6842 *
6843 * Adapters that use Gemstone revision < 3.1 do not work reliably on
6844 * certain pSeries hardware. This function determines if the given
6845 * adapter is in one of these confgurations or not.
6846 *
6847 * Return value:
6848 * 1 if adapter is not supported / 0 if adapter is supported
6849 **/
6850static int ipr_invalid_adapter(struct ipr_ioa_cfg *ioa_cfg)
6851{
1da177e4
LT
6852 int i;
6853
44c10138 6854 if ((ioa_cfg->type == 0x5702) && (ioa_cfg->pdev->revision < 4)) {
203fa3fe 6855 for (i = 0; i < ARRAY_SIZE(ipr_blocked_processors); i++) {
d3dbeef6 6856 if (pvr_version_is(ipr_blocked_processors[i]))
44c10138 6857 return 1;
1da177e4
LT
6858 }
6859 }
6860 return 0;
6861}
6862#else
6863#define ipr_invalid_adapter(ioa_cfg) 0
6864#endif
6865
6866/**
6867 * ipr_ioa_bringdown_done - IOA bring down completion.
6868 * @ipr_cmd: ipr command struct
6869 *
6870 * This function processes the completion of an adapter bring down.
6871 * It wakes any reset sleepers.
6872 *
6873 * Return value:
6874 * IPR_RC_JOB_RETURN
6875 **/
6876static int ipr_ioa_bringdown_done(struct ipr_cmnd *ipr_cmd)
6877{
6878 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
96b04db9 6879 int i;
1da177e4
LT
6880
6881 ENTER;
bfae7820
BK
6882 if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].removing_ioa) {
6883 ipr_trace;
6884 spin_unlock_irq(ioa_cfg->host->host_lock);
6885 scsi_unblock_requests(ioa_cfg->host);
6886 spin_lock_irq(ioa_cfg->host->host_lock);
6887 }
6888
1da177e4
LT
6889 ioa_cfg->in_reset_reload = 0;
6890 ioa_cfg->reset_retries = 0;
96b04db9 6891 for (i = 0; i < ioa_cfg->hrrq_num; i++) {
6892 spin_lock(&ioa_cfg->hrrq[i]._lock);
6893 ioa_cfg->hrrq[i].ioa_is_dead = 1;
6894 spin_unlock(&ioa_cfg->hrrq[i]._lock);
6895 }
6896 wmb();
6897
05a6538a 6898 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
1da177e4 6899 wake_up_all(&ioa_cfg->reset_wait_q);
1da177e4
LT
6900 LEAVE;
6901
6902 return IPR_RC_JOB_RETURN;
6903}
6904
6905/**
6906 * ipr_ioa_reset_done - IOA reset completion.
6907 * @ipr_cmd: ipr command struct
6908 *
6909 * This function processes the completion of an adapter reset.
6910 * It schedules any necessary mid-layer add/removes and
6911 * wakes any reset sleepers.
6912 *
6913 * Return value:
6914 * IPR_RC_JOB_RETURN
6915 **/
6916static int ipr_ioa_reset_done(struct ipr_cmnd *ipr_cmd)
6917{
6918 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6919 struct ipr_resource_entry *res;
6920 struct ipr_hostrcb *hostrcb, *temp;
56d6aa33 6921 int i = 0, j;
1da177e4
LT
6922
6923 ENTER;
6924 ioa_cfg->in_reset_reload = 0;
56d6aa33 6925 for (j = 0; j < ioa_cfg->hrrq_num; j++) {
6926 spin_lock(&ioa_cfg->hrrq[j]._lock);
6927 ioa_cfg->hrrq[j].allow_cmds = 1;
6928 spin_unlock(&ioa_cfg->hrrq[j]._lock);
6929 }
6930 wmb();
1da177e4 6931 ioa_cfg->reset_cmd = NULL;
3d1d0da6 6932 ioa_cfg->doorbell |= IPR_RUNTIME_RESET;
1da177e4
LT
6933
6934 list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
f688f96d 6935 if (res->add_to_ml || res->del_from_ml) {
1da177e4
LT
6936 ipr_trace;
6937 break;
6938 }
6939 }
6940 schedule_work(&ioa_cfg->work_q);
6941
6942 list_for_each_entry_safe(hostrcb, temp, &ioa_cfg->hostrcb_free_q, queue) {
6943 list_del(&hostrcb->queue);
6944 if (i++ < IPR_NUM_LOG_HCAMS)
6945 ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_LOG_DATA, hostrcb);
6946 else
6947 ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE, hostrcb);
6948 }
6949
6bb04170 6950 scsi_report_bus_reset(ioa_cfg->host, IPR_VSET_BUS);
1da177e4
LT
6951 dev_info(&ioa_cfg->pdev->dev, "IOA initialized.\n");
6952
6953 ioa_cfg->reset_retries = 0;
05a6538a 6954 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
1da177e4
LT
6955 wake_up_all(&ioa_cfg->reset_wait_q);
6956
30237853 6957 spin_unlock(ioa_cfg->host->host_lock);
1da177e4 6958 scsi_unblock_requests(ioa_cfg->host);
30237853 6959 spin_lock(ioa_cfg->host->host_lock);
1da177e4 6960
56d6aa33 6961 if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].allow_cmds)
1da177e4
LT
6962 scsi_block_requests(ioa_cfg->host);
6963
f688f96d 6964 schedule_work(&ioa_cfg->work_q);
1da177e4
LT
6965 LEAVE;
6966 return IPR_RC_JOB_RETURN;
6967}
6968
6969/**
6970 * ipr_set_sup_dev_dflt - Initialize a Set Supported Device buffer
6971 * @supported_dev: supported device struct
6972 * @vpids: vendor product id struct
6973 *
6974 * Return value:
6975 * none
6976 **/
6977static void ipr_set_sup_dev_dflt(struct ipr_supported_device *supported_dev,
6978 struct ipr_std_inq_vpids *vpids)
6979{
6980 memset(supported_dev, 0, sizeof(struct ipr_supported_device));
6981 memcpy(&supported_dev->vpids, vpids, sizeof(struct ipr_std_inq_vpids));
6982 supported_dev->num_records = 1;
6983 supported_dev->data_length =
6984 cpu_to_be16(sizeof(struct ipr_supported_device));
6985 supported_dev->reserved = 0;
6986}
6987
6988/**
6989 * ipr_set_supported_devs - Send Set Supported Devices for a device
6990 * @ipr_cmd: ipr command struct
6991 *
a32c055f 6992 * This function sends a Set Supported Devices to the adapter
1da177e4
LT
6993 *
6994 * Return value:
6995 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
6996 **/
6997static int ipr_set_supported_devs(struct ipr_cmnd *ipr_cmd)
6998{
6999 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7000 struct ipr_supported_device *supp_dev = &ioa_cfg->vpd_cbs->supp_dev;
1da177e4
LT
7001 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
7002 struct ipr_resource_entry *res = ipr_cmd->u.res;
7003
7004 ipr_cmd->job_step = ipr_ioa_reset_done;
7005
7006 list_for_each_entry_continue(res, &ioa_cfg->used_res_q, queue) {
e4fbf44e 7007 if (!ipr_is_scsi_disk(res))
1da177e4
LT
7008 continue;
7009
7010 ipr_cmd->u.res = res;
3e7ebdfa 7011 ipr_set_sup_dev_dflt(supp_dev, &res->std_inq_data.vpids);
1da177e4
LT
7012
7013 ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
7014 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
7015 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
7016
7017 ioarcb->cmd_pkt.cdb[0] = IPR_SET_SUPPORTED_DEVICES;
3e7ebdfa 7018 ioarcb->cmd_pkt.cdb[1] = IPR_SET_ALL_SUPPORTED_DEVICES;
1da177e4
LT
7019 ioarcb->cmd_pkt.cdb[7] = (sizeof(struct ipr_supported_device) >> 8) & 0xff;
7020 ioarcb->cmd_pkt.cdb[8] = sizeof(struct ipr_supported_device) & 0xff;
7021
a32c055f
WB
7022 ipr_init_ioadl(ipr_cmd,
7023 ioa_cfg->vpd_cbs_dma +
7024 offsetof(struct ipr_misc_cbs, supp_dev),
7025 sizeof(struct ipr_supported_device),
7026 IPR_IOADL_FLAGS_WRITE_LAST);
1da177e4
LT
7027
7028 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout,
7029 IPR_SET_SUP_DEVICE_TIMEOUT);
7030
3e7ebdfa
WB
7031 if (!ioa_cfg->sis64)
7032 ipr_cmd->job_step = ipr_set_supported_devs;
05a6538a 7033 LEAVE;
1da177e4
LT
7034 return IPR_RC_JOB_RETURN;
7035 }
7036
05a6538a 7037 LEAVE;
1da177e4
LT
7038 return IPR_RC_JOB_CONTINUE;
7039}
7040
7041/**
7042 * ipr_get_mode_page - Locate specified mode page
7043 * @mode_pages: mode page buffer
7044 * @page_code: page code to find
7045 * @len: minimum required length for mode page
7046 *
7047 * Return value:
7048 * pointer to mode page / NULL on failure
7049 **/
7050static void *ipr_get_mode_page(struct ipr_mode_pages *mode_pages,
7051 u32 page_code, u32 len)
7052{
7053 struct ipr_mode_page_hdr *mode_hdr;
7054 u32 page_length;
7055 u32 length;
7056
7057 if (!mode_pages || (mode_pages->hdr.length == 0))
7058 return NULL;
7059
7060 length = (mode_pages->hdr.length + 1) - 4 - mode_pages->hdr.block_desc_len;
7061 mode_hdr = (struct ipr_mode_page_hdr *)
7062 (mode_pages->data + mode_pages->hdr.block_desc_len);
7063
7064 while (length) {
7065 if (IPR_GET_MODE_PAGE_CODE(mode_hdr) == page_code) {
7066 if (mode_hdr->page_length >= (len - sizeof(struct ipr_mode_page_hdr)))
7067 return mode_hdr;
7068 break;
7069 } else {
7070 page_length = (sizeof(struct ipr_mode_page_hdr) +
7071 mode_hdr->page_length);
7072 length -= page_length;
7073 mode_hdr = (struct ipr_mode_page_hdr *)
7074 ((unsigned long)mode_hdr + page_length);
7075 }
7076 }
7077 return NULL;
7078}
7079
7080/**
7081 * ipr_check_term_power - Check for term power errors
7082 * @ioa_cfg: ioa config struct
7083 * @mode_pages: IOAFP mode pages buffer
7084 *
7085 * Check the IOAFP's mode page 28 for term power errors
7086 *
7087 * Return value:
7088 * nothing
7089 **/
7090static void ipr_check_term_power(struct ipr_ioa_cfg *ioa_cfg,
7091 struct ipr_mode_pages *mode_pages)
7092{
7093 int i;
7094 int entry_length;
7095 struct ipr_dev_bus_entry *bus;
7096 struct ipr_mode_page28 *mode_page;
7097
7098 mode_page = ipr_get_mode_page(mode_pages, 0x28,
7099 sizeof(struct ipr_mode_page28));
7100
7101 entry_length = mode_page->entry_length;
7102
7103 bus = mode_page->bus;
7104
7105 for (i = 0; i < mode_page->num_entries; i++) {
7106 if (bus->flags & IPR_SCSI_ATTR_NO_TERM_PWR) {
7107 dev_err(&ioa_cfg->pdev->dev,
7108 "Term power is absent on scsi bus %d\n",
7109 bus->res_addr.bus);
7110 }
7111
7112 bus = (struct ipr_dev_bus_entry *)((char *)bus + entry_length);
7113 }
7114}
7115
7116/**
7117 * ipr_scsi_bus_speed_limit - Limit the SCSI speed based on SES table
7118 * @ioa_cfg: ioa config struct
7119 *
7120 * Looks through the config table checking for SES devices. If
7121 * the SES device is in the SES table indicating a maximum SCSI
7122 * bus speed, the speed is limited for the bus.
7123 *
7124 * Return value:
7125 * none
7126 **/
7127static void ipr_scsi_bus_speed_limit(struct ipr_ioa_cfg *ioa_cfg)
7128{
7129 u32 max_xfer_rate;
7130 int i;
7131
7132 for (i = 0; i < IPR_MAX_NUM_BUSES; i++) {
7133 max_xfer_rate = ipr_get_max_scsi_speed(ioa_cfg, i,
7134 ioa_cfg->bus_attr[i].bus_width);
7135
7136 if (max_xfer_rate < ioa_cfg->bus_attr[i].max_xfer_rate)
7137 ioa_cfg->bus_attr[i].max_xfer_rate = max_xfer_rate;
7138 }
7139}
7140
7141/**
7142 * ipr_modify_ioafp_mode_page_28 - Modify IOAFP Mode Page 28
7143 * @ioa_cfg: ioa config struct
7144 * @mode_pages: mode page 28 buffer
7145 *
7146 * Updates mode page 28 based on driver configuration
7147 *
7148 * Return value:
7149 * none
7150 **/
7151static void ipr_modify_ioafp_mode_page_28(struct ipr_ioa_cfg *ioa_cfg,
203fa3fe 7152 struct ipr_mode_pages *mode_pages)
1da177e4
LT
7153{
7154 int i, entry_length;
7155 struct ipr_dev_bus_entry *bus;
7156 struct ipr_bus_attributes *bus_attr;
7157 struct ipr_mode_page28 *mode_page;
7158
7159 mode_page = ipr_get_mode_page(mode_pages, 0x28,
7160 sizeof(struct ipr_mode_page28));
7161
7162 entry_length = mode_page->entry_length;
7163
7164 /* Loop for each device bus entry */
7165 for (i = 0, bus = mode_page->bus;
7166 i < mode_page->num_entries;
7167 i++, bus = (struct ipr_dev_bus_entry *)((u8 *)bus + entry_length)) {
7168 if (bus->res_addr.bus > IPR_MAX_NUM_BUSES) {
7169 dev_err(&ioa_cfg->pdev->dev,
7170 "Invalid resource address reported: 0x%08X\n",
7171 IPR_GET_PHYS_LOC(bus->res_addr));
7172 continue;
7173 }
7174
7175 bus_attr = &ioa_cfg->bus_attr[i];
7176 bus->extended_reset_delay = IPR_EXTENDED_RESET_DELAY;
7177 bus->bus_width = bus_attr->bus_width;
7178 bus->max_xfer_rate = cpu_to_be32(bus_attr->max_xfer_rate);
7179 bus->flags &= ~IPR_SCSI_ATTR_QAS_MASK;
7180 if (bus_attr->qas_enabled)
7181 bus->flags |= IPR_SCSI_ATTR_ENABLE_QAS;
7182 else
7183 bus->flags |= IPR_SCSI_ATTR_DISABLE_QAS;
7184 }
7185}
7186
7187/**
7188 * ipr_build_mode_select - Build a mode select command
7189 * @ipr_cmd: ipr command struct
7190 * @res_handle: resource handle to send command to
7191 * @parm: Byte 2 of Mode Sense command
7192 * @dma_addr: DMA buffer address
7193 * @xfer_len: data transfer length
7194 *
7195 * Return value:
7196 * none
7197 **/
7198static void ipr_build_mode_select(struct ipr_cmnd *ipr_cmd,
a32c055f
WB
7199 __be32 res_handle, u8 parm,
7200 dma_addr_t dma_addr, u8 xfer_len)
1da177e4 7201{
1da177e4
LT
7202 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
7203
7204 ioarcb->res_handle = res_handle;
7205 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_SCSICDB;
7206 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
7207 ioarcb->cmd_pkt.cdb[0] = MODE_SELECT;
7208 ioarcb->cmd_pkt.cdb[1] = parm;
7209 ioarcb->cmd_pkt.cdb[4] = xfer_len;
7210
a32c055f 7211 ipr_init_ioadl(ipr_cmd, dma_addr, xfer_len, IPR_IOADL_FLAGS_WRITE_LAST);
1da177e4
LT
7212}
7213
7214/**
7215 * ipr_ioafp_mode_select_page28 - Issue Mode Select Page 28 to IOA
7216 * @ipr_cmd: ipr command struct
7217 *
7218 * This function sets up the SCSI bus attributes and sends
7219 * a Mode Select for Page 28 to activate them.
7220 *
7221 * Return value:
7222 * IPR_RC_JOB_RETURN
7223 **/
7224static int ipr_ioafp_mode_select_page28(struct ipr_cmnd *ipr_cmd)
7225{
7226 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7227 struct ipr_mode_pages *mode_pages = &ioa_cfg->vpd_cbs->mode_pages;
7228 int length;
7229
7230 ENTER;
4733804c
BK
7231 ipr_scsi_bus_speed_limit(ioa_cfg);
7232 ipr_check_term_power(ioa_cfg, mode_pages);
7233 ipr_modify_ioafp_mode_page_28(ioa_cfg, mode_pages);
7234 length = mode_pages->hdr.length + 1;
7235 mode_pages->hdr.length = 0;
1da177e4
LT
7236
7237 ipr_build_mode_select(ipr_cmd, cpu_to_be32(IPR_IOA_RES_HANDLE), 0x11,
7238 ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, mode_pages),
7239 length);
7240
f72919ec
WB
7241 ipr_cmd->job_step = ipr_set_supported_devs;
7242 ipr_cmd->u.res = list_entry(ioa_cfg->used_res_q.next,
7243 struct ipr_resource_entry, queue);
1da177e4
LT
7244 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
7245
7246 LEAVE;
7247 return IPR_RC_JOB_RETURN;
7248}
7249
7250/**
7251 * ipr_build_mode_sense - Builds a mode sense command
7252 * @ipr_cmd: ipr command struct
7253 * @res: resource entry struct
7254 * @parm: Byte 2 of mode sense command
7255 * @dma_addr: DMA address of mode sense buffer
7256 * @xfer_len: Size of DMA buffer
7257 *
7258 * Return value:
7259 * none
7260 **/
7261static void ipr_build_mode_sense(struct ipr_cmnd *ipr_cmd,
7262 __be32 res_handle,
a32c055f 7263 u8 parm, dma_addr_t dma_addr, u8 xfer_len)
1da177e4 7264{
1da177e4
LT
7265 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
7266
7267 ioarcb->res_handle = res_handle;
7268 ioarcb->cmd_pkt.cdb[0] = MODE_SENSE;
7269 ioarcb->cmd_pkt.cdb[2] = parm;
7270 ioarcb->cmd_pkt.cdb[4] = xfer_len;
7271 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_SCSICDB;
7272
a32c055f 7273 ipr_init_ioadl(ipr_cmd, dma_addr, xfer_len, IPR_IOADL_FLAGS_READ_LAST);
1da177e4
LT
7274}
7275
dfed823e
BK
7276/**
7277 * ipr_reset_cmd_failed - Handle failure of IOA reset command
7278 * @ipr_cmd: ipr command struct
7279 *
7280 * This function handles the failure of an IOA bringup command.
7281 *
7282 * Return value:
7283 * IPR_RC_JOB_RETURN
7284 **/
7285static int ipr_reset_cmd_failed(struct ipr_cmnd *ipr_cmd)
7286{
7287 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
96d21f00 7288 u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
dfed823e
BK
7289
7290 dev_err(&ioa_cfg->pdev->dev,
7291 "0x%02X failed with IOASC: 0x%08X\n",
7292 ipr_cmd->ioarcb.cmd_pkt.cdb[0], ioasc);
7293
7294 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
05a6538a 7295 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
dfed823e
BK
7296 return IPR_RC_JOB_RETURN;
7297}
7298
7299/**
7300 * ipr_reset_mode_sense_failed - Handle failure of IOAFP mode sense
7301 * @ipr_cmd: ipr command struct
7302 *
7303 * This function handles the failure of a Mode Sense to the IOAFP.
7304 * Some adapters do not handle all mode pages.
7305 *
7306 * Return value:
7307 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
7308 **/
7309static int ipr_reset_mode_sense_failed(struct ipr_cmnd *ipr_cmd)
7310{
f72919ec 7311 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
96d21f00 7312 u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
dfed823e
BK
7313
7314 if (ioasc == IPR_IOASC_IR_INVALID_REQ_TYPE_OR_PKT) {
f72919ec
WB
7315 ipr_cmd->job_step = ipr_set_supported_devs;
7316 ipr_cmd->u.res = list_entry(ioa_cfg->used_res_q.next,
7317 struct ipr_resource_entry, queue);
dfed823e
BK
7318 return IPR_RC_JOB_CONTINUE;
7319 }
7320
7321 return ipr_reset_cmd_failed(ipr_cmd);
7322}
7323
1da177e4
LT
7324/**
7325 * ipr_ioafp_mode_sense_page28 - Issue Mode Sense Page 28 to IOA
7326 * @ipr_cmd: ipr command struct
7327 *
7328 * This function send a Page 28 mode sense to the IOA to
7329 * retrieve SCSI bus attributes.
7330 *
7331 * Return value:
7332 * IPR_RC_JOB_RETURN
7333 **/
7334static int ipr_ioafp_mode_sense_page28(struct ipr_cmnd *ipr_cmd)
7335{
7336 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7337
7338 ENTER;
7339 ipr_build_mode_sense(ipr_cmd, cpu_to_be32(IPR_IOA_RES_HANDLE),
7340 0x28, ioa_cfg->vpd_cbs_dma +
7341 offsetof(struct ipr_misc_cbs, mode_pages),
7342 sizeof(struct ipr_mode_pages));
7343
7344 ipr_cmd->job_step = ipr_ioafp_mode_select_page28;
dfed823e 7345 ipr_cmd->job_step_failed = ipr_reset_mode_sense_failed;
1da177e4
LT
7346
7347 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
7348
7349 LEAVE;
7350 return IPR_RC_JOB_RETURN;
7351}
7352
ac09c349
BK
7353/**
7354 * ipr_ioafp_mode_select_page24 - Issue Mode Select to IOA
7355 * @ipr_cmd: ipr command struct
7356 *
7357 * This function enables dual IOA RAID support if possible.
7358 *
7359 * Return value:
7360 * IPR_RC_JOB_RETURN
7361 **/
7362static int ipr_ioafp_mode_select_page24(struct ipr_cmnd *ipr_cmd)
7363{
7364 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7365 struct ipr_mode_pages *mode_pages = &ioa_cfg->vpd_cbs->mode_pages;
7366 struct ipr_mode_page24 *mode_page;
7367 int length;
7368
7369 ENTER;
7370 mode_page = ipr_get_mode_page(mode_pages, 0x24,
7371 sizeof(struct ipr_mode_page24));
7372
7373 if (mode_page)
7374 mode_page->flags |= IPR_ENABLE_DUAL_IOA_AF;
7375
7376 length = mode_pages->hdr.length + 1;
7377 mode_pages->hdr.length = 0;
7378
7379 ipr_build_mode_select(ipr_cmd, cpu_to_be32(IPR_IOA_RES_HANDLE), 0x11,
7380 ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, mode_pages),
7381 length);
7382
7383 ipr_cmd->job_step = ipr_ioafp_mode_sense_page28;
7384 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
7385
7386 LEAVE;
7387 return IPR_RC_JOB_RETURN;
7388}
7389
7390/**
7391 * ipr_reset_mode_sense_page24_failed - Handle failure of IOAFP mode sense
7392 * @ipr_cmd: ipr command struct
7393 *
7394 * This function handles the failure of a Mode Sense to the IOAFP.
7395 * Some adapters do not handle all mode pages.
7396 *
7397 * Return value:
7398 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
7399 **/
7400static int ipr_reset_mode_sense_page24_failed(struct ipr_cmnd *ipr_cmd)
7401{
96d21f00 7402 u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
ac09c349
BK
7403
7404 if (ioasc == IPR_IOASC_IR_INVALID_REQ_TYPE_OR_PKT) {
7405 ipr_cmd->job_step = ipr_ioafp_mode_sense_page28;
7406 return IPR_RC_JOB_CONTINUE;
7407 }
7408
7409 return ipr_reset_cmd_failed(ipr_cmd);
7410}
7411
7412/**
7413 * ipr_ioafp_mode_sense_page24 - Issue Page 24 Mode Sense to IOA
7414 * @ipr_cmd: ipr command struct
7415 *
7416 * This function send a mode sense to the IOA to retrieve
7417 * the IOA Advanced Function Control mode page.
7418 *
7419 * Return value:
7420 * IPR_RC_JOB_RETURN
7421 **/
7422static int ipr_ioafp_mode_sense_page24(struct ipr_cmnd *ipr_cmd)
7423{
7424 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7425
7426 ENTER;
7427 ipr_build_mode_sense(ipr_cmd, cpu_to_be32(IPR_IOA_RES_HANDLE),
7428 0x24, ioa_cfg->vpd_cbs_dma +
7429 offsetof(struct ipr_misc_cbs, mode_pages),
7430 sizeof(struct ipr_mode_pages));
7431
7432 ipr_cmd->job_step = ipr_ioafp_mode_select_page24;
7433 ipr_cmd->job_step_failed = ipr_reset_mode_sense_page24_failed;
7434
7435 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
7436
7437 LEAVE;
7438 return IPR_RC_JOB_RETURN;
7439}
7440
1da177e4
LT
7441/**
7442 * ipr_init_res_table - Initialize the resource table
7443 * @ipr_cmd: ipr command struct
7444 *
7445 * This function looks through the existing resource table, comparing
7446 * it with the config table. This function will take care of old/new
7447 * devices and schedule adding/removing them from the mid-layer
7448 * as appropriate.
7449 *
7450 * Return value:
7451 * IPR_RC_JOB_CONTINUE
7452 **/
7453static int ipr_init_res_table(struct ipr_cmnd *ipr_cmd)
7454{
7455 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7456 struct ipr_resource_entry *res, *temp;
3e7ebdfa
WB
7457 struct ipr_config_table_entry_wrapper cfgtew;
7458 int entries, found, flag, i;
1da177e4
LT
7459 LIST_HEAD(old_res);
7460
7461 ENTER;
3e7ebdfa
WB
7462 if (ioa_cfg->sis64)
7463 flag = ioa_cfg->u.cfg_table64->hdr64.flags;
7464 else
7465 flag = ioa_cfg->u.cfg_table->hdr.flags;
7466
7467 if (flag & IPR_UCODE_DOWNLOAD_REQ)
1da177e4
LT
7468 dev_err(&ioa_cfg->pdev->dev, "Microcode download required\n");
7469
7470 list_for_each_entry_safe(res, temp, &ioa_cfg->used_res_q, queue)
7471 list_move_tail(&res->queue, &old_res);
7472
3e7ebdfa 7473 if (ioa_cfg->sis64)
438b0331 7474 entries = be16_to_cpu(ioa_cfg->u.cfg_table64->hdr64.num_entries);
3e7ebdfa
WB
7475 else
7476 entries = ioa_cfg->u.cfg_table->hdr.num_entries;
7477
7478 for (i = 0; i < entries; i++) {
7479 if (ioa_cfg->sis64)
7480 cfgtew.u.cfgte64 = &ioa_cfg->u.cfg_table64->dev[i];
7481 else
7482 cfgtew.u.cfgte = &ioa_cfg->u.cfg_table->dev[i];
1da177e4
LT
7483 found = 0;
7484
7485 list_for_each_entry_safe(res, temp, &old_res, queue) {
3e7ebdfa 7486 if (ipr_is_same_device(res, &cfgtew)) {
1da177e4
LT
7487 list_move_tail(&res->queue, &ioa_cfg->used_res_q);
7488 found = 1;
7489 break;
7490 }
7491 }
7492
7493 if (!found) {
7494 if (list_empty(&ioa_cfg->free_res_q)) {
7495 dev_err(&ioa_cfg->pdev->dev, "Too many devices attached\n");
7496 break;
7497 }
7498
7499 found = 1;
7500 res = list_entry(ioa_cfg->free_res_q.next,
7501 struct ipr_resource_entry, queue);
7502 list_move_tail(&res->queue, &ioa_cfg->used_res_q);
3e7ebdfa 7503 ipr_init_res_entry(res, &cfgtew);
1da177e4 7504 res->add_to_ml = 1;
56115598
WB
7505 } else if (res->sdev && (ipr_is_vset_device(res) || ipr_is_scsi_disk(res)))
7506 res->sdev->allow_restart = 1;
1da177e4
LT
7507
7508 if (found)
3e7ebdfa 7509 ipr_update_res_entry(res, &cfgtew);
1da177e4
LT
7510 }
7511
7512 list_for_each_entry_safe(res, temp, &old_res, queue) {
7513 if (res->sdev) {
7514 res->del_from_ml = 1;
3e7ebdfa 7515 res->res_handle = IPR_INVALID_RES_HANDLE;
1da177e4 7516 list_move_tail(&res->queue, &ioa_cfg->used_res_q);
1da177e4
LT
7517 }
7518 }
7519
3e7ebdfa
WB
7520 list_for_each_entry_safe(res, temp, &old_res, queue) {
7521 ipr_clear_res_target(res);
7522 list_move_tail(&res->queue, &ioa_cfg->free_res_q);
7523 }
7524
ac09c349
BK
7525 if (ioa_cfg->dual_raid && ipr_dual_ioa_raid)
7526 ipr_cmd->job_step = ipr_ioafp_mode_sense_page24;
7527 else
7528 ipr_cmd->job_step = ipr_ioafp_mode_sense_page28;
1da177e4
LT
7529
7530 LEAVE;
7531 return IPR_RC_JOB_CONTINUE;
7532}
7533
7534/**
7535 * ipr_ioafp_query_ioa_cfg - Send a Query IOA Config to the adapter.
7536 * @ipr_cmd: ipr command struct
7537 *
7538 * This function sends a Query IOA Configuration command
7539 * to the adapter to retrieve the IOA configuration table.
7540 *
7541 * Return value:
7542 * IPR_RC_JOB_RETURN
7543 **/
7544static int ipr_ioafp_query_ioa_cfg(struct ipr_cmnd *ipr_cmd)
7545{
7546 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7547 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
1da177e4 7548 struct ipr_inquiry_page3 *ucode_vpd = &ioa_cfg->vpd_cbs->page3_data;
ac09c349 7549 struct ipr_inquiry_cap *cap = &ioa_cfg->vpd_cbs->cap;
1da177e4
LT
7550
7551 ENTER;
ac09c349
BK
7552 if (cap->cap & IPR_CAP_DUAL_IOA_RAID)
7553 ioa_cfg->dual_raid = 1;
1da177e4
LT
7554 dev_info(&ioa_cfg->pdev->dev, "Adapter firmware version: %02X%02X%02X%02X\n",
7555 ucode_vpd->major_release, ucode_vpd->card_type,
7556 ucode_vpd->minor_release[0], ucode_vpd->minor_release[1]);
7557 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
7558 ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
7559
7560 ioarcb->cmd_pkt.cdb[0] = IPR_QUERY_IOA_CONFIG;
438b0331 7561 ioarcb->cmd_pkt.cdb[6] = (ioa_cfg->cfg_table_size >> 16) & 0xff;
3e7ebdfa
WB
7562 ioarcb->cmd_pkt.cdb[7] = (ioa_cfg->cfg_table_size >> 8) & 0xff;
7563 ioarcb->cmd_pkt.cdb[8] = ioa_cfg->cfg_table_size & 0xff;
1da177e4 7564
3e7ebdfa 7565 ipr_init_ioadl(ipr_cmd, ioa_cfg->cfg_table_dma, ioa_cfg->cfg_table_size,
a32c055f 7566 IPR_IOADL_FLAGS_READ_LAST);
1da177e4
LT
7567
7568 ipr_cmd->job_step = ipr_init_res_table;
7569
7570 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
7571
7572 LEAVE;
7573 return IPR_RC_JOB_RETURN;
7574}
7575
7576/**
7577 * ipr_ioafp_inquiry - Send an Inquiry to the adapter.
7578 * @ipr_cmd: ipr command struct
7579 *
7580 * This utility function sends an inquiry to the adapter.
7581 *
7582 * Return value:
7583 * none
7584 **/
7585static void ipr_ioafp_inquiry(struct ipr_cmnd *ipr_cmd, u8 flags, u8 page,
a32c055f 7586 dma_addr_t dma_addr, u8 xfer_len)
1da177e4
LT
7587{
7588 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
1da177e4
LT
7589
7590 ENTER;
7591 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_SCSICDB;
7592 ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
7593
7594 ioarcb->cmd_pkt.cdb[0] = INQUIRY;
7595 ioarcb->cmd_pkt.cdb[1] = flags;
7596 ioarcb->cmd_pkt.cdb[2] = page;
7597 ioarcb->cmd_pkt.cdb[4] = xfer_len;
7598
a32c055f 7599 ipr_init_ioadl(ipr_cmd, dma_addr, xfer_len, IPR_IOADL_FLAGS_READ_LAST);
1da177e4
LT
7600
7601 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
7602 LEAVE;
7603}
7604
62275040
BK
7605/**
7606 * ipr_inquiry_page_supported - Is the given inquiry page supported
7607 * @page0: inquiry page 0 buffer
7608 * @page: page code.
7609 *
7610 * This function determines if the specified inquiry page is supported.
7611 *
7612 * Return value:
7613 * 1 if page is supported / 0 if not
7614 **/
7615static int ipr_inquiry_page_supported(struct ipr_inquiry_page0 *page0, u8 page)
7616{
7617 int i;
7618
7619 for (i = 0; i < min_t(u8, page0->len, IPR_INQUIRY_PAGE0_ENTRIES); i++)
7620 if (page0->page[i] == page)
7621 return 1;
7622
7623 return 0;
7624}
7625
ac09c349
BK
7626/**
7627 * ipr_ioafp_cap_inquiry - Send a Page 0xD0 Inquiry to the adapter.
7628 * @ipr_cmd: ipr command struct
7629 *
7630 * This function sends a Page 0xD0 inquiry to the adapter
7631 * to retrieve adapter capabilities.
7632 *
7633 * Return value:
7634 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
7635 **/
7636static int ipr_ioafp_cap_inquiry(struct ipr_cmnd *ipr_cmd)
7637{
7638 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7639 struct ipr_inquiry_page0 *page0 = &ioa_cfg->vpd_cbs->page0_data;
7640 struct ipr_inquiry_cap *cap = &ioa_cfg->vpd_cbs->cap;
7641
7642 ENTER;
7643 ipr_cmd->job_step = ipr_ioafp_query_ioa_cfg;
7644 memset(cap, 0, sizeof(*cap));
7645
7646 if (ipr_inquiry_page_supported(page0, 0xD0)) {
7647 ipr_ioafp_inquiry(ipr_cmd, 1, 0xD0,
7648 ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, cap),
7649 sizeof(struct ipr_inquiry_cap));
7650 return IPR_RC_JOB_RETURN;
7651 }
7652
7653 LEAVE;
7654 return IPR_RC_JOB_CONTINUE;
7655}
7656
1da177e4
LT
7657/**
7658 * ipr_ioafp_page3_inquiry - Send a Page 3 Inquiry to the adapter.
7659 * @ipr_cmd: ipr command struct
7660 *
7661 * This function sends a Page 3 inquiry to the adapter
7662 * to retrieve software VPD information.
7663 *
7664 * Return value:
7665 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
7666 **/
7667static int ipr_ioafp_page3_inquiry(struct ipr_cmnd *ipr_cmd)
62275040
BK
7668{
7669 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
62275040
BK
7670
7671 ENTER;
7672
ac09c349 7673 ipr_cmd->job_step = ipr_ioafp_cap_inquiry;
62275040
BK
7674
7675 ipr_ioafp_inquiry(ipr_cmd, 1, 3,
7676 ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, page3_data),
7677 sizeof(struct ipr_inquiry_page3));
7678
7679 LEAVE;
7680 return IPR_RC_JOB_RETURN;
7681}
7682
7683/**
7684 * ipr_ioafp_page0_inquiry - Send a Page 0 Inquiry to the adapter.
7685 * @ipr_cmd: ipr command struct
7686 *
7687 * This function sends a Page 0 inquiry to the adapter
7688 * to retrieve supported inquiry pages.
7689 *
7690 * Return value:
7691 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
7692 **/
7693static int ipr_ioafp_page0_inquiry(struct ipr_cmnd *ipr_cmd)
1da177e4
LT
7694{
7695 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7696 char type[5];
7697
7698 ENTER;
7699
7700 /* Grab the type out of the VPD and store it away */
7701 memcpy(type, ioa_cfg->vpd_cbs->ioa_vpd.std_inq_data.vpids.product_id, 4);
7702 type[4] = '\0';
7703 ioa_cfg->type = simple_strtoul((char *)type, NULL, 16);
7704
f688f96d
BK
7705 if (ipr_invalid_adapter(ioa_cfg)) {
7706 dev_err(&ioa_cfg->pdev->dev,
7707 "Adapter not supported in this hardware configuration.\n");
7708
7709 if (!ipr_testmode) {
7710 ioa_cfg->reset_retries += IPR_NUM_RESET_RELOAD_RETRIES;
7711 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
7712 list_add_tail(&ipr_cmd->queue,
7713 &ioa_cfg->hrrq->hrrq_free_q);
7714 return IPR_RC_JOB_RETURN;
7715 }
7716 }
7717
62275040 7718 ipr_cmd->job_step = ipr_ioafp_page3_inquiry;
1da177e4 7719
62275040
BK
7720 ipr_ioafp_inquiry(ipr_cmd, 1, 0,
7721 ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, page0_data),
7722 sizeof(struct ipr_inquiry_page0));
1da177e4
LT
7723
7724 LEAVE;
7725 return IPR_RC_JOB_RETURN;
7726}
7727
7728/**
7729 * ipr_ioafp_std_inquiry - Send a Standard Inquiry to the adapter.
7730 * @ipr_cmd: ipr command struct
7731 *
7732 * This function sends a standard inquiry to the adapter.
7733 *
7734 * Return value:
7735 * IPR_RC_JOB_RETURN
7736 **/
7737static int ipr_ioafp_std_inquiry(struct ipr_cmnd *ipr_cmd)
7738{
7739 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7740
7741 ENTER;
62275040 7742 ipr_cmd->job_step = ipr_ioafp_page0_inquiry;
1da177e4
LT
7743
7744 ipr_ioafp_inquiry(ipr_cmd, 0, 0,
7745 ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, ioa_vpd),
7746 sizeof(struct ipr_ioa_vpd));
7747
7748 LEAVE;
7749 return IPR_RC_JOB_RETURN;
7750}
7751
7752/**
214777ba 7753 * ipr_ioafp_identify_hrrq - Send Identify Host RRQ.
1da177e4
LT
7754 * @ipr_cmd: ipr command struct
7755 *
7756 * This function send an Identify Host Request Response Queue
7757 * command to establish the HRRQ with the adapter.
7758 *
7759 * Return value:
7760 * IPR_RC_JOB_RETURN
7761 **/
214777ba 7762static int ipr_ioafp_identify_hrrq(struct ipr_cmnd *ipr_cmd)
1da177e4
LT
7763{
7764 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7765 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
05a6538a 7766 struct ipr_hrr_queue *hrrq;
1da177e4
LT
7767
7768 ENTER;
05a6538a 7769 ipr_cmd->job_step = ipr_ioafp_std_inquiry;
1da177e4
LT
7770 dev_info(&ioa_cfg->pdev->dev, "Starting IOA initialization sequence.\n");
7771
56d6aa33 7772 if (ioa_cfg->identify_hrrq_index < ioa_cfg->hrrq_num) {
7773 hrrq = &ioa_cfg->hrrq[ioa_cfg->identify_hrrq_index];
1da177e4 7774
05a6538a 7775 ioarcb->cmd_pkt.cdb[0] = IPR_ID_HOST_RR_Q;
7776 ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
1da177e4 7777
05a6538a 7778 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
7779 if (ioa_cfg->sis64)
7780 ioarcb->cmd_pkt.cdb[1] = 0x1;
214777ba 7781
05a6538a 7782 if (ioa_cfg->nvectors == 1)
7783 ioarcb->cmd_pkt.cdb[1] &= ~IPR_ID_HRRQ_SELE_ENABLE;
7784 else
7785 ioarcb->cmd_pkt.cdb[1] |= IPR_ID_HRRQ_SELE_ENABLE;
7786
7787 ioarcb->cmd_pkt.cdb[2] =
7788 ((u64) hrrq->host_rrq_dma >> 24) & 0xff;
7789 ioarcb->cmd_pkt.cdb[3] =
7790 ((u64) hrrq->host_rrq_dma >> 16) & 0xff;
7791 ioarcb->cmd_pkt.cdb[4] =
7792 ((u64) hrrq->host_rrq_dma >> 8) & 0xff;
7793 ioarcb->cmd_pkt.cdb[5] =
7794 ((u64) hrrq->host_rrq_dma) & 0xff;
7795 ioarcb->cmd_pkt.cdb[7] =
7796 ((sizeof(u32) * hrrq->size) >> 8) & 0xff;
7797 ioarcb->cmd_pkt.cdb[8] =
7798 (sizeof(u32) * hrrq->size) & 0xff;
7799
7800 if (ioarcb->cmd_pkt.cdb[1] & IPR_ID_HRRQ_SELE_ENABLE)
56d6aa33 7801 ioarcb->cmd_pkt.cdb[9] =
7802 ioa_cfg->identify_hrrq_index;
1da177e4 7803
05a6538a 7804 if (ioa_cfg->sis64) {
7805 ioarcb->cmd_pkt.cdb[10] =
7806 ((u64) hrrq->host_rrq_dma >> 56) & 0xff;
7807 ioarcb->cmd_pkt.cdb[11] =
7808 ((u64) hrrq->host_rrq_dma >> 48) & 0xff;
7809 ioarcb->cmd_pkt.cdb[12] =
7810 ((u64) hrrq->host_rrq_dma >> 40) & 0xff;
7811 ioarcb->cmd_pkt.cdb[13] =
7812 ((u64) hrrq->host_rrq_dma >> 32) & 0xff;
7813 }
7814
7815 if (ioarcb->cmd_pkt.cdb[1] & IPR_ID_HRRQ_SELE_ENABLE)
56d6aa33 7816 ioarcb->cmd_pkt.cdb[14] =
7817 ioa_cfg->identify_hrrq_index;
05a6538a 7818
7819 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout,
7820 IPR_INTERNAL_TIMEOUT);
7821
56d6aa33 7822 if (++ioa_cfg->identify_hrrq_index < ioa_cfg->hrrq_num)
7823 ipr_cmd->job_step = ipr_ioafp_identify_hrrq;
05a6538a 7824
7825 LEAVE;
7826 return IPR_RC_JOB_RETURN;
05a6538a 7827 }
7828
1da177e4 7829 LEAVE;
05a6538a 7830 return IPR_RC_JOB_CONTINUE;
1da177e4
LT
7831}
7832
7833/**
7834 * ipr_reset_timer_done - Adapter reset timer function
7835 * @ipr_cmd: ipr command struct
7836 *
7837 * Description: This function is used in adapter reset processing
7838 * for timing events. If the reset_cmd pointer in the IOA
7839 * config struct is not this adapter's we are doing nested
7840 * resets and fail_all_ops will take care of freeing the
7841 * command block.
7842 *
7843 * Return value:
7844 * none
7845 **/
7846static void ipr_reset_timer_done(struct ipr_cmnd *ipr_cmd)
7847{
7848 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7849 unsigned long lock_flags = 0;
7850
7851 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
7852
7853 if (ioa_cfg->reset_cmd == ipr_cmd) {
7854 list_del(&ipr_cmd->queue);
7855 ipr_cmd->done(ipr_cmd);
7856 }
7857
7858 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
7859}
7860
7861/**
7862 * ipr_reset_start_timer - Start a timer for adapter reset job
7863 * @ipr_cmd: ipr command struct
7864 * @timeout: timeout value
7865 *
7866 * Description: This function is used in adapter reset processing
7867 * for timing events. If the reset_cmd pointer in the IOA
7868 * config struct is not this adapter's we are doing nested
7869 * resets and fail_all_ops will take care of freeing the
7870 * command block.
7871 *
7872 * Return value:
7873 * none
7874 **/
7875static void ipr_reset_start_timer(struct ipr_cmnd *ipr_cmd,
7876 unsigned long timeout)
7877{
05a6538a 7878
7879 ENTER;
7880 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_pending_q);
1da177e4
LT
7881 ipr_cmd->done = ipr_reset_ioa_job;
7882
7883 ipr_cmd->timer.data = (unsigned long) ipr_cmd;
7884 ipr_cmd->timer.expires = jiffies + timeout;
7885 ipr_cmd->timer.function = (void (*)(unsigned long))ipr_reset_timer_done;
7886 add_timer(&ipr_cmd->timer);
7887}
7888
7889/**
7890 * ipr_init_ioa_mem - Initialize ioa_cfg control block
7891 * @ioa_cfg: ioa cfg struct
7892 *
7893 * Return value:
7894 * nothing
7895 **/
7896static void ipr_init_ioa_mem(struct ipr_ioa_cfg *ioa_cfg)
7897{
05a6538a 7898 struct ipr_hrr_queue *hrrq;
1da177e4 7899
05a6538a 7900 for_each_hrrq(hrrq, ioa_cfg) {
56d6aa33 7901 spin_lock(&hrrq->_lock);
05a6538a 7902 memset(hrrq->host_rrq, 0, sizeof(u32) * hrrq->size);
7903
7904 /* Initialize Host RRQ pointers */
7905 hrrq->hrrq_start = hrrq->host_rrq;
7906 hrrq->hrrq_end = &hrrq->host_rrq[hrrq->size - 1];
7907 hrrq->hrrq_curr = hrrq->hrrq_start;
7908 hrrq->toggle_bit = 1;
56d6aa33 7909 spin_unlock(&hrrq->_lock);
05a6538a 7910 }
56d6aa33 7911 wmb();
05a6538a 7912
56d6aa33 7913 ioa_cfg->identify_hrrq_index = 0;
7914 if (ioa_cfg->hrrq_num == 1)
7915 atomic_set(&ioa_cfg->hrrq_index, 0);
7916 else
7917 atomic_set(&ioa_cfg->hrrq_index, 1);
1da177e4
LT
7918
7919 /* Zero out config table */
3e7ebdfa 7920 memset(ioa_cfg->u.cfg_table, 0, ioa_cfg->cfg_table_size);
1da177e4
LT
7921}
7922
214777ba
WB
7923/**
7924 * ipr_reset_next_stage - Process IPL stage change based on feedback register.
7925 * @ipr_cmd: ipr command struct
7926 *
7927 * Return value:
7928 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
7929 **/
7930static int ipr_reset_next_stage(struct ipr_cmnd *ipr_cmd)
7931{
7932 unsigned long stage, stage_time;
7933 u32 feedback;
7934 volatile u32 int_reg;
7935 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7936 u64 maskval = 0;
7937
7938 feedback = readl(ioa_cfg->regs.init_feedback_reg);
7939 stage = feedback & IPR_IPL_INIT_STAGE_MASK;
7940 stage_time = feedback & IPR_IPL_INIT_STAGE_TIME_MASK;
7941
7942 ipr_dbg("IPL stage = 0x%lx, IPL stage time = %ld\n", stage, stage_time);
7943
7944 /* sanity check the stage_time value */
438b0331
WB
7945 if (stage_time == 0)
7946 stage_time = IPR_IPL_INIT_DEFAULT_STAGE_TIME;
7947 else if (stage_time < IPR_IPL_INIT_MIN_STAGE_TIME)
214777ba
WB
7948 stage_time = IPR_IPL_INIT_MIN_STAGE_TIME;
7949 else if (stage_time > IPR_LONG_OPERATIONAL_TIMEOUT)
7950 stage_time = IPR_LONG_OPERATIONAL_TIMEOUT;
7951
7952 if (stage == IPR_IPL_INIT_STAGE_UNKNOWN) {
7953 writel(IPR_PCII_IPL_STAGE_CHANGE, ioa_cfg->regs.set_interrupt_mask_reg);
7954 int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
7955 stage_time = ioa_cfg->transop_timeout;
7956 ipr_cmd->job_step = ipr_ioafp_identify_hrrq;
7957 } else if (stage == IPR_IPL_INIT_STAGE_TRANSOP) {
1df79ca4
WB
7958 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg32);
7959 if (int_reg & IPR_PCII_IOA_TRANS_TO_OPER) {
7960 ipr_cmd->job_step = ipr_ioafp_identify_hrrq;
7961 maskval = IPR_PCII_IPL_STAGE_CHANGE;
7962 maskval = (maskval << 32) | IPR_PCII_IOA_TRANS_TO_OPER;
7963 writeq(maskval, ioa_cfg->regs.set_interrupt_mask_reg);
7964 int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
7965 return IPR_RC_JOB_CONTINUE;
7966 }
214777ba
WB
7967 }
7968
7969 ipr_cmd->timer.data = (unsigned long) ipr_cmd;
7970 ipr_cmd->timer.expires = jiffies + stage_time * HZ;
7971 ipr_cmd->timer.function = (void (*)(unsigned long))ipr_oper_timeout;
7972 ipr_cmd->done = ipr_reset_ioa_job;
7973 add_timer(&ipr_cmd->timer);
05a6538a 7974
7975 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_pending_q);
214777ba
WB
7976
7977 return IPR_RC_JOB_RETURN;
7978}
7979
1da177e4
LT
7980/**
7981 * ipr_reset_enable_ioa - Enable the IOA following a reset.
7982 * @ipr_cmd: ipr command struct
7983 *
7984 * This function reinitializes some control blocks and
7985 * enables destructive diagnostics on the adapter.
7986 *
7987 * Return value:
7988 * IPR_RC_JOB_RETURN
7989 **/
7990static int ipr_reset_enable_ioa(struct ipr_cmnd *ipr_cmd)
7991{
7992 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7993 volatile u32 int_reg;
7be96900 7994 volatile u64 maskval;
56d6aa33 7995 int i;
1da177e4
LT
7996
7997 ENTER;
214777ba 7998 ipr_cmd->job_step = ipr_ioafp_identify_hrrq;
1da177e4
LT
7999 ipr_init_ioa_mem(ioa_cfg);
8000
56d6aa33 8001 for (i = 0; i < ioa_cfg->hrrq_num; i++) {
8002 spin_lock(&ioa_cfg->hrrq[i]._lock);
8003 ioa_cfg->hrrq[i].allow_interrupts = 1;
8004 spin_unlock(&ioa_cfg->hrrq[i]._lock);
8005 }
8006 wmb();
8701f185
WB
8007 if (ioa_cfg->sis64) {
8008 /* Set the adapter to the correct endian mode. */
8009 writel(IPR_ENDIAN_SWAP_KEY, ioa_cfg->regs.endian_swap_reg);
8010 int_reg = readl(ioa_cfg->regs.endian_swap_reg);
8011 }
8012
7be96900 8013 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg32);
1da177e4
LT
8014
8015 if (int_reg & IPR_PCII_IOA_TRANS_TO_OPER) {
8016 writel((IPR_PCII_ERROR_INTERRUPTS | IPR_PCII_HRRQ_UPDATED),
214777ba 8017 ioa_cfg->regs.clr_interrupt_mask_reg32);
1da177e4
LT
8018 int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
8019 return IPR_RC_JOB_CONTINUE;
8020 }
8021
8022 /* Enable destructive diagnostics on IOA */
214777ba
WB
8023 writel(ioa_cfg->doorbell, ioa_cfg->regs.set_uproc_interrupt_reg32);
8024
7be96900
WB
8025 if (ioa_cfg->sis64) {
8026 maskval = IPR_PCII_IPL_STAGE_CHANGE;
8027 maskval = (maskval << 32) | IPR_PCII_OPER_INTERRUPTS;
8028 writeq(maskval, ioa_cfg->regs.clr_interrupt_mask_reg);
8029 } else
8030 writel(IPR_PCII_OPER_INTERRUPTS, ioa_cfg->regs.clr_interrupt_mask_reg32);
1da177e4 8031
1da177e4
LT
8032 int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
8033
8034 dev_info(&ioa_cfg->pdev->dev, "Initializing IOA.\n");
8035
214777ba
WB
8036 if (ioa_cfg->sis64) {
8037 ipr_cmd->job_step = ipr_reset_next_stage;
8038 return IPR_RC_JOB_CONTINUE;
8039 }
8040
1da177e4 8041 ipr_cmd->timer.data = (unsigned long) ipr_cmd;
5469cb5b 8042 ipr_cmd->timer.expires = jiffies + (ioa_cfg->transop_timeout * HZ);
1da177e4
LT
8043 ipr_cmd->timer.function = (void (*)(unsigned long))ipr_oper_timeout;
8044 ipr_cmd->done = ipr_reset_ioa_job;
8045 add_timer(&ipr_cmd->timer);
05a6538a 8046 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_pending_q);
1da177e4
LT
8047
8048 LEAVE;
8049 return IPR_RC_JOB_RETURN;
8050}
8051
8052/**
8053 * ipr_reset_wait_for_dump - Wait for a dump to timeout.
8054 * @ipr_cmd: ipr command struct
8055 *
8056 * This function is invoked when an adapter dump has run out
8057 * of processing time.
8058 *
8059 * Return value:
8060 * IPR_RC_JOB_CONTINUE
8061 **/
8062static int ipr_reset_wait_for_dump(struct ipr_cmnd *ipr_cmd)
8063{
8064 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8065
8066 if (ioa_cfg->sdt_state == GET_DUMP)
41e9a696
BK
8067 ioa_cfg->sdt_state = WAIT_FOR_DUMP;
8068 else if (ioa_cfg->sdt_state == READ_DUMP)
1da177e4
LT
8069 ioa_cfg->sdt_state = ABORT_DUMP;
8070
4c647e90 8071 ioa_cfg->dump_timeout = 1;
1da177e4
LT
8072 ipr_cmd->job_step = ipr_reset_alert;
8073
8074 return IPR_RC_JOB_CONTINUE;
8075}
8076
8077/**
8078 * ipr_unit_check_no_data - Log a unit check/no data error log
8079 * @ioa_cfg: ioa config struct
8080 *
8081 * Logs an error indicating the adapter unit checked, but for some
8082 * reason, we were unable to fetch the unit check buffer.
8083 *
8084 * Return value:
8085 * nothing
8086 **/
8087static void ipr_unit_check_no_data(struct ipr_ioa_cfg *ioa_cfg)
8088{
8089 ioa_cfg->errors_logged++;
8090 dev_err(&ioa_cfg->pdev->dev, "IOA unit check with no data\n");
8091}
8092
8093/**
8094 * ipr_get_unit_check_buffer - Get the unit check buffer from the IOA
8095 * @ioa_cfg: ioa config struct
8096 *
8097 * Fetches the unit check buffer from the adapter by clocking the data
8098 * through the mailbox register.
8099 *
8100 * Return value:
8101 * nothing
8102 **/
8103static void ipr_get_unit_check_buffer(struct ipr_ioa_cfg *ioa_cfg)
8104{
8105 unsigned long mailbox;
8106 struct ipr_hostrcb *hostrcb;
8107 struct ipr_uc_sdt sdt;
8108 int rc, length;
65f56475 8109 u32 ioasc;
1da177e4
LT
8110
8111 mailbox = readl(ioa_cfg->ioa_mailbox);
8112
dcbad00e 8113 if (!ioa_cfg->sis64 && !ipr_sdt_is_fmt2(mailbox)) {
1da177e4
LT
8114 ipr_unit_check_no_data(ioa_cfg);
8115 return;
8116 }
8117
8118 memset(&sdt, 0, sizeof(struct ipr_uc_sdt));
8119 rc = ipr_get_ldump_data_section(ioa_cfg, mailbox, (__be32 *) &sdt,
8120 (sizeof(struct ipr_uc_sdt)) / sizeof(__be32));
8121
dcbad00e
WB
8122 if (rc || !(sdt.entry[0].flags & IPR_SDT_VALID_ENTRY) ||
8123 ((be32_to_cpu(sdt.hdr.state) != IPR_FMT3_SDT_READY_TO_USE) &&
8124 (be32_to_cpu(sdt.hdr.state) != IPR_FMT2_SDT_READY_TO_USE))) {
1da177e4
LT
8125 ipr_unit_check_no_data(ioa_cfg);
8126 return;
8127 }
8128
8129 /* Find length of the first sdt entry (UC buffer) */
dcbad00e
WB
8130 if (be32_to_cpu(sdt.hdr.state) == IPR_FMT3_SDT_READY_TO_USE)
8131 length = be32_to_cpu(sdt.entry[0].end_token);
8132 else
8133 length = (be32_to_cpu(sdt.entry[0].end_token) -
8134 be32_to_cpu(sdt.entry[0].start_token)) &
8135 IPR_FMT2_MBX_ADDR_MASK;
1da177e4
LT
8136
8137 hostrcb = list_entry(ioa_cfg->hostrcb_free_q.next,
8138 struct ipr_hostrcb, queue);
8139 list_del(&hostrcb->queue);
8140 memset(&hostrcb->hcam, 0, sizeof(hostrcb->hcam));
8141
8142 rc = ipr_get_ldump_data_section(ioa_cfg,
dcbad00e 8143 be32_to_cpu(sdt.entry[0].start_token),
1da177e4
LT
8144 (__be32 *)&hostrcb->hcam,
8145 min(length, (int)sizeof(hostrcb->hcam)) / sizeof(__be32));
8146
65f56475 8147 if (!rc) {
1da177e4 8148 ipr_handle_log_data(ioa_cfg, hostrcb);
4565e370 8149 ioasc = be32_to_cpu(hostrcb->hcam.u.error.fd_ioasc);
65f56475
BK
8150 if (ioasc == IPR_IOASC_NR_IOA_RESET_REQUIRED &&
8151 ioa_cfg->sdt_state == GET_DUMP)
8152 ioa_cfg->sdt_state = WAIT_FOR_DUMP;
8153 } else
1da177e4
LT
8154 ipr_unit_check_no_data(ioa_cfg);
8155
8156 list_add_tail(&hostrcb->queue, &ioa_cfg->hostrcb_free_q);
8157}
8158
110def85
WB
8159/**
8160 * ipr_reset_get_unit_check_job - Call to get the unit check buffer.
8161 * @ipr_cmd: ipr command struct
8162 *
8163 * Description: This function will call to get the unit check buffer.
8164 *
8165 * Return value:
8166 * IPR_RC_JOB_RETURN
8167 **/
8168static int ipr_reset_get_unit_check_job(struct ipr_cmnd *ipr_cmd)
8169{
8170 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8171
8172 ENTER;
8173 ioa_cfg->ioa_unit_checked = 0;
8174 ipr_get_unit_check_buffer(ioa_cfg);
8175 ipr_cmd->job_step = ipr_reset_alert;
8176 ipr_reset_start_timer(ipr_cmd, 0);
8177
8178 LEAVE;
8179 return IPR_RC_JOB_RETURN;
8180}
8181
1da177e4
LT
8182/**
8183 * ipr_reset_restore_cfg_space - Restore PCI config space.
8184 * @ipr_cmd: ipr command struct
8185 *
8186 * Description: This function restores the saved PCI config space of
8187 * the adapter, fails all outstanding ops back to the callers, and
8188 * fetches the dump/unit check if applicable to this reset.
8189 *
8190 * Return value:
8191 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
8192 **/
8193static int ipr_reset_restore_cfg_space(struct ipr_cmnd *ipr_cmd)
8194{
8195 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
630ad831 8196 u32 int_reg;
1da177e4
LT
8197
8198 ENTER;
99c965dd 8199 ioa_cfg->pdev->state_saved = true;
1d3c16a8 8200 pci_restore_state(ioa_cfg->pdev);
1da177e4
LT
8201
8202 if (ipr_set_pcix_cmd_reg(ioa_cfg)) {
96d21f00 8203 ipr_cmd->s.ioasa.hdr.ioasc = cpu_to_be32(IPR_IOASC_PCI_ACCESS_ERROR);
1da177e4
LT
8204 return IPR_RC_JOB_CONTINUE;
8205 }
8206
8207 ipr_fail_all_ops(ioa_cfg);
8208
8701f185
WB
8209 if (ioa_cfg->sis64) {
8210 /* Set the adapter to the correct endian mode. */
8211 writel(IPR_ENDIAN_SWAP_KEY, ioa_cfg->regs.endian_swap_reg);
8212 int_reg = readl(ioa_cfg->regs.endian_swap_reg);
8213 }
8214
1da177e4 8215 if (ioa_cfg->ioa_unit_checked) {
110def85
WB
8216 if (ioa_cfg->sis64) {
8217 ipr_cmd->job_step = ipr_reset_get_unit_check_job;
8218 ipr_reset_start_timer(ipr_cmd, IPR_DUMP_DELAY_TIMEOUT);
8219 return IPR_RC_JOB_RETURN;
8220 } else {
8221 ioa_cfg->ioa_unit_checked = 0;
8222 ipr_get_unit_check_buffer(ioa_cfg);
8223 ipr_cmd->job_step = ipr_reset_alert;
8224 ipr_reset_start_timer(ipr_cmd, 0);
8225 return IPR_RC_JOB_RETURN;
8226 }
1da177e4
LT
8227 }
8228
8229 if (ioa_cfg->in_ioa_bringdown) {
8230 ipr_cmd->job_step = ipr_ioa_bringdown_done;
8231 } else {
8232 ipr_cmd->job_step = ipr_reset_enable_ioa;
8233
8234 if (GET_DUMP == ioa_cfg->sdt_state) {
41e9a696 8235 ioa_cfg->sdt_state = READ_DUMP;
4c647e90 8236 ioa_cfg->dump_timeout = 0;
4d4dd706
KSS
8237 if (ioa_cfg->sis64)
8238 ipr_reset_start_timer(ipr_cmd, IPR_SIS64_DUMP_TIMEOUT);
8239 else
8240 ipr_reset_start_timer(ipr_cmd, IPR_SIS32_DUMP_TIMEOUT);
1da177e4
LT
8241 ipr_cmd->job_step = ipr_reset_wait_for_dump;
8242 schedule_work(&ioa_cfg->work_q);
8243 return IPR_RC_JOB_RETURN;
8244 }
8245 }
8246
438b0331 8247 LEAVE;
1da177e4
LT
8248 return IPR_RC_JOB_CONTINUE;
8249}
8250
e619e1a7
BK
8251/**
8252 * ipr_reset_bist_done - BIST has completed on the adapter.
8253 * @ipr_cmd: ipr command struct
8254 *
8255 * Description: Unblock config space and resume the reset process.
8256 *
8257 * Return value:
8258 * IPR_RC_JOB_CONTINUE
8259 **/
8260static int ipr_reset_bist_done(struct ipr_cmnd *ipr_cmd)
8261{
fb51ccbf
JK
8262 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8263
e619e1a7 8264 ENTER;
fb51ccbf
JK
8265 if (ioa_cfg->cfg_locked)
8266 pci_cfg_access_unlock(ioa_cfg->pdev);
8267 ioa_cfg->cfg_locked = 0;
e619e1a7
BK
8268 ipr_cmd->job_step = ipr_reset_restore_cfg_space;
8269 LEAVE;
8270 return IPR_RC_JOB_CONTINUE;
8271}
8272
1da177e4
LT
8273/**
8274 * ipr_reset_start_bist - Run BIST on the adapter.
8275 * @ipr_cmd: ipr command struct
8276 *
8277 * Description: This function runs BIST on the adapter, then delays 2 seconds.
8278 *
8279 * Return value:
8280 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
8281 **/
8282static int ipr_reset_start_bist(struct ipr_cmnd *ipr_cmd)
8283{
8284 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
cb237ef7 8285 int rc = PCIBIOS_SUCCESSFUL;
1da177e4
LT
8286
8287 ENTER;
cb237ef7
WB
8288 if (ioa_cfg->ipr_chip->bist_method == IPR_MMIO)
8289 writel(IPR_UPROCI_SIS64_START_BIST,
8290 ioa_cfg->regs.set_uproc_interrupt_reg32);
8291 else
8292 rc = pci_write_config_byte(ioa_cfg->pdev, PCI_BIST, PCI_BIST_START);
8293
8294 if (rc == PCIBIOS_SUCCESSFUL) {
e619e1a7 8295 ipr_cmd->job_step = ipr_reset_bist_done;
1da177e4
LT
8296 ipr_reset_start_timer(ipr_cmd, IPR_WAIT_FOR_BIST_TIMEOUT);
8297 rc = IPR_RC_JOB_RETURN;
cb237ef7 8298 } else {
fb51ccbf
JK
8299 if (ioa_cfg->cfg_locked)
8300 pci_cfg_access_unlock(ipr_cmd->ioa_cfg->pdev);
8301 ioa_cfg->cfg_locked = 0;
cb237ef7
WB
8302 ipr_cmd->s.ioasa.hdr.ioasc = cpu_to_be32(IPR_IOASC_PCI_ACCESS_ERROR);
8303 rc = IPR_RC_JOB_CONTINUE;
1da177e4
LT
8304 }
8305
8306 LEAVE;
8307 return rc;
8308}
8309
463fc696
BK
8310/**
8311 * ipr_reset_slot_reset_done - Clear PCI reset to the adapter
8312 * @ipr_cmd: ipr command struct
8313 *
8314 * Description: This clears PCI reset to the adapter and delays two seconds.
8315 *
8316 * Return value:
8317 * IPR_RC_JOB_RETURN
8318 **/
8319static int ipr_reset_slot_reset_done(struct ipr_cmnd *ipr_cmd)
8320{
8321 ENTER;
8322 pci_set_pcie_reset_state(ipr_cmd->ioa_cfg->pdev, pcie_deassert_reset);
8323 ipr_cmd->job_step = ipr_reset_bist_done;
8324 ipr_reset_start_timer(ipr_cmd, IPR_WAIT_FOR_BIST_TIMEOUT);
8325 LEAVE;
8326 return IPR_RC_JOB_RETURN;
8327}
8328
8329/**
8330 * ipr_reset_slot_reset - Reset the PCI slot of the adapter.
8331 * @ipr_cmd: ipr command struct
8332 *
8333 * Description: This asserts PCI reset to the adapter.
8334 *
8335 * Return value:
8336 * IPR_RC_JOB_RETURN
8337 **/
8338static int ipr_reset_slot_reset(struct ipr_cmnd *ipr_cmd)
8339{
8340 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8341 struct pci_dev *pdev = ioa_cfg->pdev;
8342
8343 ENTER;
463fc696
BK
8344 pci_set_pcie_reset_state(pdev, pcie_warm_reset);
8345 ipr_cmd->job_step = ipr_reset_slot_reset_done;
8346 ipr_reset_start_timer(ipr_cmd, IPR_PCI_RESET_TIMEOUT);
8347 LEAVE;
8348 return IPR_RC_JOB_RETURN;
8349}
8350
fb51ccbf
JK
8351/**
8352 * ipr_reset_block_config_access_wait - Wait for permission to block config access
8353 * @ipr_cmd: ipr command struct
8354 *
8355 * Description: This attempts to block config access to the IOA.
8356 *
8357 * Return value:
8358 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
8359 **/
8360static int ipr_reset_block_config_access_wait(struct ipr_cmnd *ipr_cmd)
8361{
8362 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8363 int rc = IPR_RC_JOB_CONTINUE;
8364
8365 if (pci_cfg_access_trylock(ioa_cfg->pdev)) {
8366 ioa_cfg->cfg_locked = 1;
8367 ipr_cmd->job_step = ioa_cfg->reset;
8368 } else {
8369 if (ipr_cmd->u.time_left) {
8370 rc = IPR_RC_JOB_RETURN;
8371 ipr_cmd->u.time_left -= IPR_CHECK_FOR_RESET_TIMEOUT;
8372 ipr_reset_start_timer(ipr_cmd,
8373 IPR_CHECK_FOR_RESET_TIMEOUT);
8374 } else {
8375 ipr_cmd->job_step = ioa_cfg->reset;
8376 dev_err(&ioa_cfg->pdev->dev,
8377 "Timed out waiting to lock config access. Resetting anyway.\n");
8378 }
8379 }
8380
8381 return rc;
8382}
8383
8384/**
8385 * ipr_reset_block_config_access - Block config access to the IOA
8386 * @ipr_cmd: ipr command struct
8387 *
8388 * Description: This attempts to block config access to the IOA
8389 *
8390 * Return value:
8391 * IPR_RC_JOB_CONTINUE
8392 **/
8393static int ipr_reset_block_config_access(struct ipr_cmnd *ipr_cmd)
8394{
8395 ipr_cmd->ioa_cfg->cfg_locked = 0;
8396 ipr_cmd->job_step = ipr_reset_block_config_access_wait;
8397 ipr_cmd->u.time_left = IPR_WAIT_FOR_RESET_TIMEOUT;
8398 return IPR_RC_JOB_CONTINUE;
8399}
8400
1da177e4
LT
8401/**
8402 * ipr_reset_allowed - Query whether or not IOA can be reset
8403 * @ioa_cfg: ioa config struct
8404 *
8405 * Return value:
8406 * 0 if reset not allowed / non-zero if reset is allowed
8407 **/
8408static int ipr_reset_allowed(struct ipr_ioa_cfg *ioa_cfg)
8409{
8410 volatile u32 temp_reg;
8411
8412 temp_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
8413 return ((temp_reg & IPR_PCII_CRITICAL_OPERATION) == 0);
8414}
8415
8416/**
8417 * ipr_reset_wait_to_start_bist - Wait for permission to reset IOA.
8418 * @ipr_cmd: ipr command struct
8419 *
8420 * Description: This function waits for adapter permission to run BIST,
8421 * then runs BIST. If the adapter does not give permission after a
8422 * reasonable time, we will reset the adapter anyway. The impact of
8423 * resetting the adapter without warning the adapter is the risk of
8424 * losing the persistent error log on the adapter. If the adapter is
8425 * reset while it is writing to the flash on the adapter, the flash
8426 * segment will have bad ECC and be zeroed.
8427 *
8428 * Return value:
8429 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
8430 **/
8431static int ipr_reset_wait_to_start_bist(struct ipr_cmnd *ipr_cmd)
8432{
8433 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8434 int rc = IPR_RC_JOB_RETURN;
8435
8436 if (!ipr_reset_allowed(ioa_cfg) && ipr_cmd->u.time_left) {
8437 ipr_cmd->u.time_left -= IPR_CHECK_FOR_RESET_TIMEOUT;
8438 ipr_reset_start_timer(ipr_cmd, IPR_CHECK_FOR_RESET_TIMEOUT);
8439 } else {
fb51ccbf 8440 ipr_cmd->job_step = ipr_reset_block_config_access;
1da177e4
LT
8441 rc = IPR_RC_JOB_CONTINUE;
8442 }
8443
8444 return rc;
8445}
8446
8447/**
8701f185 8448 * ipr_reset_alert - Alert the adapter of a pending reset
1da177e4
LT
8449 * @ipr_cmd: ipr command struct
8450 *
8451 * Description: This function alerts the adapter that it will be reset.
8452 * If memory space is not currently enabled, proceed directly
8453 * to running BIST on the adapter. The timer must always be started
8454 * so we guarantee we do not run BIST from ipr_isr.
8455 *
8456 * Return value:
8457 * IPR_RC_JOB_RETURN
8458 **/
8459static int ipr_reset_alert(struct ipr_cmnd *ipr_cmd)
8460{
8461 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8462 u16 cmd_reg;
8463 int rc;
8464
8465 ENTER;
8466 rc = pci_read_config_word(ioa_cfg->pdev, PCI_COMMAND, &cmd_reg);
8467
8468 if ((rc == PCIBIOS_SUCCESSFUL) && (cmd_reg & PCI_COMMAND_MEMORY)) {
8469 ipr_mask_and_clear_interrupts(ioa_cfg, ~0);
214777ba 8470 writel(IPR_UPROCI_RESET_ALERT, ioa_cfg->regs.set_uproc_interrupt_reg32);
1da177e4
LT
8471 ipr_cmd->job_step = ipr_reset_wait_to_start_bist;
8472 } else {
fb51ccbf 8473 ipr_cmd->job_step = ipr_reset_block_config_access;
1da177e4
LT
8474 }
8475
8476 ipr_cmd->u.time_left = IPR_WAIT_FOR_RESET_TIMEOUT;
8477 ipr_reset_start_timer(ipr_cmd, IPR_CHECK_FOR_RESET_TIMEOUT);
8478
8479 LEAVE;
8480 return IPR_RC_JOB_RETURN;
8481}
8482
4fdd7c7a
BK
8483/**
8484 * ipr_reset_quiesce_done - Complete IOA disconnect
8485 * @ipr_cmd: ipr command struct
8486 *
8487 * Description: Freeze the adapter to complete quiesce processing
8488 *
8489 * Return value:
8490 * IPR_RC_JOB_CONTINUE
8491 **/
8492static int ipr_reset_quiesce_done(struct ipr_cmnd *ipr_cmd)
8493{
8494 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8495
8496 ENTER;
8497 ipr_cmd->job_step = ipr_ioa_bringdown_done;
8498 ipr_mask_and_clear_interrupts(ioa_cfg, ~IPR_PCII_IOA_TRANS_TO_OPER);
8499 LEAVE;
8500 return IPR_RC_JOB_CONTINUE;
8501}
8502
8503/**
8504 * ipr_reset_cancel_hcam_done - Check for outstanding commands
8505 * @ipr_cmd: ipr command struct
8506 *
8507 * Description: Ensure nothing is outstanding to the IOA and
8508 * proceed with IOA disconnect. Otherwise reset the IOA.
8509 *
8510 * Return value:
8511 * IPR_RC_JOB_RETURN / IPR_RC_JOB_CONTINUE
8512 **/
8513static int ipr_reset_cancel_hcam_done(struct ipr_cmnd *ipr_cmd)
8514{
8515 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8516 struct ipr_cmnd *loop_cmd;
8517 struct ipr_hrr_queue *hrrq;
8518 int rc = IPR_RC_JOB_CONTINUE;
8519 int count = 0;
8520
8521 ENTER;
8522 ipr_cmd->job_step = ipr_reset_quiesce_done;
8523
8524 for_each_hrrq(hrrq, ioa_cfg) {
8525 spin_lock(&hrrq->_lock);
8526 list_for_each_entry(loop_cmd, &hrrq->hrrq_pending_q, queue) {
8527 count++;
8528 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
8529 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
8530 rc = IPR_RC_JOB_RETURN;
8531 break;
8532 }
8533 spin_unlock(&hrrq->_lock);
8534
8535 if (count)
8536 break;
8537 }
8538
8539 LEAVE;
8540 return rc;
8541}
8542
8543/**
8544 * ipr_reset_cancel_hcam - Cancel outstanding HCAMs
8545 * @ipr_cmd: ipr command struct
8546 *
8547 * Description: Cancel any oustanding HCAMs to the IOA.
8548 *
8549 * Return value:
8550 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
8551 **/
8552static int ipr_reset_cancel_hcam(struct ipr_cmnd *ipr_cmd)
8553{
8554 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8555 int rc = IPR_RC_JOB_CONTINUE;
8556 struct ipr_cmd_pkt *cmd_pkt;
8557 struct ipr_cmnd *hcam_cmd;
8558 struct ipr_hrr_queue *hrrq = &ioa_cfg->hrrq[IPR_INIT_HRRQ];
8559
8560 ENTER;
8561 ipr_cmd->job_step = ipr_reset_cancel_hcam_done;
8562
8563 if (!hrrq->ioa_is_dead) {
8564 if (!list_empty(&ioa_cfg->hostrcb_pending_q)) {
8565 list_for_each_entry(hcam_cmd, &hrrq->hrrq_pending_q, queue) {
8566 if (hcam_cmd->ioarcb.cmd_pkt.cdb[0] != IPR_HOST_CONTROLLED_ASYNC)
8567 continue;
8568
8569 ipr_cmd->ioarcb.res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
8570 ipr_cmd->ioarcb.cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
8571 cmd_pkt = &ipr_cmd->ioarcb.cmd_pkt;
8572 cmd_pkt->request_type = IPR_RQTYPE_IOACMD;
8573 cmd_pkt->cdb[0] = IPR_CANCEL_REQUEST;
8574 cmd_pkt->cdb[1] = IPR_CANCEL_64BIT_IOARCB;
8575 cmd_pkt->cdb[10] = ((u64) hcam_cmd->dma_addr >> 56) & 0xff;
8576 cmd_pkt->cdb[11] = ((u64) hcam_cmd->dma_addr >> 48) & 0xff;
8577 cmd_pkt->cdb[12] = ((u64) hcam_cmd->dma_addr >> 40) & 0xff;
8578 cmd_pkt->cdb[13] = ((u64) hcam_cmd->dma_addr >> 32) & 0xff;
8579 cmd_pkt->cdb[2] = ((u64) hcam_cmd->dma_addr >> 24) & 0xff;
8580 cmd_pkt->cdb[3] = ((u64) hcam_cmd->dma_addr >> 16) & 0xff;
8581 cmd_pkt->cdb[4] = ((u64) hcam_cmd->dma_addr >> 8) & 0xff;
8582 cmd_pkt->cdb[5] = ((u64) hcam_cmd->dma_addr) & 0xff;
8583
8584 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout,
8585 IPR_CANCEL_TIMEOUT);
8586
8587 rc = IPR_RC_JOB_RETURN;
8588 ipr_cmd->job_step = ipr_reset_cancel_hcam;
8589 break;
8590 }
8591 }
8592 } else
8593 ipr_cmd->job_step = ipr_reset_alert;
8594
8595 LEAVE;
8596 return rc;
8597}
8598
1da177e4
LT
8599/**
8600 * ipr_reset_ucode_download_done - Microcode download completion
8601 * @ipr_cmd: ipr command struct
8602 *
8603 * Description: This function unmaps the microcode download buffer.
8604 *
8605 * Return value:
8606 * IPR_RC_JOB_CONTINUE
8607 **/
8608static int ipr_reset_ucode_download_done(struct ipr_cmnd *ipr_cmd)
8609{
8610 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8611 struct ipr_sglist *sglist = ioa_cfg->ucode_sglist;
8612
d73341bf 8613 dma_unmap_sg(&ioa_cfg->pdev->dev, sglist->scatterlist,
1da177e4
LT
8614 sglist->num_sg, DMA_TO_DEVICE);
8615
8616 ipr_cmd->job_step = ipr_reset_alert;
8617 return IPR_RC_JOB_CONTINUE;
8618}
8619
8620/**
8621 * ipr_reset_ucode_download - Download microcode to the adapter
8622 * @ipr_cmd: ipr command struct
8623 *
8624 * Description: This function checks to see if it there is microcode
8625 * to download to the adapter. If there is, a download is performed.
8626 *
8627 * Return value:
8628 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
8629 **/
8630static int ipr_reset_ucode_download(struct ipr_cmnd *ipr_cmd)
8631{
8632 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8633 struct ipr_sglist *sglist = ioa_cfg->ucode_sglist;
8634
8635 ENTER;
8636 ipr_cmd->job_step = ipr_reset_alert;
8637
8638 if (!sglist)
8639 return IPR_RC_JOB_CONTINUE;
8640
8641 ipr_cmd->ioarcb.res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
8642 ipr_cmd->ioarcb.cmd_pkt.request_type = IPR_RQTYPE_SCSICDB;
8643 ipr_cmd->ioarcb.cmd_pkt.cdb[0] = WRITE_BUFFER;
8644 ipr_cmd->ioarcb.cmd_pkt.cdb[1] = IPR_WR_BUF_DOWNLOAD_AND_SAVE;
8645 ipr_cmd->ioarcb.cmd_pkt.cdb[6] = (sglist->buffer_len & 0xff0000) >> 16;
8646 ipr_cmd->ioarcb.cmd_pkt.cdb[7] = (sglist->buffer_len & 0x00ff00) >> 8;
8647 ipr_cmd->ioarcb.cmd_pkt.cdb[8] = sglist->buffer_len & 0x0000ff;
8648
a32c055f
WB
8649 if (ioa_cfg->sis64)
8650 ipr_build_ucode_ioadl64(ipr_cmd, sglist);
8651 else
8652 ipr_build_ucode_ioadl(ipr_cmd, sglist);
1da177e4
LT
8653 ipr_cmd->job_step = ipr_reset_ucode_download_done;
8654
8655 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout,
8656 IPR_WRITE_BUFFER_TIMEOUT);
8657
8658 LEAVE;
8659 return IPR_RC_JOB_RETURN;
8660}
8661
8662/**
8663 * ipr_reset_shutdown_ioa - Shutdown the adapter
8664 * @ipr_cmd: ipr command struct
8665 *
8666 * Description: This function issues an adapter shutdown of the
8667 * specified type to the specified adapter as part of the
8668 * adapter reset job.
8669 *
8670 * Return value:
8671 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
8672 **/
8673static int ipr_reset_shutdown_ioa(struct ipr_cmnd *ipr_cmd)
8674{
8675 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8676 enum ipr_shutdown_type shutdown_type = ipr_cmd->u.shutdown_type;
8677 unsigned long timeout;
8678 int rc = IPR_RC_JOB_CONTINUE;
8679
8680 ENTER;
4fdd7c7a
BK
8681 if (shutdown_type == IPR_SHUTDOWN_QUIESCE)
8682 ipr_cmd->job_step = ipr_reset_cancel_hcam;
8683 else if (shutdown_type != IPR_SHUTDOWN_NONE &&
56d6aa33 8684 !ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead) {
1da177e4
LT
8685 ipr_cmd->ioarcb.res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
8686 ipr_cmd->ioarcb.cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
8687 ipr_cmd->ioarcb.cmd_pkt.cdb[0] = IPR_IOA_SHUTDOWN;
8688 ipr_cmd->ioarcb.cmd_pkt.cdb[1] = shutdown_type;
8689
ac09c349
BK
8690 if (shutdown_type == IPR_SHUTDOWN_NORMAL)
8691 timeout = IPR_SHUTDOWN_TIMEOUT;
1da177e4
LT
8692 else if (shutdown_type == IPR_SHUTDOWN_PREPARE_FOR_NORMAL)
8693 timeout = IPR_INTERNAL_TIMEOUT;
ac09c349
BK
8694 else if (ioa_cfg->dual_raid && ipr_dual_ioa_raid)
8695 timeout = IPR_DUAL_IOA_ABBR_SHUTDOWN_TO;
1da177e4 8696 else
ac09c349 8697 timeout = IPR_ABBREV_SHUTDOWN_TIMEOUT;
1da177e4
LT
8698
8699 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, timeout);
8700
8701 rc = IPR_RC_JOB_RETURN;
8702 ipr_cmd->job_step = ipr_reset_ucode_download;
8703 } else
8704 ipr_cmd->job_step = ipr_reset_alert;
8705
8706 LEAVE;
8707 return rc;
8708}
8709
8710/**
8711 * ipr_reset_ioa_job - Adapter reset job
8712 * @ipr_cmd: ipr command struct
8713 *
8714 * Description: This function is the job router for the adapter reset job.
8715 *
8716 * Return value:
8717 * none
8718 **/
8719static void ipr_reset_ioa_job(struct ipr_cmnd *ipr_cmd)
8720{
8721 u32 rc, ioasc;
1da177e4
LT
8722 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8723
8724 do {
96d21f00 8725 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
1da177e4
LT
8726
8727 if (ioa_cfg->reset_cmd != ipr_cmd) {
8728 /*
8729 * We are doing nested adapter resets and this is
8730 * not the current reset job.
8731 */
05a6538a 8732 list_add_tail(&ipr_cmd->queue,
8733 &ipr_cmd->hrrq->hrrq_free_q);
1da177e4
LT
8734 return;
8735 }
8736
8737 if (IPR_IOASC_SENSE_KEY(ioasc)) {
dfed823e
BK
8738 rc = ipr_cmd->job_step_failed(ipr_cmd);
8739 if (rc == IPR_RC_JOB_RETURN)
8740 return;
1da177e4
LT
8741 }
8742
8743 ipr_reinit_ipr_cmnd(ipr_cmd);
dfed823e 8744 ipr_cmd->job_step_failed = ipr_reset_cmd_failed;
1da177e4 8745 rc = ipr_cmd->job_step(ipr_cmd);
203fa3fe 8746 } while (rc == IPR_RC_JOB_CONTINUE);
1da177e4
LT
8747}
8748
8749/**
8750 * _ipr_initiate_ioa_reset - Initiate an adapter reset
8751 * @ioa_cfg: ioa config struct
8752 * @job_step: first job step of reset job
8753 * @shutdown_type: shutdown type
8754 *
8755 * Description: This function will initiate the reset of the given adapter
8756 * starting at the selected job step.
8757 * If the caller needs to wait on the completion of the reset,
8758 * the caller must sleep on the reset_wait_q.
8759 *
8760 * Return value:
8761 * none
8762 **/
8763static void _ipr_initiate_ioa_reset(struct ipr_ioa_cfg *ioa_cfg,
8764 int (*job_step) (struct ipr_cmnd *),
8765 enum ipr_shutdown_type shutdown_type)
8766{
8767 struct ipr_cmnd *ipr_cmd;
56d6aa33 8768 int i;
1da177e4
LT
8769
8770 ioa_cfg->in_reset_reload = 1;
56d6aa33 8771 for (i = 0; i < ioa_cfg->hrrq_num; i++) {
8772 spin_lock(&ioa_cfg->hrrq[i]._lock);
8773 ioa_cfg->hrrq[i].allow_cmds = 0;
8774 spin_unlock(&ioa_cfg->hrrq[i]._lock);
8775 }
8776 wmb();
bfae7820
BK
8777 if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].removing_ioa)
8778 scsi_block_requests(ioa_cfg->host);
1da177e4
LT
8779
8780 ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
8781 ioa_cfg->reset_cmd = ipr_cmd;
8782 ipr_cmd->job_step = job_step;
8783 ipr_cmd->u.shutdown_type = shutdown_type;
8784
8785 ipr_reset_ioa_job(ipr_cmd);
8786}
8787
8788/**
8789 * ipr_initiate_ioa_reset - Initiate an adapter reset
8790 * @ioa_cfg: ioa config struct
8791 * @shutdown_type: shutdown type
8792 *
8793 * Description: This function will initiate the reset of the given adapter.
8794 * If the caller needs to wait on the completion of the reset,
8795 * the caller must sleep on the reset_wait_q.
8796 *
8797 * Return value:
8798 * none
8799 **/
8800static void ipr_initiate_ioa_reset(struct ipr_ioa_cfg *ioa_cfg,
8801 enum ipr_shutdown_type shutdown_type)
8802{
56d6aa33 8803 int i;
8804
8805 if (ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead)
1da177e4
LT
8806 return;
8807
41e9a696
BK
8808 if (ioa_cfg->in_reset_reload) {
8809 if (ioa_cfg->sdt_state == GET_DUMP)
8810 ioa_cfg->sdt_state = WAIT_FOR_DUMP;
8811 else if (ioa_cfg->sdt_state == READ_DUMP)
8812 ioa_cfg->sdt_state = ABORT_DUMP;
8813 }
1da177e4
LT
8814
8815 if (ioa_cfg->reset_retries++ >= IPR_NUM_RESET_RELOAD_RETRIES) {
8816 dev_err(&ioa_cfg->pdev->dev,
8817 "IOA taken offline - error recovery failed\n");
8818
8819 ioa_cfg->reset_retries = 0;
56d6aa33 8820 for (i = 0; i < ioa_cfg->hrrq_num; i++) {
8821 spin_lock(&ioa_cfg->hrrq[i]._lock);
8822 ioa_cfg->hrrq[i].ioa_is_dead = 1;
8823 spin_unlock(&ioa_cfg->hrrq[i]._lock);
8824 }
8825 wmb();
1da177e4
LT
8826
8827 if (ioa_cfg->in_ioa_bringdown) {
8828 ioa_cfg->reset_cmd = NULL;
8829 ioa_cfg->in_reset_reload = 0;
8830 ipr_fail_all_ops(ioa_cfg);
8831 wake_up_all(&ioa_cfg->reset_wait_q);
8832
bfae7820
BK
8833 if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].removing_ioa) {
8834 spin_unlock_irq(ioa_cfg->host->host_lock);
8835 scsi_unblock_requests(ioa_cfg->host);
8836 spin_lock_irq(ioa_cfg->host->host_lock);
8837 }
1da177e4
LT
8838 return;
8839 } else {
8840 ioa_cfg->in_ioa_bringdown = 1;
8841 shutdown_type = IPR_SHUTDOWN_NONE;
8842 }
8843 }
8844
8845 _ipr_initiate_ioa_reset(ioa_cfg, ipr_reset_shutdown_ioa,
8846 shutdown_type);
8847}
8848
f8a88b19
LV
8849/**
8850 * ipr_reset_freeze - Hold off all I/O activity
8851 * @ipr_cmd: ipr command struct
8852 *
8853 * Description: If the PCI slot is frozen, hold off all I/O
8854 * activity; then, as soon as the slot is available again,
8855 * initiate an adapter reset.
8856 */
8857static int ipr_reset_freeze(struct ipr_cmnd *ipr_cmd)
8858{
56d6aa33 8859 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8860 int i;
8861
f8a88b19 8862 /* Disallow new interrupts, avoid loop */
56d6aa33 8863 for (i = 0; i < ioa_cfg->hrrq_num; i++) {
8864 spin_lock(&ioa_cfg->hrrq[i]._lock);
8865 ioa_cfg->hrrq[i].allow_interrupts = 0;
8866 spin_unlock(&ioa_cfg->hrrq[i]._lock);
8867 }
8868 wmb();
05a6538a 8869 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_pending_q);
f8a88b19
LV
8870 ipr_cmd->done = ipr_reset_ioa_job;
8871 return IPR_RC_JOB_RETURN;
8872}
8873
6270e593
BK
8874/**
8875 * ipr_pci_mmio_enabled - Called when MMIO has been re-enabled
8876 * @pdev: PCI device struct
8877 *
8878 * Description: This routine is called to tell us that the MMIO
8879 * access to the IOA has been restored
8880 */
8881static pci_ers_result_t ipr_pci_mmio_enabled(struct pci_dev *pdev)
8882{
8883 unsigned long flags = 0;
8884 struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
8885
8886 spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
8887 if (!ioa_cfg->probe_done)
8888 pci_save_state(pdev);
8889 spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
8890 return PCI_ERS_RESULT_NEED_RESET;
8891}
8892
f8a88b19
LV
8893/**
8894 * ipr_pci_frozen - Called when slot has experienced a PCI bus error.
8895 * @pdev: PCI device struct
8896 *
8897 * Description: This routine is called to tell us that the PCI bus
8898 * is down. Can't do anything here, except put the device driver
8899 * into a holding pattern, waiting for the PCI bus to come back.
8900 */
8901static void ipr_pci_frozen(struct pci_dev *pdev)
8902{
8903 unsigned long flags = 0;
8904 struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
8905
8906 spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
6270e593
BK
8907 if (ioa_cfg->probe_done)
8908 _ipr_initiate_ioa_reset(ioa_cfg, ipr_reset_freeze, IPR_SHUTDOWN_NONE);
f8a88b19
LV
8909 spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
8910}
8911
8912/**
8913 * ipr_pci_slot_reset - Called when PCI slot has been reset.
8914 * @pdev: PCI device struct
8915 *
8916 * Description: This routine is called by the pci error recovery
8917 * code after the PCI slot has been reset, just before we
8918 * should resume normal operations.
8919 */
8920static pci_ers_result_t ipr_pci_slot_reset(struct pci_dev *pdev)
8921{
8922 unsigned long flags = 0;
8923 struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
8924
8925 spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
6270e593
BK
8926 if (ioa_cfg->probe_done) {
8927 if (ioa_cfg->needs_warm_reset)
8928 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
8929 else
8930 _ipr_initiate_ioa_reset(ioa_cfg, ipr_reset_restore_cfg_space,
8931 IPR_SHUTDOWN_NONE);
8932 } else
8933 wake_up_all(&ioa_cfg->eeh_wait_q);
f8a88b19
LV
8934 spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
8935 return PCI_ERS_RESULT_RECOVERED;
8936}
8937
8938/**
8939 * ipr_pci_perm_failure - Called when PCI slot is dead for good.
8940 * @pdev: PCI device struct
8941 *
8942 * Description: This routine is called when the PCI bus has
8943 * permanently failed.
8944 */
8945static void ipr_pci_perm_failure(struct pci_dev *pdev)
8946{
8947 unsigned long flags = 0;
8948 struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
56d6aa33 8949 int i;
f8a88b19
LV
8950
8951 spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
6270e593
BK
8952 if (ioa_cfg->probe_done) {
8953 if (ioa_cfg->sdt_state == WAIT_FOR_DUMP)
8954 ioa_cfg->sdt_state = ABORT_DUMP;
8955 ioa_cfg->reset_retries = IPR_NUM_RESET_RELOAD_RETRIES - 1;
8956 ioa_cfg->in_ioa_bringdown = 1;
8957 for (i = 0; i < ioa_cfg->hrrq_num; i++) {
8958 spin_lock(&ioa_cfg->hrrq[i]._lock);
8959 ioa_cfg->hrrq[i].allow_cmds = 0;
8960 spin_unlock(&ioa_cfg->hrrq[i]._lock);
8961 }
8962 wmb();
8963 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
8964 } else
8965 wake_up_all(&ioa_cfg->eeh_wait_q);
f8a88b19
LV
8966 spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
8967}
8968
8969/**
8970 * ipr_pci_error_detected - Called when a PCI error is detected.
8971 * @pdev: PCI device struct
8972 * @state: PCI channel state
8973 *
8974 * Description: Called when a PCI error is detected.
8975 *
8976 * Return value:
8977 * PCI_ERS_RESULT_NEED_RESET or PCI_ERS_RESULT_DISCONNECT
8978 */
8979static pci_ers_result_t ipr_pci_error_detected(struct pci_dev *pdev,
8980 pci_channel_state_t state)
8981{
8982 switch (state) {
8983 case pci_channel_io_frozen:
8984 ipr_pci_frozen(pdev);
6270e593 8985 return PCI_ERS_RESULT_CAN_RECOVER;
f8a88b19
LV
8986 case pci_channel_io_perm_failure:
8987 ipr_pci_perm_failure(pdev);
8988 return PCI_ERS_RESULT_DISCONNECT;
8989 break;
8990 default:
8991 break;
8992 }
8993 return PCI_ERS_RESULT_NEED_RESET;
8994}
8995
1da177e4
LT
8996/**
8997 * ipr_probe_ioa_part2 - Initializes IOAs found in ipr_probe_ioa(..)
8998 * @ioa_cfg: ioa cfg struct
8999 *
9000 * Description: This is the second phase of adapter intialization
9001 * This function takes care of initilizing the adapter to the point
9002 * where it can accept new commands.
9003
9004 * Return value:
b1c11812 9005 * 0 on success / -EIO on failure
1da177e4 9006 **/
6f039790 9007static int ipr_probe_ioa_part2(struct ipr_ioa_cfg *ioa_cfg)
1da177e4
LT
9008{
9009 int rc = 0;
9010 unsigned long host_lock_flags = 0;
9011
9012 ENTER;
9013 spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);
9014 dev_dbg(&ioa_cfg->pdev->dev, "ioa_cfg adx: 0x%p\n", ioa_cfg);
6270e593 9015 ioa_cfg->probe_done = 1;
ce155cce
BK
9016 if (ioa_cfg->needs_hard_reset) {
9017 ioa_cfg->needs_hard_reset = 0;
9018 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
9019 } else
9020 _ipr_initiate_ioa_reset(ioa_cfg, ipr_reset_enable_ioa,
9021 IPR_SHUTDOWN_NONE);
1da177e4 9022 spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);
1da177e4
LT
9023
9024 LEAVE;
9025 return rc;
9026}
9027
9028/**
9029 * ipr_free_cmd_blks - Frees command blocks allocated for an adapter
9030 * @ioa_cfg: ioa config struct
9031 *
9032 * Return value:
9033 * none
9034 **/
9035static void ipr_free_cmd_blks(struct ipr_ioa_cfg *ioa_cfg)
9036{
9037 int i;
9038
9039 for (i = 0; i < IPR_NUM_CMD_BLKS; i++) {
9040 if (ioa_cfg->ipr_cmnd_list[i])
d73341bf 9041 dma_pool_free(ioa_cfg->ipr_cmd_pool,
1da177e4
LT
9042 ioa_cfg->ipr_cmnd_list[i],
9043 ioa_cfg->ipr_cmnd_list_dma[i]);
9044
9045 ioa_cfg->ipr_cmnd_list[i] = NULL;
9046 }
9047
9048 if (ioa_cfg->ipr_cmd_pool)
d73341bf 9049 dma_pool_destroy(ioa_cfg->ipr_cmd_pool);
1da177e4 9050
89aad428
BK
9051 kfree(ioa_cfg->ipr_cmnd_list);
9052 kfree(ioa_cfg->ipr_cmnd_list_dma);
9053 ioa_cfg->ipr_cmnd_list = NULL;
9054 ioa_cfg->ipr_cmnd_list_dma = NULL;
1da177e4
LT
9055 ioa_cfg->ipr_cmd_pool = NULL;
9056}
9057
9058/**
9059 * ipr_free_mem - Frees memory allocated for an adapter
9060 * @ioa_cfg: ioa cfg struct
9061 *
9062 * Return value:
9063 * nothing
9064 **/
9065static void ipr_free_mem(struct ipr_ioa_cfg *ioa_cfg)
9066{
9067 int i;
9068
9069 kfree(ioa_cfg->res_entries);
d73341bf
AB
9070 dma_free_coherent(&ioa_cfg->pdev->dev, sizeof(struct ipr_misc_cbs),
9071 ioa_cfg->vpd_cbs, ioa_cfg->vpd_cbs_dma);
1da177e4 9072 ipr_free_cmd_blks(ioa_cfg);
05a6538a 9073
9074 for (i = 0; i < ioa_cfg->hrrq_num; i++)
d73341bf
AB
9075 dma_free_coherent(&ioa_cfg->pdev->dev,
9076 sizeof(u32) * ioa_cfg->hrrq[i].size,
9077 ioa_cfg->hrrq[i].host_rrq,
9078 ioa_cfg->hrrq[i].host_rrq_dma);
05a6538a 9079
d73341bf
AB
9080 dma_free_coherent(&ioa_cfg->pdev->dev, ioa_cfg->cfg_table_size,
9081 ioa_cfg->u.cfg_table, ioa_cfg->cfg_table_dma);
1da177e4
LT
9082
9083 for (i = 0; i < IPR_NUM_HCAMS; i++) {
d73341bf
AB
9084 dma_free_coherent(&ioa_cfg->pdev->dev,
9085 sizeof(struct ipr_hostrcb),
9086 ioa_cfg->hostrcb[i],
9087 ioa_cfg->hostrcb_dma[i]);
1da177e4
LT
9088 }
9089
9090 ipr_free_dump(ioa_cfg);
1da177e4
LT
9091 kfree(ioa_cfg->trace);
9092}
9093
9094/**
9095 * ipr_free_all_resources - Free all allocated resources for an adapter.
9096 * @ipr_cmd: ipr command struct
9097 *
9098 * This function frees all allocated resources for the
9099 * specified adapter.
9100 *
9101 * Return value:
9102 * none
9103 **/
9104static void ipr_free_all_resources(struct ipr_ioa_cfg *ioa_cfg)
9105{
9106 struct pci_dev *pdev = ioa_cfg->pdev;
9107
9108 ENTER;
05a6538a 9109 if (ioa_cfg->intr_flag == IPR_USE_MSI ||
9110 ioa_cfg->intr_flag == IPR_USE_MSIX) {
9111 int i;
9112 for (i = 0; i < ioa_cfg->nvectors; i++)
9113 free_irq(ioa_cfg->vectors_info[i].vec,
9114 &ioa_cfg->hrrq[i]);
9115 } else
9116 free_irq(pdev->irq, &ioa_cfg->hrrq[0]);
9117
56d6aa33 9118 if (ioa_cfg->intr_flag == IPR_USE_MSI) {
05a6538a 9119 pci_disable_msi(pdev);
56d6aa33 9120 ioa_cfg->intr_flag &= ~IPR_USE_MSI;
9121 } else if (ioa_cfg->intr_flag == IPR_USE_MSIX) {
05a6538a 9122 pci_disable_msix(pdev);
56d6aa33 9123 ioa_cfg->intr_flag &= ~IPR_USE_MSIX;
9124 }
05a6538a 9125
1da177e4
LT
9126 iounmap(ioa_cfg->hdw_dma_regs);
9127 pci_release_regions(pdev);
9128 ipr_free_mem(ioa_cfg);
9129 scsi_host_put(ioa_cfg->host);
9130 pci_disable_device(pdev);
9131 LEAVE;
9132}
9133
9134/**
9135 * ipr_alloc_cmd_blks - Allocate command blocks for an adapter
9136 * @ioa_cfg: ioa config struct
9137 *
9138 * Return value:
9139 * 0 on success / -ENOMEM on allocation failure
9140 **/
6f039790 9141static int ipr_alloc_cmd_blks(struct ipr_ioa_cfg *ioa_cfg)
1da177e4
LT
9142{
9143 struct ipr_cmnd *ipr_cmd;
9144 struct ipr_ioarcb *ioarcb;
9145 dma_addr_t dma_addr;
05a6538a 9146 int i, entries_each_hrrq, hrrq_id = 0;
1da177e4 9147
d73341bf 9148 ioa_cfg->ipr_cmd_pool = dma_pool_create(IPR_NAME, &ioa_cfg->pdev->dev,
203fa3fe 9149 sizeof(struct ipr_cmnd), 512, 0);
1da177e4
LT
9150
9151 if (!ioa_cfg->ipr_cmd_pool)
9152 return -ENOMEM;
9153
89aad428
BK
9154 ioa_cfg->ipr_cmnd_list = kcalloc(IPR_NUM_CMD_BLKS, sizeof(struct ipr_cmnd *), GFP_KERNEL);
9155 ioa_cfg->ipr_cmnd_list_dma = kcalloc(IPR_NUM_CMD_BLKS, sizeof(dma_addr_t), GFP_KERNEL);
9156
9157 if (!ioa_cfg->ipr_cmnd_list || !ioa_cfg->ipr_cmnd_list_dma) {
9158 ipr_free_cmd_blks(ioa_cfg);
9159 return -ENOMEM;
9160 }
9161
05a6538a 9162 for (i = 0; i < ioa_cfg->hrrq_num; i++) {
9163 if (ioa_cfg->hrrq_num > 1) {
9164 if (i == 0) {
9165 entries_each_hrrq = IPR_NUM_INTERNAL_CMD_BLKS;
9166 ioa_cfg->hrrq[i].min_cmd_id = 0;
9167 ioa_cfg->hrrq[i].max_cmd_id =
9168 (entries_each_hrrq - 1);
9169 } else {
9170 entries_each_hrrq =
9171 IPR_NUM_BASE_CMD_BLKS/
9172 (ioa_cfg->hrrq_num - 1);
9173 ioa_cfg->hrrq[i].min_cmd_id =
9174 IPR_NUM_INTERNAL_CMD_BLKS +
9175 (i - 1) * entries_each_hrrq;
9176 ioa_cfg->hrrq[i].max_cmd_id =
9177 (IPR_NUM_INTERNAL_CMD_BLKS +
9178 i * entries_each_hrrq - 1);
9179 }
9180 } else {
9181 entries_each_hrrq = IPR_NUM_CMD_BLKS;
9182 ioa_cfg->hrrq[i].min_cmd_id = 0;
9183 ioa_cfg->hrrq[i].max_cmd_id = (entries_each_hrrq - 1);
9184 }
9185 ioa_cfg->hrrq[i].size = entries_each_hrrq;
9186 }
9187
9188 BUG_ON(ioa_cfg->hrrq_num == 0);
9189
9190 i = IPR_NUM_CMD_BLKS -
9191 ioa_cfg->hrrq[ioa_cfg->hrrq_num - 1].max_cmd_id - 1;
9192 if (i > 0) {
9193 ioa_cfg->hrrq[ioa_cfg->hrrq_num - 1].size += i;
9194 ioa_cfg->hrrq[ioa_cfg->hrrq_num - 1].max_cmd_id += i;
9195 }
9196
1da177e4 9197 for (i = 0; i < IPR_NUM_CMD_BLKS; i++) {
d73341bf 9198 ipr_cmd = dma_pool_alloc(ioa_cfg->ipr_cmd_pool, GFP_KERNEL, &dma_addr);
1da177e4
LT
9199
9200 if (!ipr_cmd) {
9201 ipr_free_cmd_blks(ioa_cfg);
9202 return -ENOMEM;
9203 }
9204
9205 memset(ipr_cmd, 0, sizeof(*ipr_cmd));
9206 ioa_cfg->ipr_cmnd_list[i] = ipr_cmd;
9207 ioa_cfg->ipr_cmnd_list_dma[i] = dma_addr;
9208
9209 ioarcb = &ipr_cmd->ioarcb;
a32c055f
WB
9210 ipr_cmd->dma_addr = dma_addr;
9211 if (ioa_cfg->sis64)
9212 ioarcb->a.ioarcb_host_pci_addr64 = cpu_to_be64(dma_addr);
9213 else
9214 ioarcb->a.ioarcb_host_pci_addr = cpu_to_be32(dma_addr);
9215
1da177e4 9216 ioarcb->host_response_handle = cpu_to_be32(i << 2);
a32c055f
WB
9217 if (ioa_cfg->sis64) {
9218 ioarcb->u.sis64_addr_data.data_ioadl_addr =
9219 cpu_to_be64(dma_addr + offsetof(struct ipr_cmnd, i.ioadl64));
9220 ioarcb->u.sis64_addr_data.ioasa_host_pci_addr =
96d21f00 9221 cpu_to_be64(dma_addr + offsetof(struct ipr_cmnd, s.ioasa64));
a32c055f
WB
9222 } else {
9223 ioarcb->write_ioadl_addr =
9224 cpu_to_be32(dma_addr + offsetof(struct ipr_cmnd, i.ioadl));
9225 ioarcb->read_ioadl_addr = ioarcb->write_ioadl_addr;
9226 ioarcb->ioasa_host_pci_addr =
96d21f00 9227 cpu_to_be32(dma_addr + offsetof(struct ipr_cmnd, s.ioasa));
a32c055f 9228 }
1da177e4
LT
9229 ioarcb->ioasa_len = cpu_to_be16(sizeof(struct ipr_ioasa));
9230 ipr_cmd->cmd_index = i;
9231 ipr_cmd->ioa_cfg = ioa_cfg;
9232 ipr_cmd->sense_buffer_dma = dma_addr +
9233 offsetof(struct ipr_cmnd, sense_buffer);
9234
05a6538a 9235 ipr_cmd->ioarcb.cmd_pkt.hrrq_id = hrrq_id;
9236 ipr_cmd->hrrq = &ioa_cfg->hrrq[hrrq_id];
9237 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
9238 if (i >= ioa_cfg->hrrq[hrrq_id].max_cmd_id)
9239 hrrq_id++;
1da177e4
LT
9240 }
9241
9242 return 0;
9243}
9244
9245/**
9246 * ipr_alloc_mem - Allocate memory for an adapter
9247 * @ioa_cfg: ioa config struct
9248 *
9249 * Return value:
9250 * 0 on success / non-zero for error
9251 **/
6f039790 9252static int ipr_alloc_mem(struct ipr_ioa_cfg *ioa_cfg)
1da177e4
LT
9253{
9254 struct pci_dev *pdev = ioa_cfg->pdev;
9255 int i, rc = -ENOMEM;
9256
9257 ENTER;
0bc42e35 9258 ioa_cfg->res_entries = kzalloc(sizeof(struct ipr_resource_entry) *
3e7ebdfa 9259 ioa_cfg->max_devs_supported, GFP_KERNEL);
1da177e4
LT
9260
9261 if (!ioa_cfg->res_entries)
9262 goto out;
9263
3e7ebdfa 9264 for (i = 0; i < ioa_cfg->max_devs_supported; i++) {
1da177e4 9265 list_add_tail(&ioa_cfg->res_entries[i].queue, &ioa_cfg->free_res_q);
3e7ebdfa
WB
9266 ioa_cfg->res_entries[i].ioa_cfg = ioa_cfg;
9267 }
1da177e4 9268
d73341bf
AB
9269 ioa_cfg->vpd_cbs = dma_alloc_coherent(&pdev->dev,
9270 sizeof(struct ipr_misc_cbs),
9271 &ioa_cfg->vpd_cbs_dma,
9272 GFP_KERNEL);
1da177e4
LT
9273
9274 if (!ioa_cfg->vpd_cbs)
9275 goto out_free_res_entries;
9276
9277 if (ipr_alloc_cmd_blks(ioa_cfg))
9278 goto out_free_vpd_cbs;
9279
05a6538a 9280 for (i = 0; i < ioa_cfg->hrrq_num; i++) {
d73341bf 9281 ioa_cfg->hrrq[i].host_rrq = dma_alloc_coherent(&pdev->dev,
05a6538a 9282 sizeof(u32) * ioa_cfg->hrrq[i].size,
d73341bf
AB
9283 &ioa_cfg->hrrq[i].host_rrq_dma,
9284 GFP_KERNEL);
05a6538a 9285
9286 if (!ioa_cfg->hrrq[i].host_rrq) {
9287 while (--i > 0)
d73341bf 9288 dma_free_coherent(&pdev->dev,
05a6538a 9289 sizeof(u32) * ioa_cfg->hrrq[i].size,
9290 ioa_cfg->hrrq[i].host_rrq,
9291 ioa_cfg->hrrq[i].host_rrq_dma);
9292 goto out_ipr_free_cmd_blocks;
9293 }
9294 ioa_cfg->hrrq[i].ioa_cfg = ioa_cfg;
9295 }
1da177e4 9296
d73341bf
AB
9297 ioa_cfg->u.cfg_table = dma_alloc_coherent(&pdev->dev,
9298 ioa_cfg->cfg_table_size,
9299 &ioa_cfg->cfg_table_dma,
9300 GFP_KERNEL);
1da177e4 9301
3e7ebdfa 9302 if (!ioa_cfg->u.cfg_table)
1da177e4
LT
9303 goto out_free_host_rrq;
9304
9305 for (i = 0; i < IPR_NUM_HCAMS; i++) {
d73341bf
AB
9306 ioa_cfg->hostrcb[i] = dma_alloc_coherent(&pdev->dev,
9307 sizeof(struct ipr_hostrcb),
9308 &ioa_cfg->hostrcb_dma[i],
9309 GFP_KERNEL);
1da177e4
LT
9310
9311 if (!ioa_cfg->hostrcb[i])
9312 goto out_free_hostrcb_dma;
9313
9314 ioa_cfg->hostrcb[i]->hostrcb_dma =
9315 ioa_cfg->hostrcb_dma[i] + offsetof(struct ipr_hostrcb, hcam);
49dc6a18 9316 ioa_cfg->hostrcb[i]->ioa_cfg = ioa_cfg;
1da177e4
LT
9317 list_add_tail(&ioa_cfg->hostrcb[i]->queue, &ioa_cfg->hostrcb_free_q);
9318 }
9319
0bc42e35 9320 ioa_cfg->trace = kzalloc(sizeof(struct ipr_trace_entry) *
1da177e4
LT
9321 IPR_NUM_TRACE_ENTRIES, GFP_KERNEL);
9322
9323 if (!ioa_cfg->trace)
9324 goto out_free_hostrcb_dma;
9325
1da177e4
LT
9326 rc = 0;
9327out:
9328 LEAVE;
9329 return rc;
9330
9331out_free_hostrcb_dma:
9332 while (i-- > 0) {
d73341bf
AB
9333 dma_free_coherent(&pdev->dev, sizeof(struct ipr_hostrcb),
9334 ioa_cfg->hostrcb[i],
9335 ioa_cfg->hostrcb_dma[i]);
1da177e4 9336 }
d73341bf
AB
9337 dma_free_coherent(&pdev->dev, ioa_cfg->cfg_table_size,
9338 ioa_cfg->u.cfg_table, ioa_cfg->cfg_table_dma);
1da177e4 9339out_free_host_rrq:
05a6538a 9340 for (i = 0; i < ioa_cfg->hrrq_num; i++) {
d73341bf
AB
9341 dma_free_coherent(&pdev->dev,
9342 sizeof(u32) * ioa_cfg->hrrq[i].size,
9343 ioa_cfg->hrrq[i].host_rrq,
9344 ioa_cfg->hrrq[i].host_rrq_dma);
05a6538a 9345 }
1da177e4
LT
9346out_ipr_free_cmd_blocks:
9347 ipr_free_cmd_blks(ioa_cfg);
9348out_free_vpd_cbs:
d73341bf
AB
9349 dma_free_coherent(&pdev->dev, sizeof(struct ipr_misc_cbs),
9350 ioa_cfg->vpd_cbs, ioa_cfg->vpd_cbs_dma);
1da177e4
LT
9351out_free_res_entries:
9352 kfree(ioa_cfg->res_entries);
9353 goto out;
9354}
9355
9356/**
9357 * ipr_initialize_bus_attr - Initialize SCSI bus attributes to default values
9358 * @ioa_cfg: ioa config struct
9359 *
9360 * Return value:
9361 * none
9362 **/
6f039790 9363static void ipr_initialize_bus_attr(struct ipr_ioa_cfg *ioa_cfg)
1da177e4
LT
9364{
9365 int i;
9366
9367 for (i = 0; i < IPR_MAX_NUM_BUSES; i++) {
9368 ioa_cfg->bus_attr[i].bus = i;
9369 ioa_cfg->bus_attr[i].qas_enabled = 0;
9370 ioa_cfg->bus_attr[i].bus_width = IPR_DEFAULT_BUS_WIDTH;
9371 if (ipr_max_speed < ARRAY_SIZE(ipr_max_bus_speeds))
9372 ioa_cfg->bus_attr[i].max_xfer_rate = ipr_max_bus_speeds[ipr_max_speed];
9373 else
9374 ioa_cfg->bus_attr[i].max_xfer_rate = IPR_U160_SCSI_RATE;
9375 }
9376}
9377
6270e593
BK
9378/**
9379 * ipr_init_regs - Initialize IOA registers
9380 * @ioa_cfg: ioa config struct
9381 *
9382 * Return value:
9383 * none
9384 **/
9385static void ipr_init_regs(struct ipr_ioa_cfg *ioa_cfg)
9386{
9387 const struct ipr_interrupt_offsets *p;
9388 struct ipr_interrupts *t;
9389 void __iomem *base;
9390
9391 p = &ioa_cfg->chip_cfg->regs;
9392 t = &ioa_cfg->regs;
9393 base = ioa_cfg->hdw_dma_regs;
9394
9395 t->set_interrupt_mask_reg = base + p->set_interrupt_mask_reg;
9396 t->clr_interrupt_mask_reg = base + p->clr_interrupt_mask_reg;
9397 t->clr_interrupt_mask_reg32 = base + p->clr_interrupt_mask_reg32;
9398 t->sense_interrupt_mask_reg = base + p->sense_interrupt_mask_reg;
9399 t->sense_interrupt_mask_reg32 = base + p->sense_interrupt_mask_reg32;
9400 t->clr_interrupt_reg = base + p->clr_interrupt_reg;
9401 t->clr_interrupt_reg32 = base + p->clr_interrupt_reg32;
9402 t->sense_interrupt_reg = base + p->sense_interrupt_reg;
9403 t->sense_interrupt_reg32 = base + p->sense_interrupt_reg32;
9404 t->ioarrin_reg = base + p->ioarrin_reg;
9405 t->sense_uproc_interrupt_reg = base + p->sense_uproc_interrupt_reg;
9406 t->sense_uproc_interrupt_reg32 = base + p->sense_uproc_interrupt_reg32;
9407 t->set_uproc_interrupt_reg = base + p->set_uproc_interrupt_reg;
9408 t->set_uproc_interrupt_reg32 = base + p->set_uproc_interrupt_reg32;
9409 t->clr_uproc_interrupt_reg = base + p->clr_uproc_interrupt_reg;
9410 t->clr_uproc_interrupt_reg32 = base + p->clr_uproc_interrupt_reg32;
9411
9412 if (ioa_cfg->sis64) {
9413 t->init_feedback_reg = base + p->init_feedback_reg;
9414 t->dump_addr_reg = base + p->dump_addr_reg;
9415 t->dump_data_reg = base + p->dump_data_reg;
9416 t->endian_swap_reg = base + p->endian_swap_reg;
9417 }
9418}
9419
1da177e4
LT
9420/**
9421 * ipr_init_ioa_cfg - Initialize IOA config struct
9422 * @ioa_cfg: ioa config struct
9423 * @host: scsi host struct
9424 * @pdev: PCI dev struct
9425 *
9426 * Return value:
9427 * none
9428 **/
6f039790
GKH
9429static void ipr_init_ioa_cfg(struct ipr_ioa_cfg *ioa_cfg,
9430 struct Scsi_Host *host, struct pci_dev *pdev)
1da177e4 9431{
6270e593 9432 int i;
1da177e4
LT
9433
9434 ioa_cfg->host = host;
9435 ioa_cfg->pdev = pdev;
9436 ioa_cfg->log_level = ipr_log_level;
3d1d0da6 9437 ioa_cfg->doorbell = IPR_DOORBELL;
1da177e4
LT
9438 sprintf(ioa_cfg->eye_catcher, IPR_EYECATCHER);
9439 sprintf(ioa_cfg->trace_start, IPR_TRACE_START_LABEL);
1da177e4
LT
9440 sprintf(ioa_cfg->cfg_table_start, IPR_CFG_TBL_START);
9441 sprintf(ioa_cfg->resource_table_label, IPR_RES_TABLE_LABEL);
9442 sprintf(ioa_cfg->ipr_hcam_label, IPR_HCAM_LABEL);
9443 sprintf(ioa_cfg->ipr_cmd_label, IPR_CMD_LABEL);
9444
1da177e4
LT
9445 INIT_LIST_HEAD(&ioa_cfg->hostrcb_free_q);
9446 INIT_LIST_HEAD(&ioa_cfg->hostrcb_pending_q);
9447 INIT_LIST_HEAD(&ioa_cfg->free_res_q);
9448 INIT_LIST_HEAD(&ioa_cfg->used_res_q);
c4028958 9449 INIT_WORK(&ioa_cfg->work_q, ipr_worker_thread);
1da177e4 9450 init_waitqueue_head(&ioa_cfg->reset_wait_q);
95fecd90 9451 init_waitqueue_head(&ioa_cfg->msi_wait_q);
6270e593 9452 init_waitqueue_head(&ioa_cfg->eeh_wait_q);
1da177e4
LT
9453 ioa_cfg->sdt_state = INACTIVE;
9454
9455 ipr_initialize_bus_attr(ioa_cfg);
3e7ebdfa 9456 ioa_cfg->max_devs_supported = ipr_max_devs;
1da177e4 9457
3e7ebdfa
WB
9458 if (ioa_cfg->sis64) {
9459 host->max_id = IPR_MAX_SIS64_TARGETS_PER_BUS;
9460 host->max_lun = IPR_MAX_SIS64_LUNS_PER_TARGET;
9461 if (ipr_max_devs > IPR_MAX_SIS64_DEVS)
9462 ioa_cfg->max_devs_supported = IPR_MAX_SIS64_DEVS;
6270e593
BK
9463 ioa_cfg->cfg_table_size = (sizeof(struct ipr_config_table_hdr64)
9464 + ((sizeof(struct ipr_config_table_entry64)
9465 * ioa_cfg->max_devs_supported)));
3e7ebdfa
WB
9466 } else {
9467 host->max_id = IPR_MAX_NUM_TARGETS_PER_BUS;
9468 host->max_lun = IPR_MAX_NUM_LUNS_PER_TARGET;
9469 if (ipr_max_devs > IPR_MAX_PHYSICAL_DEVS)
9470 ioa_cfg->max_devs_supported = IPR_MAX_PHYSICAL_DEVS;
6270e593
BK
9471 ioa_cfg->cfg_table_size = (sizeof(struct ipr_config_table_hdr)
9472 + ((sizeof(struct ipr_config_table_entry)
9473 * ioa_cfg->max_devs_supported)));
3e7ebdfa 9474 }
6270e593 9475
f688f96d 9476 host->max_channel = IPR_VSET_BUS;
1da177e4
LT
9477 host->unique_id = host->host_no;
9478 host->max_cmd_len = IPR_MAX_CDB_LEN;
89aad428 9479 host->can_queue = ioa_cfg->max_cmds;
1da177e4
LT
9480 pci_set_drvdata(pdev, ioa_cfg);
9481
6270e593
BK
9482 for (i = 0; i < ARRAY_SIZE(ioa_cfg->hrrq); i++) {
9483 INIT_LIST_HEAD(&ioa_cfg->hrrq[i].hrrq_free_q);
9484 INIT_LIST_HEAD(&ioa_cfg->hrrq[i].hrrq_pending_q);
9485 spin_lock_init(&ioa_cfg->hrrq[i]._lock);
9486 if (i == 0)
9487 ioa_cfg->hrrq[i].lock = ioa_cfg->host->host_lock;
9488 else
9489 ioa_cfg->hrrq[i].lock = &ioa_cfg->hrrq[i]._lock;
dcbad00e 9490 }
1da177e4
LT
9491}
9492
9493/**
1be7bd82 9494 * ipr_get_chip_info - Find adapter chip information
1da177e4
LT
9495 * @dev_id: PCI device id struct
9496 *
9497 * Return value:
1be7bd82 9498 * ptr to chip information on success / NULL on failure
1da177e4 9499 **/
6f039790 9500static const struct ipr_chip_t *
1be7bd82 9501ipr_get_chip_info(const struct pci_device_id *dev_id)
1da177e4
LT
9502{
9503 int i;
9504
1da177e4
LT
9505 for (i = 0; i < ARRAY_SIZE(ipr_chip); i++)
9506 if (ipr_chip[i].vendor == dev_id->vendor &&
9507 ipr_chip[i].device == dev_id->device)
1be7bd82 9508 return &ipr_chip[i];
1da177e4
LT
9509 return NULL;
9510}
9511
6270e593
BK
9512/**
9513 * ipr_wait_for_pci_err_recovery - Wait for any PCI error recovery to complete
9514 * during probe time
9515 * @ioa_cfg: ioa config struct
9516 *
9517 * Return value:
9518 * None
9519 **/
9520static void ipr_wait_for_pci_err_recovery(struct ipr_ioa_cfg *ioa_cfg)
9521{
9522 struct pci_dev *pdev = ioa_cfg->pdev;
9523
9524 if (pci_channel_offline(pdev)) {
9525 wait_event_timeout(ioa_cfg->eeh_wait_q,
9526 !pci_channel_offline(pdev),
9527 IPR_PCI_ERROR_RECOVERY_TIMEOUT);
9528 pci_restore_state(pdev);
9529 }
9530}
9531
05a6538a 9532static int ipr_enable_msix(struct ipr_ioa_cfg *ioa_cfg)
9533{
9534 struct msix_entry entries[IPR_MAX_MSIX_VECTORS];
60e76b77 9535 int i, vectors;
05a6538a 9536
9537 for (i = 0; i < ARRAY_SIZE(entries); ++i)
9538 entries[i].entry = i;
9539
60e76b77
AG
9540 vectors = pci_enable_msix_range(ioa_cfg->pdev,
9541 entries, 1, ipr_number_of_msix);
9542 if (vectors < 0) {
6270e593 9543 ipr_wait_for_pci_err_recovery(ioa_cfg);
60e76b77 9544 return vectors;
05a6538a 9545 }
9546
60e76b77
AG
9547 for (i = 0; i < vectors; i++)
9548 ioa_cfg->vectors_info[i].vec = entries[i].vector;
9549 ioa_cfg->nvectors = vectors;
05a6538a 9550
60e76b77 9551 return 0;
05a6538a 9552}
9553
9554static int ipr_enable_msi(struct ipr_ioa_cfg *ioa_cfg)
9555{
60e76b77 9556 int i, vectors;
05a6538a 9557
60e76b77
AG
9558 vectors = pci_enable_msi_range(ioa_cfg->pdev, 1, ipr_number_of_msix);
9559 if (vectors < 0) {
6270e593 9560 ipr_wait_for_pci_err_recovery(ioa_cfg);
60e76b77 9561 return vectors;
05a6538a 9562 }
9563
60e76b77
AG
9564 for (i = 0; i < vectors; i++)
9565 ioa_cfg->vectors_info[i].vec = ioa_cfg->pdev->irq + i;
9566 ioa_cfg->nvectors = vectors;
05a6538a 9567
60e76b77 9568 return 0;
05a6538a 9569}
9570
9571static void name_msi_vectors(struct ipr_ioa_cfg *ioa_cfg)
9572{
9573 int vec_idx, n = sizeof(ioa_cfg->vectors_info[0].desc) - 1;
9574
9575 for (vec_idx = 0; vec_idx < ioa_cfg->nvectors; vec_idx++) {
9576 snprintf(ioa_cfg->vectors_info[vec_idx].desc, n,
9577 "host%d-%d", ioa_cfg->host->host_no, vec_idx);
9578 ioa_cfg->vectors_info[vec_idx].
9579 desc[strlen(ioa_cfg->vectors_info[vec_idx].desc)] = 0;
9580 }
9581}
9582
9583static int ipr_request_other_msi_irqs(struct ipr_ioa_cfg *ioa_cfg)
9584{
9585 int i, rc;
9586
9587 for (i = 1; i < ioa_cfg->nvectors; i++) {
9588 rc = request_irq(ioa_cfg->vectors_info[i].vec,
9589 ipr_isr_mhrrq,
9590 0,
9591 ioa_cfg->vectors_info[i].desc,
9592 &ioa_cfg->hrrq[i]);
9593 if (rc) {
9594 while (--i >= 0)
9595 free_irq(ioa_cfg->vectors_info[i].vec,
9596 &ioa_cfg->hrrq[i]);
9597 return rc;
9598 }
9599 }
9600 return 0;
9601}
9602
95fecd90
WB
9603/**
9604 * ipr_test_intr - Handle the interrupt generated in ipr_test_msi().
9605 * @pdev: PCI device struct
9606 *
9607 * Description: Simply set the msi_received flag to 1 indicating that
9608 * Message Signaled Interrupts are supported.
9609 *
9610 * Return value:
9611 * 0 on success / non-zero on failure
9612 **/
6f039790 9613static irqreturn_t ipr_test_intr(int irq, void *devp)
95fecd90
WB
9614{
9615 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)devp;
9616 unsigned long lock_flags = 0;
9617 irqreturn_t rc = IRQ_HANDLED;
9618
05a6538a 9619 dev_info(&ioa_cfg->pdev->dev, "Received IRQ : %d\n", irq);
95fecd90
WB
9620 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
9621
9622 ioa_cfg->msi_received = 1;
9623 wake_up(&ioa_cfg->msi_wait_q);
9624
9625 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
9626 return rc;
9627}
9628
9629/**
9630 * ipr_test_msi - Test for Message Signaled Interrupt (MSI) support.
9631 * @pdev: PCI device struct
9632 *
60e76b77 9633 * Description: The return value from pci_enable_msi_range() can not always be
95fecd90
WB
9634 * trusted. This routine sets up and initiates a test interrupt to determine
9635 * if the interrupt is received via the ipr_test_intr() service routine.
9636 * If the tests fails, the driver will fall back to LSI.
9637 *
9638 * Return value:
9639 * 0 on success / non-zero on failure
9640 **/
6f039790 9641static int ipr_test_msi(struct ipr_ioa_cfg *ioa_cfg, struct pci_dev *pdev)
95fecd90
WB
9642{
9643 int rc;
9644 volatile u32 int_reg;
9645 unsigned long lock_flags = 0;
9646
9647 ENTER;
9648
9649 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
9650 init_waitqueue_head(&ioa_cfg->msi_wait_q);
9651 ioa_cfg->msi_received = 0;
9652 ipr_mask_and_clear_interrupts(ioa_cfg, ~IPR_PCII_IOA_TRANS_TO_OPER);
214777ba 9653 writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE, ioa_cfg->regs.clr_interrupt_mask_reg32);
95fecd90
WB
9654 int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
9655 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
9656
f19799f4 9657 if (ioa_cfg->intr_flag == IPR_USE_MSIX)
9658 rc = request_irq(ioa_cfg->vectors_info[0].vec, ipr_test_intr, 0, IPR_NAME, ioa_cfg);
9659 else
9660 rc = request_irq(pdev->irq, ipr_test_intr, 0, IPR_NAME, ioa_cfg);
95fecd90
WB
9661 if (rc) {
9662 dev_err(&pdev->dev, "Can not assign irq %d\n", pdev->irq);
9663 return rc;
9664 } else if (ipr_debug)
9665 dev_info(&pdev->dev, "IRQ assigned: %d\n", pdev->irq);
9666
214777ba 9667 writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE, ioa_cfg->regs.sense_interrupt_reg32);
95fecd90
WB
9668 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
9669 wait_event_timeout(ioa_cfg->msi_wait_q, ioa_cfg->msi_received, HZ);
56d6aa33 9670 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
95fecd90
WB
9671 ipr_mask_and_clear_interrupts(ioa_cfg, ~IPR_PCII_IOA_TRANS_TO_OPER);
9672
95fecd90
WB
9673 if (!ioa_cfg->msi_received) {
9674 /* MSI test failed */
9675 dev_info(&pdev->dev, "MSI test failed. Falling back to LSI.\n");
9676 rc = -EOPNOTSUPP;
9677 } else if (ipr_debug)
9678 dev_info(&pdev->dev, "MSI test succeeded.\n");
9679
9680 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
9681
f19799f4 9682 if (ioa_cfg->intr_flag == IPR_USE_MSIX)
9683 free_irq(ioa_cfg->vectors_info[0].vec, ioa_cfg);
9684 else
9685 free_irq(pdev->irq, ioa_cfg);
95fecd90
WB
9686
9687 LEAVE;
9688
9689 return rc;
9690}
9691
05a6538a 9692 /* ipr_probe_ioa - Allocates memory and does first stage of initialization
1da177e4
LT
9693 * @pdev: PCI device struct
9694 * @dev_id: PCI device id struct
9695 *
9696 * Return value:
9697 * 0 on success / non-zero on failure
9698 **/
6f039790
GKH
9699static int ipr_probe_ioa(struct pci_dev *pdev,
9700 const struct pci_device_id *dev_id)
1da177e4
LT
9701{
9702 struct ipr_ioa_cfg *ioa_cfg;
9703 struct Scsi_Host *host;
9704 unsigned long ipr_regs_pci;
9705 void __iomem *ipr_regs;
a2a65a3e 9706 int rc = PCIBIOS_SUCCESSFUL;
473b1e8e 9707 volatile u32 mask, uproc, interrupts;
feccada9 9708 unsigned long lock_flags, driver_lock_flags;
1da177e4
LT
9709
9710 ENTER;
9711
1da177e4 9712 dev_info(&pdev->dev, "Found IOA with IRQ: %d\n", pdev->irq);
1da177e4
LT
9713 host = scsi_host_alloc(&driver_template, sizeof(*ioa_cfg));
9714
9715 if (!host) {
9716 dev_err(&pdev->dev, "call to scsi_host_alloc failed!\n");
9717 rc = -ENOMEM;
6270e593 9718 goto out;
1da177e4
LT
9719 }
9720
9721 ioa_cfg = (struct ipr_ioa_cfg *)host->hostdata;
9722 memset(ioa_cfg, 0, sizeof(struct ipr_ioa_cfg));
8d8e7d13 9723 ata_host_init(&ioa_cfg->ata_host, &pdev->dev, &ipr_sata_ops);
1da177e4 9724
1be7bd82 9725 ioa_cfg->ipr_chip = ipr_get_chip_info(dev_id);
1da177e4 9726
1be7bd82 9727 if (!ioa_cfg->ipr_chip) {
1da177e4
LT
9728 dev_err(&pdev->dev, "Unknown adapter chipset 0x%04X 0x%04X\n",
9729 dev_id->vendor, dev_id->device);
9730 goto out_scsi_host_put;
9731 }
9732
a32c055f
WB
9733 /* set SIS 32 or SIS 64 */
9734 ioa_cfg->sis64 = ioa_cfg->ipr_chip->sis_type == IPR_SIS64 ? 1 : 0;
1be7bd82 9735 ioa_cfg->chip_cfg = ioa_cfg->ipr_chip->cfg;
7dd21308 9736 ioa_cfg->clear_isr = ioa_cfg->chip_cfg->clear_isr;
89aad428 9737 ioa_cfg->max_cmds = ioa_cfg->chip_cfg->max_cmds;
1be7bd82 9738
5469cb5b
BK
9739 if (ipr_transop_timeout)
9740 ioa_cfg->transop_timeout = ipr_transop_timeout;
9741 else if (dev_id->driver_data & IPR_USE_LONG_TRANSOP_TIMEOUT)
9742 ioa_cfg->transop_timeout = IPR_LONG_OPERATIONAL_TIMEOUT;
9743 else
9744 ioa_cfg->transop_timeout = IPR_OPERATIONAL_TIMEOUT;
9745
44c10138 9746 ioa_cfg->revid = pdev->revision;
463fc696 9747
6270e593
BK
9748 ipr_init_ioa_cfg(ioa_cfg, host, pdev);
9749
1da177e4
LT
9750 ipr_regs_pci = pci_resource_start(pdev, 0);
9751
9752 rc = pci_request_regions(pdev, IPR_NAME);
9753 if (rc < 0) {
9754 dev_err(&pdev->dev,
9755 "Couldn't register memory range of registers\n");
9756 goto out_scsi_host_put;
9757 }
9758
6270e593
BK
9759 rc = pci_enable_device(pdev);
9760
9761 if (rc || pci_channel_offline(pdev)) {
9762 if (pci_channel_offline(pdev)) {
9763 ipr_wait_for_pci_err_recovery(ioa_cfg);
9764 rc = pci_enable_device(pdev);
9765 }
9766
9767 if (rc) {
9768 dev_err(&pdev->dev, "Cannot enable adapter\n");
9769 ipr_wait_for_pci_err_recovery(ioa_cfg);
9770 goto out_release_regions;
9771 }
9772 }
9773
25729a7f 9774 ipr_regs = pci_ioremap_bar(pdev, 0);
1da177e4
LT
9775
9776 if (!ipr_regs) {
9777 dev_err(&pdev->dev,
9778 "Couldn't map memory range of registers\n");
9779 rc = -ENOMEM;
6270e593 9780 goto out_disable;
1da177e4
LT
9781 }
9782
9783 ioa_cfg->hdw_dma_regs = ipr_regs;
9784 ioa_cfg->hdw_dma_regs_pci = ipr_regs_pci;
9785 ioa_cfg->ioa_mailbox = ioa_cfg->chip_cfg->mailbox + ipr_regs;
9786
6270e593 9787 ipr_init_regs(ioa_cfg);
1da177e4 9788
a32c055f 9789 if (ioa_cfg->sis64) {
869404cb 9790 rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
a32c055f 9791 if (rc < 0) {
869404cb
AB
9792 dev_dbg(&pdev->dev, "Failed to set 64 bit DMA mask\n");
9793 rc = dma_set_mask_and_coherent(&pdev->dev,
9794 DMA_BIT_MASK(32));
a32c055f 9795 }
a32c055f 9796 } else
869404cb 9797 rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
a32c055f 9798
1da177e4 9799 if (rc < 0) {
869404cb 9800 dev_err(&pdev->dev, "Failed to set DMA mask\n");
1da177e4
LT
9801 goto cleanup_nomem;
9802 }
9803
9804 rc = pci_write_config_byte(pdev, PCI_CACHE_LINE_SIZE,
9805 ioa_cfg->chip_cfg->cache_line_size);
9806
9807 if (rc != PCIBIOS_SUCCESSFUL) {
9808 dev_err(&pdev->dev, "Write of cache line size failed\n");
6270e593 9809 ipr_wait_for_pci_err_recovery(ioa_cfg);
1da177e4
LT
9810 rc = -EIO;
9811 goto cleanup_nomem;
9812 }
9813
6270e593
BK
9814 /* Issue MMIO read to ensure card is not in EEH */
9815 interrupts = readl(ioa_cfg->regs.sense_interrupt_reg);
9816 ipr_wait_for_pci_err_recovery(ioa_cfg);
9817
05a6538a 9818 if (ipr_number_of_msix > IPR_MAX_MSIX_VECTORS) {
9819 dev_err(&pdev->dev, "The max number of MSIX is %d\n",
9820 IPR_MAX_MSIX_VECTORS);
9821 ipr_number_of_msix = IPR_MAX_MSIX_VECTORS;
9822 }
9823
9824 if (ioa_cfg->ipr_chip->intr_type == IPR_USE_MSI &&
56d6aa33 9825 ipr_enable_msix(ioa_cfg) == 0)
05a6538a 9826 ioa_cfg->intr_flag = IPR_USE_MSIX;
9827 else if (ioa_cfg->ipr_chip->intr_type == IPR_USE_MSI &&
56d6aa33 9828 ipr_enable_msi(ioa_cfg) == 0)
05a6538a 9829 ioa_cfg->intr_flag = IPR_USE_MSI;
9830 else {
9831 ioa_cfg->intr_flag = IPR_USE_LSI;
9832 ioa_cfg->nvectors = 1;
9833 dev_info(&pdev->dev, "Cannot enable MSI.\n");
9834 }
9835
6270e593
BK
9836 pci_set_master(pdev);
9837
9838 if (pci_channel_offline(pdev)) {
9839 ipr_wait_for_pci_err_recovery(ioa_cfg);
9840 pci_set_master(pdev);
9841 if (pci_channel_offline(pdev)) {
9842 rc = -EIO;
9843 goto out_msi_disable;
9844 }
9845 }
9846
05a6538a 9847 if (ioa_cfg->intr_flag == IPR_USE_MSI ||
9848 ioa_cfg->intr_flag == IPR_USE_MSIX) {
95fecd90 9849 rc = ipr_test_msi(ioa_cfg, pdev);
05a6538a 9850 if (rc == -EOPNOTSUPP) {
6270e593 9851 ipr_wait_for_pci_err_recovery(ioa_cfg);
05a6538a 9852 if (ioa_cfg->intr_flag == IPR_USE_MSI) {
9853 ioa_cfg->intr_flag &= ~IPR_USE_MSI;
9854 pci_disable_msi(pdev);
9855 } else if (ioa_cfg->intr_flag == IPR_USE_MSIX) {
9856 ioa_cfg->intr_flag &= ~IPR_USE_MSIX;
9857 pci_disable_msix(pdev);
9858 }
9859
9860 ioa_cfg->intr_flag = IPR_USE_LSI;
9861 ioa_cfg->nvectors = 1;
9862 }
95fecd90
WB
9863 else if (rc)
9864 goto out_msi_disable;
05a6538a 9865 else {
9866 if (ioa_cfg->intr_flag == IPR_USE_MSI)
9867 dev_info(&pdev->dev,
9868 "Request for %d MSIs succeeded with starting IRQ: %d\n",
9869 ioa_cfg->nvectors, pdev->irq);
9870 else if (ioa_cfg->intr_flag == IPR_USE_MSIX)
9871 dev_info(&pdev->dev,
9872 "Request for %d MSIXs succeeded.",
9873 ioa_cfg->nvectors);
9874 }
9875 }
9876
9877 ioa_cfg->hrrq_num = min3(ioa_cfg->nvectors,
9878 (unsigned int)num_online_cpus(),
9879 (unsigned int)IPR_MAX_HRRQ_NUM);
95fecd90 9880
1da177e4 9881 if ((rc = ipr_save_pcix_cmd_reg(ioa_cfg)))
f170c684 9882 goto out_msi_disable;
1da177e4
LT
9883
9884 if ((rc = ipr_set_pcix_cmd_reg(ioa_cfg)))
f170c684 9885 goto out_msi_disable;
1da177e4
LT
9886
9887 rc = ipr_alloc_mem(ioa_cfg);
9888 if (rc < 0) {
9889 dev_err(&pdev->dev,
9890 "Couldn't allocate enough memory for device driver!\n");
f170c684 9891 goto out_msi_disable;
1da177e4
LT
9892 }
9893
6270e593
BK
9894 /* Save away PCI config space for use following IOA reset */
9895 rc = pci_save_state(pdev);
9896
9897 if (rc != PCIBIOS_SUCCESSFUL) {
9898 dev_err(&pdev->dev, "Failed to save PCI config space\n");
9899 rc = -EIO;
9900 goto cleanup_nolog;
9901 }
9902
ce155cce
BK
9903 /*
9904 * If HRRQ updated interrupt is not masked, or reset alert is set,
9905 * the card is in an unknown state and needs a hard reset
9906 */
214777ba
WB
9907 mask = readl(ioa_cfg->regs.sense_interrupt_mask_reg32);
9908 interrupts = readl(ioa_cfg->regs.sense_interrupt_reg32);
9909 uproc = readl(ioa_cfg->regs.sense_uproc_interrupt_reg32);
ce155cce
BK
9910 if ((mask & IPR_PCII_HRRQ_UPDATED) == 0 || (uproc & IPR_UPROCI_RESET_ALERT))
9911 ioa_cfg->needs_hard_reset = 1;
5d7c20b7 9912 if ((interrupts & IPR_PCII_ERROR_INTERRUPTS) || reset_devices)
473b1e8e
BK
9913 ioa_cfg->needs_hard_reset = 1;
9914 if (interrupts & IPR_PCII_IOA_UNIT_CHECKED)
9915 ioa_cfg->ioa_unit_checked = 1;
ce155cce 9916
56d6aa33 9917 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
1da177e4 9918 ipr_mask_and_clear_interrupts(ioa_cfg, ~IPR_PCII_IOA_TRANS_TO_OPER);
56d6aa33 9919 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
1da177e4 9920
05a6538a 9921 if (ioa_cfg->intr_flag == IPR_USE_MSI
9922 || ioa_cfg->intr_flag == IPR_USE_MSIX) {
9923 name_msi_vectors(ioa_cfg);
9924 rc = request_irq(ioa_cfg->vectors_info[0].vec, ipr_isr,
9925 0,
9926 ioa_cfg->vectors_info[0].desc,
9927 &ioa_cfg->hrrq[0]);
9928 if (!rc)
9929 rc = ipr_request_other_msi_irqs(ioa_cfg);
9930 } else {
9931 rc = request_irq(pdev->irq, ipr_isr,
9932 IRQF_SHARED,
9933 IPR_NAME, &ioa_cfg->hrrq[0]);
9934 }
1da177e4
LT
9935 if (rc) {
9936 dev_err(&pdev->dev, "Couldn't register IRQ %d! rc=%d\n",
9937 pdev->irq, rc);
9938 goto cleanup_nolog;
9939 }
9940
463fc696
BK
9941 if ((dev_id->driver_data & IPR_USE_PCI_WARM_RESET) ||
9942 (dev_id->device == PCI_DEVICE_ID_IBM_OBSIDIAN_E && !ioa_cfg->revid)) {
9943 ioa_cfg->needs_warm_reset = 1;
9944 ioa_cfg->reset = ipr_reset_slot_reset;
9945 } else
9946 ioa_cfg->reset = ipr_reset_start_bist;
9947
feccada9 9948 spin_lock_irqsave(&ipr_driver_lock, driver_lock_flags);
1da177e4 9949 list_add_tail(&ioa_cfg->queue, &ipr_ioa_head);
feccada9 9950 spin_unlock_irqrestore(&ipr_driver_lock, driver_lock_flags);
1da177e4
LT
9951
9952 LEAVE;
9953out:
9954 return rc;
9955
9956cleanup_nolog:
9957 ipr_free_mem(ioa_cfg);
95fecd90 9958out_msi_disable:
6270e593 9959 ipr_wait_for_pci_err_recovery(ioa_cfg);
05a6538a 9960 if (ioa_cfg->intr_flag == IPR_USE_MSI)
9961 pci_disable_msi(pdev);
9962 else if (ioa_cfg->intr_flag == IPR_USE_MSIX)
9963 pci_disable_msix(pdev);
f170c684
JL
9964cleanup_nomem:
9965 iounmap(ipr_regs);
6270e593
BK
9966out_disable:
9967 pci_disable_device(pdev);
1da177e4
LT
9968out_release_regions:
9969 pci_release_regions(pdev);
9970out_scsi_host_put:
9971 scsi_host_put(host);
1da177e4
LT
9972 goto out;
9973}
9974
1da177e4
LT
9975/**
9976 * ipr_initiate_ioa_bringdown - Bring down an adapter
9977 * @ioa_cfg: ioa config struct
9978 * @shutdown_type: shutdown type
9979 *
9980 * Description: This function will initiate bringing down the adapter.
9981 * This consists of issuing an IOA shutdown to the adapter
9982 * to flush the cache, and running BIST.
9983 * If the caller needs to wait on the completion of the reset,
9984 * the caller must sleep on the reset_wait_q.
9985 *
9986 * Return value:
9987 * none
9988 **/
9989static void ipr_initiate_ioa_bringdown(struct ipr_ioa_cfg *ioa_cfg,
9990 enum ipr_shutdown_type shutdown_type)
9991{
9992 ENTER;
9993 if (ioa_cfg->sdt_state == WAIT_FOR_DUMP)
9994 ioa_cfg->sdt_state = ABORT_DUMP;
9995 ioa_cfg->reset_retries = 0;
9996 ioa_cfg->in_ioa_bringdown = 1;
9997 ipr_initiate_ioa_reset(ioa_cfg, shutdown_type);
9998 LEAVE;
9999}
10000
10001/**
10002 * __ipr_remove - Remove a single adapter
10003 * @pdev: pci device struct
10004 *
10005 * Adapter hot plug remove entry point.
10006 *
10007 * Return value:
10008 * none
10009 **/
10010static void __ipr_remove(struct pci_dev *pdev)
10011{
10012 unsigned long host_lock_flags = 0;
10013 struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
bfae7820 10014 int i;
feccada9 10015 unsigned long driver_lock_flags;
1da177e4
LT
10016 ENTER;
10017
10018 spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);
203fa3fe 10019 while (ioa_cfg->in_reset_reload) {
970ea294
BK
10020 spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);
10021 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
10022 spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);
10023 }
10024
bfae7820
BK
10025 for (i = 0; i < ioa_cfg->hrrq_num; i++) {
10026 spin_lock(&ioa_cfg->hrrq[i]._lock);
10027 ioa_cfg->hrrq[i].removing_ioa = 1;
10028 spin_unlock(&ioa_cfg->hrrq[i]._lock);
10029 }
10030 wmb();
1da177e4
LT
10031 ipr_initiate_ioa_bringdown(ioa_cfg, IPR_SHUTDOWN_NORMAL);
10032
10033 spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);
10034 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
43829731 10035 flush_work(&ioa_cfg->work_q);
9077a944 10036 INIT_LIST_HEAD(&ioa_cfg->used_res_q);
1da177e4
LT
10037 spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);
10038
feccada9 10039 spin_lock_irqsave(&ipr_driver_lock, driver_lock_flags);
1da177e4 10040 list_del(&ioa_cfg->queue);
feccada9 10041 spin_unlock_irqrestore(&ipr_driver_lock, driver_lock_flags);
1da177e4
LT
10042
10043 if (ioa_cfg->sdt_state == ABORT_DUMP)
10044 ioa_cfg->sdt_state = WAIT_FOR_DUMP;
10045 spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);
10046
10047 ipr_free_all_resources(ioa_cfg);
10048
10049 LEAVE;
10050}
10051
10052/**
10053 * ipr_remove - IOA hot plug remove entry point
10054 * @pdev: pci device struct
10055 *
10056 * Adapter hot plug remove entry point.
10057 *
10058 * Return value:
10059 * none
10060 **/
6f039790 10061static void ipr_remove(struct pci_dev *pdev)
1da177e4
LT
10062{
10063 struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
10064
10065 ENTER;
10066
ee959b00 10067 ipr_remove_trace_file(&ioa_cfg->host->shost_dev.kobj,
1da177e4 10068 &ipr_trace_attr);
ee959b00 10069 ipr_remove_dump_file(&ioa_cfg->host->shost_dev.kobj,
1da177e4
LT
10070 &ipr_dump_attr);
10071 scsi_remove_host(ioa_cfg->host);
10072
10073 __ipr_remove(pdev);
10074
10075 LEAVE;
10076}
10077
10078/**
10079 * ipr_probe - Adapter hot plug add entry point
10080 *
10081 * Return value:
10082 * 0 on success / non-zero on failure
10083 **/
6f039790 10084static int ipr_probe(struct pci_dev *pdev, const struct pci_device_id *dev_id)
1da177e4
LT
10085{
10086 struct ipr_ioa_cfg *ioa_cfg;
b53d124a 10087 int rc, i;
1da177e4
LT
10088
10089 rc = ipr_probe_ioa(pdev, dev_id);
10090
10091 if (rc)
10092 return rc;
10093
10094 ioa_cfg = pci_get_drvdata(pdev);
10095 rc = ipr_probe_ioa_part2(ioa_cfg);
10096
10097 if (rc) {
10098 __ipr_remove(pdev);
10099 return rc;
10100 }
10101
10102 rc = scsi_add_host(ioa_cfg->host, &pdev->dev);
10103
10104 if (rc) {
10105 __ipr_remove(pdev);
10106 return rc;
10107 }
10108
ee959b00 10109 rc = ipr_create_trace_file(&ioa_cfg->host->shost_dev.kobj,
1da177e4
LT
10110 &ipr_trace_attr);
10111
10112 if (rc) {
10113 scsi_remove_host(ioa_cfg->host);
10114 __ipr_remove(pdev);
10115 return rc;
10116 }
10117
ee959b00 10118 rc = ipr_create_dump_file(&ioa_cfg->host->shost_dev.kobj,
1da177e4
LT
10119 &ipr_dump_attr);
10120
10121 if (rc) {
ee959b00 10122 ipr_remove_trace_file(&ioa_cfg->host->shost_dev.kobj,
1da177e4
LT
10123 &ipr_trace_attr);
10124 scsi_remove_host(ioa_cfg->host);
10125 __ipr_remove(pdev);
10126 return rc;
10127 }
10128
10129 scsi_scan_host(ioa_cfg->host);
b53d124a 10130 ioa_cfg->iopoll_weight = ioa_cfg->chip_cfg->iopoll_weight;
10131
89f8b33c 10132 if (ioa_cfg->iopoll_weight && ioa_cfg->sis64 && ioa_cfg->nvectors > 1) {
b53d124a 10133 for (i = 1; i < ioa_cfg->hrrq_num; i++) {
10134 blk_iopoll_init(&ioa_cfg->hrrq[i].iopoll,
10135 ioa_cfg->iopoll_weight, ipr_iopoll);
10136 blk_iopoll_enable(&ioa_cfg->hrrq[i].iopoll);
10137 }
10138 }
10139
1da177e4
LT
10140 schedule_work(&ioa_cfg->work_q);
10141 return 0;
10142}
10143
10144/**
10145 * ipr_shutdown - Shutdown handler.
d18c3db5 10146 * @pdev: pci device struct
1da177e4
LT
10147 *
10148 * This function is invoked upon system shutdown/reboot. It will issue
10149 * an adapter shutdown to the adapter to flush the write cache.
10150 *
10151 * Return value:
10152 * none
10153 **/
d18c3db5 10154static void ipr_shutdown(struct pci_dev *pdev)
1da177e4 10155{
d18c3db5 10156 struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
1da177e4 10157 unsigned long lock_flags = 0;
4fdd7c7a 10158 enum ipr_shutdown_type shutdown_type = IPR_SHUTDOWN_NORMAL;
b53d124a 10159 int i;
1da177e4
LT
10160
10161 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
89f8b33c 10162 if (ioa_cfg->iopoll_weight && ioa_cfg->sis64 && ioa_cfg->nvectors > 1) {
b53d124a 10163 ioa_cfg->iopoll_weight = 0;
10164 for (i = 1; i < ioa_cfg->hrrq_num; i++)
10165 blk_iopoll_disable(&ioa_cfg->hrrq[i].iopoll);
10166 }
10167
203fa3fe 10168 while (ioa_cfg->in_reset_reload) {
970ea294
BK
10169 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
10170 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
10171 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
10172 }
10173
4fdd7c7a
BK
10174 if (ipr_fast_reboot && system_state == SYSTEM_RESTART && ioa_cfg->sis64)
10175 shutdown_type = IPR_SHUTDOWN_QUIESCE;
10176
10177 ipr_initiate_ioa_bringdown(ioa_cfg, shutdown_type);
1da177e4
LT
10178 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
10179 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
4fdd7c7a
BK
10180 if (ipr_fast_reboot && system_state == SYSTEM_RESTART && ioa_cfg->sis64) {
10181 if (ioa_cfg->intr_flag == IPR_USE_MSI ||
10182 ioa_cfg->intr_flag == IPR_USE_MSIX) {
10183 int i;
10184 for (i = 0; i < ioa_cfg->nvectors; i++)
10185 free_irq(ioa_cfg->vectors_info[i].vec,
10186 &ioa_cfg->hrrq[i]);
10187 }
10188
10189 if (ioa_cfg->intr_flag == IPR_USE_MSI) {
10190 pci_disable_msi(ioa_cfg->pdev);
10191 ioa_cfg->intr_flag &= ~IPR_USE_MSI;
10192 } else if (ioa_cfg->intr_flag == IPR_USE_MSIX) {
10193 pci_disable_msix(ioa_cfg->pdev);
10194 ioa_cfg->intr_flag &= ~IPR_USE_MSIX;
10195 }
10196
10197 pci_disable_device(ioa_cfg->pdev);
10198 }
1da177e4
LT
10199}
10200
6f039790 10201static struct pci_device_id ipr_pci_table[] = {
1da177e4 10202 { PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE,
6d84c944 10203 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_5702, 0, 0, 0 },
1da177e4 10204 { PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE,
6d84c944 10205 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_5703, 0, 0, 0 },
1da177e4 10206 { PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE,
6d84c944 10207 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_573D, 0, 0, 0 },
1da177e4 10208 { PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE,
6d84c944 10209 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_573E, 0, 0, 0 },
1da177e4 10210 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE,
6d84c944 10211 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_571B, 0, 0, 0 },
1da177e4 10212 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE,
6d84c944 10213 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572E, 0, 0, 0 },
1da177e4 10214 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE,
6d84c944 10215 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_571A, 0, 0, 0 },
86f51436 10216 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE,
5469cb5b
BK
10217 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_575B, 0, 0,
10218 IPR_USE_LONG_TRANSOP_TIMEOUT },
86f51436 10219 { PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_OBSIDIAN,
6d84c944 10220 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572A, 0, 0, 0 },
86f51436 10221 { PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_OBSIDIAN,
22d2e402
BK
10222 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572B, 0, 0,
10223 IPR_USE_LONG_TRANSOP_TIMEOUT },
60e7486b 10224 { PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_OBSIDIAN,
5469cb5b
BK
10225 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_575C, 0, 0,
10226 IPR_USE_LONG_TRANSOP_TIMEOUT },
86f51436 10227 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN,
6d84c944 10228 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572A, 0, 0, 0 },
86f51436 10229 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN,
22d2e402
BK
10230 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572B, 0, 0,
10231 IPR_USE_LONG_TRANSOP_TIMEOUT},
60e7486b 10232 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN,
5469cb5b
BK
10233 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_575C, 0, 0,
10234 IPR_USE_LONG_TRANSOP_TIMEOUT },
185eb31c 10235 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E,
22d2e402
BK
10236 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_574E, 0, 0,
10237 IPR_USE_LONG_TRANSOP_TIMEOUT },
185eb31c
BK
10238 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E,
10239 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B3, 0, 0, 0 },
b0f56d3d
WB
10240 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E,
10241 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57CC, 0, 0, 0 },
60e7486b 10242 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E,
5469cb5b 10243 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B7, 0, 0,
463fc696 10244 IPR_USE_LONG_TRANSOP_TIMEOUT | IPR_USE_PCI_WARM_RESET },
1da177e4 10245 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_SNIPE,
6d84c944 10246 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_2780, 0, 0, 0 },
1da177e4 10247 { PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_SCAMP,
6d84c944 10248 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_571E, 0, 0, 0 },
86f51436 10249 { PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_SCAMP,
5469cb5b
BK
10250 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_571F, 0, 0,
10251 IPR_USE_LONG_TRANSOP_TIMEOUT },
60e7486b 10252 { PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_SCAMP,
5469cb5b
BK
10253 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572F, 0, 0,
10254 IPR_USE_LONG_TRANSOP_TIMEOUT },
d7b4627f
WB
10255 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2,
10256 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B5, 0, 0, 0 },
10257 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2,
10258 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_574D, 0, 0, 0 },
10259 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2,
10260 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B2, 0, 0, 0 },
b8d5d568 10261 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2,
10262 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57C0, 0, 0, 0 },
5a918353
WB
10263 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2,
10264 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57C3, 0, 0, 0 },
32622bde
WB
10265 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2,
10266 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57C4, 0, 0, 0 },
cd9b3d04 10267 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
d7b4627f 10268 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B4, 0, 0, 0 },
cd9b3d04 10269 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
d7b4627f 10270 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B1, 0, 0, 0 },
cd9b3d04 10271 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
d7b4627f 10272 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57C6, 0, 0, 0 },
cd9b3d04
WB
10273 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10274 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57C8, 0, 0, 0 },
10275 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
d7b4627f 10276 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57CE, 0, 0, 0 },
b8d5d568 10277 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10278 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57D5, 0, 0, 0 },
10279 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10280 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57D6, 0, 0, 0 },
10281 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10282 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57D7, 0, 0, 0 },
10283 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10284 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57D8, 0, 0, 0 },
43c5fdaf 10285 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10286 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57D9, 0, 0, 0 },
10287 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
f94d9964
WX
10288 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57DA, 0, 0, 0 },
10289 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
43c5fdaf 10290 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57EB, 0, 0, 0 },
10291 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10292 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57EC, 0, 0, 0 },
10293 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10294 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57ED, 0, 0, 0 },
10295 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10296 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57EE, 0, 0, 0 },
10297 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10298 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57EF, 0, 0, 0 },
10299 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10300 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57F0, 0, 0, 0 },
5eeac3e9
WX
10301 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10302 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_2CCA, 0, 0, 0 },
10303 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10304 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_2CD2, 0, 0, 0 },
10305 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10306 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_2CCD, 0, 0, 0 },
1da177e4
LT
10307 { }
10308};
10309MODULE_DEVICE_TABLE(pci, ipr_pci_table);
10310
a55b2d21 10311static const struct pci_error_handlers ipr_err_handler = {
f8a88b19 10312 .error_detected = ipr_pci_error_detected,
6270e593 10313 .mmio_enabled = ipr_pci_mmio_enabled,
f8a88b19
LV
10314 .slot_reset = ipr_pci_slot_reset,
10315};
10316
1da177e4
LT
10317static struct pci_driver ipr_driver = {
10318 .name = IPR_NAME,
10319 .id_table = ipr_pci_table,
10320 .probe = ipr_probe,
6f039790 10321 .remove = ipr_remove,
d18c3db5 10322 .shutdown = ipr_shutdown,
f8a88b19 10323 .err_handler = &ipr_err_handler,
1da177e4
LT
10324};
10325
f72919ec
WB
10326/**
10327 * ipr_halt_done - Shutdown prepare completion
10328 *
10329 * Return value:
10330 * none
10331 **/
10332static void ipr_halt_done(struct ipr_cmnd *ipr_cmd)
10333{
05a6538a 10334 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
f72919ec
WB
10335}
10336
10337/**
10338 * ipr_halt - Issue shutdown prepare to all adapters
10339 *
10340 * Return value:
10341 * NOTIFY_OK on success / NOTIFY_DONE on failure
10342 **/
10343static int ipr_halt(struct notifier_block *nb, ulong event, void *buf)
10344{
10345 struct ipr_cmnd *ipr_cmd;
10346 struct ipr_ioa_cfg *ioa_cfg;
feccada9 10347 unsigned long flags = 0, driver_lock_flags;
f72919ec
WB
10348
10349 if (event != SYS_RESTART && event != SYS_HALT && event != SYS_POWER_OFF)
10350 return NOTIFY_DONE;
10351
feccada9 10352 spin_lock_irqsave(&ipr_driver_lock, driver_lock_flags);
f72919ec
WB
10353
10354 list_for_each_entry(ioa_cfg, &ipr_ioa_head, queue) {
10355 spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
4fdd7c7a
BK
10356 if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].allow_cmds ||
10357 (ipr_fast_reboot && event == SYS_RESTART && ioa_cfg->sis64)) {
f72919ec
WB
10358 spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
10359 continue;
10360 }
10361
10362 ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
10363 ipr_cmd->ioarcb.res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
10364 ipr_cmd->ioarcb.cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
10365 ipr_cmd->ioarcb.cmd_pkt.cdb[0] = IPR_IOA_SHUTDOWN;
10366 ipr_cmd->ioarcb.cmd_pkt.cdb[1] = IPR_SHUTDOWN_PREPARE_FOR_NORMAL;
10367
10368 ipr_do_req(ipr_cmd, ipr_halt_done, ipr_timeout, IPR_DEVICE_RESET_TIMEOUT);
10369 spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
10370 }
feccada9 10371 spin_unlock_irqrestore(&ipr_driver_lock, driver_lock_flags);
f72919ec
WB
10372
10373 return NOTIFY_OK;
10374}
10375
10376static struct notifier_block ipr_notifier = {
10377 ipr_halt, NULL, 0
10378};
10379
1da177e4
LT
10380/**
10381 * ipr_init - Module entry point
10382 *
10383 * Return value:
10384 * 0 on success / negative value on failure
10385 **/
10386static int __init ipr_init(void)
10387{
10388 ipr_info("IBM Power RAID SCSI Device Driver version: %s %s\n",
10389 IPR_DRIVER_VERSION, IPR_DRIVER_DATE);
10390
f72919ec 10391 register_reboot_notifier(&ipr_notifier);
dcbccbde 10392 return pci_register_driver(&ipr_driver);
1da177e4
LT
10393}
10394
10395/**
10396 * ipr_exit - Module unload
10397 *
10398 * Module unload entry point.
10399 *
10400 * Return value:
10401 * none
10402 **/
10403static void __exit ipr_exit(void)
10404{
f72919ec 10405 unregister_reboot_notifier(&ipr_notifier);
1da177e4
LT
10406 pci_unregister_driver(&ipr_driver);
10407}
10408
10409module_init(ipr_init);
10410module_exit(ipr_exit);