/*
 * ipr.c -- driver for IBM Power Linux RAID adapters
 *
 * Written By: Brian King <brking@us.ibm.com>, IBM Corporation
 *
 * Copyright (C) 2003, 2004 IBM Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 *
 */

/*
 * Notes:
 *
 * This driver is used to control the following SCSI adapters:
 *
 * IBM iSeries: 5702, 5703, 2780, 5709, 570A, 570B
 *
 * IBM pSeries: PCI-X Dual Channel Ultra 320 SCSI RAID Adapter
 *              PCI-X Dual Channel Ultra 320 SCSI Adapter
 *              PCI-X Dual Channel Ultra 320 SCSI RAID Enablement Card
 *              Embedded SCSI adapter on p615 and p655 systems
 *
 * Supported Hardware Features:
 *	- Ultra 320 SCSI controller
 *	- PCI-X host interface
 *	- Embedded PowerPC RISC Processor and Hardware XOR DMA Engine
 *	- Non-Volatile Write Cache
 *	- Supports attachment of non-RAID disks, tape, and optical devices
 *	- RAID Levels 0, 5, 10
 *	- Hot spare
 *	- Background Parity Checking
 *	- Background Data Scrubbing
 *	- Ability to increase the capacity of an existing RAID 5 disk array
 *		by adding disks
 *
 * Driver Features:
 *	- Tagged command queuing
 *	- Adapter microcode download
 *	- PCI hot plug
 *	- SCSI device hot plug
 *
 */

#include <linux/fs.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/ioport.h>
#include <linux/delay.h>
#include <linux/pci.h>
#include <linux/wait.h>
#include <linux/spinlock.h>
#include <linux/sched.h>
#include <linux/interrupt.h>
#include <linux/blkdev.h>
#include <linux/firmware.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/libata.h>
#include <linux/hdreg.h>
#include <linux/reboot.h>
#include <linux/stringify.h>
#include <asm/io.h>
#include <asm/irq.h>
#include <asm/processor.h>
#include <scsi/scsi.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_tcq.h>
#include <scsi/scsi_eh.h>
#include <scsi/scsi_cmnd.h>
#include "ipr.h"

/*
 *   Global Data
 */
static LIST_HEAD(ipr_ioa_head);
static unsigned int ipr_log_level = IPR_DEFAULT_LOG_LEVEL;
static unsigned int ipr_max_speed = 1;
static int ipr_testmode = 0;
static unsigned int ipr_fastfail = 0;
static unsigned int ipr_transop_timeout = 0;
static unsigned int ipr_debug = 0;
static unsigned int ipr_max_devs = IPR_DEFAULT_SIS64_DEVS;
static unsigned int ipr_dual_ioa_raid = 1;
static unsigned int ipr_number_of_msix = 16;
static unsigned int ipr_fast_reboot;
static DEFINE_SPINLOCK(ipr_driver_lock);

/* This table describes the differences between DMA controller chips */
static const struct ipr_chip_cfg_t ipr_chip_cfg[] = {
	{ /* Gemstone, Citrine, Obsidian, and Obsidian-E */
		.mailbox = 0x0042C,
		.max_cmds = 100,
		.cache_line_size = 0x20,
		.clear_isr = 1,
		.iopoll_weight = 0,
		{
			.set_interrupt_mask_reg = 0x0022C,
			.clr_interrupt_mask_reg = 0x00230,
			.clr_interrupt_mask_reg32 = 0x00230,
			.sense_interrupt_mask_reg = 0x0022C,
			.sense_interrupt_mask_reg32 = 0x0022C,
			.clr_interrupt_reg = 0x00228,
			.clr_interrupt_reg32 = 0x00228,
			.sense_interrupt_reg = 0x00224,
			.sense_interrupt_reg32 = 0x00224,
			.ioarrin_reg = 0x00404,
			.sense_uproc_interrupt_reg = 0x00214,
			.sense_uproc_interrupt_reg32 = 0x00214,
			.set_uproc_interrupt_reg = 0x00214,
			.set_uproc_interrupt_reg32 = 0x00214,
			.clr_uproc_interrupt_reg = 0x00218,
			.clr_uproc_interrupt_reg32 = 0x00218
		}
	},
	{ /* Snipe and Scamp */
		.mailbox = 0x0052C,
		.max_cmds = 100,
		.cache_line_size = 0x20,
		.clear_isr = 1,
		.iopoll_weight = 0,
		{
			.set_interrupt_mask_reg = 0x00288,
			.clr_interrupt_mask_reg = 0x0028C,
			.clr_interrupt_mask_reg32 = 0x0028C,
			.sense_interrupt_mask_reg = 0x00288,
			.sense_interrupt_mask_reg32 = 0x00288,
			.clr_interrupt_reg = 0x00284,
			.clr_interrupt_reg32 = 0x00284,
			.sense_interrupt_reg = 0x00280,
			.sense_interrupt_reg32 = 0x00280,
			.ioarrin_reg = 0x00504,
			.sense_uproc_interrupt_reg = 0x00290,
			.sense_uproc_interrupt_reg32 = 0x00290,
			.set_uproc_interrupt_reg = 0x00290,
			.set_uproc_interrupt_reg32 = 0x00290,
			.clr_uproc_interrupt_reg = 0x00294,
			.clr_uproc_interrupt_reg32 = 0x00294
		}
	},
	{ /* CRoC */
		.mailbox = 0x00044,
		.max_cmds = 1000,
		.cache_line_size = 0x20,
		.clear_isr = 0,
		.iopoll_weight = 64,
		{
			.set_interrupt_mask_reg = 0x00010,
			.clr_interrupt_mask_reg = 0x00018,
			.clr_interrupt_mask_reg32 = 0x0001C,
			.sense_interrupt_mask_reg = 0x00010,
			.sense_interrupt_mask_reg32 = 0x00014,
			.clr_interrupt_reg = 0x00008,
			.clr_interrupt_reg32 = 0x0000C,
			.sense_interrupt_reg = 0x00000,
			.sense_interrupt_reg32 = 0x00004,
			.ioarrin_reg = 0x00070,
			.sense_uproc_interrupt_reg = 0x00020,
			.sense_uproc_interrupt_reg32 = 0x00024,
			.set_uproc_interrupt_reg = 0x00020,
			.set_uproc_interrupt_reg32 = 0x00024,
			.clr_uproc_interrupt_reg = 0x00028,
			.clr_uproc_interrupt_reg32 = 0x0002C,
			.init_feedback_reg = 0x0005C,
			.dump_addr_reg = 0x00064,
			.dump_data_reg = 0x00068,
			.endian_swap_reg = 0x00084
		}
	},
};

static const struct ipr_chip_t ipr_chip[] = {
	{ PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE, false, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[0] },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE, false, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[0] },
	{ PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_OBSIDIAN, false, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[0] },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN, false, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[0] },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E, true, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[0] },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_SNIPE, false, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[1] },
	{ PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_SCAMP, false, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[1] },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2, true, IPR_SIS64, IPR_MMIO, &ipr_chip_cfg[2] },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE, true, IPR_SIS64, IPR_MMIO, &ipr_chip_cfg[2] },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_RATTLESNAKE, true, IPR_SIS64, IPR_MMIO, &ipr_chip_cfg[2] }
};

static int ipr_max_bus_speeds[] = {
	IPR_80MBs_SCSI_RATE, IPR_U160_SCSI_RATE, IPR_U320_SCSI_RATE
};

MODULE_AUTHOR("Brian King <brking@us.ibm.com>");
MODULE_DESCRIPTION("IBM Power RAID SCSI Adapter Driver");
module_param_named(max_speed, ipr_max_speed, uint, 0);
MODULE_PARM_DESC(max_speed, "Maximum bus speed (0-2). Default: 1=U160. Speeds: 0=80 MB/s, 1=U160, 2=U320");
module_param_named(log_level, ipr_log_level, uint, 0);
MODULE_PARM_DESC(log_level, "Set to 0 - 4 for increasing verbosity of device driver");
module_param_named(testmode, ipr_testmode, int, 0);
MODULE_PARM_DESC(testmode, "DANGEROUS!!! Allows unsupported configurations");
module_param_named(fastfail, ipr_fastfail, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(fastfail, "Reduce timeouts and retries");
module_param_named(transop_timeout, ipr_transop_timeout, int, 0);
MODULE_PARM_DESC(transop_timeout, "Time in seconds to wait for adapter to come operational (default: 300)");
module_param_named(debug, ipr_debug, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(debug, "Enable device driver debugging logging. Set to 1 to enable. (default: 0)");
module_param_named(dual_ioa_raid, ipr_dual_ioa_raid, int, 0);
MODULE_PARM_DESC(dual_ioa_raid, "Enable dual adapter RAID support. Set to 1 to enable. (default: 1)");
module_param_named(max_devs, ipr_max_devs, int, 0);
MODULE_PARM_DESC(max_devs, "Specify the maximum number of physical devices. "
		 "[Default=" __stringify(IPR_DEFAULT_SIS64_DEVS) "]");
module_param_named(number_of_msix, ipr_number_of_msix, int, 0);
MODULE_PARM_DESC(number_of_msix, "Specify the number of MSIX interrupts to use on capable adapters (1 - 16). (default:16)");
module_param_named(fast_reboot, ipr_fast_reboot, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(fast_reboot, "Skip adapter shutdown during reboot. Set to 1 to enable. (default: 0)");
MODULE_LICENSE("GPL");
MODULE_VERSION(IPR_DRIVER_VERSION);

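/*
 * Illustrative usage (not driver code): these parameters are set at module
 * load time, e.g.
 *
 *	modprobe ipr max_speed=2 number_of_msix=8
 *
 * or on the kernel command line as ipr.max_speed=2 when the driver is built
 * in. The parameters registered with S_IRUGO | S_IWUSR above (fastfail,
 * debug, fast_reboot) can also be changed at runtime through
 * /sys/module/ipr/parameters/.
 */
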
/*  A constant array of IOASCs/URCs/Error Messages */
static const
struct ipr_error_table_t ipr_error_table[] = {
	{0x00000000, 1, IPR_DEFAULT_LOG_LEVEL, "8155: An unknown error was received"},
	{0x00330000, 0, 0, "Soft underlength error"},
	{0x005A0000, 0, 0, "Command to be cancelled not found"},
	{0x00808000, 0, 0, "Qualified success"},
	{0x01080000, 1, IPR_DEFAULT_LOG_LEVEL, "FFFE: Soft device bus error recovered by the IOA"},
	{0x01088100, 0, IPR_DEFAULT_LOG_LEVEL, "4101: Soft device bus fabric error"},
	{0x01100100, 0, IPR_DEFAULT_LOG_LEVEL, "FFFC: Logical block guard error recovered by the device"},
	{0x01100300, 0, IPR_DEFAULT_LOG_LEVEL, "FFFC: Logical block reference tag error recovered by the device"},
	{0x01108300, 0, IPR_DEFAULT_LOG_LEVEL, "4171: Recovered scatter list tag / sequence number error"},
	{0x01109000, 0, IPR_DEFAULT_LOG_LEVEL, "FF3D: Recovered logical block CRC error on IOA to Host transfer"},
	{0x01109200, 0, IPR_DEFAULT_LOG_LEVEL, "4171: Recovered logical block sequence number error on IOA to Host transfer"},
	{0x0110A000, 0, IPR_DEFAULT_LOG_LEVEL, "FFFD: Recovered logical block reference tag error detected by the IOA"},
	{0x0110A100, 0, IPR_DEFAULT_LOG_LEVEL, "FFFD: Logical block guard error recovered by the IOA"},
	{0x01170600, 0, IPR_DEFAULT_LOG_LEVEL, "FFF9: Device sector reassign successful"},
	{0x01170900, 0, IPR_DEFAULT_LOG_LEVEL, "FFF7: Media error recovered by device rewrite procedures"},
	{0x01180200, 0, IPR_DEFAULT_LOG_LEVEL, "7001: IOA sector reassignment successful"},
	{0x01180500, 0, IPR_DEFAULT_LOG_LEVEL, "FFF9: Soft media error. Sector reassignment recommended"},
	{0x01180600, 0, IPR_DEFAULT_LOG_LEVEL, "FFF7: Media error recovered by IOA rewrite procedures"},
	{0x01418000, 0, IPR_DEFAULT_LOG_LEVEL, "FF3D: Soft PCI bus error recovered by the IOA"},
	{0x01440000, 1, IPR_DEFAULT_LOG_LEVEL, "FFF6: Device hardware error recovered by the IOA"},
	{0x01448100, 0, IPR_DEFAULT_LOG_LEVEL, "FFF6: Device hardware error recovered by the device"},
	{0x01448200, 1, IPR_DEFAULT_LOG_LEVEL, "FF3D: Soft IOA error recovered by the IOA"},
	{0x01448300, 0, IPR_DEFAULT_LOG_LEVEL, "FFFA: Undefined device response recovered by the IOA"},
	{0x014A0000, 1, IPR_DEFAULT_LOG_LEVEL, "FFF6: Device bus error, message or command phase"},
	{0x014A8000, 0, IPR_DEFAULT_LOG_LEVEL, "FFFE: Task Management Function failed"},
	{0x015D0000, 0, IPR_DEFAULT_LOG_LEVEL, "FFF6: Failure prediction threshold exceeded"},
	{0x015D9200, 0, IPR_DEFAULT_LOG_LEVEL, "8009: Impending cache battery pack failure"},
	{0x02040100, 0, 0, "Logical Unit in process of becoming ready"},
	{0x02040200, 0, 0, "Initializing command required"},
	{0x02040400, 0, 0, "34FF: Disk device format in progress"},
	{0x02040C00, 0, 0, "Logical unit not accessible, target port in unavailable state"},
	{0x02048000, 0, IPR_DEFAULT_LOG_LEVEL, "9070: IOA requested reset"},
	{0x023F0000, 0, 0, "Synchronization required"},
	{0x02408500, 0, 0, "IOA microcode download required"},
	{0x02408600, 0, 0, "Device bus connection is prohibited by host"},
	{0x024E0000, 0, 0, "No ready, IOA shutdown"},
	{0x025A0000, 0, 0, "Not ready, IOA has been shutdown"},
	{0x02670100, 0, IPR_DEFAULT_LOG_LEVEL, "3020: Storage subsystem configuration error"},
	{0x03110B00, 0, 0, "FFF5: Medium error, data unreadable, recommend reassign"},
	{0x03110C00, 0, 0, "7000: Medium error, data unreadable, do not reassign"},
	{0x03310000, 0, IPR_DEFAULT_LOG_LEVEL, "FFF3: Disk media format bad"},
	{0x04050000, 0, IPR_DEFAULT_LOG_LEVEL, "3002: Addressed device failed to respond to selection"},
	{0x04080000, 1, IPR_DEFAULT_LOG_LEVEL, "3100: Device bus error"},
	{0x04080100, 0, IPR_DEFAULT_LOG_LEVEL, "3109: IOA timed out a device command"},
	{0x04088000, 0, 0, "3120: SCSI bus is not operational"},
	{0x04088100, 0, IPR_DEFAULT_LOG_LEVEL, "4100: Hard device bus fabric error"},
	{0x04100100, 0, IPR_DEFAULT_LOG_LEVEL, "310C: Logical block guard error detected by the device"},
	{0x04100300, 0, IPR_DEFAULT_LOG_LEVEL, "310C: Logical block reference tag error detected by the device"},
	{0x04108300, 1, IPR_DEFAULT_LOG_LEVEL, "4170: Scatter list tag / sequence number error"},
	{0x04109000, 1, IPR_DEFAULT_LOG_LEVEL, "8150: Logical block CRC error on IOA to Host transfer"},
	{0x04109200, 1, IPR_DEFAULT_LOG_LEVEL, "4170: Logical block sequence number error on IOA to Host transfer"},
	{0x0410A000, 0, IPR_DEFAULT_LOG_LEVEL, "310D: Logical block reference tag error detected by the IOA"},
	{0x0410A100, 0, IPR_DEFAULT_LOG_LEVEL, "310D: Logical block guard error detected by the IOA"},
	{0x04118000, 0, IPR_DEFAULT_LOG_LEVEL, "9000: IOA reserved area data check"},
	{0x04118100, 0, IPR_DEFAULT_LOG_LEVEL, "9001: IOA reserved area invalid data pattern"},
	{0x04118200, 0, IPR_DEFAULT_LOG_LEVEL, "9002: IOA reserved area LRC error"},
	{0x04118300, 1, IPR_DEFAULT_LOG_LEVEL, "Hardware Error, IOA metadata access error"},
	{0x04320000, 0, IPR_DEFAULT_LOG_LEVEL, "102E: Out of alternate sectors for disk storage"},
	{0x04330000, 1, IPR_DEFAULT_LOG_LEVEL, "FFF4: Data transfer underlength error"},
	{0x04338000, 1, IPR_DEFAULT_LOG_LEVEL, "FFF4: Data transfer overlength error"},
	{0x043E0100, 0, IPR_DEFAULT_LOG_LEVEL, "3400: Logical unit failure"},
	{0x04408500, 0, IPR_DEFAULT_LOG_LEVEL, "FFF4: Device microcode is corrupt"},
	{0x04418000, 1, IPR_DEFAULT_LOG_LEVEL, "8150: PCI bus error"},
	{0x04430000, 1, 0, "Unsupported device bus message received"},
	{0x04440000, 1, IPR_DEFAULT_LOG_LEVEL, "FFF4: Disk device problem"},
	{0x04448200, 1, IPR_DEFAULT_LOG_LEVEL, "8150: Permanent IOA failure"},
	{0x04448300, 0, IPR_DEFAULT_LOG_LEVEL, "3010: Disk device returned wrong response to IOA"},
	{0x04448400, 0, IPR_DEFAULT_LOG_LEVEL, "8151: IOA microcode error"},
	{0x04448500, 0, 0, "Device bus status error"},
	{0x04448600, 0, IPR_DEFAULT_LOG_LEVEL, "8157: IOA error requiring IOA reset to recover"},
	{0x04448700, 0, 0, "ATA device status error"},
	{0x04490000, 0, 0, "Message reject received from the device"},
	{0x04449200, 0, IPR_DEFAULT_LOG_LEVEL, "8008: A permanent cache battery pack failure occurred"},
	{0x0444A000, 0, IPR_DEFAULT_LOG_LEVEL, "9090: Disk unit has been modified after the last known status"},
	{0x0444A200, 0, IPR_DEFAULT_LOG_LEVEL, "9081: IOA detected device error"},
	{0x0444A300, 0, IPR_DEFAULT_LOG_LEVEL, "9082: IOA detected device error"},
	{0x044A0000, 1, IPR_DEFAULT_LOG_LEVEL, "3110: Device bus error, message or command phase"},
	{0x044A8000, 1, IPR_DEFAULT_LOG_LEVEL, "3110: SAS Command / Task Management Function failed"},
	{0x04670400, 0, IPR_DEFAULT_LOG_LEVEL, "9091: Incorrect hardware configuration change has been detected"},
	{0x04678000, 0, IPR_DEFAULT_LOG_LEVEL, "9073: Invalid multi-adapter configuration"},
	{0x04678100, 0, IPR_DEFAULT_LOG_LEVEL, "4010: Incorrect connection between cascaded expanders"},
	{0x04678200, 0, IPR_DEFAULT_LOG_LEVEL, "4020: Connections exceed IOA design limits"},
	{0x04678300, 0, IPR_DEFAULT_LOG_LEVEL, "4030: Incorrect multipath connection"},
	{0x04679000, 0, IPR_DEFAULT_LOG_LEVEL, "4110: Unsupported enclosure function"},
	{0x04679800, 0, IPR_DEFAULT_LOG_LEVEL, "4120: SAS cable VPD cannot be read"},
	{0x046E0000, 0, IPR_DEFAULT_LOG_LEVEL, "FFF4: Command to logical unit failed"},
	{0x05240000, 1, 0, "Illegal request, invalid request type or request packet"},
	{0x05250000, 0, 0, "Illegal request, invalid resource handle"},
	{0x05258000, 0, 0, "Illegal request, commands not allowed to this device"},
	{0x05258100, 0, 0, "Illegal request, command not allowed to a secondary adapter"},
	{0x05258200, 0, 0, "Illegal request, command not allowed to a non-optimized resource"},
	{0x05260000, 0, 0, "Illegal request, invalid field in parameter list"},
	{0x05260100, 0, 0, "Illegal request, parameter not supported"},
	{0x05260200, 0, 0, "Illegal request, parameter value invalid"},
	{0x052C0000, 0, 0, "Illegal request, command sequence error"},
	{0x052C8000, 1, 0, "Illegal request, dual adapter support not enabled"},
	{0x052C8100, 1, 0, "Illegal request, another cable connector was physically disabled"},
	{0x054E8000, 1, 0, "Illegal request, inconsistent group id/group count"},
	{0x06040500, 0, IPR_DEFAULT_LOG_LEVEL, "9031: Array protection temporarily suspended, protection resuming"},
	{0x06040600, 0, IPR_DEFAULT_LOG_LEVEL, "9040: Array protection temporarily suspended, protection resuming"},
	{0x060B0100, 0, IPR_DEFAULT_LOG_LEVEL, "4080: IOA exceeded maximum operating temperature"},
	{0x060B8000, 0, IPR_DEFAULT_LOG_LEVEL, "4085: Service required"},
	{0x060B8100, 0, IPR_DEFAULT_LOG_LEVEL, "4086: SAS Adapter Hardware Configuration Error"},
	{0x06288000, 0, IPR_DEFAULT_LOG_LEVEL, "3140: Device bus not ready to ready transition"},
	{0x06290000, 0, IPR_DEFAULT_LOG_LEVEL, "FFFB: SCSI bus was reset"},
	{0x06290500, 0, 0, "FFFE: SCSI bus transition to single ended"},
	{0x06290600, 0, 0, "FFFE: SCSI bus transition to LVD"},
	{0x06298000, 0, IPR_DEFAULT_LOG_LEVEL, "FFFB: SCSI bus was reset by another initiator"},
	{0x063F0300, 0, IPR_DEFAULT_LOG_LEVEL, "3029: A device replacement has occurred"},
	{0x063F8300, 0, IPR_DEFAULT_LOG_LEVEL, "4102: Device bus fabric performance degradation"},
	{0x064C8000, 0, IPR_DEFAULT_LOG_LEVEL, "9051: IOA cache data exists for a missing or failed device"},
	{0x064C8100, 0, IPR_DEFAULT_LOG_LEVEL, "9055: Auxiliary cache IOA contains cache data needed by the primary IOA"},
	{0x06670100, 0, IPR_DEFAULT_LOG_LEVEL, "9025: Disk unit is not supported at its physical location"},
	{0x06670600, 0, IPR_DEFAULT_LOG_LEVEL, "3020: IOA detected a SCSI bus configuration error"},
	{0x06678000, 0, IPR_DEFAULT_LOG_LEVEL, "3150: SCSI bus configuration error"},
	{0x06678100, 0, IPR_DEFAULT_LOG_LEVEL, "9074: Asymmetric advanced function disk configuration"},
	{0x06678300, 0, IPR_DEFAULT_LOG_LEVEL, "4040: Incomplete multipath connection between IOA and enclosure"},
	{0x06678400, 0, IPR_DEFAULT_LOG_LEVEL, "4041: Incomplete multipath connection between enclosure and device"},
	{0x06678500, 0, IPR_DEFAULT_LOG_LEVEL, "9075: Incomplete multipath connection between IOA and remote IOA"},
	{0x06678600, 0, IPR_DEFAULT_LOG_LEVEL, "9076: Configuration error, missing remote IOA"},
	{0x06679100, 0, IPR_DEFAULT_LOG_LEVEL, "4050: Enclosure does not support a required multipath function"},
	{0x06679800, 0, IPR_DEFAULT_LOG_LEVEL, "4121: Configuration error, required cable is missing"},
	{0x06679900, 0, IPR_DEFAULT_LOG_LEVEL, "4122: Cable is not plugged into the correct location on remote IOA"},
	{0x06679A00, 0, IPR_DEFAULT_LOG_LEVEL, "4123: Configuration error, invalid cable vital product data"},
	{0x06679B00, 0, IPR_DEFAULT_LOG_LEVEL, "4124: Configuration error, both cable ends are plugged into the same IOA"},
	{0x06690000, 0, IPR_DEFAULT_LOG_LEVEL, "4070: Logically bad block written on device"},
	{0x06690200, 0, IPR_DEFAULT_LOG_LEVEL, "9041: Array protection temporarily suspended"},
	{0x06698200, 0, IPR_DEFAULT_LOG_LEVEL, "9042: Corrupt array parity detected on specified device"},
	{0x066B0200, 0, IPR_DEFAULT_LOG_LEVEL, "9030: Array no longer protected due to missing or failed disk unit"},
	{0x066B8000, 0, IPR_DEFAULT_LOG_LEVEL, "9071: Link operational transition"},
	{0x066B8100, 0, IPR_DEFAULT_LOG_LEVEL, "9072: Link not operational transition"},
	{0x066B8200, 0, IPR_DEFAULT_LOG_LEVEL, "9032: Array exposed but still protected"},
	{0x066B8300, 0, IPR_DEBUG_LOG_LEVEL, "70DD: Device forced failed by disrupt device command"},
	{0x066B9100, 0, IPR_DEFAULT_LOG_LEVEL, "4061: Multipath redundancy level got better"},
	{0x066B9200, 0, IPR_DEFAULT_LOG_LEVEL, "4060: Multipath redundancy level got worse"},
	{0x06808100, 0, IPR_DEBUG_LOG_LEVEL, "9083: Device raw mode enabled"},
	{0x06808200, 0, IPR_DEBUG_LOG_LEVEL, "9084: Device raw mode disabled"},
	{0x07270000, 0, 0, "Failure due to other device"},
	{0x07278000, 0, IPR_DEFAULT_LOG_LEVEL, "9008: IOA does not support functions expected by devices"},
	{0x07278100, 0, IPR_DEFAULT_LOG_LEVEL, "9010: Cache data associated with attached devices cannot be found"},
	{0x07278200, 0, IPR_DEFAULT_LOG_LEVEL, "9011: Cache data belongs to devices other than those attached"},
	{0x07278400, 0, IPR_DEFAULT_LOG_LEVEL, "9020: Array missing 2 or more devices with only 1 device present"},
	{0x07278500, 0, IPR_DEFAULT_LOG_LEVEL, "9021: Array missing 2 or more devices with 2 or more devices present"},
	{0x07278600, 0, IPR_DEFAULT_LOG_LEVEL, "9022: Exposed array is missing a required device"},
	{0x07278700, 0, IPR_DEFAULT_LOG_LEVEL, "9023: Array member(s) not at required physical locations"},
	{0x07278800, 0, IPR_DEFAULT_LOG_LEVEL, "9024: Array not functional due to present hardware configuration"},
	{0x07278900, 0, IPR_DEFAULT_LOG_LEVEL, "9026: Array not functional due to present hardware configuration"},
	{0x07278A00, 0, IPR_DEFAULT_LOG_LEVEL, "9027: Array is missing a device and parity is out of sync"},
	{0x07278B00, 0, IPR_DEFAULT_LOG_LEVEL, "9028: Maximum number of arrays already exist"},
	{0x07278C00, 0, IPR_DEFAULT_LOG_LEVEL, "9050: Required cache data cannot be located for a disk unit"},
	{0x07278D00, 0, IPR_DEFAULT_LOG_LEVEL, "9052: Cache data exists for a device that has been modified"},
	{0x07278F00, 0, IPR_DEFAULT_LOG_LEVEL, "9054: IOA resources not available due to previous problems"},
	{0x07279100, 0, IPR_DEFAULT_LOG_LEVEL, "9092: Disk unit requires initialization before use"},
	{0x07279200, 0, IPR_DEFAULT_LOG_LEVEL, "9029: Incorrect hardware configuration change has been detected"},
	{0x07279600, 0, IPR_DEFAULT_LOG_LEVEL, "9060: One or more disk pairs are missing from an array"},
	{0x07279700, 0, IPR_DEFAULT_LOG_LEVEL, "9061: One or more disks are missing from an array"},
	{0x07279800, 0, IPR_DEFAULT_LOG_LEVEL, "9062: One or more disks are missing from an array"},
	{0x07279900, 0, IPR_DEFAULT_LOG_LEVEL, "9063: Maximum number of functional arrays has been exceeded"},
	{0x07279A00, 0, 0, "Data protect, other volume set problem"},
	{0x0B260000, 0, 0, "Aborted command, invalid descriptor"},
	{0x0B3F9000, 0, 0, "Target operating conditions have changed, dual adapter takeover"},
	{0x0B530200, 0, 0, "Aborted command, medium removal prevented"},
	{0x0B5A0000, 0, 0, "Command terminated by host"},
	{0x0B5B8000, 0, 0, "Aborted command, command terminated by host"}
};

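/*
 * Note: each entry above pairs an IOASC with a logging flag and a log
 * level (the exact field names are defined by struct ipr_error_table_t in
 * ipr.h). Entries whose level is 0 are matched but not normally printed;
 * IPR_DEFAULT_LOG_LEVEL and IPR_DEBUG_LOG_LEVEL entries are reported
 * according to the log_level module parameter.
 */
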
static const struct ipr_ses_table_entry ipr_ses_table[] = {
	{ "2104-DL1        ", "XXXXXXXXXXXXXXXX", 80 },
	{ "2104-TL1        ", "XXXXXXXXXXXXXXXX", 80 },
	{ "HSBP07M P U2SCSI", "XXXXXXXXXXXXXXXX", 80 }, /* Hidive 7 slot */
	{ "HSBP05M P U2SCSI", "XXXXXXXXXXXXXXXX", 80 }, /* Hidive 5 slot */
	{ "HSBP05M S U2SCSI", "XXXXXXXXXXXXXXXX", 80 }, /* Bowtie */
	{ "HSBP06E ASU2SCSI", "XXXXXXXXXXXXXXXX", 80 }, /* MartinFenning */
	{ "2104-DU3        ", "XXXXXXXXXXXXXXXX", 160 },
	{ "2104-TU3        ", "XXXXXXXXXXXXXXXX", 160 },
	{ "HSBP04C RSU2SCSI", "XXXXXXX*XXXXXXXX", 160 },
	{ "HSBP06E RSU2SCSI", "XXXXXXX*XXXXXXXX", 160 },
	{ "St  V1S2        ", "XXXXXXXXXXXXXXXX", 160 },
	{ "HSBPD4M  PU3SCSI", "XXXXXXX*XXXXXXXX", 160 },
	{ "VSBPD1H   U3SCSI", "XXXXXXX*XXXXXXXX", 160 }
};

/*
 *  Function Prototypes
 */
static int ipr_reset_alert(struct ipr_cmnd *);
static void ipr_process_ccn(struct ipr_cmnd *);
static void ipr_process_error(struct ipr_cmnd *);
static void ipr_reset_ioa_job(struct ipr_cmnd *);
static void ipr_initiate_ioa_reset(struct ipr_ioa_cfg *,
				   enum ipr_shutdown_type);

#ifdef CONFIG_SCSI_IPR_TRACE
/**
 * ipr_trc_hook - Add a trace entry to the driver trace
 * @ipr_cmd:	ipr command struct
 * @type:	trace type
 * @add_data:	additional data
 *
 * Return value:
 *	none
 **/
static void ipr_trc_hook(struct ipr_cmnd *ipr_cmd,
			 u8 type, u32 add_data)
{
	struct ipr_trace_entry *trace_entry;
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
	unsigned int trace_index;

	trace_index = atomic_add_return(1, &ioa_cfg->trace_index) & IPR_TRACE_INDEX_MASK;
	trace_entry = &ioa_cfg->trace[trace_index];
	trace_entry->time = jiffies;
	trace_entry->op_code = ipr_cmd->ioarcb.cmd_pkt.cdb[0];
	trace_entry->type = type;
	if (ipr_cmd->ioa_cfg->sis64)
		trace_entry->ata_op_code = ipr_cmd->i.ata_ioadl.regs.command;
	else
		trace_entry->ata_op_code = ipr_cmd->ioarcb.u.add_data.u.regs.command;
	trace_entry->cmd_index = ipr_cmd->cmd_index & 0xff;
	trace_entry->res_handle = ipr_cmd->ioarcb.res_handle;
	trace_entry->u.add_data = add_data;
	wmb();
}
#else
#define ipr_trc_hook(ipr_cmd, type, add_data) do { } while (0)
#endif

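/*
 * Implementation note (a sketch, not driver code): the trace hook above is
 * lock-free. atomic_add_return() reserves a unique slot for each caller and
 * IPR_TRACE_INDEX_MASK wraps the index, so the trace behaves as a circular
 * buffer whose size must be a power of two:
 *
 *	idx = atomic_add_return(1, &counter) & (size - 1);
 *	ring[idx] = entry;
 *
 * The trailing wmb() orders the entry stores before any later reuse of
 * the slot by another CPU.
 */
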
/**
 * ipr_lock_and_done - Acquire lock and complete command
 * @ipr_cmd:	ipr command struct
 *
 * Return value:
 *	none
 **/
static void ipr_lock_and_done(struct ipr_cmnd *ipr_cmd)
{
	unsigned long lock_flags;
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;

	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	ipr_cmd->done(ipr_cmd);
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
}

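/*
 * Rationale (an interpretation of the code above): completion paths that
 * run without the Scsi_Host lock invoke the fast_done callback, so
 * ipr_lock_and_done lets them call a ->done routine that expects
 * host_lock to be held by bracketing the call with
 * spin_lock_irqsave()/spin_unlock_irqrestore() on that lock.
 */
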
/**
 * ipr_reinit_ipr_cmnd - Re-initialize an IPR Cmnd block for reuse
 * @ipr_cmd:	ipr command struct
 *
 * Return value:
 *	none
 **/
static void ipr_reinit_ipr_cmnd(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
	struct ipr_ioasa *ioasa = &ipr_cmd->s.ioasa;
	struct ipr_ioasa64 *ioasa64 = &ipr_cmd->s.ioasa64;
	dma_addr_t dma_addr = ipr_cmd->dma_addr;
	int hrrq_id;

	hrrq_id = ioarcb->cmd_pkt.hrrq_id;
	memset(&ioarcb->cmd_pkt, 0, sizeof(struct ipr_cmd_pkt));
	ioarcb->cmd_pkt.hrrq_id = hrrq_id;
	ioarcb->data_transfer_length = 0;
	ioarcb->read_data_transfer_length = 0;
	ioarcb->ioadl_len = 0;
	ioarcb->read_ioadl_len = 0;

	if (ipr_cmd->ioa_cfg->sis64) {
		ioarcb->u.sis64_addr_data.data_ioadl_addr =
			cpu_to_be64(dma_addr + offsetof(struct ipr_cmnd, i.ioadl64));
		ioasa64->u.gata.status = 0;
	} else {
		ioarcb->write_ioadl_addr =
			cpu_to_be32(dma_addr + offsetof(struct ipr_cmnd, i.ioadl));
		ioarcb->read_ioadl_addr = ioarcb->write_ioadl_addr;
		ioasa->u.gata.status = 0;
	}

	ioasa->hdr.ioasc = 0;
	ioasa->hdr.residual_data_len = 0;
	ipr_cmd->scsi_cmd = NULL;
	ipr_cmd->qc = NULL;
	ipr_cmd->sense_buffer[0] = 0;
	ipr_cmd->dma_use_sg = 0;
}

/**
 * ipr_init_ipr_cmnd - Initialize an IPR Cmnd block
 * @ipr_cmd:	ipr command struct
 * @fast_done:	fast done function call-back
 *
 * Return value:
 *	none
 **/
static void ipr_init_ipr_cmnd(struct ipr_cmnd *ipr_cmd,
			      void (*fast_done) (struct ipr_cmnd *))
{
	ipr_reinit_ipr_cmnd(ipr_cmd);
	ipr_cmd->u.scratch = 0;
	ipr_cmd->sibling = NULL;
	ipr_cmd->eh_comp = NULL;
	ipr_cmd->fast_done = fast_done;
	timer_setup(&ipr_cmd->timer, NULL, 0);
}

/**
 * __ipr_get_free_ipr_cmnd - Get a free IPR Cmnd block
 * @hrrq:	hrr queue
 *
 * Return value:
 *	pointer to ipr command struct
 **/
static
struct ipr_cmnd *__ipr_get_free_ipr_cmnd(struct ipr_hrr_queue *hrrq)
{
	struct ipr_cmnd *ipr_cmd = NULL;

	if (likely(!list_empty(&hrrq->hrrq_free_q))) {
		ipr_cmd = list_entry(hrrq->hrrq_free_q.next,
			struct ipr_cmnd, queue);
		list_del(&ipr_cmd->queue);
	}

	return ipr_cmd;
}

/**
 * ipr_get_free_ipr_cmnd - Get a free IPR Cmnd block and initialize it
 * @ioa_cfg:	ioa config struct
 *
 * Return value:
 *	pointer to ipr command struct
 **/
static
struct ipr_cmnd *ipr_get_free_ipr_cmnd(struct ipr_ioa_cfg *ioa_cfg)
{
	struct ipr_cmnd *ipr_cmd =
		__ipr_get_free_ipr_cmnd(&ioa_cfg->hrrq[IPR_INIT_HRRQ]);
	ipr_init_ipr_cmnd(ipr_cmd, ipr_lock_and_done);
	return ipr_cmd;
}

/**
 * ipr_mask_and_clear_interrupts - Mask all and clear specified interrupts
 * @ioa_cfg:	ioa config struct
 * @clr_ints:	interrupts to clear
 *
 * This function masks all interrupts on the adapter, then clears the
 * interrupts specified in the mask
 *
 * Return value:
 *	none
 **/
static void ipr_mask_and_clear_interrupts(struct ipr_ioa_cfg *ioa_cfg,
					  u32 clr_ints)
{
	volatile u32 int_reg;
	int i;

	/* Stop new interrupts */
	for (i = 0; i < ioa_cfg->hrrq_num; i++) {
		spin_lock(&ioa_cfg->hrrq[i]._lock);
		ioa_cfg->hrrq[i].allow_interrupts = 0;
		spin_unlock(&ioa_cfg->hrrq[i]._lock);
	}

	/* Set interrupt mask to stop all new interrupts */
	if (ioa_cfg->sis64)
		writeq(~0, ioa_cfg->regs.set_interrupt_mask_reg);
	else
		writel(~0, ioa_cfg->regs.set_interrupt_mask_reg);

	/* Clear any pending interrupts */
	if (ioa_cfg->sis64)
		writel(~0, ioa_cfg->regs.clr_interrupt_reg);
	writel(clr_ints, ioa_cfg->regs.clr_interrupt_reg32);
	int_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
}

/**
 * ipr_save_pcix_cmd_reg - Save PCI-X command register
 * @ioa_cfg:	ioa config struct
 *
 * Return value:
 *	0 on success / -EIO on failure
 **/
static int ipr_save_pcix_cmd_reg(struct ipr_ioa_cfg *ioa_cfg)
{
	int pcix_cmd_reg = pci_find_capability(ioa_cfg->pdev, PCI_CAP_ID_PCIX);

	if (pcix_cmd_reg == 0)
		return 0;

	if (pci_read_config_word(ioa_cfg->pdev, pcix_cmd_reg + PCI_X_CMD,
				 &ioa_cfg->saved_pcix_cmd_reg) != PCIBIOS_SUCCESSFUL) {
		dev_err(&ioa_cfg->pdev->dev, "Failed to save PCI-X command register\n");
		return -EIO;
	}

	ioa_cfg->saved_pcix_cmd_reg |= PCI_X_CMD_DPERR_E | PCI_X_CMD_ERO;
	return 0;
}

/**
 * ipr_set_pcix_cmd_reg - Setup PCI-X command register
 * @ioa_cfg:	ioa config struct
 *
 * Return value:
 *	0 on success / -EIO on failure
 **/
static int ipr_set_pcix_cmd_reg(struct ipr_ioa_cfg *ioa_cfg)
{
	int pcix_cmd_reg = pci_find_capability(ioa_cfg->pdev, PCI_CAP_ID_PCIX);

	if (pcix_cmd_reg) {
		if (pci_write_config_word(ioa_cfg->pdev, pcix_cmd_reg + PCI_X_CMD,
					  ioa_cfg->saved_pcix_cmd_reg) != PCIBIOS_SUCCESSFUL) {
			dev_err(&ioa_cfg->pdev->dev, "Failed to setup PCI-X command register\n");
			return -EIO;
		}
	}

	return 0;
}

/**
 * __ipr_sata_eh_done - done function for aborted SATA commands
 * @ipr_cmd:	ipr command struct
 *
 * This function is invoked for ops generated to SATA
 * devices which are being aborted.
 *
 * Return value:
 *	none
 **/
static void __ipr_sata_eh_done(struct ipr_cmnd *ipr_cmd)
{
	struct ata_queued_cmd *qc = ipr_cmd->qc;
	struct ipr_sata_port *sata_port = qc->ap->private_data;

	qc->err_mask |= AC_ERR_OTHER;
	sata_port->ioasa.status |= ATA_BUSY;
	ata_qc_complete(qc);
	if (ipr_cmd->eh_comp)
		complete(ipr_cmd->eh_comp);
	list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
}

/**
 * ipr_sata_eh_done - done function for aborted SATA commands
 * @ipr_cmd:	ipr command struct
 *
 * This function is invoked for ops generated to SATA
 * devices which are being aborted.
 *
 * Return value:
 *	none
 **/
static void ipr_sata_eh_done(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_hrr_queue *hrrq = ipr_cmd->hrrq;
	unsigned long hrrq_flags;

	spin_lock_irqsave(&hrrq->_lock, hrrq_flags);
	__ipr_sata_eh_done(ipr_cmd);
	spin_unlock_irqrestore(&hrrq->_lock, hrrq_flags);
}

/**
 * __ipr_scsi_eh_done - mid-layer done function for aborted ops
 * @ipr_cmd:	ipr command struct
 *
 * This function is invoked by the interrupt handler for
 * ops generated by the SCSI mid-layer which are being aborted.
 *
 * Return value:
 *	none
 **/
static void __ipr_scsi_eh_done(struct ipr_cmnd *ipr_cmd)
{
	struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;

	scsi_cmd->result |= (DID_ERROR << 16);

	scsi_dma_unmap(ipr_cmd->scsi_cmd);
	scsi_cmd->scsi_done(scsi_cmd);
	if (ipr_cmd->eh_comp)
		complete(ipr_cmd->eh_comp);
	list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
}

/**
 * ipr_scsi_eh_done - mid-layer done function for aborted ops
 * @ipr_cmd:	ipr command struct
 *
 * This function is invoked by the interrupt handler for
 * ops generated by the SCSI mid-layer which are being aborted.
 *
 * Return value:
 *	none
 **/
static void ipr_scsi_eh_done(struct ipr_cmnd *ipr_cmd)
{
	unsigned long hrrq_flags;
	struct ipr_hrr_queue *hrrq = ipr_cmd->hrrq;

	spin_lock_irqsave(&hrrq->_lock, hrrq_flags);
	__ipr_scsi_eh_done(ipr_cmd);
	spin_unlock_irqrestore(&hrrq->_lock, hrrq_flags);
}

/**
 * ipr_fail_all_ops - Fails all outstanding ops.
 * @ioa_cfg:	ioa config struct
 *
 * This function fails all outstanding ops.
 *
 * Return value:
 *	none
 **/
static void ipr_fail_all_ops(struct ipr_ioa_cfg *ioa_cfg)
{
	struct ipr_cmnd *ipr_cmd, *temp;
	struct ipr_hrr_queue *hrrq;

	ENTER;
	for_each_hrrq(hrrq, ioa_cfg) {
		spin_lock(&hrrq->_lock);
		list_for_each_entry_safe(ipr_cmd,
					temp, &hrrq->hrrq_pending_q, queue) {
			list_del(&ipr_cmd->queue);

			ipr_cmd->s.ioasa.hdr.ioasc =
				cpu_to_be32(IPR_IOASC_IOA_WAS_RESET);
			ipr_cmd->s.ioasa.hdr.ilid =
				cpu_to_be32(IPR_DRIVER_ILID);

			if (ipr_cmd->scsi_cmd)
				ipr_cmd->done = __ipr_scsi_eh_done;
			else if (ipr_cmd->qc)
				ipr_cmd->done = __ipr_sata_eh_done;

			ipr_trc_hook(ipr_cmd, IPR_TRACE_FINISH,
				     IPR_IOASC_IOA_WAS_RESET);
			del_timer(&ipr_cmd->timer);
			ipr_cmd->done(ipr_cmd);
		}
		spin_unlock(&hrrq->_lock);
	}
	LEAVE;
}

/**
 * ipr_send_command -  Send driver initiated requests.
 * @ipr_cmd:	ipr command struct
 *
 * This function sends a command to the adapter using the correct write call.
 * In the case of sis64, calculate the ioarcb size required. Then or in the
 * appropriate bits.
 *
 * Return value:
 *	none
 **/
static void ipr_send_command(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
	dma_addr_t send_dma_addr = ipr_cmd->dma_addr;

	if (ioa_cfg->sis64) {
		/* The default size is 256 bytes */
		send_dma_addr |= 0x1;

		/* If the number of ioadls * size of ioadl > 128 bytes,
		   then use a 512 byte ioarcb */
		if (ipr_cmd->dma_use_sg * sizeof(struct ipr_ioadl64_desc) > 128)
			send_dma_addr |= 0x4;
		writeq(send_dma_addr, ioa_cfg->regs.ioarrin_reg);
	} else
		writel(send_dma_addr, ioa_cfg->regs.ioarrin_reg);
}

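/*
 * Note: on SIS64 adapters the IOARCB is aligned such that the low-order
 * bits of its DMA address are always zero, which is presumably why the
 * hardware can reuse them as flags here -- bit 0 selects the 256 byte
 * IOARCB format and bit 2 requests the 512 byte format when the scatter
 * list is large.
 */
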
/**
 * ipr_do_req -  Send driver initiated requests.
 * @ipr_cmd:		ipr command struct
 * @done:		done function
 * @timeout_func:	timeout function
 * @timeout:		timeout value
 *
 * This function sends the specified command to the adapter with the
 * timeout given. The done function is invoked on command completion.
 *
 * Return value:
 *	none
 **/
static void ipr_do_req(struct ipr_cmnd *ipr_cmd,
		       void (*done) (struct ipr_cmnd *),
		       void (*timeout_func) (struct timer_list *), u32 timeout)
{
	list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_pending_q);

	ipr_cmd->done = done;

	ipr_cmd->timer.expires = jiffies + timeout;
	ipr_cmd->timer.function = timeout_func;

	add_timer(&ipr_cmd->timer);

	ipr_trc_hook(ipr_cmd, IPR_TRACE_START, 0);

	ipr_send_command(ipr_cmd);
}

/**
 * ipr_internal_cmd_done - Op done function for an internally generated op.
 * @ipr_cmd:	ipr command struct
 *
 * This function is the op done function for an internally generated,
 * blocking op. It simply wakes the sleeping thread.
 *
 * Return value:
 *	none
 **/
static void ipr_internal_cmd_done(struct ipr_cmnd *ipr_cmd)
{
	if (ipr_cmd->sibling)
		ipr_cmd->sibling = NULL;
	else
		complete(&ipr_cmd->completion);
}

/**
 * ipr_init_ioadl - initialize the ioadl for the correct SIS type
 * @ipr_cmd:	ipr command struct
 * @dma_addr:	dma address
 * @len:	transfer length
 * @flags:	ioadl flag value
 *
 * This function initializes an ioadl in the case where there is only a single
 * descriptor.
 *
 * Return value:
 *	nothing
 **/
static void ipr_init_ioadl(struct ipr_cmnd *ipr_cmd, dma_addr_t dma_addr,
			   u32 len, int flags)
{
	struct ipr_ioadl_desc *ioadl = ipr_cmd->i.ioadl;
	struct ipr_ioadl64_desc *ioadl64 = ipr_cmd->i.ioadl64;

	ipr_cmd->dma_use_sg = 1;

	if (ipr_cmd->ioa_cfg->sis64) {
		ioadl64->flags = cpu_to_be32(flags);
		ioadl64->data_len = cpu_to_be32(len);
		ioadl64->address = cpu_to_be64(dma_addr);

		ipr_cmd->ioarcb.ioadl_len =
			cpu_to_be32(sizeof(struct ipr_ioadl64_desc));
		ipr_cmd->ioarcb.data_transfer_length = cpu_to_be32(len);
	} else {
		ioadl->flags_and_data_len = cpu_to_be32(flags | len);
		ioadl->address = cpu_to_be32(dma_addr);

		if (flags == IPR_IOADL_FLAGS_READ_LAST) {
			ipr_cmd->ioarcb.read_ioadl_len =
				cpu_to_be32(sizeof(struct ipr_ioadl_desc));
			ipr_cmd->ioarcb.read_data_transfer_length = cpu_to_be32(len);
		} else {
			ipr_cmd->ioarcb.ioadl_len =
				cpu_to_be32(sizeof(struct ipr_ioadl_desc));
			ipr_cmd->ioarcb.data_transfer_length = cpu_to_be32(len);
		}
	}
}

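/*
 * The IOADL (IOA data list) is the adapter's scatter/gather descriptor
 * format, and ipr_init_ioadl() covers the common single-descriptor case
 * used by internal commands. For example, ipr_send_hcam() below maps the
 * whole hostrcb buffer with one READ_LAST descriptor:
 *
 *	ipr_init_ioadl(ipr_cmd, hostrcb->hostrcb_dma,
 *		       sizeof(hostrcb->hcam), IPR_IOADL_FLAGS_READ_LAST);
 */
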
/**
 * ipr_send_blocking_cmd - Send command and sleep on its completion.
 * @ipr_cmd:	ipr command struct
 * @timeout_func:	function to invoke if command times out
 * @timeout:	timeout
 *
 * Return value:
 *	none
 **/
static void ipr_send_blocking_cmd(struct ipr_cmnd *ipr_cmd,
				  void (*timeout_func) (struct timer_list *),
				  u32 timeout)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;

	init_completion(&ipr_cmd->completion);
	ipr_do_req(ipr_cmd, ipr_internal_cmd_done, timeout_func, timeout);

	spin_unlock_irq(ioa_cfg->host->host_lock);
	wait_for_completion(&ipr_cmd->completion);
	spin_lock_irq(ioa_cfg->host->host_lock);
}

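/**
 * ipr_get_hrrq_index - Pick an HRR queue for a new command
 * @ioa_cfg:	ioa config struct
 *
 * Queue 0 (IPR_INIT_HRRQ) is used for the driver's internal commands
 * (see ipr_get_free_ipr_cmnd() above), so when multiple HRR queues are
 * available the remaining queues are assigned round-robin from an
 * atomic counter.
 *
 * Return value:
 *	hrrq index
 **/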
static int ipr_get_hrrq_index(struct ipr_ioa_cfg *ioa_cfg)
{
	unsigned int hrrq;

	if (ioa_cfg->hrrq_num == 1)
		hrrq = 0;
	else {
		hrrq = atomic_add_return(1, &ioa_cfg->hrrq_index);
		hrrq = (hrrq % (ioa_cfg->hrrq_num - 1)) + 1;
	}
	return hrrq;
}

/**
 * ipr_send_hcam - Send an HCAM to the adapter.
 * @ioa_cfg:	ioa config struct
 * @type:	HCAM type
 * @hostrcb:	hostrcb struct
 *
 * This function will send a Host Controlled Async command to the adapter.
 * If HCAMs are currently not allowed to be issued to the adapter, it will
 * place the hostrcb on the free queue.
 *
 * Return value:
 *	none
 **/
static void ipr_send_hcam(struct ipr_ioa_cfg *ioa_cfg, u8 type,
			  struct ipr_hostrcb *hostrcb)
{
	struct ipr_cmnd *ipr_cmd;
	struct ipr_ioarcb *ioarcb;

	if (ioa_cfg->hrrq[IPR_INIT_HRRQ].allow_cmds) {
		ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
		list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_pending_q);
		list_add_tail(&hostrcb->queue, &ioa_cfg->hostrcb_pending_q);

		ipr_cmd->u.hostrcb = hostrcb;
		ioarcb = &ipr_cmd->ioarcb;

		ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
		ioarcb->cmd_pkt.request_type = IPR_RQTYPE_HCAM;
		ioarcb->cmd_pkt.cdb[0] = IPR_HOST_CONTROLLED_ASYNC;
		ioarcb->cmd_pkt.cdb[1] = type;
		ioarcb->cmd_pkt.cdb[7] = (sizeof(hostrcb->hcam) >> 8) & 0xff;
		ioarcb->cmd_pkt.cdb[8] = sizeof(hostrcb->hcam) & 0xff;

		ipr_init_ioadl(ipr_cmd, hostrcb->hostrcb_dma,
			       sizeof(hostrcb->hcam), IPR_IOADL_FLAGS_READ_LAST);

		if (type == IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE)
			ipr_cmd->done = ipr_process_ccn;
		else
			ipr_cmd->done = ipr_process_error;

		ipr_trc_hook(ipr_cmd, IPR_TRACE_START, IPR_IOA_RES_ADDR);

		ipr_send_command(ipr_cmd);
	} else {
		list_add_tail(&hostrcb->queue, &ioa_cfg->hostrcb_free_q);
	}
}

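/*
 * Note: cdb[7] and cdb[8] above carry sizeof(hostrcb->hcam) as a
 * big-endian 16 bit length, telling the adapter how much HCAM data it
 * may DMA back into the host rcb buffer.
 */
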
/**
 * ipr_update_ata_class - Update the ata class in the resource entry
 * @res:	resource entry struct
 * @proto:	cfgte device bus protocol value
 *
 * Return value:
 *	none
 **/
static void ipr_update_ata_class(struct ipr_resource_entry *res, unsigned int proto)
{
	switch (proto) {
	case IPR_PROTO_SATA:
	case IPR_PROTO_SAS_STP:
		res->ata_class = ATA_DEV_ATA;
		break;
	case IPR_PROTO_SATA_ATAPI:
	case IPR_PROTO_SAS_STP_ATAPI:
		res->ata_class = ATA_DEV_ATAPI;
		break;
	default:
		res->ata_class = ATA_DEV_UNKNOWN;
		break;
	}
}

/**
 * ipr_init_res_entry - Initialize a resource entry struct.
 * @res:	resource entry struct
 * @cfgtew:	config table entry wrapper struct
 *
 * Return value:
 *	none
 **/
static void ipr_init_res_entry(struct ipr_resource_entry *res,
			       struct ipr_config_table_entry_wrapper *cfgtew)
{
	int found = 0;
	unsigned int proto;
	struct ipr_ioa_cfg *ioa_cfg = res->ioa_cfg;
	struct ipr_resource_entry *gscsi_res = NULL;

	res->needs_sync_complete = 0;
	res->in_erp = 0;
	res->add_to_ml = 0;
	res->del_from_ml = 0;
	res->resetting_device = 0;
	res->reset_occurred = 0;
	res->sdev = NULL;
	res->sata_port = NULL;

	if (ioa_cfg->sis64) {
		proto = cfgtew->u.cfgte64->proto;
		res->flags = be16_to_cpu(cfgtew->u.cfgte64->flags);
		res->res_flags = be16_to_cpu(cfgtew->u.cfgte64->res_flags);
		res->qmodel = IPR_QUEUEING_MODEL64(res);
		res->type = cfgtew->u.cfgte64->res_type;

		memcpy(res->res_path, &cfgtew->u.cfgte64->res_path,
			sizeof(res->res_path));

		res->bus = 0;
		memcpy(&res->dev_lun.scsi_lun, &cfgtew->u.cfgte64->lun,
			sizeof(res->dev_lun.scsi_lun));
		res->lun = scsilun_to_int(&res->dev_lun);

		if (res->type == IPR_RES_TYPE_GENERIC_SCSI) {
			list_for_each_entry(gscsi_res, &ioa_cfg->used_res_q, queue) {
				if (gscsi_res->dev_id == cfgtew->u.cfgte64->dev_id) {
					found = 1;
					res->target = gscsi_res->target;
					break;
				}
			}
			if (!found) {
				res->target = find_first_zero_bit(ioa_cfg->target_ids,
								  ioa_cfg->max_devs_supported);
				set_bit(res->target, ioa_cfg->target_ids);
			}
		} else if (res->type == IPR_RES_TYPE_IOAFP) {
			res->bus = IPR_IOAFP_VIRTUAL_BUS;
			res->target = 0;
		} else if (res->type == IPR_RES_TYPE_ARRAY) {
			res->bus = IPR_ARRAY_VIRTUAL_BUS;
			res->target = find_first_zero_bit(ioa_cfg->array_ids,
							  ioa_cfg->max_devs_supported);
			set_bit(res->target, ioa_cfg->array_ids);
		} else if (res->type == IPR_RES_TYPE_VOLUME_SET) {
			res->bus = IPR_VSET_VIRTUAL_BUS;
			res->target = find_first_zero_bit(ioa_cfg->vset_ids,
							  ioa_cfg->max_devs_supported);
			set_bit(res->target, ioa_cfg->vset_ids);
		} else {
			res->target = find_first_zero_bit(ioa_cfg->target_ids,
							  ioa_cfg->max_devs_supported);
			set_bit(res->target, ioa_cfg->target_ids);
		}
	} else {
		proto = cfgtew->u.cfgte->proto;
		res->qmodel = IPR_QUEUEING_MODEL(res);
		res->flags = cfgtew->u.cfgte->flags;
		if (res->flags & IPR_IS_IOA_RESOURCE)
			res->type = IPR_RES_TYPE_IOAFP;
		else
			res->type = cfgtew->u.cfgte->rsvd_subtype & 0x0f;

		res->bus = cfgtew->u.cfgte->res_addr.bus;
		res->target = cfgtew->u.cfgte->res_addr.target;
		res->lun = cfgtew->u.cfgte->res_addr.lun;
		res->lun_wwn = get_unaligned_be64(cfgtew->u.cfgte->lun_wwn);
	}

	ipr_update_ata_class(res, proto);
}

/**
 * ipr_is_same_device - Determine if two devices are the same.
 * @res:	resource entry struct
 * @cfgtew:	config table entry wrapper struct
 *
 * Return value:
 *	1 if the devices are the same / 0 otherwise
 **/
static int ipr_is_same_device(struct ipr_resource_entry *res,
			      struct ipr_config_table_entry_wrapper *cfgtew)
{
	if (res->ioa_cfg->sis64) {
		if (!memcmp(&res->dev_id, &cfgtew->u.cfgte64->dev_id,
			    sizeof(cfgtew->u.cfgte64->dev_id)) &&
		    !memcmp(&res->dev_lun.scsi_lun, &cfgtew->u.cfgte64->lun,
			    sizeof(cfgtew->u.cfgte64->lun))) {
			return 1;
		}
	} else {
		if (res->bus == cfgtew->u.cfgte->res_addr.bus &&
		    res->target == cfgtew->u.cfgte->res_addr.target &&
		    res->lun == cfgtew->u.cfgte->res_addr.lun)
			return 1;
	}

	return 0;
}

/**
 * __ipr_format_res_path - Format the resource path for printing.
 * @res_path:	resource path
 * @buffer:	buffer
 * @len:	length of buffer provided
 *
 * Return value:
 *	pointer to buffer
 **/
static char *__ipr_format_res_path(u8 *res_path, char *buffer, int len)
{
	int i;
	char *p = buffer;

	*p = '\0';
	p += snprintf(p, buffer + len - p, "%02X", res_path[0]);
	for (i = 1; res_path[i] != 0xff && ((i * 3) < len); i++)
		p += snprintf(p, buffer + len - p, "-%02X", res_path[i]);

	return buffer;
}

/**
 * ipr_format_res_path - Format the resource path for printing.
 * @ioa_cfg:	ioa config struct
 * @res_path:	resource path
 * @buffer:	buffer
 * @len:	length of buffer provided
 *
 * Return value:
 *	pointer to buffer
 **/
static char *ipr_format_res_path(struct ipr_ioa_cfg *ioa_cfg,
				 u8 *res_path, char *buffer, int len)
{
	char *p = buffer;

	*p = '\0';
	p += snprintf(p, buffer + len - p, "%d/", ioa_cfg->host->host_no);
	__ipr_format_res_path(res_path, p, len - (buffer - p));
	return buffer;
}

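/*
 * Illustrative output (the values are made up): __ipr_format_res_path()
 * turns a 0xff-terminated resource path such as {0x00, 0x02, 0x04, 0xff}
 * into "00-02-04", and ipr_format_res_path() prefixes the SCSI host
 * number, e.g. "2/00-02-04".
 */
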
/**
 * ipr_update_res_entry - Update the resource entry.
 * @res:	resource entry struct
 * @cfgtew:	config table entry wrapper struct
 *
 * Return value:
 *      none
 **/
static void ipr_update_res_entry(struct ipr_resource_entry *res,
				 struct ipr_config_table_entry_wrapper *cfgtew)
{
	char buffer[IPR_MAX_RES_PATH_LENGTH];
	unsigned int proto;
	int new_path = 0;

	if (res->ioa_cfg->sis64) {
		res->flags = be16_to_cpu(cfgtew->u.cfgte64->flags);
		res->res_flags = be16_to_cpu(cfgtew->u.cfgte64->res_flags);
		res->type = cfgtew->u.cfgte64->res_type;

		memcpy(&res->std_inq_data, &cfgtew->u.cfgte64->std_inq_data,
			sizeof(struct ipr_std_inq_data));

		res->qmodel = IPR_QUEUEING_MODEL64(res);
		proto = cfgtew->u.cfgte64->proto;
		res->res_handle = cfgtew->u.cfgte64->res_handle;
		res->dev_id = cfgtew->u.cfgte64->dev_id;

		memcpy(&res->dev_lun.scsi_lun, &cfgtew->u.cfgte64->lun,
			sizeof(res->dev_lun.scsi_lun));

		if (memcmp(res->res_path, &cfgtew->u.cfgte64->res_path,
			sizeof(res->res_path))) {
			memcpy(res->res_path, &cfgtew->u.cfgte64->res_path,
				sizeof(res->res_path));
			new_path = 1;
		}

		if (res->sdev && new_path)
			sdev_printk(KERN_INFO, res->sdev, "Resource path: %s\n",
				    ipr_format_res_path(res->ioa_cfg,
					res->res_path, buffer, sizeof(buffer)));
	} else {
		res->flags = cfgtew->u.cfgte->flags;
		if (res->flags & IPR_IS_IOA_RESOURCE)
			res->type = IPR_RES_TYPE_IOAFP;
		else
			res->type = cfgtew->u.cfgte->rsvd_subtype & 0x0f;

		memcpy(&res->std_inq_data, &cfgtew->u.cfgte->std_inq_data,
			sizeof(struct ipr_std_inq_data));

		res->qmodel = IPR_QUEUEING_MODEL(res);
		proto = cfgtew->u.cfgte->proto;
		res->res_handle = cfgtew->u.cfgte->res_handle;
	}

	ipr_update_ata_class(res, proto);
}

/**
 * ipr_clear_res_target - Clear the bit in the bit map representing the target
 *			  for the resource.
 * @res:	resource entry struct
 *
 * Return value:
 *	none
 **/
static void ipr_clear_res_target(struct ipr_resource_entry *res)
{
	struct ipr_resource_entry *gscsi_res = NULL;
	struct ipr_ioa_cfg *ioa_cfg = res->ioa_cfg;

	if (!ioa_cfg->sis64)
		return;

	if (res->bus == IPR_ARRAY_VIRTUAL_BUS)
		clear_bit(res->target, ioa_cfg->array_ids);
	else if (res->bus == IPR_VSET_VIRTUAL_BUS)
		clear_bit(res->target, ioa_cfg->vset_ids);
	else if (res->bus == 0 && res->type == IPR_RES_TYPE_GENERIC_SCSI) {
		list_for_each_entry(gscsi_res, &ioa_cfg->used_res_q, queue)
			if (gscsi_res->dev_id == res->dev_id && gscsi_res != res)
				return;
		clear_bit(res->target, ioa_cfg->target_ids);

	} else if (res->bus == 0)
		clear_bit(res->target, ioa_cfg->target_ids);
}

1435/**
1436 * ipr_handle_config_change - Handle a config change from the adapter
1437 * @ioa_cfg: ioa config struct
1438 * @hostrcb: hostrcb
1439 *
1440 * Return value:
1441 * none
1442 **/
1443static void ipr_handle_config_change(struct ipr_ioa_cfg *ioa_cfg,
3e7ebdfa 1444 struct ipr_hostrcb *hostrcb)
1da177e4
LT
1445{
1446 struct ipr_resource_entry *res = NULL;
3e7ebdfa
WB
1447 struct ipr_config_table_entry_wrapper cfgtew;
1448 __be32 cc_res_handle;
1449
1da177e4
LT
1450 u32 is_ndn = 1;
1451
3e7ebdfa
WB
1452 if (ioa_cfg->sis64) {
1453 cfgtew.u.cfgte64 = &hostrcb->hcam.u.ccn.u.cfgte64;
1454 cc_res_handle = cfgtew.u.cfgte64->res_handle;
1455 } else {
1456 cfgtew.u.cfgte = &hostrcb->hcam.u.ccn.u.cfgte;
1457 cc_res_handle = cfgtew.u.cfgte->res_handle;
1458 }
1da177e4
LT
1459
1460 list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
3e7ebdfa 1461 if (res->res_handle == cc_res_handle) {
1da177e4
LT
1462 is_ndn = 0;
1463 break;
1464 }
1465 }
1466
1467 if (is_ndn) {
1468 if (list_empty(&ioa_cfg->free_res_q)) {
1469 ipr_send_hcam(ioa_cfg,
1470 IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE,
1471 hostrcb);
1472 return;
1473 }
1474
1475 res = list_entry(ioa_cfg->free_res_q.next,
1476 struct ipr_resource_entry, queue);
1477
1478 list_del(&res->queue);
3e7ebdfa 1479 ipr_init_res_entry(res, &cfgtew);
1da177e4
LT
1480 list_add_tail(&res->queue, &ioa_cfg->used_res_q);
1481 }
1482
3e7ebdfa 1483 ipr_update_res_entry(res, &cfgtew);
1da177e4
LT
1484
1485 if (hostrcb->hcam.notify_type == IPR_HOST_RCB_NOTIF_TYPE_REM_ENTRY) {
1486 if (res->sdev) {
1da177e4 1487 res->del_from_ml = 1;
3e7ebdfa 1488 res->res_handle = IPR_INVALID_RES_HANDLE;
f688f96d 1489 schedule_work(&ioa_cfg->work_q);
3e7ebdfa
WB
1490 } else {
1491 ipr_clear_res_target(res);
1da177e4 1492 list_move_tail(&res->queue, &ioa_cfg->free_res_q);
3e7ebdfa 1493 }
5767a1c4 1494 } else if (!res->sdev || res->del_from_ml) {
1da177e4 1495 res->add_to_ml = 1;
f688f96d 1496 schedule_work(&ioa_cfg->work_q);
1da177e4
LT
1497 }
1498
1499 ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE, hostrcb);
1500}
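
/*
 * Sketch of the resource-queue discipline used above (hypothetical helper,
 * not driver API): config-table entries are never freed on removal, they
 * are simply re-linked from used_res_q back onto free_res_q under the host
 * lock, which is why a bare list_move_tail() suffices.
 */
static void __maybe_unused demo_retire_res(struct ipr_ioa_cfg *ioa_cfg,
					   struct ipr_resource_entry *res)
{
	/* Re-link in place; no allocation or freeing is involved. */
	list_move_tail(&res->queue, &ioa_cfg->free_res_q);
}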

/**
 * ipr_process_ccn - Op done function for a CCN.
 * @ipr_cmd:	ipr command struct
 *
 * This function is the op done function for a configuration
 * change notification host controlled async from the adapter.
 *
 * Return value:
 *	none
 **/
static void ipr_process_ccn(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
	struct ipr_hostrcb *hostrcb = ipr_cmd->u.hostrcb;
	u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);

	list_del_init(&hostrcb->queue);
	list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);

	if (ioasc) {
		if (ioasc != IPR_IOASC_IOA_WAS_RESET &&
		    ioasc != IPR_IOASC_ABORTED_CMD_TERM_BY_HOST)
			dev_err(&ioa_cfg->pdev->dev,
				"Host RCB failed with IOASC: 0x%08X\n", ioasc);

		ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE, hostrcb);
	} else {
		ipr_handle_config_change(ioa_cfg, hostrcb);
	}
}

/**
 * strip_and_pad_whitespace - Strip and pad trailing whitespace.
 * @i:		index into buffer
 * @buf:	string to modify
 *
 * This function will strip all trailing whitespace, pad the end
 * of the string with a single space, and NULL terminate the string.
 *
 * Return value:
 *	new length of string
 **/
static int strip_and_pad_whitespace(int i, char *buf)
{
	while (i && buf[i] == ' ')
		i--;
	buf[i+1] = ' ';
	buf[i+2] = '\0';
	return i + 2;
}
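
/*
 * Worked example for strip_and_pad_whitespace() (hypothetical values):
 * with buf = "IBM     " and i = 7 (the last index of the field), the loop
 * backs up over the trailing blanks to i = 2, then writes buf[3] = ' ' and
 * buf[4] = '\0', yielding "IBM " and returning 4 as the index at which the
 * next field should be appended.
 */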

/**
 * ipr_log_vpd_compact - Log the passed extended VPD compactly.
 * @prefix:	string to print at start of printk
 * @hostrcb:	hostrcb pointer
 * @vpd:	vendor/product id/sn struct
 *
 * Return value:
 *	none
 **/
static void ipr_log_vpd_compact(char *prefix, struct ipr_hostrcb *hostrcb,
				struct ipr_vpd *vpd)
{
	char buffer[IPR_VENDOR_ID_LEN + IPR_PROD_ID_LEN + IPR_SERIAL_NUM_LEN + 3];
	int i = 0;

	memcpy(buffer, vpd->vpids.vendor_id, IPR_VENDOR_ID_LEN);
	i = strip_and_pad_whitespace(IPR_VENDOR_ID_LEN - 1, buffer);

	memcpy(&buffer[i], vpd->vpids.product_id, IPR_PROD_ID_LEN);
	i = strip_and_pad_whitespace(i + IPR_PROD_ID_LEN - 1, buffer);

	memcpy(&buffer[i], vpd->sn, IPR_SERIAL_NUM_LEN);
	buffer[IPR_SERIAL_NUM_LEN + i] = '\0';

	ipr_hcam_err(hostrcb, "%s VPID/SN: %s\n", prefix, buffer);
}

/**
 * ipr_log_vpd - Log the passed VPD to the error log.
 * @vpd:	vendor/product id/sn struct
 *
 * Return value:
 *	none
 **/
static void ipr_log_vpd(struct ipr_vpd *vpd)
{
	char buffer[IPR_VENDOR_ID_LEN + IPR_PROD_ID_LEN
		    + IPR_SERIAL_NUM_LEN];

	memcpy(buffer, vpd->vpids.vendor_id, IPR_VENDOR_ID_LEN);
	memcpy(buffer + IPR_VENDOR_ID_LEN, vpd->vpids.product_id,
	       IPR_PROD_ID_LEN);
	buffer[IPR_VENDOR_ID_LEN + IPR_PROD_ID_LEN] = '\0';
	ipr_err("Vendor/Product ID: %s\n", buffer);

	memcpy(buffer, vpd->sn, IPR_SERIAL_NUM_LEN);
	buffer[IPR_SERIAL_NUM_LEN] = '\0';
	ipr_err("    Serial Number: %s\n", buffer);
}

/**
 * ipr_log_ext_vpd_compact - Log the passed extended VPD compactly.
 * @prefix:	string to print at start of printk
 * @hostrcb:	hostrcb pointer
 * @vpd:	vendor/product id/sn/wwn struct
 *
 * Return value:
 *	none
 **/
static void ipr_log_ext_vpd_compact(char *prefix, struct ipr_hostrcb *hostrcb,
				    struct ipr_ext_vpd *vpd)
{
	ipr_log_vpd_compact(prefix, hostrcb, &vpd->vpd);
	ipr_hcam_err(hostrcb, "%s WWN: %08X%08X\n", prefix,
		     be32_to_cpu(vpd->wwid[0]), be32_to_cpu(vpd->wwid[1]));
}

/**
 * ipr_log_ext_vpd - Log the passed extended VPD to the error log.
 * @vpd:	vendor/product id/sn/wwn struct
 *
 * Return value:
 *	none
 **/
static void ipr_log_ext_vpd(struct ipr_ext_vpd *vpd)
{
	ipr_log_vpd(&vpd->vpd);
	ipr_err("    WWN: %08X%08X\n", be32_to_cpu(vpd->wwid[0]),
		be32_to_cpu(vpd->wwid[1]));
}

/**
 * ipr_log_enhanced_cache_error - Log a cache error.
 * @ioa_cfg:	ioa config struct
 * @hostrcb:	hostrcb struct
 *
 * Return value:
 *	none
 **/
static void ipr_log_enhanced_cache_error(struct ipr_ioa_cfg *ioa_cfg,
					 struct ipr_hostrcb *hostrcb)
{
	struct ipr_hostrcb_type_12_error *error;

	if (ioa_cfg->sis64)
		error = &hostrcb->hcam.u.error64.u.type_12_error;
	else
		error = &hostrcb->hcam.u.error.u.type_12_error;

	ipr_err("-----Current Configuration-----\n");
	ipr_err("Cache Directory Card Information:\n");
	ipr_log_ext_vpd(&error->ioa_vpd);
	ipr_err("Adapter Card Information:\n");
	ipr_log_ext_vpd(&error->cfc_vpd);

	ipr_err("-----Expected Configuration-----\n");
	ipr_err("Cache Directory Card Information:\n");
	ipr_log_ext_vpd(&error->ioa_last_attached_to_cfc_vpd);
	ipr_err("Adapter Card Information:\n");
	ipr_log_ext_vpd(&error->cfc_last_attached_to_ioa_vpd);

	ipr_err("Additional IOA Data: %08X %08X %08X\n",
		be32_to_cpu(error->ioa_data[0]),
		be32_to_cpu(error->ioa_data[1]),
		be32_to_cpu(error->ioa_data[2]));
}

/**
 * ipr_log_cache_error - Log a cache error.
 * @ioa_cfg:	ioa config struct
 * @hostrcb:	hostrcb struct
 *
 * Return value:
 *	none
 **/
static void ipr_log_cache_error(struct ipr_ioa_cfg *ioa_cfg,
				struct ipr_hostrcb *hostrcb)
{
	struct ipr_hostrcb_type_02_error *error =
		&hostrcb->hcam.u.error.u.type_02_error;

	ipr_err("-----Current Configuration-----\n");
	ipr_err("Cache Directory Card Information:\n");
	ipr_log_vpd(&error->ioa_vpd);
	ipr_err("Adapter Card Information:\n");
	ipr_log_vpd(&error->cfc_vpd);

	ipr_err("-----Expected Configuration-----\n");
	ipr_err("Cache Directory Card Information:\n");
	ipr_log_vpd(&error->ioa_last_attached_to_cfc_vpd);
	ipr_err("Adapter Card Information:\n");
	ipr_log_vpd(&error->cfc_last_attached_to_ioa_vpd);

	ipr_err("Additional IOA Data: %08X %08X %08X\n",
		be32_to_cpu(error->ioa_data[0]),
		be32_to_cpu(error->ioa_data[1]),
		be32_to_cpu(error->ioa_data[2]));
}

/**
 * ipr_log_enhanced_config_error - Log a configuration error.
 * @ioa_cfg:	ioa config struct
 * @hostrcb:	hostrcb struct
 *
 * Return value:
 *	none
 **/
static void ipr_log_enhanced_config_error(struct ipr_ioa_cfg *ioa_cfg,
					  struct ipr_hostrcb *hostrcb)
{
	int errors_logged, i;
	struct ipr_hostrcb_device_data_entry_enhanced *dev_entry;
	struct ipr_hostrcb_type_13_error *error;

	error = &hostrcb->hcam.u.error.u.type_13_error;
	errors_logged = be32_to_cpu(error->errors_logged);

	ipr_err("Device Errors Detected/Logged: %d/%d\n",
		be32_to_cpu(error->errors_detected), errors_logged);

	dev_entry = error->dev;

	for (i = 0; i < errors_logged; i++, dev_entry++) {
		ipr_err_separator;

		ipr_phys_res_err(ioa_cfg, dev_entry->dev_res_addr, "Device %d", i + 1);
		ipr_log_ext_vpd(&dev_entry->vpd);

		ipr_err("-----New Device Information-----\n");
		ipr_log_ext_vpd(&dev_entry->new_vpd);

		ipr_err("Cache Directory Card Information:\n");
		ipr_log_ext_vpd(&dev_entry->ioa_last_with_dev_vpd);

		ipr_err("Adapter Card Information:\n");
		ipr_log_ext_vpd(&dev_entry->cfc_last_with_dev_vpd);
	}
}

/**
 * ipr_log_sis64_config_error - Log a sis64 configuration error.
 * @ioa_cfg:	ioa config struct
 * @hostrcb:	hostrcb struct
 *
 * Return value:
 *	none
 **/
static void ipr_log_sis64_config_error(struct ipr_ioa_cfg *ioa_cfg,
				       struct ipr_hostrcb *hostrcb)
{
	int errors_logged, i;
	struct ipr_hostrcb64_device_data_entry_enhanced *dev_entry;
	struct ipr_hostrcb_type_23_error *error;
	char buffer[IPR_MAX_RES_PATH_LENGTH];

	error = &hostrcb->hcam.u.error64.u.type_23_error;
	errors_logged = be32_to_cpu(error->errors_logged);

	ipr_err("Device Errors Detected/Logged: %d/%d\n",
		be32_to_cpu(error->errors_detected), errors_logged);

	dev_entry = error->dev;

	for (i = 0; i < errors_logged; i++, dev_entry++) {
		ipr_err_separator;

		ipr_err("Device %d : %s", i + 1,
			__ipr_format_res_path(dev_entry->res_path,
					      buffer, sizeof(buffer)));
		ipr_log_ext_vpd(&dev_entry->vpd);

		ipr_err("-----New Device Information-----\n");
		ipr_log_ext_vpd(&dev_entry->new_vpd);

		ipr_err("Cache Directory Card Information:\n");
		ipr_log_ext_vpd(&dev_entry->ioa_last_with_dev_vpd);

		ipr_err("Adapter Card Information:\n");
		ipr_log_ext_vpd(&dev_entry->cfc_last_with_dev_vpd);
	}
}

/**
 * ipr_log_config_error - Log a configuration error.
 * @ioa_cfg:	ioa config struct
 * @hostrcb:	hostrcb struct
 *
 * Return value:
 *	none
 **/
static void ipr_log_config_error(struct ipr_ioa_cfg *ioa_cfg,
				 struct ipr_hostrcb *hostrcb)
{
	int errors_logged, i;
	struct ipr_hostrcb_device_data_entry *dev_entry;
	struct ipr_hostrcb_type_03_error *error;

	error = &hostrcb->hcam.u.error.u.type_03_error;
	errors_logged = be32_to_cpu(error->errors_logged);

	ipr_err("Device Errors Detected/Logged: %d/%d\n",
		be32_to_cpu(error->errors_detected), errors_logged);

	dev_entry = error->dev;

	for (i = 0; i < errors_logged; i++, dev_entry++) {
		ipr_err_separator;

		ipr_phys_res_err(ioa_cfg, dev_entry->dev_res_addr, "Device %d", i + 1);
		ipr_log_vpd(&dev_entry->vpd);

		ipr_err("-----New Device Information-----\n");
		ipr_log_vpd(&dev_entry->new_vpd);

		ipr_err("Cache Directory Card Information:\n");
		ipr_log_vpd(&dev_entry->ioa_last_with_dev_vpd);

		ipr_err("Adapter Card Information:\n");
		ipr_log_vpd(&dev_entry->cfc_last_with_dev_vpd);

		ipr_err("Additional IOA Data: %08X %08X %08X %08X %08X\n",
			be32_to_cpu(dev_entry->ioa_data[0]),
			be32_to_cpu(dev_entry->ioa_data[1]),
			be32_to_cpu(dev_entry->ioa_data[2]),
			be32_to_cpu(dev_entry->ioa_data[3]),
			be32_to_cpu(dev_entry->ioa_data[4]));
	}
}

/**
 * ipr_log_enhanced_array_error - Log an array configuration error.
 * @ioa_cfg:	ioa config struct
 * @hostrcb:	hostrcb struct
 *
 * Return value:
 *	none
 **/
static void ipr_log_enhanced_array_error(struct ipr_ioa_cfg *ioa_cfg,
					 struct ipr_hostrcb *hostrcb)
{
	int i, num_entries;
	struct ipr_hostrcb_type_14_error *error;
	struct ipr_hostrcb_array_data_entry_enhanced *array_entry;
	const u8 zero_sn[IPR_SERIAL_NUM_LEN] = { [0 ... IPR_SERIAL_NUM_LEN-1] = '0' };

	error = &hostrcb->hcam.u.error.u.type_14_error;

	ipr_err_separator;

	ipr_err("RAID %s Array Configuration: %d:%d:%d:%d\n",
		error->protection_level,
		ioa_cfg->host->host_no,
		error->last_func_vset_res_addr.bus,
		error->last_func_vset_res_addr.target,
		error->last_func_vset_res_addr.lun);

	ipr_err_separator;

	array_entry = error->array_member;
	num_entries = min_t(u32, be32_to_cpu(error->num_entries),
			    ARRAY_SIZE(error->array_member));

	for (i = 0; i < num_entries; i++, array_entry++) {
		if (!memcmp(array_entry->vpd.vpd.sn, zero_sn, IPR_SERIAL_NUM_LEN))
			continue;

		if (be32_to_cpu(error->exposed_mode_adn) == i)
			ipr_err("Exposed Array Member %d:\n", i);
		else
			ipr_err("Array Member %d:\n", i);

		ipr_log_ext_vpd(&array_entry->vpd);
		ipr_phys_res_err(ioa_cfg, array_entry->dev_res_addr, "Current Location");
		ipr_phys_res_err(ioa_cfg, array_entry->expected_dev_res_addr,
				 "Expected Location");

		ipr_err_separator;
	}
}

/**
 * ipr_log_array_error - Log an array configuration error.
 * @ioa_cfg:	ioa config struct
 * @hostrcb:	hostrcb struct
 *
 * Return value:
 *	none
 **/
static void ipr_log_array_error(struct ipr_ioa_cfg *ioa_cfg,
				struct ipr_hostrcb *hostrcb)
{
	int i;
	struct ipr_hostrcb_type_04_error *error;
	struct ipr_hostrcb_array_data_entry *array_entry;
	const u8 zero_sn[IPR_SERIAL_NUM_LEN] = { [0 ... IPR_SERIAL_NUM_LEN-1] = '0' };

	error = &hostrcb->hcam.u.error.u.type_04_error;

	ipr_err_separator;

	ipr_err("RAID %s Array Configuration: %d:%d:%d:%d\n",
		error->protection_level,
		ioa_cfg->host->host_no,
		error->last_func_vset_res_addr.bus,
		error->last_func_vset_res_addr.target,
		error->last_func_vset_res_addr.lun);

	ipr_err_separator;

	array_entry = error->array_member;

	for (i = 0; i < 18; i++) {
		if (!memcmp(array_entry->vpd.sn, zero_sn, IPR_SERIAL_NUM_LEN))
			continue;

		if (be32_to_cpu(error->exposed_mode_adn) == i)
			ipr_err("Exposed Array Member %d:\n", i);
		else
			ipr_err("Array Member %d:\n", i);

		ipr_log_vpd(&array_entry->vpd);

		ipr_phys_res_err(ioa_cfg, array_entry->dev_res_addr, "Current Location");
		ipr_phys_res_err(ioa_cfg, array_entry->expected_dev_res_addr,
				 "Expected Location");

		ipr_err_separator;

		if (i == 9)
			array_entry = error->array_member2;
		else
			array_entry++;
	}
}

/**
 * ipr_log_hex_data - Log additional hex IOA error data.
 * @ioa_cfg:	ioa config struct
 * @data:	IOA error data
 * @len:	data length
 *
 * Return value:
 *	none
 **/
static void ipr_log_hex_data(struct ipr_ioa_cfg *ioa_cfg, __be32 *data, int len)
{
	int i;

	if (len == 0)
		return;

	if (ioa_cfg->log_level <= IPR_DEFAULT_LOG_LEVEL)
		len = min_t(int, len, IPR_DEFAULT_MAX_ERROR_DUMP);

	for (i = 0; i < len / 4; i += 4) {
		ipr_err("%08X: %08X %08X %08X %08X\n", i*4,
			be32_to_cpu(data[i]),
			be32_to_cpu(data[i+1]),
			be32_to_cpu(data[i+2]),
			be32_to_cpu(data[i+3]));
	}
}
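
/*
 * Usage note for ipr_log_hex_data(): len is in bytes, so len / 4 is the
 * word count and i advances four 32-bit words per printed line; the line
 * prefix is therefore the byte offset i*4. A 32-byte buffer would produce
 * two lines (hypothetical values):
 *	00000000: 11111111 22222222 33333333 44444444
 *	00000010: 55555555 66666666 77777777 88888888
 */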

/**
 * ipr_log_enhanced_dual_ioa_error - Log an enhanced dual adapter error.
 * @ioa_cfg:	ioa config struct
 * @hostrcb:	hostrcb struct
 *
 * Return value:
 *	none
 **/
static void ipr_log_enhanced_dual_ioa_error(struct ipr_ioa_cfg *ioa_cfg,
					    struct ipr_hostrcb *hostrcb)
{
	struct ipr_hostrcb_type_17_error *error;

	if (ioa_cfg->sis64)
		error = &hostrcb->hcam.u.error64.u.type_17_error;
	else
		error = &hostrcb->hcam.u.error.u.type_17_error;

	error->failure_reason[sizeof(error->failure_reason) - 1] = '\0';
	strim(error->failure_reason);

	ipr_hcam_err(hostrcb, "%s [PRC: %08X]\n", error->failure_reason,
		     be32_to_cpu(hostrcb->hcam.u.error.prc));
	ipr_log_ext_vpd_compact("Remote IOA", hostrcb, &error->vpd);
	ipr_log_hex_data(ioa_cfg, error->data,
			 be32_to_cpu(hostrcb->hcam.length) -
			 (offsetof(struct ipr_hostrcb_error, u) +
			  offsetof(struct ipr_hostrcb_type_17_error, data)));
}

/**
 * ipr_log_dual_ioa_error - Log a dual adapter error.
 * @ioa_cfg:	ioa config struct
 * @hostrcb:	hostrcb struct
 *
 * Return value:
 *	none
 **/
static void ipr_log_dual_ioa_error(struct ipr_ioa_cfg *ioa_cfg,
				   struct ipr_hostrcb *hostrcb)
{
	struct ipr_hostrcb_type_07_error *error;

	error = &hostrcb->hcam.u.error.u.type_07_error;
	error->failure_reason[sizeof(error->failure_reason) - 1] = '\0';
	strim(error->failure_reason);

	ipr_hcam_err(hostrcb, "%s [PRC: %08X]\n", error->failure_reason,
		     be32_to_cpu(hostrcb->hcam.u.error.prc));
	ipr_log_vpd_compact("Remote IOA", hostrcb, &error->vpd);
	ipr_log_hex_data(ioa_cfg, error->data,
			 be32_to_cpu(hostrcb->hcam.length) -
			 (offsetof(struct ipr_hostrcb_error, u) +
			  offsetof(struct ipr_hostrcb_type_07_error, data)));
}

static const struct {
	u8 active;
	char *desc;
} path_active_desc[] = {
	{ IPR_PATH_NO_INFO, "Path" },
	{ IPR_PATH_ACTIVE, "Active path" },
	{ IPR_PATH_NOT_ACTIVE, "Inactive path" }
};

static const struct {
	u8 state;
	char *desc;
} path_state_desc[] = {
	{ IPR_PATH_STATE_NO_INFO, "has no path state information available" },
	{ IPR_PATH_HEALTHY, "is healthy" },
	{ IPR_PATH_DEGRADED, "is degraded" },
	{ IPR_PATH_FAILED, "is failed" }
};

/**
 * ipr_log_fabric_path - Log a fabric path error
 * @hostrcb:	hostrcb struct
 * @fabric:	fabric descriptor
 *
 * Return value:
 *	none
 **/
static void ipr_log_fabric_path(struct ipr_hostrcb *hostrcb,
				struct ipr_hostrcb_fabric_desc *fabric)
{
	int i, j;
	u8 path_state = fabric->path_state;
	u8 active = path_state & IPR_PATH_ACTIVE_MASK;
	u8 state = path_state & IPR_PATH_STATE_MASK;

	for (i = 0; i < ARRAY_SIZE(path_active_desc); i++) {
		if (path_active_desc[i].active != active)
			continue;

		for (j = 0; j < ARRAY_SIZE(path_state_desc); j++) {
			if (path_state_desc[j].state != state)
				continue;

			if (fabric->cascaded_expander == 0xff && fabric->phy == 0xff) {
				ipr_hcam_err(hostrcb, "%s %s: IOA Port=%d\n",
					     path_active_desc[i].desc, path_state_desc[j].desc,
					     fabric->ioa_port);
			} else if (fabric->cascaded_expander == 0xff) {
				ipr_hcam_err(hostrcb, "%s %s: IOA Port=%d, Phy=%d\n",
					     path_active_desc[i].desc, path_state_desc[j].desc,
					     fabric->ioa_port, fabric->phy);
			} else if (fabric->phy == 0xff) {
				ipr_hcam_err(hostrcb, "%s %s: IOA Port=%d, Cascade=%d\n",
					     path_active_desc[i].desc, path_state_desc[j].desc,
					     fabric->ioa_port, fabric->cascaded_expander);
			} else {
				ipr_hcam_err(hostrcb, "%s %s: IOA Port=%d, Cascade=%d, Phy=%d\n",
					     path_active_desc[i].desc, path_state_desc[j].desc,
					     fabric->ioa_port, fabric->cascaded_expander, fabric->phy);
			}
			return;
		}
	}

	ipr_err("Path state=%02X IOA Port=%d Cascade=%d Phy=%d\n", path_state,
		fabric->ioa_port, fabric->cascaded_expander, fabric->phy);
}

/**
 * ipr_log64_fabric_path - Log a fabric path error
 * @hostrcb:	hostrcb struct
 * @fabric:	fabric descriptor
 *
 * Return value:
 *	none
 **/
static void ipr_log64_fabric_path(struct ipr_hostrcb *hostrcb,
				  struct ipr_hostrcb64_fabric_desc *fabric)
{
	int i, j;
	u8 path_state = fabric->path_state;
	u8 active = path_state & IPR_PATH_ACTIVE_MASK;
	u8 state = path_state & IPR_PATH_STATE_MASK;
	char buffer[IPR_MAX_RES_PATH_LENGTH];

	for (i = 0; i < ARRAY_SIZE(path_active_desc); i++) {
		if (path_active_desc[i].active != active)
			continue;

		for (j = 0; j < ARRAY_SIZE(path_state_desc); j++) {
			if (path_state_desc[j].state != state)
				continue;

			ipr_hcam_err(hostrcb, "%s %s: Resource Path=%s\n",
				     path_active_desc[i].desc, path_state_desc[j].desc,
				     ipr_format_res_path(hostrcb->ioa_cfg,
						fabric->res_path,
						buffer, sizeof(buffer)));
			return;
		}
	}

	ipr_err("Path state=%02X Resource Path=%s\n", path_state,
		ipr_format_res_path(hostrcb->ioa_cfg, fabric->res_path,
				    buffer, sizeof(buffer)));
}

static const struct {
	u8 type;
	char *desc;
} path_type_desc[] = {
	{ IPR_PATH_CFG_IOA_PORT, "IOA port" },
	{ IPR_PATH_CFG_EXP_PORT, "Expander port" },
	{ IPR_PATH_CFG_DEVICE_PORT, "Device port" },
	{ IPR_PATH_CFG_DEVICE_LUN, "Device LUN" }
};

static const struct {
	u8 status;
	char *desc;
} path_status_desc[] = {
	{ IPR_PATH_CFG_NO_PROB, "Functional" },
	{ IPR_PATH_CFG_DEGRADED, "Degraded" },
	{ IPR_PATH_CFG_FAILED, "Failed" },
	{ IPR_PATH_CFG_SUSPECT, "Suspect" },
	{ IPR_PATH_NOT_DETECTED, "Missing" },
	{ IPR_PATH_INCORRECT_CONN, "Incorrectly connected" }
};

static const char *link_rate[] = {
	"unknown",
	"disabled",
	"phy reset problem",
	"spinup hold",
	"port selector",
	"unknown",
	"unknown",
	"unknown",
	"1.5Gbps",
	"3.0Gbps",
	"unknown",
	"unknown",
	"unknown",
	"unknown",
	"unknown",
	"unknown"
};
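
/*
 * Design note: link_rate[] deliberately has 16 entries so that indexing it
 * with (link_rate & IPR_PHY_LINK_RATE_MASK), a 4-bit value, can never run
 * off the end of the table; codes the hardware has not assigned simply
 * resolve to "unknown" instead of needing a bounds check at every caller.
 */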

/**
 * ipr_log_path_elem - Log a fabric path element.
 * @hostrcb:	hostrcb struct
 * @cfg:	fabric path element struct
 *
 * Return value:
 *	none
 **/
static void ipr_log_path_elem(struct ipr_hostrcb *hostrcb,
			      struct ipr_hostrcb_config_element *cfg)
{
	int i, j;
	u8 type = cfg->type_status & IPR_PATH_CFG_TYPE_MASK;
	u8 status = cfg->type_status & IPR_PATH_CFG_STATUS_MASK;

	if (type == IPR_PATH_CFG_NOT_EXIST)
		return;

	for (i = 0; i < ARRAY_SIZE(path_type_desc); i++) {
		if (path_type_desc[i].type != type)
			continue;

		for (j = 0; j < ARRAY_SIZE(path_status_desc); j++) {
			if (path_status_desc[j].status != status)
				continue;

			if (type == IPR_PATH_CFG_IOA_PORT) {
				ipr_hcam_err(hostrcb, "%s %s: Phy=%d, Link rate=%s, WWN=%08X%08X\n",
					     path_status_desc[j].desc, path_type_desc[i].desc,
					     cfg->phy, link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
					     be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
			} else {
				if (cfg->cascaded_expander == 0xff && cfg->phy == 0xff) {
					ipr_hcam_err(hostrcb, "%s %s: Link rate=%s, WWN=%08X%08X\n",
						     path_status_desc[j].desc, path_type_desc[i].desc,
						     link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
						     be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
				} else if (cfg->cascaded_expander == 0xff) {
					ipr_hcam_err(hostrcb, "%s %s: Phy=%d, Link rate=%s, "
						     "WWN=%08X%08X\n", path_status_desc[j].desc,
						     path_type_desc[i].desc, cfg->phy,
						     link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
						     be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
				} else if (cfg->phy == 0xff) {
					ipr_hcam_err(hostrcb, "%s %s: Cascade=%d, Link rate=%s, "
						     "WWN=%08X%08X\n", path_status_desc[j].desc,
						     path_type_desc[i].desc, cfg->cascaded_expander,
						     link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
						     be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
				} else {
					ipr_hcam_err(hostrcb, "%s %s: Cascade=%d, Phy=%d, Link rate=%s "
						     "WWN=%08X%08X\n", path_status_desc[j].desc,
						     path_type_desc[i].desc, cfg->cascaded_expander, cfg->phy,
						     link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
						     be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
				}
			}
			return;
		}
	}

	ipr_hcam_err(hostrcb, "Path element=%02X: Cascade=%d Phy=%d Link rate=%s "
		     "WWN=%08X%08X\n", cfg->type_status, cfg->cascaded_expander, cfg->phy,
		     link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
		     be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
}

/**
 * ipr_log64_path_elem - Log a fabric path element.
 * @hostrcb:	hostrcb struct
 * @cfg:	fabric path element struct
 *
 * Return value:
 *	none
 **/
static void ipr_log64_path_elem(struct ipr_hostrcb *hostrcb,
				struct ipr_hostrcb64_config_element *cfg)
{
	int i, j;
	u8 desc_id = cfg->descriptor_id & IPR_DESCRIPTOR_MASK;
	u8 type = cfg->type_status & IPR_PATH_CFG_TYPE_MASK;
	u8 status = cfg->type_status & IPR_PATH_CFG_STATUS_MASK;
	char buffer[IPR_MAX_RES_PATH_LENGTH];

	if (type == IPR_PATH_CFG_NOT_EXIST || desc_id != IPR_DESCRIPTOR_SIS64)
		return;

	for (i = 0; i < ARRAY_SIZE(path_type_desc); i++) {
		if (path_type_desc[i].type != type)
			continue;

		for (j = 0; j < ARRAY_SIZE(path_status_desc); j++) {
			if (path_status_desc[j].status != status)
				continue;

			ipr_hcam_err(hostrcb, "%s %s: Resource Path=%s, Link rate=%s, WWN=%08X%08X\n",
				     path_status_desc[j].desc, path_type_desc[i].desc,
				     ipr_format_res_path(hostrcb->ioa_cfg,
					cfg->res_path, buffer, sizeof(buffer)),
					link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
					be32_to_cpu(cfg->wwid[0]),
					be32_to_cpu(cfg->wwid[1]));
			return;
		}
	}
	ipr_hcam_err(hostrcb, "Path element=%02X: Resource Path=%s, Link rate=%s "
		     "WWN=%08X%08X\n", cfg->type_status,
		     ipr_format_res_path(hostrcb->ioa_cfg,
			cfg->res_path, buffer, sizeof(buffer)),
			link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
			be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
}

/**
 * ipr_log_fabric_error - Log a fabric error.
 * @ioa_cfg:	ioa config struct
 * @hostrcb:	hostrcb struct
 *
 * Return value:
 *	none
 **/
static void ipr_log_fabric_error(struct ipr_ioa_cfg *ioa_cfg,
				 struct ipr_hostrcb *hostrcb)
{
	struct ipr_hostrcb_type_20_error *error;
	struct ipr_hostrcb_fabric_desc *fabric;
	struct ipr_hostrcb_config_element *cfg;
	int i, add_len;

	error = &hostrcb->hcam.u.error.u.type_20_error;
	error->failure_reason[sizeof(error->failure_reason) - 1] = '\0';
	ipr_hcam_err(hostrcb, "%s\n", error->failure_reason);

	add_len = be32_to_cpu(hostrcb->hcam.length) -
		(offsetof(struct ipr_hostrcb_error, u) +
		 offsetof(struct ipr_hostrcb_type_20_error, desc));

	for (i = 0, fabric = error->desc; i < error->num_entries; i++) {
		ipr_log_fabric_path(hostrcb, fabric);
		for_each_fabric_cfg(fabric, cfg)
			ipr_log_path_elem(hostrcb, cfg);

		add_len -= be16_to_cpu(fabric->length);
		fabric = (struct ipr_hostrcb_fabric_desc *)
			((unsigned long)fabric + be16_to_cpu(fabric->length));
	}

	ipr_log_hex_data(ioa_cfg, (__be32 *)fabric, add_len);
}
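
/*
 * Minimal sketch of the variable-length descriptor walk used above
 * (hypothetical type and helper, not driver API): each fabric descriptor
 * starts with a big-endian length, and the next descriptor begins that
 * many bytes later, so the walk is plain pointer arithmetic.
 */
struct demo_desc {
	__be16 length;
};

static struct demo_desc * __maybe_unused demo_next_desc(struct demo_desc *d)
{
	return (struct demo_desc *)((unsigned long)d + be16_to_cpu(d->length));
}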

/**
 * ipr_log_sis64_array_error - Log a sis64 array error.
 * @ioa_cfg:	ioa config struct
 * @hostrcb:	hostrcb struct
 *
 * Return value:
 *	none
 **/
static void ipr_log_sis64_array_error(struct ipr_ioa_cfg *ioa_cfg,
				      struct ipr_hostrcb *hostrcb)
{
	int i, num_entries;
	struct ipr_hostrcb_type_24_error *error;
	struct ipr_hostrcb64_array_data_entry *array_entry;
	char buffer[IPR_MAX_RES_PATH_LENGTH];
	const u8 zero_sn[IPR_SERIAL_NUM_LEN] = { [0 ... IPR_SERIAL_NUM_LEN-1] = '0' };

	error = &hostrcb->hcam.u.error64.u.type_24_error;

	ipr_err_separator;

	ipr_err("RAID %s Array Configuration: %s\n",
		error->protection_level,
		ipr_format_res_path(ioa_cfg, error->last_res_path,
				    buffer, sizeof(buffer)));

	ipr_err_separator;

	array_entry = error->array_member;
	num_entries = min_t(u32, error->num_entries,
			    ARRAY_SIZE(error->array_member));

	for (i = 0; i < num_entries; i++, array_entry++) {

		if (!memcmp(array_entry->vpd.vpd.sn, zero_sn, IPR_SERIAL_NUM_LEN))
			continue;

		if (error->exposed_mode_adn == i)
			ipr_err("Exposed Array Member %d:\n", i);
		else
			ipr_err("Array Member %d:\n", i);

		ipr_log_ext_vpd(&array_entry->vpd);
		ipr_err("Current Location: %s\n",
			ipr_format_res_path(ioa_cfg, array_entry->res_path,
					    buffer, sizeof(buffer)));
		ipr_err("Expected Location: %s\n",
			ipr_format_res_path(ioa_cfg,
					    array_entry->expected_res_path,
					    buffer, sizeof(buffer)));

		ipr_err_separator;
	}
}

/**
 * ipr_log_sis64_fabric_error - Log a sis64 fabric error.
 * @ioa_cfg:	ioa config struct
 * @hostrcb:	hostrcb struct
 *
 * Return value:
 *	none
 **/
static void ipr_log_sis64_fabric_error(struct ipr_ioa_cfg *ioa_cfg,
				       struct ipr_hostrcb *hostrcb)
{
	struct ipr_hostrcb_type_30_error *error;
	struct ipr_hostrcb64_fabric_desc *fabric;
	struct ipr_hostrcb64_config_element *cfg;
	int i, add_len;

	error = &hostrcb->hcam.u.error64.u.type_30_error;

	error->failure_reason[sizeof(error->failure_reason) - 1] = '\0';
	ipr_hcam_err(hostrcb, "%s\n", error->failure_reason);

	add_len = be32_to_cpu(hostrcb->hcam.length) -
		(offsetof(struct ipr_hostrcb64_error, u) +
		 offsetof(struct ipr_hostrcb_type_30_error, desc));

	for (i = 0, fabric = error->desc; i < error->num_entries; i++) {
		ipr_log64_fabric_path(hostrcb, fabric);
		for_each_fabric_cfg(fabric, cfg)
			ipr_log64_path_elem(hostrcb, cfg);

		add_len -= be16_to_cpu(fabric->length);
		fabric = (struct ipr_hostrcb64_fabric_desc *)
			((unsigned long)fabric + be16_to_cpu(fabric->length));
	}

	ipr_log_hex_data(ioa_cfg, (__be32 *)fabric, add_len);
}

/**
 * ipr_log_sis64_service_required_error - Log a sis64 service required error.
 * @ioa_cfg:	ioa config struct
 * @hostrcb:	hostrcb struct
 *
 * Return value:
 *	none
 **/
static void ipr_log_sis64_service_required_error(struct ipr_ioa_cfg *ioa_cfg,
						 struct ipr_hostrcb *hostrcb)
{
	struct ipr_hostrcb_type_41_error *error;

	error = &hostrcb->hcam.u.error64.u.type_41_error;

	error->failure_reason[sizeof(error->failure_reason) - 1] = '\0';
	ipr_err("Primary Failure Reason: %s\n", error->failure_reason);
	ipr_log_hex_data(ioa_cfg, error->data,
			 be32_to_cpu(hostrcb->hcam.length) -
			 (offsetof(struct ipr_hostrcb_error, u) +
			  offsetof(struct ipr_hostrcb_type_41_error, data)));
}

/**
 * ipr_log_generic_error - Log an adapter error.
 * @ioa_cfg:	ioa config struct
 * @hostrcb:	hostrcb struct
 *
 * Return value:
 *	none
 **/
static void ipr_log_generic_error(struct ipr_ioa_cfg *ioa_cfg,
				  struct ipr_hostrcb *hostrcb)
{
	ipr_log_hex_data(ioa_cfg, hostrcb->hcam.u.raw.data,
			 be32_to_cpu(hostrcb->hcam.length));
}

/**
 * ipr_log_sis64_device_error - Log a sis64 device error.
 * @ioa_cfg:	ioa config struct
 * @hostrcb:	hostrcb struct
 *
 * Return value:
 *	none
 **/
static void ipr_log_sis64_device_error(struct ipr_ioa_cfg *ioa_cfg,
				       struct ipr_hostrcb *hostrcb)
{
	struct ipr_hostrcb_type_21_error *error;
	char buffer[IPR_MAX_RES_PATH_LENGTH];

	error = &hostrcb->hcam.u.error64.u.type_21_error;

	ipr_err("-----Failing Device Information-----\n");
	ipr_err("World Wide Unique ID: %08X%08X%08X%08X\n",
		be32_to_cpu(error->wwn[0]), be32_to_cpu(error->wwn[1]),
		be32_to_cpu(error->wwn[2]), be32_to_cpu(error->wwn[3]));
	ipr_err("Device Resource Path: %s\n",
		__ipr_format_res_path(error->res_path,
				      buffer, sizeof(buffer)));
	error->primary_problem_desc[sizeof(error->primary_problem_desc) - 1] = '\0';
	error->second_problem_desc[sizeof(error->second_problem_desc) - 1] = '\0';
	ipr_err("Primary Problem Description: %s\n", error->primary_problem_desc);
	ipr_err("Secondary Problem Description: %s\n", error->second_problem_desc);
	ipr_err("SCSI Sense Data:\n");
	ipr_log_hex_data(ioa_cfg, error->sense_data, sizeof(error->sense_data));
	ipr_err("SCSI Command Descriptor Block:\n");
	ipr_log_hex_data(ioa_cfg, error->cdb, sizeof(error->cdb));

	ipr_err("Additional IOA Data:\n");
	ipr_log_hex_data(ioa_cfg, error->ioa_data, be32_to_cpu(error->length_of_error));
}

/**
 * ipr_get_error - Find the specified IOASC in the ipr_error_table.
 * @ioasc:	IOASC
 *
 * This function will return the index into the ipr_error_table
 * for the specified IOASC. If the IOASC is not in the table,
 * 0 will be returned, which points to the entry used for unknown errors.
 *
 * Return value:
 *	index into the ipr_error_table
 **/
static u32 ipr_get_error(u32 ioasc)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(ipr_error_table); i++)
		if (ipr_error_table[i].ioasc == (ioasc & IPR_IOASC_IOASC_MASK))
			return i;

	return 0;
}
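
/*
 * Usage note: ipr_get_error() masks the IOASC with IPR_IOASC_IOASC_MASK
 * before each compare, so environment-specific low-order bits cannot
 * defeat a match, and a miss falls through to index 0, the catch-all
 * "unknown error" entry. The lookup is therefore total:
 * ipr_error_table[ipr_get_error(ioasc)] is always a valid entry.
 */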

/**
 * ipr_handle_log_data - Log an adapter error.
 * @ioa_cfg:	ioa config struct
 * @hostrcb:	hostrcb struct
 *
 * This function logs an adapter error to the system.
 *
 * Return value:
 *	none
 **/
static void ipr_handle_log_data(struct ipr_ioa_cfg *ioa_cfg,
				struct ipr_hostrcb *hostrcb)
{
	u32 ioasc;
	int error_index;
	struct ipr_hostrcb_type_21_error *error;

	if (hostrcb->hcam.notify_type != IPR_HOST_RCB_NOTIF_TYPE_ERROR_LOG_ENTRY)
		return;

	if (hostrcb->hcam.notifications_lost == IPR_HOST_RCB_NOTIFICATIONS_LOST)
		dev_err(&ioa_cfg->pdev->dev, "Error notifications lost\n");

	if (ioa_cfg->sis64)
		ioasc = be32_to_cpu(hostrcb->hcam.u.error64.fd_ioasc);
	else
		ioasc = be32_to_cpu(hostrcb->hcam.u.error.fd_ioasc);

	if (!ioa_cfg->sis64 && (ioasc == IPR_IOASC_BUS_WAS_RESET ||
	    ioasc == IPR_IOASC_BUS_WAS_RESET_BY_OTHER)) {
		/* Tell the midlayer we had a bus reset so it will handle the UA properly */
		scsi_report_bus_reset(ioa_cfg->host,
				      hostrcb->hcam.u.error.fd_res_addr.bus);
	}

	error_index = ipr_get_error(ioasc);

	if (!ipr_error_table[error_index].log_hcam)
		return;

	if (ioasc == IPR_IOASC_HW_CMD_FAILED &&
	    hostrcb->hcam.overlay_id == IPR_HOST_RCB_OVERLAY_ID_21) {
		error = &hostrcb->hcam.u.error64.u.type_21_error;

		if (((be32_to_cpu(error->sense_data[0]) & 0x0000ff00) >> 8) == ILLEGAL_REQUEST &&
		    ioa_cfg->log_level <= IPR_DEFAULT_LOG_LEVEL)
			return;
	}

	ipr_hcam_err(hostrcb, "%s\n", ipr_error_table[error_index].error);

	/* Set indication we have logged an error */
	ioa_cfg->errors_logged++;

	if (ioa_cfg->log_level < ipr_error_table[error_index].log_hcam)
		return;
	if (be32_to_cpu(hostrcb->hcam.length) > sizeof(hostrcb->hcam.u.raw))
		hostrcb->hcam.length = cpu_to_be32(sizeof(hostrcb->hcam.u.raw));

	switch (hostrcb->hcam.overlay_id) {
	case IPR_HOST_RCB_OVERLAY_ID_2:
		ipr_log_cache_error(ioa_cfg, hostrcb);
		break;
	case IPR_HOST_RCB_OVERLAY_ID_3:
		ipr_log_config_error(ioa_cfg, hostrcb);
		break;
	case IPR_HOST_RCB_OVERLAY_ID_4:
	case IPR_HOST_RCB_OVERLAY_ID_6:
		ipr_log_array_error(ioa_cfg, hostrcb);
		break;
	case IPR_HOST_RCB_OVERLAY_ID_7:
		ipr_log_dual_ioa_error(ioa_cfg, hostrcb);
		break;
	case IPR_HOST_RCB_OVERLAY_ID_12:
		ipr_log_enhanced_cache_error(ioa_cfg, hostrcb);
		break;
	case IPR_HOST_RCB_OVERLAY_ID_13:
		ipr_log_enhanced_config_error(ioa_cfg, hostrcb);
		break;
	case IPR_HOST_RCB_OVERLAY_ID_14:
	case IPR_HOST_RCB_OVERLAY_ID_16:
		ipr_log_enhanced_array_error(ioa_cfg, hostrcb);
		break;
	case IPR_HOST_RCB_OVERLAY_ID_17:
		ipr_log_enhanced_dual_ioa_error(ioa_cfg, hostrcb);
		break;
	case IPR_HOST_RCB_OVERLAY_ID_20:
		ipr_log_fabric_error(ioa_cfg, hostrcb);
		break;
	case IPR_HOST_RCB_OVERLAY_ID_21:
		ipr_log_sis64_device_error(ioa_cfg, hostrcb);
		break;
	case IPR_HOST_RCB_OVERLAY_ID_23:
		ipr_log_sis64_config_error(ioa_cfg, hostrcb);
		break;
	case IPR_HOST_RCB_OVERLAY_ID_24:
	case IPR_HOST_RCB_OVERLAY_ID_26:
		ipr_log_sis64_array_error(ioa_cfg, hostrcb);
		break;
	case IPR_HOST_RCB_OVERLAY_ID_30:
		ipr_log_sis64_fabric_error(ioa_cfg, hostrcb);
		break;
	case IPR_HOST_RCB_OVERLAY_ID_41:
		ipr_log_sis64_service_required_error(ioa_cfg, hostrcb);
		break;
	case IPR_HOST_RCB_OVERLAY_ID_1:
	case IPR_HOST_RCB_OVERLAY_ID_DEFAULT:
	default:
		ipr_log_generic_error(ioa_cfg, hostrcb);
		break;
	}
}

static struct ipr_hostrcb *ipr_get_free_hostrcb(struct ipr_ioa_cfg *ioa)
{
	struct ipr_hostrcb *hostrcb;

	hostrcb = list_first_entry_or_null(&ioa->hostrcb_free_q,
					   struct ipr_hostrcb, queue);

	if (unlikely(!hostrcb)) {
		dev_info(&ioa->pdev->dev, "Reclaiming async error buffers.");
		/*
		 * The free list can only be empty while earlier error buffers
		 * are still queued for reporting, so reclaim the oldest entry
		 * from the report queue.
		 */
		hostrcb = list_first_entry_or_null(&ioa->hostrcb_report_q,
						   struct ipr_hostrcb, queue);
	}

	list_del_init(&hostrcb->queue);
	return hostrcb;
}

/**
 * ipr_process_error - Op done function for an adapter error log.
 * @ipr_cmd:	ipr command struct
 *
 * This function is the op done function for an error log host
 * controlled async from the adapter. It will log the error and
 * send the HCAM back to the adapter.
 *
 * Return value:
 *	none
 **/
static void ipr_process_error(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
	struct ipr_hostrcb *hostrcb = ipr_cmd->u.hostrcb;
	u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
	u32 fd_ioasc;

	if (ioa_cfg->sis64)
		fd_ioasc = be32_to_cpu(hostrcb->hcam.u.error64.fd_ioasc);
	else
		fd_ioasc = be32_to_cpu(hostrcb->hcam.u.error.fd_ioasc);

	list_del_init(&hostrcb->queue);
	list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);

	if (!ioasc) {
		ipr_handle_log_data(ioa_cfg, hostrcb);
		if (fd_ioasc == IPR_IOASC_NR_IOA_RESET_REQUIRED)
			ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_ABBREV);
	} else if (ioasc != IPR_IOASC_IOA_WAS_RESET &&
		   ioasc != IPR_IOASC_ABORTED_CMD_TERM_BY_HOST) {
		dev_err(&ioa_cfg->pdev->dev,
			"Host RCB failed with IOASC: 0x%08X\n", ioasc);
	}

	list_add_tail(&hostrcb->queue, &ioa_cfg->hostrcb_report_q);
	schedule_work(&ioa_cfg->work_q);
	hostrcb = ipr_get_free_hostrcb(ioa_cfg);

	ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_LOG_DATA, hostrcb);
}

/**
 * ipr_timeout - An internally generated op has timed out.
 * @t:	Timer context used to fetch ipr command struct
 *
 * This function blocks host requests and initiates an
 * adapter reset.
 *
 * Return value:
 *	none
 **/
static void ipr_timeout(struct timer_list *t)
{
	struct ipr_cmnd *ipr_cmd = from_timer(ipr_cmd, t, timer);
	unsigned long lock_flags = 0;
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;

	ENTER;
	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);

	ioa_cfg->errors_logged++;
	dev_err(&ioa_cfg->pdev->dev,
		"Adapter being reset due to command timeout.\n");

	if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
		ioa_cfg->sdt_state = GET_DUMP;

	if (!ioa_cfg->in_reset_reload || ioa_cfg->reset_cmd == ipr_cmd)
		ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);

	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
	LEAVE;
}
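
/*
 * Sketch of the timer wiring behind ipr_timeout() (hypothetical setup
 * helper and timeout value, not driver API): timer_setup() registers the
 * callback on the timer_list embedded in the command, and from_timer() in
 * the callback recovers the enclosing ipr_cmnd from that same member.
 */
static void __maybe_unused demo_arm_internal_timeout(struct ipr_cmnd *ipr_cmd)
{
	timer_setup(&ipr_cmd->timer, ipr_timeout, 0);
	ipr_cmd->timer.expires = jiffies + 30 * HZ;	/* hypothetical timeout */
	add_timer(&ipr_cmd->timer);
}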

/**
 * ipr_oper_timeout - Adapter timed out transitioning to operational
 * @t:	Timer context used to fetch ipr command struct
 *
 * This function blocks host requests and initiates an
 * adapter reset.
 *
 * Return value:
 *	none
 **/
static void ipr_oper_timeout(struct timer_list *t)
{
	struct ipr_cmnd *ipr_cmd = from_timer(ipr_cmd, t, timer);
	unsigned long lock_flags = 0;
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;

	ENTER;
	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);

	ioa_cfg->errors_logged++;
	dev_err(&ioa_cfg->pdev->dev,
		"Adapter timed out transitioning to operational.\n");

	if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
		ioa_cfg->sdt_state = GET_DUMP;

	if (!ioa_cfg->in_reset_reload || ioa_cfg->reset_cmd == ipr_cmd) {
		if (ipr_fastfail)
			ioa_cfg->reset_retries += IPR_NUM_RESET_RELOAD_RETRIES;
		ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
	}

	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
	LEAVE;
}

/**
 * ipr_find_ses_entry - Find matching SES in SES table
 * @res:	resource entry struct of SES
 *
 * Return value:
 *	pointer to SES table entry / NULL on failure
 **/
static const struct ipr_ses_table_entry *
ipr_find_ses_entry(struct ipr_resource_entry *res)
{
	int i, j, matches;
	struct ipr_std_inq_vpids *vpids;
	const struct ipr_ses_table_entry *ste = ipr_ses_table;

	for (i = 0; i < ARRAY_SIZE(ipr_ses_table); i++, ste++) {
		for (j = 0, matches = 0; j < IPR_PROD_ID_LEN; j++) {
			if (ste->compare_product_id_byte[j] == 'X') {
				vpids = &res->std_inq_data.vpids;
				if (vpids->product_id[j] == ste->product_id[j])
					matches++;
				else
					break;
			} else
				matches++;
		}

		if (matches == IPR_PROD_ID_LEN)
			return ste;
	}

	return NULL;
}

/**
 * ipr_get_max_scsi_speed - Determine max SCSI speed for a given bus
 * @ioa_cfg:	ioa config struct
 * @bus:	SCSI bus
 * @bus_width:	bus width
 *
 * Return value:
 *	SCSI bus speed in units of 100KHz, 1600 is 160 MHz
 *	For a 2-byte wide SCSI bus, the maximum transfer speed is
 *	twice the maximum transfer rate (e.g. for a wide enabled bus,
 *	max 160MHz = max 320MB/sec).
 **/
static u32 ipr_get_max_scsi_speed(struct ipr_ioa_cfg *ioa_cfg, u8 bus, u8 bus_width)
{
	struct ipr_resource_entry *res;
	const struct ipr_ses_table_entry *ste;
	u32 max_xfer_rate = IPR_MAX_SCSI_RATE(bus_width);

	/* Loop through each config table entry in the config table buffer */
	list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
		if (!(IPR_IS_SES_DEVICE(res->std_inq_data)))
			continue;

		if (bus != res->bus)
			continue;

		if (!(ste = ipr_find_ses_entry(res)))
			continue;

		max_xfer_rate = (ste->max_bus_speed_limit * 10) / (bus_width / 8);
	}

	return max_xfer_rate;
}

/**
 * ipr_wait_iodbg_ack - Wait for an IODEBUG ACK from the IOA
 * @ioa_cfg:	ioa config struct
 * @max_delay:	max delay in micro-seconds to wait
 *
 * Waits for an IODEBUG ACK from the IOA, doing busy looping.
 *
 * Return value:
 *	0 on success / other on failure
 **/
static int ipr_wait_iodbg_ack(struct ipr_ioa_cfg *ioa_cfg, int max_delay)
{
	volatile u32 pcii_reg;
	int delay = 1;

	/* Read interrupt reg until IOA signals IO Debug Acknowledge */
	while (delay < max_delay) {
		pcii_reg = readl(ioa_cfg->regs.sense_interrupt_reg);

		if (pcii_reg & IPR_PCII_IO_DEBUG_ACKNOWLEDGE)
			return 0;

		/* udelay cannot be used if delay is more than a few milliseconds */
		if ((delay / 1000) > MAX_UDELAY_MS)
			mdelay(delay / 1000);
		else
			udelay(delay);

		delay += delay;
	}
	return -EIO;
}
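
/*
 * Design note: "delay += delay" above doubles the poll interval each pass
 * (1, 2, 4, ... microseconds), so the total busy-wait time is bounded by
 * roughly 2 * max_delay while the early iterations still catch a fast
 * acknowledge almost immediately.
 */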

/**
 * ipr_get_sis64_dump_data_section - Dump IOA memory
 * @ioa_cfg:		ioa config struct
 * @start_addr:		adapter address to dump
 * @dest:		destination kernel buffer
 * @length_in_words:	length to dump in 4 byte words
 *
 * Return value:
 *	0 on success
 **/
static int ipr_get_sis64_dump_data_section(struct ipr_ioa_cfg *ioa_cfg,
					   u32 start_addr,
					   __be32 *dest, u32 length_in_words)
{
	int i;

	for (i = 0; i < length_in_words; i++) {
		writel(start_addr+(i*4), ioa_cfg->regs.dump_addr_reg);
		*dest = cpu_to_be32(readl(ioa_cfg->regs.dump_data_reg));
		dest++;
	}

	return 0;
}

/**
 * ipr_get_ldump_data_section - Dump IOA memory
 * @ioa_cfg:		ioa config struct
 * @start_addr:		adapter address to dump
 * @dest:		destination kernel buffer
 * @length_in_words:	length to dump in 4 byte words
 *
 * Return value:
 *	0 on success / -EIO on failure
 **/
static int ipr_get_ldump_data_section(struct ipr_ioa_cfg *ioa_cfg,
				      u32 start_addr,
				      __be32 *dest, u32 length_in_words)
{
	volatile u32 temp_pcii_reg;
	int i, delay = 0;

	if (ioa_cfg->sis64)
		return ipr_get_sis64_dump_data_section(ioa_cfg, start_addr,
						       dest, length_in_words);

	/* Write IOA interrupt reg starting LDUMP state */
	writel((IPR_UPROCI_RESET_ALERT | IPR_UPROCI_IO_DEBUG_ALERT),
	       ioa_cfg->regs.set_uproc_interrupt_reg32);

	/* Wait for IO debug acknowledge */
	if (ipr_wait_iodbg_ack(ioa_cfg,
			       IPR_LDUMP_MAX_LONG_ACK_DELAY_IN_USEC)) {
		dev_err(&ioa_cfg->pdev->dev,
			"IOA dump long data transfer timeout\n");
		return -EIO;
	}

	/* Signal LDUMP interlocked - clear IO debug ack */
	writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE,
	       ioa_cfg->regs.clr_interrupt_reg);

	/* Write Mailbox with starting address */
	writel(start_addr, ioa_cfg->ioa_mailbox);

	/* Signal address valid - clear IOA Reset alert */
	writel(IPR_UPROCI_RESET_ALERT,
	       ioa_cfg->regs.clr_uproc_interrupt_reg32);

	for (i = 0; i < length_in_words; i++) {
		/* Wait for IO debug acknowledge */
		if (ipr_wait_iodbg_ack(ioa_cfg,
				       IPR_LDUMP_MAX_SHORT_ACK_DELAY_IN_USEC)) {
			dev_err(&ioa_cfg->pdev->dev,
				"IOA dump short data transfer timeout\n");
			return -EIO;
		}

		/* Read data from mailbox and increment destination pointer */
		*dest = cpu_to_be32(readl(ioa_cfg->ioa_mailbox));
		dest++;

		/* For all but the last word of data, signal data received */
		if (i < (length_in_words - 1)) {
			/* Signal dump data received - Clear IO debug Ack */
			writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE,
			       ioa_cfg->regs.clr_interrupt_reg);
		}
	}

	/* Signal end of block transfer. Set reset alert then clear IO debug ack */
	writel(IPR_UPROCI_RESET_ALERT,
	       ioa_cfg->regs.set_uproc_interrupt_reg32);

	writel(IPR_UPROCI_IO_DEBUG_ALERT,
	       ioa_cfg->regs.clr_uproc_interrupt_reg32);

	/* Signal dump data received - Clear IO debug Ack */
	writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE,
	       ioa_cfg->regs.clr_interrupt_reg);

	/* Wait for IOA to signal LDUMP exit - IOA reset alert will be cleared */
	while (delay < IPR_LDUMP_MAX_SHORT_ACK_DELAY_IN_USEC) {
		temp_pcii_reg =
		    readl(ioa_cfg->regs.sense_uproc_interrupt_reg32);

		if (!(temp_pcii_reg & IPR_UPROCI_RESET_ALERT))
			return 0;

		udelay(10);
		delay += 10;
	}

	return 0;
}

#ifdef CONFIG_SCSI_IPR_DUMP
/**
 * ipr_sdt_copy - Copy Smart Dump Table to kernel buffer
 * @ioa_cfg:		ioa config struct
 * @pci_address:	adapter address
 * @length:		length of data to copy
 *
 * Copy data from PCI adapter to kernel buffer.
 * Note: length MUST be a 4 byte multiple
 * Return value:
 *	0 on success / other on failure
 **/
static int ipr_sdt_copy(struct ipr_ioa_cfg *ioa_cfg,
			unsigned long pci_address, u32 length)
{
	int bytes_copied = 0;
	int cur_len, rc, rem_len, rem_page_len, max_dump_size;
	__be32 *page;
	unsigned long lock_flags = 0;
	struct ipr_ioa_dump *ioa_dump = &ioa_cfg->dump->ioa_dump;

	if (ioa_cfg->sis64)
		max_dump_size = IPR_FMT3_MAX_IOA_DUMP_SIZE;
	else
		max_dump_size = IPR_FMT2_MAX_IOA_DUMP_SIZE;

	while (bytes_copied < length &&
	       (ioa_dump->hdr.len + bytes_copied) < max_dump_size) {
		if (ioa_dump->page_offset >= PAGE_SIZE ||
		    ioa_dump->page_offset == 0) {
			page = (__be32 *)__get_free_page(GFP_ATOMIC);

			if (!page) {
				ipr_trace;
				return bytes_copied;
			}

			ioa_dump->page_offset = 0;
			ioa_dump->ioa_data[ioa_dump->next_page_index] = page;
			ioa_dump->next_page_index++;
		} else
			page = ioa_dump->ioa_data[ioa_dump->next_page_index - 1];

		rem_len = length - bytes_copied;
		rem_page_len = PAGE_SIZE - ioa_dump->page_offset;
		cur_len = min(rem_len, rem_page_len);

		spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
		if (ioa_cfg->sdt_state == ABORT_DUMP) {
			rc = -EIO;
		} else {
			rc = ipr_get_ldump_data_section(ioa_cfg,
							pci_address + bytes_copied,
							&page[ioa_dump->page_offset / 4],
							(cur_len / sizeof(u32)));
		}
		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);

		if (!rc) {
			ioa_dump->page_offset += cur_len;
			bytes_copied += cur_len;
		} else {
			ipr_trace;
			break;
		}
		schedule();
	}

	return bytes_copied;
}
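
/*
 * Usage note: cur_len = min(rem_len, rem_page_len) clamps every transfer
 * to the current page, so each pass of the loop either finishes the
 * request or exactly fills the page, and the next pass allocates a fresh
 * one. The schedule() call keeps long dumps from monopolizing the CPU.
 */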
3038
3039/**
3040 * ipr_init_dump_entry_hdr - Initialize a dump entry header.
3041 * @hdr: dump entry header struct
3042 *
3043 * Return value:
3044 * nothing
3045 **/
3046static void ipr_init_dump_entry_hdr(struct ipr_dump_entry_header *hdr)
3047{
3048 hdr->eye_catcher = IPR_DUMP_EYE_CATCHER;
3049 hdr->num_elems = 1;
3050 hdr->offset = sizeof(*hdr);
3051 hdr->status = IPR_DUMP_STATUS_SUCCESS;
3052}
3053
3054/**
3055 * ipr_dump_ioa_type_data - Fill in the adapter type in the dump.
3056 * @ioa_cfg: ioa config struct
3057 * @driver_dump: driver dump struct
3058 *
3059 * Return value:
3060 * nothing
3061 **/
3062static void ipr_dump_ioa_type_data(struct ipr_ioa_cfg *ioa_cfg,
3063 struct ipr_driver_dump *driver_dump)
3064{
3065 struct ipr_inquiry_page3 *ucode_vpd = &ioa_cfg->vpd_cbs->page3_data;
3066
3067 ipr_init_dump_entry_hdr(&driver_dump->ioa_type_entry.hdr);
3068 driver_dump->ioa_type_entry.hdr.len =
3069 sizeof(struct ipr_dump_ioa_type_entry) -
3070 sizeof(struct ipr_dump_entry_header);
3071 driver_dump->ioa_type_entry.hdr.data_type = IPR_DUMP_DATA_TYPE_BINARY;
3072 driver_dump->ioa_type_entry.hdr.id = IPR_DUMP_DRIVER_TYPE_ID;
3073 driver_dump->ioa_type_entry.type = ioa_cfg->type;
3074 driver_dump->ioa_type_entry.fw_version = (ucode_vpd->major_release << 24) |
3075 (ucode_vpd->card_type << 16) | (ucode_vpd->minor_release[0] << 8) |
3076 ucode_vpd->minor_release[1];
3077 driver_dump->hdr.num_entries++;
3078}
3079
3080/**
3081 * ipr_dump_version_data - Fill in the driver version in the dump.
3082 * @ioa_cfg: ioa config struct
3083 * @driver_dump: driver dump struct
3084 *
3085 * Return value:
3086 * nothing
3087 **/
3088static void ipr_dump_version_data(struct ipr_ioa_cfg *ioa_cfg,
3089 struct ipr_driver_dump *driver_dump)
3090{
3091 ipr_init_dump_entry_hdr(&driver_dump->version_entry.hdr);
3092 driver_dump->version_entry.hdr.len =
3093 sizeof(struct ipr_dump_version_entry) -
3094 sizeof(struct ipr_dump_entry_header);
3095 driver_dump->version_entry.hdr.data_type = IPR_DUMP_DATA_TYPE_ASCII;
3096 driver_dump->version_entry.hdr.id = IPR_DUMP_DRIVER_VERSION_ID;
3097 strcpy(driver_dump->version_entry.version, IPR_DRIVER_VERSION);
3098 driver_dump->hdr.num_entries++;
3099}
3100
3101/**
3102 * ipr_dump_trace_data - Fill in the IOA trace in the dump.
3103 * @ioa_cfg: ioa config struct
3104 * @driver_dump: driver dump struct
3105 *
3106 * Return value:
3107 * nothing
3108 **/
3109static void ipr_dump_trace_data(struct ipr_ioa_cfg *ioa_cfg,
3110 struct ipr_driver_dump *driver_dump)
3111{
3112 ipr_init_dump_entry_hdr(&driver_dump->trace_entry.hdr);
3113 driver_dump->trace_entry.hdr.len =
3114 sizeof(struct ipr_dump_trace_entry) -
3115 sizeof(struct ipr_dump_entry_header);
3116 driver_dump->trace_entry.hdr.data_type = IPR_DUMP_DATA_TYPE_BINARY;
3117 driver_dump->trace_entry.hdr.id = IPR_DUMP_TRACE_ID;
3118 memcpy(driver_dump->trace_entry.trace, ioa_cfg->trace, IPR_TRACE_SIZE);
3119 driver_dump->hdr.num_entries++;
3120}
3121
3122/**
3123 * ipr_dump_location_data - Fill in the IOA location in the dump.
3124 * @ioa_cfg: ioa config struct
3125 * @driver_dump: driver dump struct
3126 *
3127 * Return value:
3128 * nothing
3129 **/
3130static void ipr_dump_location_data(struct ipr_ioa_cfg *ioa_cfg,
3131 struct ipr_driver_dump *driver_dump)
3132{
3133 ipr_init_dump_entry_hdr(&driver_dump->location_entry.hdr);
3134 driver_dump->location_entry.hdr.len =
3135 sizeof(struct ipr_dump_location_entry) -
3136 sizeof(struct ipr_dump_entry_header);
3137 driver_dump->location_entry.hdr.data_type = IPR_DUMP_DATA_TYPE_ASCII;
3138 driver_dump->location_entry.hdr.id = IPR_DUMP_LOCATION_ID;
71610f55 3139 strcpy(driver_dump->location_entry.location, dev_name(&ioa_cfg->pdev->dev));
1da177e4
LT
3140 driver_dump->hdr.num_entries++;
3141}

/**
 * ipr_get_ioa_dump - Perform a dump of the driver and adapter.
 * @ioa_cfg: ioa config struct
 * @dump: dump struct
 *
 * Return value:
 *	nothing
 **/
static void ipr_get_ioa_dump(struct ipr_ioa_cfg *ioa_cfg, struct ipr_dump *dump)
{
	unsigned long start_addr, sdt_word;
	unsigned long lock_flags = 0;
	struct ipr_driver_dump *driver_dump = &dump->driver_dump;
	struct ipr_ioa_dump *ioa_dump = &dump->ioa_dump;
	u32 num_entries, max_num_entries, start_off, end_off;
	u32 max_dump_size, bytes_to_copy, bytes_copied, rc;
	struct ipr_sdt *sdt;
	int valid = 1;
	int i;

	ENTER;

	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);

	if (ioa_cfg->sdt_state != READ_DUMP) {
		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
		return;
	}

	if (ioa_cfg->sis64) {
		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
		ssleep(IPR_DUMP_DELAY_SECONDS);
		spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	}

	start_addr = readl(ioa_cfg->ioa_mailbox);

	if (!ioa_cfg->sis64 && !ipr_sdt_is_fmt2(start_addr)) {
		dev_err(&ioa_cfg->pdev->dev,
			"Invalid dump table format: %lx\n", start_addr);
		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
		return;
	}

	dev_err(&ioa_cfg->pdev->dev, "Dump of IOA initiated\n");

	driver_dump->hdr.eye_catcher = IPR_DUMP_EYE_CATCHER;

	/* Initialize the overall dump header */
	driver_dump->hdr.len = sizeof(struct ipr_driver_dump);
	driver_dump->hdr.num_entries = 1;
	driver_dump->hdr.first_entry_offset = sizeof(struct ipr_dump_header);
	driver_dump->hdr.status = IPR_DUMP_STATUS_SUCCESS;
	driver_dump->hdr.os = IPR_DUMP_OS_LINUX;
	driver_dump->hdr.driver_name = IPR_DUMP_DRIVER_NAME;

	ipr_dump_version_data(ioa_cfg, driver_dump);
	ipr_dump_location_data(ioa_cfg, driver_dump);
	ipr_dump_ioa_type_data(ioa_cfg, driver_dump);
	ipr_dump_trace_data(ioa_cfg, driver_dump);

	/* Update dump_header */
	driver_dump->hdr.len += sizeof(struct ipr_dump_entry_header);

	/* IOA Dump entry */
	ipr_init_dump_entry_hdr(&ioa_dump->hdr);
	ioa_dump->hdr.len = 0;
	ioa_dump->hdr.data_type = IPR_DUMP_DATA_TYPE_BINARY;
	ioa_dump->hdr.id = IPR_DUMP_IOA_DUMP_ID;

	/*
	 * First entries in sdt are actually a list of dump addresses and
	 * lengths to gather the real dump data. sdt represents the pointer
	 * to the ioa generated dump table. Dump data will be extracted based
	 * on entries in this table.
	 */
	sdt = &ioa_dump->sdt;

	if (ioa_cfg->sis64) {
		max_num_entries = IPR_FMT3_NUM_SDT_ENTRIES;
		max_dump_size = IPR_FMT3_MAX_IOA_DUMP_SIZE;
	} else {
		max_num_entries = IPR_FMT2_NUM_SDT_ENTRIES;
		max_dump_size = IPR_FMT2_MAX_IOA_DUMP_SIZE;
	}

	bytes_to_copy = offsetof(struct ipr_sdt, entry) +
			(max_num_entries * sizeof(struct ipr_sdt_entry));
	rc = ipr_get_ldump_data_section(ioa_cfg, start_addr, (__be32 *)sdt,
					bytes_to_copy / sizeof(__be32));

	/* Smart Dump table is ready to use and the first entry is valid */
	if (rc || ((be32_to_cpu(sdt->hdr.state) != IPR_FMT3_SDT_READY_TO_USE) &&
	    (be32_to_cpu(sdt->hdr.state) != IPR_FMT2_SDT_READY_TO_USE))) {
		dev_err(&ioa_cfg->pdev->dev,
			"Dump of IOA failed. Dump table not valid: %d, %X.\n",
			rc, be32_to_cpu(sdt->hdr.state));
		driver_dump->hdr.status = IPR_DUMP_STATUS_FAILED;
		ioa_cfg->sdt_state = DUMP_OBTAINED;
		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
		return;
	}

	num_entries = be32_to_cpu(sdt->hdr.num_entries_used);

	if (num_entries > max_num_entries)
		num_entries = max_num_entries;

	/* Update dump length to the actual data to be copied */
	dump->driver_dump.hdr.len += sizeof(struct ipr_sdt_header);
	if (ioa_cfg->sis64)
		dump->driver_dump.hdr.len += num_entries * sizeof(struct ipr_sdt_entry);
	else
		dump->driver_dump.hdr.len += max_num_entries * sizeof(struct ipr_sdt_entry);

	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);

	for (i = 0; i < num_entries; i++) {
		if (ioa_dump->hdr.len > max_dump_size) {
			driver_dump->hdr.status = IPR_DUMP_STATUS_QUAL_SUCCESS;
			break;
		}

		if (sdt->entry[i].flags & IPR_SDT_VALID_ENTRY) {
			sdt_word = be32_to_cpu(sdt->entry[i].start_token);
			if (ioa_cfg->sis64)
				bytes_to_copy = be32_to_cpu(sdt->entry[i].end_token);
			else {
				start_off = sdt_word & IPR_FMT2_MBX_ADDR_MASK;
				end_off = be32_to_cpu(sdt->entry[i].end_token);

				if (ipr_sdt_is_fmt2(sdt_word) && sdt_word)
					bytes_to_copy = end_off - start_off;
				else
					valid = 0;
			}
			if (valid) {
				if (bytes_to_copy > max_dump_size) {
					sdt->entry[i].flags &= ~IPR_SDT_VALID_ENTRY;
					continue;
				}

				/* Copy data from adapter to driver buffers */
				bytes_copied = ipr_sdt_copy(ioa_cfg, sdt_word,
							    bytes_to_copy);

				ioa_dump->hdr.len += bytes_copied;

				if (bytes_copied != bytes_to_copy) {
					driver_dump->hdr.status = IPR_DUMP_STATUS_QUAL_SUCCESS;
					break;
				}
			}
		}
	}

	dev_err(&ioa_cfg->pdev->dev, "Dump of IOA completed.\n");

	/* Update dump_header */
	driver_dump->hdr.len += ioa_dump->hdr.len;
	wmb();
	ioa_cfg->sdt_state = DUMP_OBTAINED;
	LEAVE;
}

#else
#define ipr_get_ioa_dump(ioa_cfg, dump) do { } while (0)
#endif

/**
 * ipr_release_dump - Free adapter dump memory
 * @kref: kref struct
 *
 * Return value:
 *	nothing
 **/
static void ipr_release_dump(struct kref *kref)
{
	struct ipr_dump *dump = container_of(kref, struct ipr_dump, kref);
	struct ipr_ioa_cfg *ioa_cfg = dump->ioa_cfg;
	unsigned long lock_flags = 0;
	int i;

	ENTER;
	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	ioa_cfg->dump = NULL;
	ioa_cfg->sdt_state = INACTIVE;
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);

	for (i = 0; i < dump->ioa_dump.next_page_index; i++)
		free_page((unsigned long) dump->ioa_dump.ioa_data[i]);

	vfree(dump->ioa_dump.ioa_data);
	kfree(dump);
	LEAVE;
}

/**
 * ipr_worker_thread - Worker thread
 * @work: work struct
 *
 * Called at task level from a work thread. This function takes care
 * of adding and removing devices from the mid-layer as configuration
 * changes are detected by the adapter.
 *
 * Return value:
 *	nothing
 **/
static void ipr_worker_thread(struct work_struct *work)
{
	unsigned long lock_flags;
	struct ipr_resource_entry *res;
	struct scsi_device *sdev;
	struct ipr_dump *dump;
	struct ipr_ioa_cfg *ioa_cfg =
		container_of(work, struct ipr_ioa_cfg, work_q);
	u8 bus, target, lun;
	int did_work;

	ENTER;
	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);

	if (ioa_cfg->sdt_state == READ_DUMP) {
		dump = ioa_cfg->dump;
		if (!dump) {
			spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
			return;
		}
		kref_get(&dump->kref);
		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
		ipr_get_ioa_dump(ioa_cfg, dump);
		kref_put(&dump->kref, ipr_release_dump);

		spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
		if (ioa_cfg->sdt_state == DUMP_OBTAINED && !ioa_cfg->dump_timeout)
			ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
		return;
	}

	if (ioa_cfg->scsi_unblock) {
		ioa_cfg->scsi_unblock = 0;
		ioa_cfg->scsi_blocked = 0;
		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
		scsi_unblock_requests(ioa_cfg->host);
		spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
		if (ioa_cfg->scsi_blocked)
			scsi_block_requests(ioa_cfg->host);
	}

	if (!ioa_cfg->scan_enabled) {
		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
		return;
	}

restart:
	do {
		did_work = 0;
		if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].allow_cmds) {
			spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
			return;
		}

		list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
			if (res->del_from_ml && res->sdev) {
				did_work = 1;
				sdev = res->sdev;
				if (!scsi_device_get(sdev)) {
					if (!res->add_to_ml)
						list_move_tail(&res->queue, &ioa_cfg->free_res_q);
					else
						res->del_from_ml = 0;
					spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
					scsi_remove_device(sdev);
					scsi_device_put(sdev);
					spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
				}
				break;
			}
		}
	} while (did_work);

	list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
		if (res->add_to_ml) {
			bus = res->bus;
			target = res->target;
			lun = res->lun;
			res->add_to_ml = 0;
			spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
			scsi_add_device(ioa_cfg->host, bus, target, lun);
			spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
			goto restart;
		}
	}

	ioa_cfg->scan_done = 1;
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
	kobject_uevent(&ioa_cfg->host->shost_dev.kobj, KOBJ_CHANGE);
	LEAVE;
}

#ifdef CONFIG_SCSI_IPR_TRACE
/**
 * ipr_read_trace - Dump the adapter trace
 * @filp: open sysfs file
 * @kobj: kobject struct
 * @bin_attr: bin_attribute struct
 * @buf: buffer
 * @off: offset
 * @count: buffer size
 *
 * Return value:
 *	number of bytes printed to buffer
 **/
static ssize_t ipr_read_trace(struct file *filp, struct kobject *kobj,
			      struct bin_attribute *bin_attr,
			      char *buf, loff_t off, size_t count)
{
	struct device *dev = container_of(kobj, struct device, kobj);
	struct Scsi_Host *shost = class_to_shost(dev);
	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
	unsigned long lock_flags = 0;
	ssize_t ret;

	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	ret = memory_read_from_buffer(buf, count, &off, ioa_cfg->trace,
				      IPR_TRACE_SIZE);
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);

	return ret;
}

static struct bin_attribute ipr_trace_attr = {
	.attr = {
		.name = "trace",
		.mode = S_IRUGO,
	},
	.size = 0,
	.read = ipr_read_trace,
};
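
/*
 * Illustrative usage, not part of the driver: with CONFIG_SCSI_IPR_TRACE
 * enabled, the IPR_TRACE_SIZE ring buffer above is exposed read-only
 * through sysfs. The exact path and host number are system dependent,
 * but a capture would look roughly like:
 *
 *   dd if=/sys/class/scsi_host/host0/trace of=/tmp/ipr_trace bs=4k
 */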
#endif

/**
 * ipr_show_fw_version - Show the firmware version
 * @dev: class device struct
 * @buf: buffer
 *
 * Return value:
 *	number of bytes printed to buffer
 **/
static ssize_t ipr_show_fw_version(struct device *dev,
				   struct device_attribute *attr, char *buf)
{
	struct Scsi_Host *shost = class_to_shost(dev);
	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
	struct ipr_inquiry_page3 *ucode_vpd = &ioa_cfg->vpd_cbs->page3_data;
	unsigned long lock_flags = 0;
	int len;

	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	len = snprintf(buf, PAGE_SIZE, "%02X%02X%02X%02X\n",
		       ucode_vpd->major_release, ucode_vpd->card_type,
		       ucode_vpd->minor_release[0],
		       ucode_vpd->minor_release[1]);
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
	return len;
}

static struct device_attribute ipr_fw_version_attr = {
	.attr = {
		.name = "fw_version",
		.mode = S_IRUGO,
	},
	.show = ipr_show_fw_version,
};

/**
 * ipr_show_log_level - Show the adapter's error logging level
 * @dev: class device struct
 * @buf: buffer
 *
 * Return value:
 *	number of bytes printed to buffer
 **/
static ssize_t ipr_show_log_level(struct device *dev,
				  struct device_attribute *attr, char *buf)
{
	struct Scsi_Host *shost = class_to_shost(dev);
	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
	unsigned long lock_flags = 0;
	int len;

	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	len = snprintf(buf, PAGE_SIZE, "%d\n", ioa_cfg->log_level);
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
	return len;
}

/**
 * ipr_store_log_level - Change the adapter's error logging level
 * @dev: class device struct
 * @buf: buffer
 *
 * Return value:
 *	number of bytes consumed on success / other on failure
 **/
static ssize_t ipr_store_log_level(struct device *dev,
				   struct device_attribute *attr,
				   const char *buf, size_t count)
{
	struct Scsi_Host *shost = class_to_shost(dev);
	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
	unsigned long lock_flags = 0;

	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	ioa_cfg->log_level = simple_strtoul(buf, NULL, 10);
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
	return strlen(buf);
}

static struct device_attribute ipr_log_level_attr = {
	.attr = {
		.name = "log_level",
		.mode = S_IRUGO | S_IWUSR,
	},
	.show = ipr_show_log_level,
	.store = ipr_store_log_level
};
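
/*
 * Illustrative usage (assumed sysfs path; the host number varies by
 * system). The value is parsed with simple_strtoul() above, so only a
 * leading decimal number in the written string is honored:
 *
 *   cat /sys/class/scsi_host/host0/log_level       # read current level
 *   echo 4 > /sys/class/scsi_host/host0/log_level  # raise verbosity
 */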

/**
 * ipr_store_diagnostics - IOA Diagnostics interface
 * @dev: device struct
 * @buf: buffer
 * @count: buffer size
 *
 * This function will reset the adapter and wait a reasonable
 * amount of time for any errors that the adapter might log.
 *
 * Return value:
 *	count on success / other on failure
 **/
static ssize_t ipr_store_diagnostics(struct device *dev,
				     struct device_attribute *attr,
				     const char *buf, size_t count)
{
	struct Scsi_Host *shost = class_to_shost(dev);
	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
	unsigned long lock_flags = 0;
	int rc = count;

	if (!capable(CAP_SYS_ADMIN))
		return -EACCES;

	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	while (ioa_cfg->in_reset_reload) {
		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
		wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
		spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	}

	ioa_cfg->errors_logged = 0;
	ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NORMAL);

	if (ioa_cfg->in_reset_reload) {
		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
		wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);

		/* Wait for a second for any errors to be logged */
		msleep(1000);
	} else {
		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
		return -EIO;
	}

	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	if (ioa_cfg->in_reset_reload || ioa_cfg->errors_logged)
		rc = -EIO;
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);

	return rc;
}

static struct device_attribute ipr_diagnostics_attr = {
	.attr = {
		.name = "run_diagnostics",
		.mode = S_IWUSR,
	},
	.store = ipr_store_diagnostics
};

/**
 * ipr_show_adapter_state - Show the adapter's state
 * @dev: device struct
 * @buf: buffer
 *
 * Return value:
 *	number of bytes printed to buffer
 **/
static ssize_t ipr_show_adapter_state(struct device *dev,
				      struct device_attribute *attr, char *buf)
{
	struct Scsi_Host *shost = class_to_shost(dev);
	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
	unsigned long lock_flags = 0;
	int len;

	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	if (ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead)
		len = snprintf(buf, PAGE_SIZE, "offline\n");
	else
		len = snprintf(buf, PAGE_SIZE, "online\n");
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
	return len;
}

/**
 * ipr_store_adapter_state - Change adapter state
 * @dev: device struct
 * @buf: buffer
 * @count: buffer size
 *
 * This function will change the adapter's state.
 *
 * Return value:
 *	count on success / other on failure
 **/
static ssize_t ipr_store_adapter_state(struct device *dev,
				       struct device_attribute *attr,
				       const char *buf, size_t count)
{
	struct Scsi_Host *shost = class_to_shost(dev);
	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
	unsigned long lock_flags;
	int result = count, i;

	if (!capable(CAP_SYS_ADMIN))
		return -EACCES;

	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	if (ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead &&
	    !strncmp(buf, "online", 6)) {
		for (i = 0; i < ioa_cfg->hrrq_num; i++) {
			spin_lock(&ioa_cfg->hrrq[i]._lock);
			ioa_cfg->hrrq[i].ioa_is_dead = 0;
			spin_unlock(&ioa_cfg->hrrq[i]._lock);
		}
		wmb();
		ioa_cfg->reset_retries = 0;
		ioa_cfg->in_ioa_bringdown = 0;
		ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
	}
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
	wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);

	return result;
}

static struct device_attribute ipr_ioa_state_attr = {
	.attr = {
		.name = "online_state",
		.mode = S_IRUGO | S_IWUSR,
	},
	.show = ipr_show_adapter_state,
	.store = ipr_store_adapter_state
};
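
/*
 * Illustrative usage (assumed sysfs path): per the checks in
 * ipr_store_adapter_state() above, writing only takes effect when the
 * adapter is currently offline, and only the literal string "online"
 * is acted on; anything else is silently ignored:
 *
 *   echo online > /sys/class/scsi_host/host0/online_state
 */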

/**
 * ipr_store_reset_adapter - Reset the adapter
 * @dev: device struct
 * @buf: buffer
 * @count: buffer size
 *
 * This function will reset the adapter.
 *
 * Return value:
 *	count on success / other on failure
 **/
static ssize_t ipr_store_reset_adapter(struct device *dev,
				       struct device_attribute *attr,
				       const char *buf, size_t count)
{
	struct Scsi_Host *shost = class_to_shost(dev);
	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
	unsigned long lock_flags;
	int result = count;

	if (!capable(CAP_SYS_ADMIN))
		return -EACCES;

	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	if (!ioa_cfg->in_reset_reload)
		ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NORMAL);
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
	wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);

	return result;
}

static struct device_attribute ipr_ioa_reset_attr = {
	.attr = {
		.name = "reset_host",
		.mode = S_IWUSR,
	},
	.store = ipr_store_reset_adapter
};
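
/*
 * Illustrative usage (assumed sysfs path): any write by a CAP_SYS_ADMIN
 * process triggers a normal-shutdown reset, if one is not already in
 * progress, and blocks until the reset/reload completes:
 *
 *   echo 1 > /sys/class/scsi_host/host0/reset_host
 */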

static int ipr_iopoll(struct irq_poll *iop, int budget);

/**
 * ipr_show_iopoll_weight - Show ipr polling mode
 * @dev: class device struct
 * @buf: buffer
 *
 * Return value:
 *	number of bytes printed to buffer
 **/
static ssize_t ipr_show_iopoll_weight(struct device *dev,
				      struct device_attribute *attr, char *buf)
{
	struct Scsi_Host *shost = class_to_shost(dev);
	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
	unsigned long lock_flags = 0;
	int len;

	spin_lock_irqsave(shost->host_lock, lock_flags);
	len = snprintf(buf, PAGE_SIZE, "%d\n", ioa_cfg->iopoll_weight);
	spin_unlock_irqrestore(shost->host_lock, lock_flags);

	return len;
}

/**
 * ipr_store_iopoll_weight - Change the adapter's polling mode
 * @dev: class device struct
 * @buf: buffer
 *
 * Return value:
 *	number of bytes consumed on success / other on failure
 **/
static ssize_t ipr_store_iopoll_weight(struct device *dev,
				       struct device_attribute *attr,
				       const char *buf, size_t count)
{
	struct Scsi_Host *shost = class_to_shost(dev);
	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
	unsigned long user_iopoll_weight;
	unsigned long lock_flags = 0;
	int i;

	if (!ioa_cfg->sis64) {
		dev_info(&ioa_cfg->pdev->dev, "irq_poll not supported on this adapter\n");
		return -EINVAL;
	}
	if (kstrtoul(buf, 10, &user_iopoll_weight))
		return -EINVAL;

	if (user_iopoll_weight > 256) {
		dev_info(&ioa_cfg->pdev->dev, "Invalid irq_poll weight. It must be less than 256\n");
		return -EINVAL;
	}

	if (user_iopoll_weight == ioa_cfg->iopoll_weight) {
		dev_info(&ioa_cfg->pdev->dev, "Current irq_poll weight has the same weight\n");
		return strlen(buf);
	}

	if (ioa_cfg->iopoll_weight && ioa_cfg->sis64 && ioa_cfg->nvectors > 1) {
		for (i = 1; i < ioa_cfg->hrrq_num; i++)
			irq_poll_disable(&ioa_cfg->hrrq[i].iopoll);
	}

	spin_lock_irqsave(shost->host_lock, lock_flags);
	ioa_cfg->iopoll_weight = user_iopoll_weight;
	if (ioa_cfg->iopoll_weight && ioa_cfg->sis64 && ioa_cfg->nvectors > 1) {
		for (i = 1; i < ioa_cfg->hrrq_num; i++) {
			irq_poll_init(&ioa_cfg->hrrq[i].iopoll,
				      ioa_cfg->iopoll_weight, ipr_iopoll);
		}
	}
	spin_unlock_irqrestore(shost->host_lock, lock_flags);

	return strlen(buf);
}

static struct device_attribute ipr_iopoll_weight_attr = {
	.attr = {
		.name = "iopoll_weight",
		.mode = S_IRUGO | S_IWUSR,
	},
	.show = ipr_show_iopoll_weight,
	.store = ipr_store_iopoll_weight
};
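
/*
 * Illustrative usage (assumed sysfs path): per the guards in
 * ipr_store_iopoll_weight() above, polling mode is only honored on
 * sis64 adapters with more than one HRRQ vector, and a weight of 0
 * reverts to purely interrupt-driven completion:
 *
 *   echo 64 > /sys/class/scsi_host/host0/iopoll_weight
 */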

/**
 * ipr_alloc_ucode_buffer - Allocates a microcode download buffer
 * @buf_len: buffer length
 *
 * Allocates a DMA'able buffer in chunks and assembles a scatter/gather
 * list to use for microcode download
 *
 * Return value:
 *	pointer to sglist / NULL on failure
 **/
static struct ipr_sglist *ipr_alloc_ucode_buffer(int buf_len)
{
	int sg_size, order;
	struct ipr_sglist *sglist;

	/* Get the minimum size per scatter/gather element */
	sg_size = buf_len / (IPR_MAX_SGLIST - 1);

	/* Get the actual size per element */
	order = get_order(sg_size);

	/* Allocate a scatter/gather list for the DMA */
	sglist = kzalloc(sizeof(struct ipr_sglist), GFP_KERNEL);
	if (sglist == NULL) {
		ipr_trace;
		return NULL;
	}
	sglist->order = order;
	sglist->scatterlist = sgl_alloc_order(buf_len, order, false, GFP_KERNEL,
					      &sglist->num_sg);
	if (!sglist->scatterlist) {
		kfree(sglist);
		return NULL;
	}

	return sglist;
}
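
/*
 * Worked sizing example for ipr_alloc_ucode_buffer() (illustrative;
 * assumes 4K pages and IPR_MAX_SGLIST == 64 as defined in ipr.h):
 * for a 4 MiB microcode image, sg_size = 4 MiB / 63 ~= 66576 bytes.
 * get_order() rounds that up to order 5 (32 pages = 128 KiB per
 * element), so sgl_alloc_order() returns roughly 32 scatterlist
 * entries covering the whole download buffer.
 */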

/**
 * ipr_free_ucode_buffer - Frees a microcode download buffer
 * @sglist: scatter/gather list pointer
 *
 * Free a DMA'able ucode download buffer previously allocated with
 * ipr_alloc_ucode_buffer
 *
 * Return value:
 *	nothing
 **/
static void ipr_free_ucode_buffer(struct ipr_sglist *sglist)
{
	sgl_free_order(sglist->scatterlist, sglist->order);
	kfree(sglist);
}

/**
 * ipr_copy_ucode_buffer - Copy user buffer to kernel buffer
 * @sglist: scatter/gather list pointer
 * @buffer: buffer pointer
 * @len: buffer length
 *
 * Copy a microcode image from a user buffer into a buffer allocated by
 * ipr_alloc_ucode_buffer
 *
 * Return value:
 *	0 on success / other on failure
 **/
static int ipr_copy_ucode_buffer(struct ipr_sglist *sglist,
				 u8 *buffer, u32 len)
{
	int bsize_elem, i, result = 0;
	struct scatterlist *scatterlist;
	void *kaddr;

	/* Determine the actual number of bytes per element */
	bsize_elem = PAGE_SIZE * (1 << sglist->order);

	scatterlist = sglist->scatterlist;

	for (i = 0; i < (len / bsize_elem); i++, buffer += bsize_elem) {
		struct page *page = sg_page(&scatterlist[i]);

		kaddr = kmap(page);
		memcpy(kaddr, buffer, bsize_elem);
		kunmap(page);

		scatterlist[i].length = bsize_elem;

		if (result != 0) {
			ipr_trace;
			return result;
		}
	}

	if (len % bsize_elem) {
		struct page *page = sg_page(&scatterlist[i]);

		kaddr = kmap(page);
		memcpy(kaddr, buffer, len % bsize_elem);
		kunmap(page);

		scatterlist[i].length = len % bsize_elem;
	}

	sglist->buffer_len = len;
	return result;
}

/**
 * ipr_build_ucode_ioadl64 - Build a microcode download IOADL
 * @ipr_cmd: ipr command struct
 * @sglist: scatter/gather list
 *
 * Builds a microcode download IOA data list (IOADL).
 *
 **/
static void ipr_build_ucode_ioadl64(struct ipr_cmnd *ipr_cmd,
				    struct ipr_sglist *sglist)
{
	struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
	struct ipr_ioadl64_desc *ioadl64 = ipr_cmd->i.ioadl64;
	struct scatterlist *scatterlist = sglist->scatterlist;
	int i;

	ipr_cmd->dma_use_sg = sglist->num_dma_sg;
	ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
	ioarcb->data_transfer_length = cpu_to_be32(sglist->buffer_len);

	ioarcb->ioadl_len =
		cpu_to_be32(sizeof(struct ipr_ioadl64_desc) * ipr_cmd->dma_use_sg);
	for (i = 0; i < ipr_cmd->dma_use_sg; i++) {
		ioadl64[i].flags = cpu_to_be32(IPR_IOADL_FLAGS_WRITE);
		ioadl64[i].data_len = cpu_to_be32(sg_dma_len(&scatterlist[i]));
		ioadl64[i].address = cpu_to_be64(sg_dma_address(&scatterlist[i]));
	}

	ioadl64[i-1].flags |= cpu_to_be32(IPR_IOADL_FLAGS_LAST);
}

/**
 * ipr_build_ucode_ioadl - Build a microcode download IOADL
 * @ipr_cmd: ipr command struct
 * @sglist: scatter/gather list
 *
 * Builds a microcode download IOA data list (IOADL).
 *
 **/
static void ipr_build_ucode_ioadl(struct ipr_cmnd *ipr_cmd,
				  struct ipr_sglist *sglist)
{
	struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
	struct ipr_ioadl_desc *ioadl = ipr_cmd->i.ioadl;
	struct scatterlist *scatterlist = sglist->scatterlist;
	int i;

	ipr_cmd->dma_use_sg = sglist->num_dma_sg;
	ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
	ioarcb->data_transfer_length = cpu_to_be32(sglist->buffer_len);

	ioarcb->ioadl_len =
		cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);

	for (i = 0; i < ipr_cmd->dma_use_sg; i++) {
		ioadl[i].flags_and_data_len =
			cpu_to_be32(IPR_IOADL_FLAGS_WRITE | sg_dma_len(&scatterlist[i]));
		ioadl[i].address =
			cpu_to_be32(sg_dma_address(&scatterlist[i]));
	}

	ioadl[i-1].flags_and_data_len |=
		cpu_to_be32(IPR_IOADL_FLAGS_LAST);
}

/**
 * ipr_update_ioa_ucode - Update IOA's microcode
 * @ioa_cfg: ioa config struct
 * @sglist: scatter/gather list
 *
 * Initiate an adapter reset to update the IOA's microcode
 *
 * Return value:
 *	0 on success / -EIO on failure
 **/
static int ipr_update_ioa_ucode(struct ipr_ioa_cfg *ioa_cfg,
				struct ipr_sglist *sglist)
{
	unsigned long lock_flags;

	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	while (ioa_cfg->in_reset_reload) {
		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
		wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
		spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	}

	if (ioa_cfg->ucode_sglist) {
		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
		dev_err(&ioa_cfg->pdev->dev,
			"Microcode download already in progress\n");
		return -EIO;
	}

	sglist->num_dma_sg = dma_map_sg(&ioa_cfg->pdev->dev,
					sglist->scatterlist, sglist->num_sg,
					DMA_TO_DEVICE);

	if (!sglist->num_dma_sg) {
		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
		dev_err(&ioa_cfg->pdev->dev,
			"Failed to map microcode download buffer!\n");
		return -EIO;
	}

	ioa_cfg->ucode_sglist = sglist;
	ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NORMAL);
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
	wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);

	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	ioa_cfg->ucode_sglist = NULL;
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
	return 0;
}

/**
 * ipr_store_update_fw - Update the firmware on the adapter
 * @dev: device struct
 * @buf: buffer
 * @count: buffer size
 *
 * This function will update the firmware on the adapter.
 *
 * Return value:
 *	count on success / other on failure
 **/
static ssize_t ipr_store_update_fw(struct device *dev,
				   struct device_attribute *attr,
				   const char *buf, size_t count)
{
	struct Scsi_Host *shost = class_to_shost(dev);
	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
	struct ipr_ucode_image_header *image_hdr;
	const struct firmware *fw_entry;
	struct ipr_sglist *sglist;
	char fname[100];
	char *src;
	char *endline;
	int result, dnld_size;

	if (!capable(CAP_SYS_ADMIN))
		return -EACCES;

	snprintf(fname, sizeof(fname), "%s", buf);

	endline = strchr(fname, '\n');
	if (endline)
		*endline = '\0';

	if (request_firmware(&fw_entry, fname, &ioa_cfg->pdev->dev)) {
		dev_err(&ioa_cfg->pdev->dev, "Firmware file %s not found\n", fname);
		return -EIO;
	}

	image_hdr = (struct ipr_ucode_image_header *)fw_entry->data;

	src = (u8 *)image_hdr + be32_to_cpu(image_hdr->header_length);
	dnld_size = fw_entry->size - be32_to_cpu(image_hdr->header_length);
	sglist = ipr_alloc_ucode_buffer(dnld_size);

	if (!sglist) {
		dev_err(&ioa_cfg->pdev->dev, "Microcode buffer allocation failed\n");
		release_firmware(fw_entry);
		return -ENOMEM;
	}

	result = ipr_copy_ucode_buffer(sglist, src, dnld_size);

	if (result) {
		dev_err(&ioa_cfg->pdev->dev,
			"Microcode buffer copy to DMA buffer failed\n");
		goto out;
	}

	ipr_info("Updating microcode, please be patient. This may take up to 30 minutes.\n");

	result = ipr_update_ioa_ucode(ioa_cfg, sglist);

	if (!result)
		result = count;
out:
	ipr_free_ucode_buffer(sglist);
	release_firmware(fw_entry);
	return result;
}

static struct device_attribute ipr_update_fw_attr = {
	.attr = {
		.name = "update_fw",
		.mode = S_IWUSR,
	},
	.store = ipr_store_update_fw
};
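
/*
 * Illustrative update flow (assumed paths and a hypothetical image
 * name): request_firmware() searches the usual firmware locations,
 * e.g. /lib/firmware, so after placing the image there:
 *
 *   echo my-ioa-ucode.bin > /sys/class/scsi_host/host0/update_fw
 *
 * The write blocks through the adapter reset that activates the image.
 */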

/**
 * ipr_show_fw_type - Show the adapter's firmware type.
 * @dev: class device struct
 * @buf: buffer
 *
 * Return value:
 *	number of bytes printed to buffer
 **/
static ssize_t ipr_show_fw_type(struct device *dev,
				struct device_attribute *attr, char *buf)
{
	struct Scsi_Host *shost = class_to_shost(dev);
	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
	unsigned long lock_flags = 0;
	int len;

	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	len = snprintf(buf, PAGE_SIZE, "%d\n", ioa_cfg->sis64);
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
	return len;
}

static struct device_attribute ipr_ioa_fw_type_attr = {
	.attr = {
		.name = "fw_type",
		.mode = S_IRUGO,
	},
	.show = ipr_show_fw_type
};

static ssize_t ipr_read_async_err_log(struct file *filep, struct kobject *kobj,
				      struct bin_attribute *bin_attr, char *buf,
				      loff_t off, size_t count)
{
	struct device *cdev = container_of(kobj, struct device, kobj);
	struct Scsi_Host *shost = class_to_shost(cdev);
	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
	struct ipr_hostrcb *hostrcb;
	unsigned long lock_flags = 0;
	int ret;

	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	hostrcb = list_first_entry_or_null(&ioa_cfg->hostrcb_report_q,
					   struct ipr_hostrcb, queue);
	if (!hostrcb) {
		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
		return 0;
	}
	ret = memory_read_from_buffer(buf, count, &off, &hostrcb->hcam,
				      sizeof(hostrcb->hcam));
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
	return ret;
}

static ssize_t ipr_next_async_err_log(struct file *filep, struct kobject *kobj,
				      struct bin_attribute *bin_attr, char *buf,
				      loff_t off, size_t count)
{
	struct device *cdev = container_of(kobj, struct device, kobj);
	struct Scsi_Host *shost = class_to_shost(cdev);
	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
	struct ipr_hostrcb *hostrcb;
	unsigned long lock_flags = 0;

	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	hostrcb = list_first_entry_or_null(&ioa_cfg->hostrcb_report_q,
					   struct ipr_hostrcb, queue);
	if (!hostrcb) {
		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
		return count;
	}

	/* Reclaim hostrcb before exit */
	list_move_tail(&hostrcb->queue, &ioa_cfg->hostrcb_free_q);
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
	return count;
}

static struct bin_attribute ipr_ioa_async_err_log = {
	.attr = {
		.name = "async_err_log",
		.mode = S_IRUGO | S_IWUSR,
	},
	.size = 0,
	.read = ipr_read_async_err_log,
	.write = ipr_next_async_err_log
};
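
/*
 * Illustrative usage (assumed sysfs path): reads return the oldest
 * pending HCAM buffer, and any write retires it so the next one
 * becomes visible:
 *
 *   cat /sys/class/scsi_host/host0/async_err_log > /tmp/hcam.bin  # read oldest
 *   echo 1 > /sys/class/scsi_host/host0/async_err_log             # pop it
 */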

static struct device_attribute *ipr_ioa_attrs[] = {
	&ipr_fw_version_attr,
	&ipr_log_level_attr,
	&ipr_diagnostics_attr,
	&ipr_ioa_state_attr,
	&ipr_ioa_reset_attr,
	&ipr_update_fw_attr,
	&ipr_ioa_fw_type_attr,
	&ipr_iopoll_weight_attr,
	NULL,
};

#ifdef CONFIG_SCSI_IPR_DUMP
/**
 * ipr_read_dump - Dump the adapter
 * @filp: open sysfs file
 * @kobj: kobject struct
 * @bin_attr: bin_attribute struct
 * @buf: buffer
 * @off: offset
 * @count: buffer size
 *
 * Return value:
 *	number of bytes printed to buffer
 **/
static ssize_t ipr_read_dump(struct file *filp, struct kobject *kobj,
			     struct bin_attribute *bin_attr,
			     char *buf, loff_t off, size_t count)
{
	struct device *cdev = container_of(kobj, struct device, kobj);
	struct Scsi_Host *shost = class_to_shost(cdev);
	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
	struct ipr_dump *dump;
	unsigned long lock_flags = 0;
	char *src;
	int len, sdt_end;
	size_t rc = count;

	if (!capable(CAP_SYS_ADMIN))
		return -EACCES;

	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	dump = ioa_cfg->dump;

	if (ioa_cfg->sdt_state != DUMP_OBTAINED || !dump) {
		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
		return 0;
	}
	kref_get(&dump->kref);
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);

	if (off > dump->driver_dump.hdr.len) {
		kref_put(&dump->kref, ipr_release_dump);
		return 0;
	}

	if (off + count > dump->driver_dump.hdr.len) {
		count = dump->driver_dump.hdr.len - off;
		rc = count;
	}

	if (count && off < sizeof(dump->driver_dump)) {
		if (off + count > sizeof(dump->driver_dump))
			len = sizeof(dump->driver_dump) - off;
		else
			len = count;
		src = (u8 *)&dump->driver_dump + off;
		memcpy(buf, src, len);
		buf += len;
		off += len;
		count -= len;
	}

	off -= sizeof(dump->driver_dump);

	if (ioa_cfg->sis64)
		sdt_end = offsetof(struct ipr_ioa_dump, sdt.entry) +
			  (be32_to_cpu(dump->ioa_dump.sdt.hdr.num_entries_used) *
			   sizeof(struct ipr_sdt_entry));
	else
		sdt_end = offsetof(struct ipr_ioa_dump, sdt.entry) +
			  (IPR_FMT2_NUM_SDT_ENTRIES * sizeof(struct ipr_sdt_entry));

	if (count && off < sdt_end) {
		if (off + count > sdt_end)
			len = sdt_end - off;
		else
			len = count;
		src = (u8 *)&dump->ioa_dump + off;
		memcpy(buf, src, len);
		buf += len;
		off += len;
		count -= len;
	}

	off -= sdt_end;

	while (count) {
		if ((off & PAGE_MASK) != ((off + count) & PAGE_MASK))
			len = PAGE_ALIGN(off) - off;
		else
			len = count;
		src = (u8 *)dump->ioa_dump.ioa_data[(off & PAGE_MASK) >> PAGE_SHIFT];
		src += off & ~PAGE_MASK;
		memcpy(buf, src, len);
		buf += len;
		off += len;
		count -= len;
	}

	kref_put(&dump->kref, ipr_release_dump);
	return rc;
}

/**
 * ipr_alloc_dump - Prepare for adapter dump
 * @ioa_cfg: ioa config struct
 *
 * Return value:
 *	0 on success / other on failure
 **/
static int ipr_alloc_dump(struct ipr_ioa_cfg *ioa_cfg)
{
	struct ipr_dump *dump;
	__be32 **ioa_data;
	unsigned long lock_flags = 0;

	dump = kzalloc(sizeof(struct ipr_dump), GFP_KERNEL);

	if (!dump) {
		ipr_err("Dump memory allocation failed\n");
		return -ENOMEM;
	}

	if (ioa_cfg->sis64)
		ioa_data = vmalloc(array_size(IPR_FMT3_MAX_NUM_DUMP_PAGES,
					      sizeof(__be32 *)));
	else
		ioa_data = vmalloc(array_size(IPR_FMT2_MAX_NUM_DUMP_PAGES,
					      sizeof(__be32 *)));

	if (!ioa_data) {
		ipr_err("Dump memory allocation failed\n");
		kfree(dump);
		return -ENOMEM;
	}

	dump->ioa_dump.ioa_data = ioa_data;

	kref_init(&dump->kref);
	dump->ioa_cfg = ioa_cfg;

	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);

	if (INACTIVE != ioa_cfg->sdt_state) {
		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
		vfree(dump->ioa_dump.ioa_data);
		kfree(dump);
		return 0;
	}

	ioa_cfg->dump = dump;
	ioa_cfg->sdt_state = WAIT_FOR_DUMP;
	if (ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead && !ioa_cfg->dump_taken) {
		ioa_cfg->dump_taken = 1;
		schedule_work(&ioa_cfg->work_q);
	}
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);

	return 0;
}

/**
 * ipr_free_dump - Free adapter dump memory
 * @ioa_cfg: ioa config struct
 *
 * Return value:
 *	0 on success / other on failure
 **/
static int ipr_free_dump(struct ipr_ioa_cfg *ioa_cfg)
{
	struct ipr_dump *dump;
	unsigned long lock_flags = 0;

	ENTER;

	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	dump = ioa_cfg->dump;
	if (!dump) {
		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
		return 0;
	}

	ioa_cfg->dump = NULL;
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);

	kref_put(&dump->kref, ipr_release_dump);

	LEAVE;
	return 0;
}

/**
 * ipr_write_dump - Setup dump state of adapter
 * @filp: open sysfs file
 * @kobj: kobject struct
 * @bin_attr: bin_attribute struct
 * @buf: buffer
 * @off: offset
 * @count: buffer size
 *
 * Return value:
 *	count on success / other on failure
 **/
static ssize_t ipr_write_dump(struct file *filp, struct kobject *kobj,
			      struct bin_attribute *bin_attr,
			      char *buf, loff_t off, size_t count)
{
	struct device *cdev = container_of(kobj, struct device, kobj);
	struct Scsi_Host *shost = class_to_shost(cdev);
	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
	int rc;

	if (!capable(CAP_SYS_ADMIN))
		return -EACCES;

	if (buf[0] == '1')
		rc = ipr_alloc_dump(ioa_cfg);
	else if (buf[0] == '0')
		rc = ipr_free_dump(ioa_cfg);
	else
		return -EINVAL;

	if (rc)
		return rc;
	else
		return count;
}

static struct bin_attribute ipr_dump_attr = {
	.attr = {
		.name = "dump",
		.mode = S_IRUSR | S_IWUSR,
	},
	.size = 0,
	.read = ipr_read_dump,
	.write = ipr_write_dump
};
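
/*
 * Illustrative dump collection sequence (assumed sysfs path; requires
 * CONFIG_SCSI_IPR_DUMP and CAP_SYS_ADMIN):
 *
 *   echo 1 > /sys/class/scsi_host/host0/dump             # arm the dump
 *   cat /sys/class/scsi_host/host0/dump > /tmp/ipr_dump  # once obtained
 *   echo 0 > /sys/class/scsi_host/host0/dump             # free the memory
 */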
#else
static int ipr_free_dump(struct ipr_ioa_cfg *ioa_cfg) { return 0; };
#endif

/**
 * ipr_change_queue_depth - Change the device's queue depth
 * @sdev: scsi device struct
 * @qdepth: depth to set
 *
 * Return value:
 *	actual depth set
 **/
static int ipr_change_queue_depth(struct scsi_device *sdev, int qdepth)
{
	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
	struct ipr_resource_entry *res;
	unsigned long lock_flags = 0;

	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	res = (struct ipr_resource_entry *)sdev->hostdata;

	if (res && ipr_is_gata(res) && qdepth > IPR_MAX_CMD_PER_ATA_LUN)
		qdepth = IPR_MAX_CMD_PER_ATA_LUN;
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);

	scsi_change_queue_depth(sdev, qdepth);
	return sdev->queue_depth;
}

/**
 * ipr_show_adapter_handle - Show the adapter's resource handle for this device
 * @dev: device struct
 * @attr: device attribute structure
 * @buf: buffer
 *
 * Return value:
 *	number of bytes printed to buffer
 **/
static ssize_t ipr_show_adapter_handle(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct scsi_device *sdev = to_scsi_device(dev);
	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
	struct ipr_resource_entry *res;
	unsigned long lock_flags = 0;
	ssize_t len = -ENXIO;

	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	res = (struct ipr_resource_entry *)sdev->hostdata;
	if (res)
		len = snprintf(buf, PAGE_SIZE, "%08X\n", res->res_handle);
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
	return len;
}

static struct device_attribute ipr_adapter_handle_attr = {
	.attr = {
		.name = "adapter_handle",
		.mode = S_IRUSR,
	},
	.show = ipr_show_adapter_handle
};

/**
 * ipr_show_resource_path - Show the resource path or the resource address for
 *			    this device.
 * @dev: device struct
 * @attr: device attribute structure
 * @buf: buffer
 *
 * Return value:
 *	number of bytes printed to buffer
 **/
static ssize_t ipr_show_resource_path(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct scsi_device *sdev = to_scsi_device(dev);
	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
	struct ipr_resource_entry *res;
	unsigned long lock_flags = 0;
	ssize_t len = -ENXIO;
	char buffer[IPR_MAX_RES_PATH_LENGTH];

	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	res = (struct ipr_resource_entry *)sdev->hostdata;
	if (res && ioa_cfg->sis64)
		len = snprintf(buf, PAGE_SIZE, "%s\n",
			       __ipr_format_res_path(res->res_path, buffer,
						     sizeof(buffer)));
	else if (res)
		len = snprintf(buf, PAGE_SIZE, "%d:%d:%d:%d\n", ioa_cfg->host->host_no,
			       res->bus, res->target, res->lun);

	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
	return len;
}

static struct device_attribute ipr_resource_path_attr = {
	.attr = {
		.name = "resource_path",
		.mode = S_IRUGO,
	},
	.show = ipr_show_resource_path
};

/**
 * ipr_show_device_id - Show the device_id for this device.
 * @dev: device struct
 * @attr: device attribute structure
 * @buf: buffer
 *
 * Return value:
 *	number of bytes printed to buffer
 **/
static ssize_t ipr_show_device_id(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct scsi_device *sdev = to_scsi_device(dev);
	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
	struct ipr_resource_entry *res;
	unsigned long lock_flags = 0;
	ssize_t len = -ENXIO;

	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	res = (struct ipr_resource_entry *)sdev->hostdata;
	if (res && ioa_cfg->sis64)
		len = snprintf(buf, PAGE_SIZE, "0x%llx\n", be64_to_cpu(res->dev_id));
	else if (res)
		len = snprintf(buf, PAGE_SIZE, "0x%llx\n", res->lun_wwn);

	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
	return len;
}

static struct device_attribute ipr_device_id_attr = {
	.attr = {
		.name = "device_id",
		.mode = S_IRUGO,
	},
	.show = ipr_show_device_id
};

/**
 * ipr_show_resource_type - Show the resource type for this device.
 * @dev: device struct
 * @attr: device attribute structure
 * @buf: buffer
 *
 * Return value:
 *	number of bytes printed to buffer
 **/
static ssize_t ipr_show_resource_type(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct scsi_device *sdev = to_scsi_device(dev);
	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
	struct ipr_resource_entry *res;
	unsigned long lock_flags = 0;
	ssize_t len = -ENXIO;

	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	res = (struct ipr_resource_entry *)sdev->hostdata;

	if (res)
		len = snprintf(buf, PAGE_SIZE, "%x\n", res->type);

	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
	return len;
}

static struct device_attribute ipr_resource_type_attr = {
	.attr = {
		.name = "resource_type",
		.mode = S_IRUGO,
	},
	.show = ipr_show_resource_type
};

/**
 * ipr_show_raw_mode - Show the device's raw mode setting
 * @dev: class device struct
 * @buf: buffer
 *
 * Return value:
 *	number of bytes printed to buffer
 **/
static ssize_t ipr_show_raw_mode(struct device *dev,
				 struct device_attribute *attr, char *buf)
{
	struct scsi_device *sdev = to_scsi_device(dev);
	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
	struct ipr_resource_entry *res;
	unsigned long lock_flags = 0;
	ssize_t len;

	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	res = (struct ipr_resource_entry *)sdev->hostdata;
	if (res)
		len = snprintf(buf, PAGE_SIZE, "%d\n", res->raw_mode);
	else
		len = -ENXIO;
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
	return len;
}

/**
 * ipr_store_raw_mode - Change the device's raw mode setting
 * @dev: class device struct
 * @buf: buffer
 *
 * Return value:
 *	number of bytes consumed on success / other on failure
 **/
static ssize_t ipr_store_raw_mode(struct device *dev,
				  struct device_attribute *attr,
				  const char *buf, size_t count)
{
	struct scsi_device *sdev = to_scsi_device(dev);
	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
	struct ipr_resource_entry *res;
	unsigned long lock_flags = 0;
	ssize_t len;

	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	res = (struct ipr_resource_entry *)sdev->hostdata;
	if (res) {
		if (ipr_is_af_dasd_device(res)) {
			res->raw_mode = simple_strtoul(buf, NULL, 10);
			len = strlen(buf);
			if (res->sdev)
				sdev_printk(KERN_INFO, res->sdev, "raw mode is %s\n",
					    res->raw_mode ? "enabled" : "disabled");
		} else
			len = -EINVAL;
	} else
		len = -ENXIO;
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
	return len;
}

static struct device_attribute ipr_raw_mode_attr = {
	.attr = {
		.name = "raw_mode",
		.mode = S_IRUGO | S_IWUSR,
	},
	.show = ipr_show_raw_mode,
	.store = ipr_store_raw_mode
};
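
/*
 * Illustrative usage (assumed sysfs path for a SCSI device at 2:0:3:0;
 * the layout varies by kernel version): raw mode is only accepted for
 * AF DASD resources, per the check in ipr_store_raw_mode() above:
 *
 *   echo 1 > /sys/class/scsi_device/2:0:3:0/device/raw_mode
 */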

static struct device_attribute *ipr_dev_attrs[] = {
	&ipr_adapter_handle_attr,
	&ipr_resource_path_attr,
	&ipr_device_id_attr,
	&ipr_resource_type_attr,
	&ipr_raw_mode_attr,
	NULL,
};

/**
 * ipr_biosparam - Return the HSC mapping
 * @sdev: scsi device struct
 * @block_device: block device pointer
 * @capacity: capacity of the device
 * @parm: Array containing returned HSC values.
 *
 * This function generates the HSC parms that fdisk uses.
 * We want to make sure we return something that places partitions
 * on 4k boundaries for best performance with the IOA.
 *
 * Return value:
 *	0 on success
 **/
static int ipr_biosparam(struct scsi_device *sdev,
			 struct block_device *block_device,
			 sector_t capacity, int *parm)
{
	int heads, sectors;
	sector_t cylinders;

	heads = 128;
	sectors = 32;

	cylinders = capacity;
	sector_div(cylinders, (128 * 32));

	/* return result */
	parm[0] = heads;
	parm[1] = sectors;
	parm[2] = cylinders;

	return 0;
}
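
/*
 * Worked example for ipr_biosparam() (illustrative): with the fixed
 * geometry of 128 heads and 32 sectors, each cylinder spans
 * 128 * 32 = 4096 sectors (2 MiB at 512-byte sectors). A device of
 * 8388608 sectors (4 GiB) therefore reports 8388608 / 4096 = 2048
 * cylinders, which keeps cylinder boundaries 4K-aligned as the
 * comment above intends.
 */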

/**
 * ipr_find_starget - Find target based on bus/target.
 * @starget: scsi target struct
 *
 * Return value:
 *	resource entry pointer if found / NULL if not found
 **/
static struct ipr_resource_entry *ipr_find_starget(struct scsi_target *starget)
{
	struct Scsi_Host *shost = dev_to_shost(&starget->dev);
	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) shost->hostdata;
	struct ipr_resource_entry *res;

	list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
		if ((res->bus == starget->channel) &&
		    (res->target == starget->id)) {
			return res;
		}
	}

	return NULL;
}

static struct ata_port_info sata_port_info;

/**
 * ipr_target_alloc - Prepare for commands to a SCSI target
 * @starget: scsi target struct
 *
 * If the device is a SATA device, this function allocates an
 * ATA port with libata, else it does nothing.
 *
 * Return value:
 *	0 on success / non-0 on failure
 **/
static int ipr_target_alloc(struct scsi_target *starget)
{
	struct Scsi_Host *shost = dev_to_shost(&starget->dev);
	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) shost->hostdata;
	struct ipr_sata_port *sata_port;
	struct ata_port *ap;
	struct ipr_resource_entry *res;
	unsigned long lock_flags;

	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	res = ipr_find_starget(starget);
	starget->hostdata = NULL;

	if (res && ipr_is_gata(res)) {
		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
		sata_port = kzalloc(sizeof(*sata_port), GFP_KERNEL);
		if (!sata_port)
			return -ENOMEM;

		ap = ata_sas_port_alloc(&ioa_cfg->ata_host, &sata_port_info, shost);
		if (ap) {
			spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
			sata_port->ioa_cfg = ioa_cfg;
			sata_port->ap = ap;
			sata_port->res = res;

			res->sata_port = sata_port;
			ap->private_data = sata_port;
			starget->hostdata = sata_port;
		} else {
			kfree(sata_port);
			return -ENOMEM;
		}
	}
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);

	return 0;
}

/**
 * ipr_target_destroy - Destroy a SCSI target
 * @starget: scsi target struct
 *
 * If the device was a SATA device, this function frees the libata
 * ATA port, else it does nothing.
 *
 **/
static void ipr_target_destroy(struct scsi_target *starget)
{
	struct ipr_sata_port *sata_port = starget->hostdata;
	struct Scsi_Host *shost = dev_to_shost(&starget->dev);
	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) shost->hostdata;

	if (ioa_cfg->sis64) {
		if (!ipr_find_starget(starget)) {
			if (starget->channel == IPR_ARRAY_VIRTUAL_BUS)
				clear_bit(starget->id, ioa_cfg->array_ids);
			else if (starget->channel == IPR_VSET_VIRTUAL_BUS)
				clear_bit(starget->id, ioa_cfg->vset_ids);
			else if (starget->channel == 0)
				clear_bit(starget->id, ioa_cfg->target_ids);
		}
	}

	if (sata_port) {
		starget->hostdata = NULL;
		ata_sas_port_destroy(sata_port->ap);
		kfree(sata_port);
	}
}

/**
 * ipr_find_sdev - Find device based on bus/target/lun.
 * @sdev: scsi device struct
 *
 * Return value:
 *	resource entry pointer if found / NULL if not found
 **/
static struct ipr_resource_entry *ipr_find_sdev(struct scsi_device *sdev)
{
	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) sdev->host->hostdata;
	struct ipr_resource_entry *res;

	list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
		if ((res->bus == sdev->channel) &&
		    (res->target == sdev->id) &&
		    (res->lun == sdev->lun))
			return res;
	}

	return NULL;
}

/**
 * ipr_slave_destroy - Unconfigure a SCSI device
 * @sdev: scsi device struct
 *
 * Return value:
 *	nothing
 **/
static void ipr_slave_destroy(struct scsi_device *sdev)
{
	struct ipr_resource_entry *res;
	struct ipr_ioa_cfg *ioa_cfg;
	unsigned long lock_flags = 0;

	ioa_cfg = (struct ipr_ioa_cfg *) sdev->host->hostdata;

	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	res = (struct ipr_resource_entry *) sdev->hostdata;
	if (res) {
		if (res->sata_port)
			res->sata_port->ap->link.device[0].class = ATA_DEV_NONE;
		sdev->hostdata = NULL;
		res->sdev = NULL;
		res->sata_port = NULL;
	}
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
}

/**
 * ipr_slave_configure - Configure a SCSI device
 * @sdev: scsi device struct
 *
 * This function configures the specified scsi device.
 *
 * Return value:
 *	0 on success
 **/
static int ipr_slave_configure(struct scsi_device *sdev)
{
	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) sdev->host->hostdata;
	struct ipr_resource_entry *res;
	struct ata_port *ap = NULL;
	unsigned long lock_flags = 0;
	char buffer[IPR_MAX_RES_PATH_LENGTH];

	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	res = sdev->hostdata;
	if (res) {
		if (ipr_is_af_dasd_device(res))
			sdev->type = TYPE_RAID;
		if (ipr_is_af_dasd_device(res) || ipr_is_ioa_resource(res)) {
			sdev->scsi_level = 4;
			sdev->no_uld_attach = 1;
		}
		if (ipr_is_vset_device(res)) {
			sdev->scsi_level = SCSI_SPC_3;
			sdev->no_report_opcodes = 1;
			blk_queue_rq_timeout(sdev->request_queue,
					     IPR_VSET_RW_TIMEOUT);
			blk_queue_max_hw_sectors(sdev->request_queue, IPR_VSET_MAX_SECTORS);
		}
		if (ipr_is_gata(res) && res->sata_port)
			ap = res->sata_port->ap;
		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);

		if (ap) {
			scsi_change_queue_depth(sdev, IPR_MAX_CMD_PER_ATA_LUN);
			ata_sas_slave_configure(sdev, ap);
		}

		if (ioa_cfg->sis64)
			sdev_printk(KERN_INFO, sdev, "Resource path: %s\n",
				    ipr_format_res_path(ioa_cfg,
					res->res_path, buffer, sizeof(buffer)));
		return 0;
	}
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
	return 0;
}
4967
35a39691
BK
/**
 * ipr_ata_slave_alloc - Prepare for commands to a SATA device
 * @sdev:	scsi device struct
 *
 * This function initializes an ATA port so that future commands
 * sent through queuecommand will work.
 *
 * Return value:
 * 	0 on success
 **/
static int ipr_ata_slave_alloc(struct scsi_device *sdev)
{
	struct ipr_sata_port *sata_port = NULL;
	int rc = -ENXIO;

	ENTER;
	if (sdev->sdev_target)
		sata_port = sdev->sdev_target->hostdata;
	if (sata_port) {
		rc = ata_sas_port_init(sata_port->ap);
		if (rc == 0)
			rc = ata_sas_sync_probe(sata_port->ap);
	}

	if (rc)
		ipr_slave_destroy(sdev);

	LEAVE;
	return rc;
}

/**
 * ipr_slave_alloc - Prepare for commands to a device.
 * @sdev:	scsi device struct
 *
 * This function saves a pointer to the resource entry
 * in the scsi device struct if the device exists. We
 * can then use this pointer in ipr_queuecommand when
 * handling new commands.
 *
 * Return value:
 * 	0 on success / -ENXIO if device does not exist
 **/
static int ipr_slave_alloc(struct scsi_device *sdev)
{
	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) sdev->host->hostdata;
	struct ipr_resource_entry *res;
	unsigned long lock_flags;
	int rc = -ENXIO;

	sdev->hostdata = NULL;

	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);

	res = ipr_find_sdev(sdev);
	if (res) {
		res->sdev = sdev;
		res->add_to_ml = 0;
		res->in_erp = 0;
		sdev->hostdata = res;
		if (!ipr_is_naca_model(res))
			res->needs_sync_complete = 1;
		rc = 0;
		if (ipr_is_gata(res)) {
			spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
			return ipr_ata_slave_alloc(sdev);
		}
	}

	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);

	return rc;
}

/**
 * ipr_match_lun - Match function for specified LUN
 * @ipr_cmd:	ipr command struct
 * @device:	device to match (sdev)
 *
 * Returns:
 * 	1 if command matches sdev / 0 if command does not match sdev
 **/
static int ipr_match_lun(struct ipr_cmnd *ipr_cmd, void *device)
{
	if (ipr_cmd->scsi_cmd && ipr_cmd->scsi_cmd->device == device)
		return 1;
	return 0;
}

/**
 * ipr_cmnd_is_free - Check if a command is free or not
 * @ipr_cmd:	ipr command struct
 *
 * Returns:
 * 	true / false
 **/
static bool ipr_cmnd_is_free(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_cmnd *loop_cmd;

	list_for_each_entry(loop_cmd, &ipr_cmd->hrrq->hrrq_free_q, queue) {
		if (loop_cmd == ipr_cmd)
			return true;
	}

	return false;
}

/**
 * ipr_match_res - Match function for specified resource entry
 * @ipr_cmd:	ipr command struct
 * @resource:	resource entry to match
 *
 * Returns:
 * 	1 if command matches resource entry / 0 if it does not
 **/
static int ipr_match_res(struct ipr_cmnd *ipr_cmd, void *resource)
{
	struct ipr_resource_entry *res = resource;

	if (res && ipr_cmd->ioarcb.res_handle == res->res_handle)
		return 1;
	return 0;
}

/**
 * ipr_wait_for_ops - Wait for matching commands to complete
 * @ioa_cfg:	ioa config struct
 * @device:	device to match (sdev)
 * @match:	match function to use
 *
 * Returns:
 * 	SUCCESS / FAILED
 **/
static int ipr_wait_for_ops(struct ipr_ioa_cfg *ioa_cfg, void *device,
			    int (*match)(struct ipr_cmnd *, void *))
{
	struct ipr_cmnd *ipr_cmd;
	int wait, i;
	unsigned long flags;
	struct ipr_hrr_queue *hrrq;
	signed long timeout = IPR_ABORT_TASK_TIMEOUT;
	DECLARE_COMPLETION_ONSTACK(comp);

	ENTER;
	do {
		wait = 0;

		for_each_hrrq(hrrq, ioa_cfg) {
			spin_lock_irqsave(hrrq->lock, flags);
			for (i = hrrq->min_cmd_id; i <= hrrq->max_cmd_id; i++) {
				ipr_cmd = ioa_cfg->ipr_cmnd_list[i];
				if (!ipr_cmnd_is_free(ipr_cmd)) {
					if (match(ipr_cmd, device)) {
						ipr_cmd->eh_comp = &comp;
						wait++;
					}
				}
			}
			spin_unlock_irqrestore(hrrq->lock, flags);
		}

		if (wait) {
			timeout = wait_for_completion_timeout(&comp, timeout);

			if (!timeout) {
				wait = 0;

				for_each_hrrq(hrrq, ioa_cfg) {
					spin_lock_irqsave(hrrq->lock, flags);
					for (i = hrrq->min_cmd_id; i <= hrrq->max_cmd_id; i++) {
						ipr_cmd = ioa_cfg->ipr_cmnd_list[i];
						if (!ipr_cmnd_is_free(ipr_cmd)) {
							if (match(ipr_cmd, device)) {
								ipr_cmd->eh_comp = NULL;
								wait++;
							}
						}
					}
					spin_unlock_irqrestore(hrrq->lock, flags);
				}

				if (wait)
					dev_err(&ioa_cfg->pdev->dev, "Timed out waiting for aborted commands\n");
				LEAVE;
				return wait ? FAILED : SUCCESS;
			}
		}
	} while (wait);

	LEAVE;
	return SUCCESS;
}

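/**
 * ipr_eh_host_reset - Reset the host adapter
 * @cmd:	scsi command struct
 *
 * This kernel-doc comment is an added description of the function
 * below: it initiates an abbreviated IOA shutdown on behalf of SCSI
 * EH, waits for the reset/reload to finish, and reports FAILED only
 * if the adapter ends up dead.
 *
 * Return value:
 * 	SUCCESS / FAILED
 **/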
static int ipr_eh_host_reset(struct scsi_cmnd *cmd)
{
	struct ipr_ioa_cfg *ioa_cfg;
	unsigned long lock_flags = 0;
	int rc = SUCCESS;

	ENTER;
	ioa_cfg = (struct ipr_ioa_cfg *) cmd->device->host->hostdata;
	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);

	if (!ioa_cfg->in_reset_reload && !ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead) {
		ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_ABBREV);
		dev_err(&ioa_cfg->pdev->dev,
			"Adapter being reset as a result of error recovery.\n");

		if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
			ioa_cfg->sdt_state = GET_DUMP;
	}

	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
	wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);

	/* If we got hit with a host reset while we were already resetting
	 * the adapter for some reason, and that reset failed, the adapter
	 * is now dead, so fail the host reset as well.
	 */
	if (ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead) {
		ipr_trace;
		rc = FAILED;
	}

	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
	LEAVE;
	return rc;
}

/**
 * ipr_device_reset - Reset the device
 * @ioa_cfg:	ioa config struct
 * @res:	resource entry struct
 *
 * This function issues a device reset to the affected device.
 * If the device is a SCSI device, a LUN reset will be sent
 * to the device first. If that does not work, a target reset
 * will be sent. If the device is a SATA device, a PHY reset will
 * be sent.
 *
 * Return value:
 *	0 on success / non-zero on failure
 **/
static int ipr_device_reset(struct ipr_ioa_cfg *ioa_cfg,
			    struct ipr_resource_entry *res)
{
	struct ipr_cmnd *ipr_cmd;
	struct ipr_ioarcb *ioarcb;
	struct ipr_cmd_pkt *cmd_pkt;
	struct ipr_ioarcb_ata_regs *regs;
	u32 ioasc;

	ENTER;
	ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
	ioarcb = &ipr_cmd->ioarcb;
	cmd_pkt = &ioarcb->cmd_pkt;

	if (ipr_cmd->ioa_cfg->sis64) {
		regs = &ipr_cmd->i.ata_ioadl.regs;
		ioarcb->add_cmd_parms_offset = cpu_to_be16(sizeof(*ioarcb));
	} else
		regs = &ioarcb->u.add_data.u.regs;

	ioarcb->res_handle = res->res_handle;
	cmd_pkt->request_type = IPR_RQTYPE_IOACMD;
	cmd_pkt->cdb[0] = IPR_RESET_DEVICE;
	if (ipr_is_gata(res)) {
		cmd_pkt->cdb[2] = IPR_ATA_PHY_RESET;
		ioarcb->add_cmd_parms_len = cpu_to_be16(sizeof(regs->flags));
		regs->flags |= IPR_ATA_FLAG_STATUS_ON_GOOD_COMPLETION;
	}

	ipr_send_blocking_cmd(ipr_cmd, ipr_timeout, IPR_DEVICE_RESET_TIMEOUT);
	ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
	list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
	if (ipr_is_gata(res) && res->sata_port && ioasc != IPR_IOASC_IOA_WAS_RESET) {
		if (ipr_cmd->ioa_cfg->sis64)
			memcpy(&res->sata_port->ioasa, &ipr_cmd->s.ioasa64.u.gata,
			       sizeof(struct ipr_ioasa_gata));
		else
			memcpy(&res->sata_port->ioasa, &ipr_cmd->s.ioasa.u.gata,
			       sizeof(struct ipr_ioasa_gata));
	}

	LEAVE;
	return IPR_IOASC_SENSE_KEY(ioasc) ? -EIO : 0;
}

/**
 * ipr_sata_reset - Reset the SATA port
 * @link:	SATA link to reset
 * @classes:	class of the attached device
 * @deadline:	deadline for the reset, in jiffies
 *
 * This function issues a SATA phy reset to the affected ATA link.
 *
 * Return value:
 *	0 on success / non-zero on failure
 **/
static int ipr_sata_reset(struct ata_link *link, unsigned int *classes,
			  unsigned long deadline)
{
	struct ipr_sata_port *sata_port = link->ap->private_data;
	struct ipr_ioa_cfg *ioa_cfg = sata_port->ioa_cfg;
	struct ipr_resource_entry *res;
	unsigned long lock_flags = 0;
	int rc = -ENXIO, ret;

	ENTER;
	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	while (ioa_cfg->in_reset_reload) {
		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
		wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
		spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	}

	res = sata_port->res;
	if (res) {
		rc = ipr_device_reset(ioa_cfg, res);
		*classes = res->ata_class;
		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);

		ret = ipr_wait_for_ops(ioa_cfg, res, ipr_match_res);
		if (ret != SUCCESS) {
			spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
			ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_ABBREV);
			spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);

			wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
		}
	} else
		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);

	LEAVE;
	return rc;
}

/**
 * __ipr_eh_dev_reset - Reset the device
 * @scsi_cmd:	scsi command struct
 *
 * This function issues a device reset to the affected device.
 * A LUN reset will be sent to the device first. If that does
 * not work, a target reset will be sent.
 *
 * Return value:
 *	SUCCESS / FAILED
 **/
static int __ipr_eh_dev_reset(struct scsi_cmnd *scsi_cmd)
{
	struct ipr_cmnd *ipr_cmd;
	struct ipr_ioa_cfg *ioa_cfg;
	struct ipr_resource_entry *res;
	struct ata_port *ap;
	int rc = 0, i;
	struct ipr_hrr_queue *hrrq;

	ENTER;
	ioa_cfg = (struct ipr_ioa_cfg *) scsi_cmd->device->host->hostdata;
	res = scsi_cmd->device->hostdata;

	/*
	 * If we are currently going through reset/reload, return failed. This will force the
	 * mid-layer to call ipr_eh_host_reset, which will then go to sleep and wait for the
	 * reset to complete
	 */
	if (ioa_cfg->in_reset_reload)
		return FAILED;
	if (ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead)
		return FAILED;

	for_each_hrrq(hrrq, ioa_cfg) {
		spin_lock(&hrrq->_lock);
		for (i = hrrq->min_cmd_id; i <= hrrq->max_cmd_id; i++) {
			ipr_cmd = ioa_cfg->ipr_cmnd_list[i];

			if (ipr_cmd->ioarcb.res_handle == res->res_handle) {
				if (!ipr_cmd->qc)
					continue;
				if (ipr_cmnd_is_free(ipr_cmd))
					continue;

				ipr_cmd->done = ipr_sata_eh_done;
				if (!(ipr_cmd->qc->flags & ATA_QCFLAG_FAILED)) {
					ipr_cmd->qc->err_mask |= AC_ERR_TIMEOUT;
					ipr_cmd->qc->flags |= ATA_QCFLAG_FAILED;
				}
			}
		}
		spin_unlock(&hrrq->_lock);
	}
	res->resetting_device = 1;
	scmd_printk(KERN_ERR, scsi_cmd, "Resetting device\n");

	if (ipr_is_gata(res) && res->sata_port) {
		ap = res->sata_port->ap;
		spin_unlock_irq(scsi_cmd->device->host->host_lock);
		ata_std_error_handler(ap);
		spin_lock_irq(scsi_cmd->device->host->host_lock);
	} else
		rc = ipr_device_reset(ioa_cfg, res);
	res->resetting_device = 0;
	res->reset_occurred = 1;

	LEAVE;
	return rc ? FAILED : SUCCESS;
}

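/**
 * ipr_eh_dev_reset - Reset the device, then wait for outstanding ops
 * @cmd:	scsi command struct
 *
 * This kernel-doc comment is an added description of the wrapper
 * below: it takes the host lock around __ipr_eh_dev_reset and, on
 * success, waits for all commands outstanding to the affected
 * resource (SATA) or LUN (SCSI) to complete.
 *
 * Return value:
 *	SUCCESS / FAILED
 **/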
static int ipr_eh_dev_reset(struct scsi_cmnd *cmd)
{
	int rc;
	struct ipr_ioa_cfg *ioa_cfg;
	struct ipr_resource_entry *res;

	ioa_cfg = (struct ipr_ioa_cfg *) cmd->device->host->hostdata;
	res = cmd->device->hostdata;

	if (!res)
		return FAILED;

	spin_lock_irq(cmd->device->host->host_lock);
	rc = __ipr_eh_dev_reset(cmd);
	spin_unlock_irq(cmd->device->host->host_lock);

	if (rc == SUCCESS) {
		if (ipr_is_gata(res) && res->sata_port)
			rc = ipr_wait_for_ops(ioa_cfg, res, ipr_match_res);
		else
			rc = ipr_wait_for_ops(ioa_cfg, cmd->device, ipr_match_lun);
	}

	return rc;
}

/**
 * ipr_bus_reset_done - Op done function for bus reset.
 * @ipr_cmd:	ipr command struct
 *
 * This function is the op done function for a bus reset
 *
 * Return value:
 * 	none
 **/
static void ipr_bus_reset_done(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
	struct ipr_resource_entry *res;

	ENTER;
	if (!ioa_cfg->sis64)
		list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
			if (res->res_handle == ipr_cmd->ioarcb.res_handle) {
				scsi_report_bus_reset(ioa_cfg->host, res->bus);
				break;
			}
		}

	/*
	 * If abort has not completed, indicate the reset has, else call the
	 * abort's done function to wake the sleeping eh thread
	 */
	if (ipr_cmd->sibling->sibling)
		ipr_cmd->sibling->sibling = NULL;
	else
		ipr_cmd->sibling->done(ipr_cmd->sibling);

	list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
	LEAVE;
}

/**
 * ipr_abort_timeout - An abort task has timed out
 * @t:	Timer context used to fetch the ipr command struct
 *
 * This function handles when an abort task times out. If this
 * happens we issue a bus reset since we have resources tied
 * up that must be freed before returning to the midlayer.
 *
 * Return value:
 *	none
 **/
static void ipr_abort_timeout(struct timer_list *t)
{
	struct ipr_cmnd *ipr_cmd = from_timer(ipr_cmd, t, timer);
	struct ipr_cmnd *reset_cmd;
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
	struct ipr_cmd_pkt *cmd_pkt;
	unsigned long lock_flags = 0;

	ENTER;
	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	if (ipr_cmd->completion.done || ioa_cfg->in_reset_reload) {
		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
		return;
	}

	sdev_printk(KERN_ERR, ipr_cmd->u.sdev, "Abort timed out. Resetting bus.\n");
	reset_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
	ipr_cmd->sibling = reset_cmd;
	reset_cmd->sibling = ipr_cmd;
	reset_cmd->ioarcb.res_handle = ipr_cmd->ioarcb.res_handle;
	cmd_pkt = &reset_cmd->ioarcb.cmd_pkt;
	cmd_pkt->request_type = IPR_RQTYPE_IOACMD;
	cmd_pkt->cdb[0] = IPR_RESET_DEVICE;
	cmd_pkt->cdb[2] = IPR_RESET_TYPE_SELECT | IPR_BUS_RESET;

	ipr_do_req(reset_cmd, ipr_bus_reset_done, ipr_timeout, IPR_DEVICE_RESET_TIMEOUT);
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
	LEAVE;
}

/**
 * ipr_cancel_op - Cancel specified op
 * @scsi_cmd:	scsi command struct
 *
 * This function cancels specified op.
 *
 * Return value:
 *	SUCCESS / FAILED
 **/
static int ipr_cancel_op(struct scsi_cmnd *scsi_cmd)
{
	struct ipr_cmnd *ipr_cmd;
	struct ipr_ioa_cfg *ioa_cfg;
	struct ipr_resource_entry *res;
	struct ipr_cmd_pkt *cmd_pkt;
	u32 ioasc, int_reg;
	int i, op_found = 0;
	struct ipr_hrr_queue *hrrq;

	ENTER;
	ioa_cfg = (struct ipr_ioa_cfg *)scsi_cmd->device->host->hostdata;
	res = scsi_cmd->device->hostdata;

	/* If we are currently going through reset/reload, return failed.
	 * This will force the mid-layer to call ipr_eh_host_reset,
	 * which will then go to sleep and wait for the reset to complete
	 */
	if (ioa_cfg->in_reset_reload ||
	    ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead)
		return FAILED;
	if (!res)
		return FAILED;

	/*
	 * If we are aborting a timed out op, chances are that the timeout was caused
	 * by a still not detected EEH error. In such cases, reading a register will
	 * trigger the EEH recovery infrastructure.
	 */
	int_reg = readl(ioa_cfg->regs.sense_interrupt_reg);

	if (!ipr_is_gscsi(res))
		return FAILED;

	for_each_hrrq(hrrq, ioa_cfg) {
		spin_lock(&hrrq->_lock);
		for (i = hrrq->min_cmd_id; i <= hrrq->max_cmd_id; i++) {
			if (ioa_cfg->ipr_cmnd_list[i]->scsi_cmd == scsi_cmd) {
				if (!ipr_cmnd_is_free(ioa_cfg->ipr_cmnd_list[i])) {
					op_found = 1;
					break;
				}
			}
		}
		spin_unlock(&hrrq->_lock);
	}

	if (!op_found)
		return SUCCESS;

	ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
	ipr_cmd->ioarcb.res_handle = res->res_handle;
	cmd_pkt = &ipr_cmd->ioarcb.cmd_pkt;
	cmd_pkt->request_type = IPR_RQTYPE_IOACMD;
	cmd_pkt->cdb[0] = IPR_CANCEL_ALL_REQUESTS;
	ipr_cmd->u.sdev = scsi_cmd->device;

	scmd_printk(KERN_ERR, scsi_cmd, "Aborting command: %02X\n",
		    scsi_cmd->cmnd[0]);
	ipr_send_blocking_cmd(ipr_cmd, ipr_abort_timeout, IPR_CANCEL_ALL_TIMEOUT);
	ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);

	/*
	 * If the abort task timed out and we sent a bus reset, we will get
	 * one of the following responses to the abort
	 */
	if (ioasc == IPR_IOASC_BUS_WAS_RESET || ioasc == IPR_IOASC_SYNC_REQUIRED) {
		ioasc = 0;
		ipr_trace;
	}

	list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
	if (!ipr_is_naca_model(res))
		res->needs_sync_complete = 1;

	LEAVE;
	return IPR_IOASC_SENSE_KEY(ioasc) ? FAILED : SUCCESS;
}

/**
 * ipr_scan_finished - Report whether the initial device scan is done
 * @shost:	scsi host struct
 * @elapsed_time:	elapsed time of the scan, in jiffies
 *
 * Return value:
 *	0 if scan in progress / 1 if scan is complete
 **/
static int ipr_scan_finished(struct Scsi_Host *shost, unsigned long elapsed_time)
{
	unsigned long lock_flags;
	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) shost->hostdata;
	int rc = 0;

	spin_lock_irqsave(shost->host_lock, lock_flags);
	if (ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead || ioa_cfg->scan_done)
		rc = 1;
	if ((elapsed_time/HZ) > (ioa_cfg->transop_timeout * 2))
		rc = 1;
	spin_unlock_irqrestore(shost->host_lock, lock_flags);
	return rc;
}

/**
 * ipr_eh_abort - Abort a single op
 * @scsi_cmd:	scsi command struct
 *
 * Return value:
 *	SUCCESS / FAILED
 **/
static int ipr_eh_abort(struct scsi_cmnd *scsi_cmd)
{
	unsigned long flags;
	int rc;
	struct ipr_ioa_cfg *ioa_cfg;

	ENTER;

	ioa_cfg = (struct ipr_ioa_cfg *) scsi_cmd->device->host->hostdata;

	spin_lock_irqsave(scsi_cmd->device->host->host_lock, flags);
	rc = ipr_cancel_op(scsi_cmd);
	spin_unlock_irqrestore(scsi_cmd->device->host->host_lock, flags);

	if (rc == SUCCESS)
		rc = ipr_wait_for_ops(ioa_cfg, scsi_cmd->device, ipr_match_lun);
	LEAVE;
	return rc;
}

/**
 * ipr_handle_other_interrupt - Handle "other" interrupts
 * @ioa_cfg:	ioa config struct
 * @int_reg:	interrupt register
 *
 * Return value:
 * 	IRQ_NONE / IRQ_HANDLED
 **/
static irqreturn_t ipr_handle_other_interrupt(struct ipr_ioa_cfg *ioa_cfg,
					      u32 int_reg)
{
	irqreturn_t rc = IRQ_HANDLED;
	u32 int_mask_reg;

	int_mask_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg32);
	int_reg &= ~int_mask_reg;

	/* If an interrupt on the adapter did not occur, ignore it.
	 * Or in the case of SIS 64, check for a stage change interrupt.
	 */
	if ((int_reg & IPR_PCII_OPER_INTERRUPTS) == 0) {
		if (ioa_cfg->sis64) {
			int_mask_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
			int_reg = readl(ioa_cfg->regs.sense_interrupt_reg) & ~int_mask_reg;
			if (int_reg & IPR_PCII_IPL_STAGE_CHANGE) {

				/* clear stage change */
				writel(IPR_PCII_IPL_STAGE_CHANGE, ioa_cfg->regs.clr_interrupt_reg);
				int_reg = readl(ioa_cfg->regs.sense_interrupt_reg) & ~int_mask_reg;
				list_del(&ioa_cfg->reset_cmd->queue);
				del_timer(&ioa_cfg->reset_cmd->timer);
				ipr_reset_ioa_job(ioa_cfg->reset_cmd);
				return IRQ_HANDLED;
			}
		}

		return IRQ_NONE;
	}

	if (int_reg & IPR_PCII_IOA_TRANS_TO_OPER) {
		/* Mask the interrupt */
		writel(IPR_PCII_IOA_TRANS_TO_OPER, ioa_cfg->regs.set_interrupt_mask_reg);
		int_reg = readl(ioa_cfg->regs.sense_interrupt_reg);

		list_del(&ioa_cfg->reset_cmd->queue);
		del_timer(&ioa_cfg->reset_cmd->timer);
		ipr_reset_ioa_job(ioa_cfg->reset_cmd);
	} else if ((int_reg & IPR_PCII_HRRQ_UPDATED) == int_reg) {
		if (ioa_cfg->clear_isr) {
			if (ipr_debug && printk_ratelimit())
				dev_err(&ioa_cfg->pdev->dev,
					"Spurious interrupt detected. 0x%08X\n", int_reg);
			writel(IPR_PCII_HRRQ_UPDATED, ioa_cfg->regs.clr_interrupt_reg32);
			int_reg = readl(ioa_cfg->regs.sense_interrupt_reg32);
			return IRQ_NONE;
		}
	} else {
		if (int_reg & IPR_PCII_IOA_UNIT_CHECKED)
			ioa_cfg->ioa_unit_checked = 1;
		else if (int_reg & IPR_PCII_NO_HOST_RRQ)
			dev_err(&ioa_cfg->pdev->dev,
				"No Host RRQ. 0x%08X\n", int_reg);
		else
			dev_err(&ioa_cfg->pdev->dev,
				"Permanent IOA failure. 0x%08X\n", int_reg);

		if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
			ioa_cfg->sdt_state = GET_DUMP;

		ipr_mask_and_clear_interrupts(ioa_cfg, ~0);
		ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
	}

	return rc;
}

/**
 * ipr_isr_eh - Interrupt service routine error handler
 * @ioa_cfg:	ioa config struct
 * @msg:	message to log
 * @number:	number to log with the message (e.g. an invalid command index)
 *
 * Return value:
 * 	none
 **/
static void ipr_isr_eh(struct ipr_ioa_cfg *ioa_cfg, char *msg, u16 number)
{
	ioa_cfg->errors_logged++;
	dev_err(&ioa_cfg->pdev->dev, "%s %d\n", msg, number);

	if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
		ioa_cfg->sdt_state = GET_DUMP;

	ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
}

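/**
 * ipr_process_hrrq - Process responses posted to a host RRQ
 * @hrr_queue:	hrr queue to drain
 * @budget:	max number of responses to process (-1 for no limit)
 * @doneq:	list head on which to collect completed commands
 *
 * This kernel-doc comment is an added description of the function
 * below. It walks the circular host request/response queue: the
 * host's expected toggle bit flips each time hrrq_curr wraps around
 * the ring, so an entry is treated as new only while its toggle bit
 * matches hrr_queue->toggle_bit. The response handle bits in each
 * entry index into ipr_cmnd_list to locate the finished command.
 *
 * Return value:
 * 	number of responses processed
 **/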
static int ipr_process_hrrq(struct ipr_hrr_queue *hrr_queue, int budget,
			    struct list_head *doneq)
{
	u32 ioasc;
	u16 cmd_index;
	struct ipr_cmnd *ipr_cmd;
	struct ipr_ioa_cfg *ioa_cfg = hrr_queue->ioa_cfg;
	int num_hrrq = 0;

	/* If interrupts are disabled, ignore the interrupt */
	if (!hrr_queue->allow_interrupts)
		return 0;

	while ((be32_to_cpu(*hrr_queue->hrrq_curr) & IPR_HRRQ_TOGGLE_BIT) ==
	       hrr_queue->toggle_bit) {

		cmd_index = (be32_to_cpu(*hrr_queue->hrrq_curr) &
			     IPR_HRRQ_REQ_RESP_HANDLE_MASK) >>
			     IPR_HRRQ_REQ_RESP_HANDLE_SHIFT;

		if (unlikely(cmd_index > hrr_queue->max_cmd_id ||
			     cmd_index < hrr_queue->min_cmd_id)) {
			ipr_isr_eh(ioa_cfg,
				"Invalid response handle from IOA: ",
				cmd_index);
			break;
		}

		ipr_cmd = ioa_cfg->ipr_cmnd_list[cmd_index];
		ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);

		ipr_trc_hook(ipr_cmd, IPR_TRACE_FINISH, ioasc);

		list_move_tail(&ipr_cmd->queue, doneq);

		if (hrr_queue->hrrq_curr < hrr_queue->hrrq_end) {
			hrr_queue->hrrq_curr++;
		} else {
			hrr_queue->hrrq_curr = hrr_queue->hrrq_start;
			hrr_queue->toggle_bit ^= 1u;
		}
		num_hrrq++;
		if (budget > 0 && num_hrrq >= budget)
			break;
	}

	return num_hrrq;
}

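/**
 * ipr_iopoll - irq_poll completion handler for a host RRQ
 * @iop:	irq_poll structure embedded in the hrr queue
 * @budget:	max number of responses to process in this poll pass
 *
 * This kernel-doc comment is an added description of the function
 * below. It runs in softirq context once the ISR has scheduled
 * polling, processes up to @budget responses off the queue, and
 * completes the poll when fewer than @budget responses were found.
 *
 * Return value:
 * 	number of completed operations
 **/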
static int ipr_iopoll(struct irq_poll *iop, int budget)
{
	struct ipr_ioa_cfg *ioa_cfg;
	struct ipr_hrr_queue *hrrq;
	struct ipr_cmnd *ipr_cmd, *temp;
	unsigned long hrrq_flags;
	int completed_ops;
	LIST_HEAD(doneq);

	hrrq = container_of(iop, struct ipr_hrr_queue, iopoll);
	ioa_cfg = hrrq->ioa_cfg;

	spin_lock_irqsave(hrrq->lock, hrrq_flags);
	completed_ops = ipr_process_hrrq(hrrq, budget, &doneq);

	if (completed_ops < budget)
		irq_poll_complete(iop);
	spin_unlock_irqrestore(hrrq->lock, hrrq_flags);

	list_for_each_entry_safe(ipr_cmd, temp, &doneq, queue) {
		list_del(&ipr_cmd->queue);
		del_timer(&ipr_cmd->timer);
		ipr_cmd->fast_done(ipr_cmd);
	}

	return completed_ops;
}

/**
 * ipr_isr - Interrupt service routine
 * @irq:	irq number
 * @devp:	pointer to ioa config struct
 *
 * Return value:
 * 	IRQ_NONE / IRQ_HANDLED
 **/
static irqreturn_t ipr_isr(int irq, void *devp)
{
	struct ipr_hrr_queue *hrrq = (struct ipr_hrr_queue *)devp;
	struct ipr_ioa_cfg *ioa_cfg = hrrq->ioa_cfg;
	unsigned long hrrq_flags = 0;
	u32 int_reg = 0;
	int num_hrrq = 0;
	int irq_none = 0;
	struct ipr_cmnd *ipr_cmd, *temp;
	irqreturn_t rc = IRQ_NONE;
	LIST_HEAD(doneq);

	spin_lock_irqsave(hrrq->lock, hrrq_flags);
	/* If interrupts are disabled, ignore the interrupt */
	if (!hrrq->allow_interrupts) {
		spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
		return IRQ_NONE;
	}

	while (1) {
		if (ipr_process_hrrq(hrrq, -1, &doneq)) {
			rc = IRQ_HANDLED;

			if (!ioa_cfg->clear_isr)
				break;

			/* Clear the PCI interrupt */
			num_hrrq = 0;
			do {
				writel(IPR_PCII_HRRQ_UPDATED,
				       ioa_cfg->regs.clr_interrupt_reg32);
				int_reg = readl(ioa_cfg->regs.sense_interrupt_reg32);
			} while (int_reg & IPR_PCII_HRRQ_UPDATED &&
				 num_hrrq++ < IPR_MAX_HRRQ_RETRIES);

		} else if (rc == IRQ_NONE && irq_none == 0) {
			int_reg = readl(ioa_cfg->regs.sense_interrupt_reg32);
			irq_none++;
		} else if (num_hrrq == IPR_MAX_HRRQ_RETRIES &&
			   int_reg & IPR_PCII_HRRQ_UPDATED) {
			ipr_isr_eh(ioa_cfg,
				"Error clearing HRRQ: ", num_hrrq);
			rc = IRQ_HANDLED;
			break;
		} else
			break;
	}

	if (unlikely(rc == IRQ_NONE))
		rc = ipr_handle_other_interrupt(ioa_cfg, int_reg);

	spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
	list_for_each_entry_safe(ipr_cmd, temp, &doneq, queue) {
		list_del(&ipr_cmd->queue);
		del_timer(&ipr_cmd->timer);
		ipr_cmd->fast_done(ipr_cmd);
	}
	return rc;
}

/**
 * ipr_isr_mhrrq - Interrupt service routine
 * @irq:	irq number
 * @devp:	pointer to ioa config struct
 *
 * Return value:
 * 	IRQ_NONE / IRQ_HANDLED
 **/
static irqreturn_t ipr_isr_mhrrq(int irq, void *devp)
{
	struct ipr_hrr_queue *hrrq = (struct ipr_hrr_queue *)devp;
	struct ipr_ioa_cfg *ioa_cfg = hrrq->ioa_cfg;
	unsigned long hrrq_flags = 0;
	struct ipr_cmnd *ipr_cmd, *temp;
	irqreturn_t rc = IRQ_NONE;
	LIST_HEAD(doneq);

	spin_lock_irqsave(hrrq->lock, hrrq_flags);

	/* If interrupts are disabled, ignore the interrupt */
	if (!hrrq->allow_interrupts) {
		spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
		return IRQ_NONE;
	}

	if (ioa_cfg->iopoll_weight && ioa_cfg->sis64 && ioa_cfg->nvectors > 1) {
		if ((be32_to_cpu(*hrrq->hrrq_curr) & IPR_HRRQ_TOGGLE_BIT) ==
		    hrrq->toggle_bit) {
			irq_poll_sched(&hrrq->iopoll);
			spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
			return IRQ_HANDLED;
		}
	} else {
		if ((be32_to_cpu(*hrrq->hrrq_curr) & IPR_HRRQ_TOGGLE_BIT) ==
		    hrrq->toggle_bit)

			if (ipr_process_hrrq(hrrq, -1, &doneq))
				rc = IRQ_HANDLED;
	}

	spin_unlock_irqrestore(hrrq->lock, hrrq_flags);

	list_for_each_entry_safe(ipr_cmd, temp, &doneq, queue) {
		list_del(&ipr_cmd->queue);
		del_timer(&ipr_cmd->timer);
		ipr_cmd->fast_done(ipr_cmd);
	}
	return rc;
}

/**
 * ipr_build_ioadl64 - Build a scatter/gather list and map the buffer
 * @ioa_cfg:	ioa config struct
 * @ipr_cmd:	ipr command struct
 *
 * Return value:
 * 	0 on success / -1 on failure
 **/
static int ipr_build_ioadl64(struct ipr_ioa_cfg *ioa_cfg,
			     struct ipr_cmnd *ipr_cmd)
{
	int i, nseg;
	struct scatterlist *sg;
	u32 length;
	u32 ioadl_flags = 0;
	struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
	struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
	struct ipr_ioadl64_desc *ioadl64 = ipr_cmd->i.ioadl64;

	length = scsi_bufflen(scsi_cmd);
	if (!length)
		return 0;

	nseg = scsi_dma_map(scsi_cmd);
	if (nseg < 0) {
		if (printk_ratelimit())
			dev_err(&ioa_cfg->pdev->dev, "scsi_dma_map failed!\n");
		return -1;
	}

	ipr_cmd->dma_use_sg = nseg;

	ioarcb->data_transfer_length = cpu_to_be32(length);
	ioarcb->ioadl_len =
		cpu_to_be32(sizeof(struct ipr_ioadl64_desc) * ipr_cmd->dma_use_sg);

	if (scsi_cmd->sc_data_direction == DMA_TO_DEVICE) {
		ioadl_flags = IPR_IOADL_FLAGS_WRITE;
		ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
	} else if (scsi_cmd->sc_data_direction == DMA_FROM_DEVICE)
		ioadl_flags = IPR_IOADL_FLAGS_READ;

	scsi_for_each_sg(scsi_cmd, sg, ipr_cmd->dma_use_sg, i) {
		ioadl64[i].flags = cpu_to_be32(ioadl_flags);
		ioadl64[i].data_len = cpu_to_be32(sg_dma_len(sg));
		ioadl64[i].address = cpu_to_be64(sg_dma_address(sg));
	}

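	/* Added comment: flag the final scatter/gather descriptor so the
	 * adapter knows where the IOADL ends. */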
	ioadl64[i-1].flags |= cpu_to_be32(IPR_IOADL_FLAGS_LAST);
	return 0;
}

/**
 * ipr_build_ioadl - Build a scatter/gather list and map the buffer
 * @ioa_cfg:	ioa config struct
 * @ipr_cmd:	ipr command struct
 *
 * Return value:
 * 	0 on success / -1 on failure
 **/
static int ipr_build_ioadl(struct ipr_ioa_cfg *ioa_cfg,
			   struct ipr_cmnd *ipr_cmd)
{
	int i, nseg;
	struct scatterlist *sg;
	u32 length;
	u32 ioadl_flags = 0;
	struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
	struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
	struct ipr_ioadl_desc *ioadl = ipr_cmd->i.ioadl;

	length = scsi_bufflen(scsi_cmd);
	if (!length)
		return 0;

	nseg = scsi_dma_map(scsi_cmd);
	if (nseg < 0) {
		dev_err(&ioa_cfg->pdev->dev, "scsi_dma_map failed!\n");
		return -1;
	}

	ipr_cmd->dma_use_sg = nseg;

	if (scsi_cmd->sc_data_direction == DMA_TO_DEVICE) {
		ioadl_flags = IPR_IOADL_FLAGS_WRITE;
		ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
		ioarcb->data_transfer_length = cpu_to_be32(length);
		ioarcb->ioadl_len =
			cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);
	} else if (scsi_cmd->sc_data_direction == DMA_FROM_DEVICE) {
		ioadl_flags = IPR_IOADL_FLAGS_READ;
		ioarcb->read_data_transfer_length = cpu_to_be32(length);
		ioarcb->read_ioadl_len =
			cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);
	}

	if (ipr_cmd->dma_use_sg <= ARRAY_SIZE(ioarcb->u.add_data.u.ioadl)) {
		ioadl = ioarcb->u.add_data.u.ioadl;
		ioarcb->write_ioadl_addr = cpu_to_be32((ipr_cmd->dma_addr) +
				    offsetof(struct ipr_ioarcb, u.add_data));
		ioarcb->read_ioadl_addr = ioarcb->write_ioadl_addr;
	}

	scsi_for_each_sg(scsi_cmd, sg, ipr_cmd->dma_use_sg, i) {
		ioadl[i].flags_and_data_len =
			cpu_to_be32(ioadl_flags | sg_dma_len(sg));
		ioadl[i].address = cpu_to_be32(sg_dma_address(sg));
	}

	ioadl[i-1].flags_and_data_len |= cpu_to_be32(IPR_IOADL_FLAGS_LAST);
	return 0;
}

/**
 * __ipr_erp_done - Process completion of ERP for a device
 * @ipr_cmd:	ipr command struct
 *
 * This function copies the sense buffer into the scsi_cmd
 * struct and pushes the scsi_done function.
 *
 * Return value:
 * 	nothing
 **/
static void __ipr_erp_done(struct ipr_cmnd *ipr_cmd)
{
	struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
	struct ipr_resource_entry *res = scsi_cmd->device->hostdata;
	u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);

	if (IPR_IOASC_SENSE_KEY(ioasc) > 0) {
		scsi_cmd->result |= (DID_ERROR << 16);
		scmd_printk(KERN_ERR, scsi_cmd,
			    "Request Sense failed with IOASC: 0x%08X\n", ioasc);
	} else {
		memcpy(scsi_cmd->sense_buffer, ipr_cmd->sense_buffer,
		       SCSI_SENSE_BUFFERSIZE);
	}

	if (res) {
		if (!ipr_is_naca_model(res))
			res->needs_sync_complete = 1;
		res->in_erp = 0;
	}
	scsi_dma_unmap(ipr_cmd->scsi_cmd);
	scsi_cmd->scsi_done(scsi_cmd);
	if (ipr_cmd->eh_comp)
		complete(ipr_cmd->eh_comp);
	list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
}

/**
 * ipr_erp_done - Process completion of ERP for a device
 * @ipr_cmd:	ipr command struct
 *
 * This function acquires the hrrq lock and calls __ipr_erp_done,
 * which copies the sense buffer into the scsi_cmd struct and
 * pushes the scsi_done function.
 *
 * Return value:
 * 	nothing
 **/
static void ipr_erp_done(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_hrr_queue *hrrq = ipr_cmd->hrrq;
	unsigned long hrrq_flags;

	spin_lock_irqsave(&hrrq->_lock, hrrq_flags);
	__ipr_erp_done(ipr_cmd);
	spin_unlock_irqrestore(&hrrq->_lock, hrrq_flags);
}

/**
 * ipr_reinit_ipr_cmnd_for_erp - Re-initialize a cmnd block to be used for ERP
 * @ipr_cmd:	ipr command struct
 *
 * Return value:
 * 	none
 **/
static void ipr_reinit_ipr_cmnd_for_erp(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
	struct ipr_ioasa *ioasa = &ipr_cmd->s.ioasa;
	dma_addr_t dma_addr = ipr_cmd->dma_addr;

	memset(&ioarcb->cmd_pkt, 0, sizeof(struct ipr_cmd_pkt));
	ioarcb->data_transfer_length = 0;
	ioarcb->read_data_transfer_length = 0;
	ioarcb->ioadl_len = 0;
	ioarcb->read_ioadl_len = 0;
	ioasa->hdr.ioasc = 0;
	ioasa->hdr.residual_data_len = 0;

	if (ipr_cmd->ioa_cfg->sis64)
		ioarcb->u.sis64_addr_data.data_ioadl_addr =
			cpu_to_be64(dma_addr + offsetof(struct ipr_cmnd, i.ioadl64));
	else {
		ioarcb->write_ioadl_addr =
			cpu_to_be32(dma_addr + offsetof(struct ipr_cmnd, i.ioadl));
		ioarcb->read_ioadl_addr = ioarcb->write_ioadl_addr;
	}
}

/**
 * __ipr_erp_request_sense - Send request sense to a device
 * @ipr_cmd:	ipr command struct
 *
 * This function sends a request sense to a device as a result
 * of a check condition.
 *
 * Return value:
 * 	nothing
 **/
static void __ipr_erp_request_sense(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_cmd_pkt *cmd_pkt = &ipr_cmd->ioarcb.cmd_pkt;
	u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);

	if (IPR_IOASC_SENSE_KEY(ioasc) > 0) {
		__ipr_erp_done(ipr_cmd);
		return;
	}

	ipr_reinit_ipr_cmnd_for_erp(ipr_cmd);

	cmd_pkt->request_type = IPR_RQTYPE_SCSICDB;
	cmd_pkt->cdb[0] = REQUEST_SENSE;
	cmd_pkt->cdb[4] = SCSI_SENSE_BUFFERSIZE;
	cmd_pkt->flags_hi |= IPR_FLAGS_HI_SYNC_OVERRIDE;
	cmd_pkt->flags_hi |= IPR_FLAGS_HI_NO_ULEN_CHK;
	cmd_pkt->timeout = cpu_to_be16(IPR_REQUEST_SENSE_TIMEOUT / HZ);

	ipr_init_ioadl(ipr_cmd, ipr_cmd->sense_buffer_dma,
		       SCSI_SENSE_BUFFERSIZE, IPR_IOADL_FLAGS_READ_LAST);

	ipr_do_req(ipr_cmd, ipr_erp_done, ipr_timeout,
		   IPR_REQUEST_SENSE_TIMEOUT * 2);
}

/**
 * ipr_erp_request_sense - Send request sense to a device
 * @ipr_cmd:	ipr command struct
 *
 * This function acquires the hrrq lock and calls
 * __ipr_erp_request_sense to send a request sense to a device
 * as a result of a check condition.
 *
 * Return value:
 * 	nothing
 **/
static void ipr_erp_request_sense(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_hrr_queue *hrrq = ipr_cmd->hrrq;
	unsigned long hrrq_flags;

	spin_lock_irqsave(&hrrq->_lock, hrrq_flags);
	__ipr_erp_request_sense(ipr_cmd);
	spin_unlock_irqrestore(&hrrq->_lock, hrrq_flags);
}

/**
 * ipr_erp_cancel_all - Send cancel all to a device
 * @ipr_cmd:	ipr command struct
 *
 * This function sends a cancel all to a device to clear the
 * queue. If we are running TCQ on the device, QERR is set to 1,
 * which means all outstanding ops have been dropped on the floor.
 * Cancel all will return them to us.
 *
 * Return value:
 * 	nothing
 **/
static void ipr_erp_cancel_all(struct ipr_cmnd *ipr_cmd)
{
	struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
	struct ipr_resource_entry *res = scsi_cmd->device->hostdata;
	struct ipr_cmd_pkt *cmd_pkt;

	res->in_erp = 1;

	ipr_reinit_ipr_cmnd_for_erp(ipr_cmd);

	if (!scsi_cmd->device->simple_tags) {
		__ipr_erp_request_sense(ipr_cmd);
		return;
	}

	cmd_pkt = &ipr_cmd->ioarcb.cmd_pkt;
	cmd_pkt->request_type = IPR_RQTYPE_IOACMD;
	cmd_pkt->cdb[0] = IPR_CANCEL_ALL_REQUESTS;

	ipr_do_req(ipr_cmd, ipr_erp_request_sense, ipr_timeout,
		   IPR_CANCEL_ALL_TIMEOUT);
}

/**
 * ipr_dump_ioasa - Dump contents of IOASA
 * @ioa_cfg:	ioa config struct
 * @ipr_cmd:	ipr command struct
 * @res:	resource entry struct
 *
 * This function is invoked by the interrupt handler when ops
 * fail. It will log the IOASA if appropriate. Only called
 * for GPDD ops.
 *
 * Return value:
 * 	none
 **/
static void ipr_dump_ioasa(struct ipr_ioa_cfg *ioa_cfg,
			   struct ipr_cmnd *ipr_cmd, struct ipr_resource_entry *res)
{
	int i;
	u16 data_len;
	u32 ioasc, fd_ioasc;
	struct ipr_ioasa *ioasa = &ipr_cmd->s.ioasa;
	__be32 *ioasa_data = (__be32 *)ioasa;
	int error_index;

	ioasc = be32_to_cpu(ioasa->hdr.ioasc) & IPR_IOASC_IOASC_MASK;
	fd_ioasc = be32_to_cpu(ioasa->hdr.fd_ioasc) & IPR_IOASC_IOASC_MASK;

	if (0 == ioasc)
		return;

	if (ioa_cfg->log_level < IPR_DEFAULT_LOG_LEVEL)
		return;

	if (ioasc == IPR_IOASC_BUS_WAS_RESET && fd_ioasc)
		error_index = ipr_get_error(fd_ioasc);
	else
		error_index = ipr_get_error(ioasc);

	if (ioa_cfg->log_level < IPR_MAX_LOG_LEVEL) {
		/* Don't log an error if the IOA already logged one */
		if (ioasa->hdr.ilid != 0)
			return;

		if (!ipr_is_gscsi(res))
			return;

		if (ipr_error_table[error_index].log_ioasa == 0)
			return;
	}

	ipr_res_err(ioa_cfg, res, "%s\n", ipr_error_table[error_index].error);

	data_len = be16_to_cpu(ioasa->hdr.ret_stat_len);
	if (ioa_cfg->sis64 && sizeof(struct ipr_ioasa64) < data_len)
		data_len = sizeof(struct ipr_ioasa64);
	else if (!ioa_cfg->sis64 && sizeof(struct ipr_ioasa) < data_len)
		data_len = sizeof(struct ipr_ioasa);

	ipr_err("IOASA Dump:\n");

	for (i = 0; i < data_len / 4; i += 4) {
		ipr_err("%08X: %08X %08X %08X %08X\n", i*4,
			be32_to_cpu(ioasa_data[i]),
			be32_to_cpu(ioasa_data[i+1]),
			be32_to_cpu(ioasa_data[i+2]),
			be32_to_cpu(ioasa_data[i+3]));
	}
}

/**
 * ipr_gen_sense - Generate SCSI sense data from an IOASA
 * @ipr_cmd:	ipr command struct
 *
 * Return value:
 * 	none
 **/
static void ipr_gen_sense(struct ipr_cmnd *ipr_cmd)
{
	u32 failing_lba;
	u8 *sense_buf = ipr_cmd->scsi_cmd->sense_buffer;
	struct ipr_resource_entry *res = ipr_cmd->scsi_cmd->device->hostdata;
	struct ipr_ioasa *ioasa = &ipr_cmd->s.ioasa;
	u32 ioasc = be32_to_cpu(ioasa->hdr.ioasc);

	memset(sense_buf, 0, SCSI_SENSE_BUFFERSIZE);

	if (ioasc >= IPR_FIRST_DRIVER_IOASC)
		return;

	ipr_cmd->scsi_cmd->result = SAM_STAT_CHECK_CONDITION;

	if (ipr_is_vset_device(res) &&
	    ioasc == IPR_IOASC_MED_DO_NOT_REALLOC &&
	    ioasa->u.vset.failing_lba_hi != 0) {
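		/* Added comment: the failing LBA does not fit in the 4-byte
		 * information field of fixed-format sense data, so build
		 * descriptor-format (0x72) sense data and carry the 64-bit
		 * LBA in an information descriptor. The fixed-format (0x70)
		 * layout is used in the else branch below. */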
		sense_buf[0] = 0x72;
		sense_buf[1] = IPR_IOASC_SENSE_KEY(ioasc);
		sense_buf[2] = IPR_IOASC_SENSE_CODE(ioasc);
		sense_buf[3] = IPR_IOASC_SENSE_QUAL(ioasc);

		sense_buf[7] = 12;
		sense_buf[8] = 0;
		sense_buf[9] = 0x0A;
		sense_buf[10] = 0x80;

		failing_lba = be32_to_cpu(ioasa->u.vset.failing_lba_hi);

		sense_buf[12] = (failing_lba & 0xff000000) >> 24;
		sense_buf[13] = (failing_lba & 0x00ff0000) >> 16;
		sense_buf[14] = (failing_lba & 0x0000ff00) >> 8;
		sense_buf[15] = failing_lba & 0x000000ff;

		failing_lba = be32_to_cpu(ioasa->u.vset.failing_lba_lo);

		sense_buf[16] = (failing_lba & 0xff000000) >> 24;
		sense_buf[17] = (failing_lba & 0x00ff0000) >> 16;
		sense_buf[18] = (failing_lba & 0x0000ff00) >> 8;
		sense_buf[19] = failing_lba & 0x000000ff;
	} else {
		sense_buf[0] = 0x70;
		sense_buf[2] = IPR_IOASC_SENSE_KEY(ioasc);
		sense_buf[12] = IPR_IOASC_SENSE_CODE(ioasc);
		sense_buf[13] = IPR_IOASC_SENSE_QUAL(ioasc);

		/* Illegal request */
		if ((IPR_IOASC_SENSE_KEY(ioasc) == 0x05) &&
		    (be32_to_cpu(ioasa->hdr.ioasc_specific) & IPR_FIELD_POINTER_VALID)) {
			sense_buf[7] = 10;	/* additional length */

			/* IOARCB was in error */
			if (IPR_IOASC_SENSE_CODE(ioasc) == 0x24)
				sense_buf[15] = 0xC0;
			else	/* Parameter data was invalid */
				sense_buf[15] = 0x80;

			sense_buf[16] =
			    ((IPR_FIELD_POINTER_MASK &
			      be32_to_cpu(ioasa->hdr.ioasc_specific)) >> 8) & 0xff;
			sense_buf[17] =
			    (IPR_FIELD_POINTER_MASK &
			     be32_to_cpu(ioasa->hdr.ioasc_specific)) & 0xff;
		} else {
			if (ioasc == IPR_IOASC_MED_DO_NOT_REALLOC) {
				if (ipr_is_vset_device(res))
					failing_lba = be32_to_cpu(ioasa->u.vset.failing_lba_lo);
				else
					failing_lba = be32_to_cpu(ioasa->u.dasd.failing_lba);

				sense_buf[0] |= 0x80;	/* Or in the Valid bit */
				sense_buf[3] = (failing_lba & 0xff000000) >> 24;
				sense_buf[4] = (failing_lba & 0x00ff0000) >> 16;
				sense_buf[5] = (failing_lba & 0x0000ff00) >> 8;
				sense_buf[6] = failing_lba & 0x000000ff;
			}

			sense_buf[7] = 6;	/* additional length */
		}
	}
}

/**
 * ipr_get_autosense - Copy autosense data to sense buffer
 * @ipr_cmd:	ipr command struct
 *
 * This function copies the autosense buffer to the buffer
 * in the scsi_cmd, if there is autosense available.
 *
 * Return value:
 * 	1 if autosense was available / 0 if not
 **/
static int ipr_get_autosense(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioasa *ioasa = &ipr_cmd->s.ioasa;
	struct ipr_ioasa64 *ioasa64 = &ipr_cmd->s.ioasa64;

	if ((be32_to_cpu(ioasa->hdr.ioasc_specific) & IPR_AUTOSENSE_VALID) == 0)
		return 0;

	if (ipr_cmd->ioa_cfg->sis64)
		memcpy(ipr_cmd->scsi_cmd->sense_buffer, ioasa64->auto_sense.data,
		       min_t(u16, be16_to_cpu(ioasa64->auto_sense.auto_sense_len),
			   SCSI_SENSE_BUFFERSIZE));
	else
		memcpy(ipr_cmd->scsi_cmd->sense_buffer, ioasa->auto_sense.data,
		       min_t(u16, be16_to_cpu(ioasa->auto_sense.auto_sense_len),
			   SCSI_SENSE_BUFFERSIZE));
	return 1;
}

/**
 * ipr_erp_start - Process an error response for a SCSI op
 * @ioa_cfg:	ioa config struct
 * @ipr_cmd:	ipr command struct
 *
 * This function determines whether or not to initiate ERP
 * on the affected device.
 *
 * Return value:
 * 	nothing
 **/
static void ipr_erp_start(struct ipr_ioa_cfg *ioa_cfg,
			  struct ipr_cmnd *ipr_cmd)
{
	struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
	struct ipr_resource_entry *res = scsi_cmd->device->hostdata;
	u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
	u32 masked_ioasc = ioasc & IPR_IOASC_IOASC_MASK;

	if (!res) {
		__ipr_scsi_eh_done(ipr_cmd);
		return;
	}

	if (!ipr_is_gscsi(res) && masked_ioasc != IPR_IOASC_HW_DEV_BUS_STATUS)
		ipr_gen_sense(ipr_cmd);

	ipr_dump_ioasa(ioa_cfg, ipr_cmd, res);

	switch (masked_ioasc) {
	case IPR_IOASC_ABORTED_CMD_TERM_BY_HOST:
		if (ipr_is_naca_model(res))
			scsi_cmd->result |= (DID_ABORT << 16);
		else
			scsi_cmd->result |= (DID_IMM_RETRY << 16);
		break;
	case IPR_IOASC_IR_RESOURCE_HANDLE:
	case IPR_IOASC_IR_NO_CMDS_TO_2ND_IOA:
		scsi_cmd->result |= (DID_NO_CONNECT << 16);
		break;
	case IPR_IOASC_HW_SEL_TIMEOUT:
		scsi_cmd->result |= (DID_NO_CONNECT << 16);
		if (!ipr_is_naca_model(res))
			res->needs_sync_complete = 1;
		break;
	case IPR_IOASC_SYNC_REQUIRED:
		if (!res->in_erp)
			res->needs_sync_complete = 1;
		scsi_cmd->result |= (DID_IMM_RETRY << 16);
		break;
	case IPR_IOASC_MED_DO_NOT_REALLOC: /* prevent retries */
	case IPR_IOASA_IR_DUAL_IOA_DISABLED:
		/*
		 * exception: do not set DID_PASSTHROUGH on CHECK CONDITION
		 * so SCSI mid-layer and upper layers handle it accordingly.
		 */
		if (scsi_cmd->result != SAM_STAT_CHECK_CONDITION)
			scsi_cmd->result |= (DID_PASSTHROUGH << 16);
		break;
	case IPR_IOASC_BUS_WAS_RESET:
	case IPR_IOASC_BUS_WAS_RESET_BY_OTHER:
		/*
		 * Report the bus reset and ask for a retry. The device
		 * will give CC/UA the next command.
		 */
		if (!res->resetting_device)
			scsi_report_bus_reset(ioa_cfg->host, scsi_cmd->device->channel);
		scsi_cmd->result |= (DID_ERROR << 16);
		if (!ipr_is_naca_model(res))
			res->needs_sync_complete = 1;
		break;
	case IPR_IOASC_HW_DEV_BUS_STATUS:
		scsi_cmd->result |= IPR_IOASC_SENSE_STATUS(ioasc);
		if (IPR_IOASC_SENSE_STATUS(ioasc) == SAM_STAT_CHECK_CONDITION) {
			if (!ipr_get_autosense(ipr_cmd)) {
				if (!ipr_is_naca_model(res)) {
					ipr_erp_cancel_all(ipr_cmd);
					return;
				}
			}
		}
		if (!ipr_is_naca_model(res))
			res->needs_sync_complete = 1;
		break;
	case IPR_IOASC_NR_INIT_CMD_REQUIRED:
		break;
	case IPR_IOASC_IR_NON_OPTIMIZED:
		if (res->raw_mode) {
			res->raw_mode = 0;
			scsi_cmd->result |= (DID_IMM_RETRY << 16);
		} else
			scsi_cmd->result |= (DID_ERROR << 16);
		break;
	default:
		if (IPR_IOASC_SENSE_KEY(ioasc) > RECOVERED_ERROR)
			scsi_cmd->result |= (DID_ERROR << 16);
		if (!ipr_is_vset_device(res) && !ipr_is_naca_model(res))
			res->needs_sync_complete = 1;
		break;
	}

	scsi_dma_unmap(ipr_cmd->scsi_cmd);
	scsi_cmd->scsi_done(scsi_cmd);
	if (ipr_cmd->eh_comp)
		complete(ipr_cmd->eh_comp);
	list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
}

/**
 * ipr_scsi_done - mid-layer done function
 * @ipr_cmd:	ipr command struct
 *
 * This function is invoked by the interrupt handler for
 * ops generated by the SCSI mid-layer
 *
 * Return value:
 * 	none
 **/
static void ipr_scsi_done(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
	struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
	u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
	unsigned long lock_flags;

	scsi_set_resid(scsi_cmd, be32_to_cpu(ipr_cmd->s.ioasa.hdr.residual_data_len));

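	/* Added comment: on the fast path a successful command completes
	 * under just the hrrq lock; only the error path below takes the
	 * host lock, since ERP may touch adapter-wide state. */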
	if (likely(IPR_IOASC_SENSE_KEY(ioasc) == 0)) {
		scsi_dma_unmap(scsi_cmd);

		spin_lock_irqsave(ipr_cmd->hrrq->lock, lock_flags);
		scsi_cmd->scsi_done(scsi_cmd);
		if (ipr_cmd->eh_comp)
			complete(ipr_cmd->eh_comp);
		list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
		spin_unlock_irqrestore(ipr_cmd->hrrq->lock, lock_flags);
	} else {
		spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
		spin_lock(&ipr_cmd->hrrq->_lock);
		ipr_erp_start(ioa_cfg, ipr_cmd);
		spin_unlock(&ipr_cmd->hrrq->_lock);
		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
	}
}

/**
 * ipr_queuecommand - Queue a mid-layer request
 * @shost:	scsi host struct
 * @scsi_cmd:	scsi command struct
 *
 * This function queues a request generated by the mid-layer.
 *
 * Return value:
 *	0 on success
 *	SCSI_MLQUEUE_DEVICE_BUSY if device is busy
 *	SCSI_MLQUEUE_HOST_BUSY if host is busy
 **/
static int ipr_queuecommand(struct Scsi_Host *shost,
			    struct scsi_cmnd *scsi_cmd)
{
	struct ipr_ioa_cfg *ioa_cfg;
	struct ipr_resource_entry *res;
	struct ipr_ioarcb *ioarcb;
	struct ipr_cmnd *ipr_cmd;
	unsigned long hrrq_flags, lock_flags;
	int rc;
	struct ipr_hrr_queue *hrrq;
	int hrrq_id;

	ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;

	scsi_cmd->result = (DID_OK << 16);
	res = scsi_cmd->device->hostdata;

	if (ipr_is_gata(res) && res->sata_port) {
		spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
		rc = ata_sas_queuecmd(scsi_cmd, res->sata_port->ap);
		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
		return rc;
	}

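	/* Added comment: spread commands across the host RRQs;
	 * ipr_get_hrrq_index() appears to pick queues effectively
	 * round-robin on multi-queue adapters, distributing completions
	 * over the available interrupt vectors. */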
05a6538a 6566 hrrq_id = ipr_get_hrrq_index(ioa_cfg);
6567 hrrq = &ioa_cfg->hrrq[hrrq_id];
1da177e4 6568
56d6aa33 6569 spin_lock_irqsave(hrrq->lock, hrrq_flags);
1da177e4
LT
6570 /*
6571 * We are currently blocking all devices due to a host reset
6572 * We have told the host to stop giving us new requests, but
6573 * ERP ops don't count. FIXME
6574 */
bfae7820 6575 if (unlikely(!hrrq->allow_cmds && !hrrq->ioa_is_dead && !hrrq->removing_ioa)) {
56d6aa33 6576 spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
1da177e4 6577 return SCSI_MLQUEUE_HOST_BUSY;
00bfef2c 6578 }
1da177e4
LT
6579
6580 /*
6581 * FIXME - Create scsi_set_host_offline interface
6582 * and the ioa_is_dead check can be removed
6583 */
bfae7820 6584 if (unlikely(hrrq->ioa_is_dead || hrrq->removing_ioa || !res)) {
56d6aa33 6585 spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
00bfef2c 6586 goto err_nodev;
1da177e4
LT
6587 }
6588
05a6538a 6589 ipr_cmd = __ipr_get_free_ipr_cmnd(hrrq);
6590 if (ipr_cmd == NULL) {
56d6aa33 6591 spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
05a6538a 6592 return SCSI_MLQUEUE_HOST_BUSY;
6593 }
56d6aa33 6594 spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
35a39691 6595
172cd6e1 6596 ipr_init_ipr_cmnd(ipr_cmd, ipr_scsi_done);
1da177e4 6597 ioarcb = &ipr_cmd->ioarcb;
1da177e4
LT
6598
6599 memcpy(ioarcb->cmd_pkt.cdb, scsi_cmd->cmnd, scsi_cmd->cmd_len);
6600 ipr_cmd->scsi_cmd = scsi_cmd;
172cd6e1 6601 ipr_cmd->done = ipr_scsi_eh_done;
1da177e4 6602
4f92d01a 6603 if (ipr_is_gscsi(res)) {
1da177e4
LT
6604 if (scsi_cmd->underflow == 0)
6605 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_NO_ULEN_CHK;
6606
4f92d01a 6607 if (res->reset_occurred) {
0b1f8d44 6608 res->reset_occurred = 0;
ab6c10b1 6609 ioarcb->cmd_pkt.flags_lo |= IPR_FLAGS_LO_DELAY_AFTER_RST;
0b1f8d44 6610 }
4f92d01a
GKB
6611 }
6612
6613 if (ipr_is_gscsi(res) || ipr_is_vset_device(res)) {
6614 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_NO_LINK_DESC;
6615
1da177e4 6616 ioarcb->cmd_pkt.flags_lo |= IPR_FLAGS_LO_ALIGNED_BFR;
50668633
CH
6617 if (scsi_cmd->flags & SCMD_TAGGED)
6618 ioarcb->cmd_pkt.flags_lo |= IPR_FLAGS_LO_SIMPLE_TASK;
6619 else
6620 ioarcb->cmd_pkt.flags_lo |= IPR_FLAGS_LO_UNTAGGED_TASK;
1da177e4
LT
6621 }
6622
6623 if (scsi_cmd->cmnd[0] >= 0xC0 &&
05a6538a 6624 (!ipr_is_gscsi(res) || scsi_cmd->cmnd[0] == IPR_QUERY_RSRC_STATE)) {
1da177e4 6625 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
05a6538a 6626 }
3cb4fc1f 6627 if (res->raw_mode && ipr_is_af_dasd_device(res)) {
f8ee25d7 6628 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_PIPE;
1da177e4 6629
3cb4fc1f
GKB
6630 if (scsi_cmd->underflow == 0)
6631 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_NO_ULEN_CHK;
6632 }
6633
d12f1576
DC
6634 if (ioa_cfg->sis64)
6635 rc = ipr_build_ioadl64(ioa_cfg, ipr_cmd);
6636 else
6637 rc = ipr_build_ioadl(ioa_cfg, ipr_cmd);
1da177e4 6638
56d6aa33 6639 spin_lock_irqsave(hrrq->lock, hrrq_flags);
6640 if (unlikely(rc || (!hrrq->allow_cmds && !hrrq->ioa_is_dead))) {
05a6538a 6641 list_add_tail(&ipr_cmd->queue, &hrrq->hrrq_free_q);
56d6aa33 6642 spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
00bfef2c
BK
6643 if (!rc)
6644 scsi_dma_unmap(scsi_cmd);
a5fb407e 6645 return SCSI_MLQUEUE_HOST_BUSY;
1da177e4
LT
6646 }
6647
56d6aa33 6648 if (unlikely(hrrq->ioa_is_dead)) {
05a6538a 6649 list_add_tail(&ipr_cmd->queue, &hrrq->hrrq_free_q);
56d6aa33 6650 spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
00bfef2c
BK
6651 scsi_dma_unmap(scsi_cmd);
6652 goto err_nodev;
6653 }
6654
6655 ioarcb->res_handle = res->res_handle;
6656 if (res->needs_sync_complete) {
6657 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_SYNC_COMPLETE;
6658 res->needs_sync_complete = 0;
6659 }
05a6538a 6660 list_add_tail(&ipr_cmd->queue, &hrrq->hrrq_pending_q);
00bfef2c 6661 ipr_trc_hook(ipr_cmd, IPR_TRACE_START, IPR_GET_RES_PHYS_LOC(res));
a5fb407e 6662 ipr_send_command(ipr_cmd);
56d6aa33 6663 spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
1da177e4 6664 return 0;
1da177e4 6665
00bfef2c 6666err_nodev:
56d6aa33 6667 spin_lock_irqsave(hrrq->lock, hrrq_flags);
00bfef2c
BK
6668 memset(scsi_cmd->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);
6669 scsi_cmd->result = (DID_NO_CONNECT << 16);
6670 scsi_cmd->scsi_done(scsi_cmd);
56d6aa33 6671 spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
00bfef2c
BK
6672 return 0;
6673}
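
/*
 * Editor's sketch (user-space C, not part of ipr.c): how the host byte
 * used above -- DID_OK << 16 on entry, DID_NO_CONNECT << 16 on the
 * err_nodev path -- packs into scsi_cmnd->result. The two constants
 * mirror what I believe scsi.h defines; treat them as assumptions.
 */
#include <stdint.h>
#include <stdio.h>

#define SK_DID_OK         0x00	/* assumed value of DID_OK */
#define SK_DID_NO_CONNECT 0x01	/* assumed value of DID_NO_CONNECT */

static uint32_t sk_result(uint8_t host_byte, uint8_t scsi_status)
{
	/* host byte lives in bits 16..23; SCSI status in the low byte */
	return ((uint32_t)host_byte << 16) | scsi_status;
}

int main(void)
{
	printf("ok:         0x%08x\n", sk_result(SK_DID_OK, 0));
	printf("no connect: 0x%08x\n", sk_result(SK_DID_NO_CONNECT, 0));
	return 0;
}
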
f281233d 6674
35a39691
BK
6675/**
6676 * ipr_ioctl - IOCTL handler
6677 * @sdev: scsi device struct
6678 * @cmd: IOCTL cmd
6679 * @arg: IOCTL arg
6680 *
6681 * Return value:
6682 * 0 on success / other on failure
6683 **/
bd705f2d 6684static int ipr_ioctl(struct scsi_device *sdev, int cmd, void __user *arg)
35a39691
BK
6685{
6686 struct ipr_resource_entry *res;
6687
6688 res = (struct ipr_resource_entry *)sdev->hostdata;
0ce3a7e5
BK
6689 if (res && ipr_is_gata(res)) {
6690 if (cmd == HDIO_GET_IDENTITY)
6691 return -ENOTTY;
94be9a58 6692 return ata_sas_scsi_ioctl(res->sata_port->ap, sdev, cmd, arg);
0ce3a7e5 6693 }
35a39691
BK
6694
6695 return -EINVAL;
6696}
6697
1da177e4
LT
6698/**
6699 * ipr_ioa_info - Get information about the card/driver
6700 * @host: scsi host struct
6701 *
6702 * Return value:
6703 * pointer to buffer with description string
6704 **/
203fa3fe 6705static const char *ipr_ioa_info(struct Scsi_Host *host)
1da177e4
LT
6706{
6707 static char buffer[512];
6708 struct ipr_ioa_cfg *ioa_cfg;
6709 unsigned long lock_flags = 0;
6710
6711 ioa_cfg = (struct ipr_ioa_cfg *) host->hostdata;
6712
6713 spin_lock_irqsave(host->host_lock, lock_flags);
6714 sprintf(buffer, "IBM %X Storage Adapter", ioa_cfg->type);
6715 spin_unlock_irqrestore(host->host_lock, lock_flags);
6716
6717 return buffer;
6718}
6719
6720static struct scsi_host_template driver_template = {
6721 .module = THIS_MODULE,
6722 .name = "IPR",
6723 .info = ipr_ioa_info,
35a39691 6724 .ioctl = ipr_ioctl,
1da177e4
LT
6725 .queuecommand = ipr_queuecommand,
6726 .eh_abort_handler = ipr_eh_abort,
6727 .eh_device_reset_handler = ipr_eh_dev_reset,
6728 .eh_host_reset_handler = ipr_eh_host_reset,
6729 .slave_alloc = ipr_slave_alloc,
6730 .slave_configure = ipr_slave_configure,
6731 .slave_destroy = ipr_slave_destroy,
f688f96d 6732 .scan_finished = ipr_scan_finished,
35a39691
BK
6733 .target_alloc = ipr_target_alloc,
6734 .target_destroy = ipr_target_destroy,
1da177e4 6735 .change_queue_depth = ipr_change_queue_depth,
1da177e4
LT
6736 .bios_param = ipr_biosparam,
6737 .can_queue = IPR_MAX_COMMANDS,
6738 .this_id = -1,
6739 .sg_tablesize = IPR_MAX_SGLIST,
6740 .max_sectors = IPR_IOA_MAX_SECTORS,
6741 .cmd_per_lun = IPR_MAX_CMD_PER_LUN,
6742 .use_clustering = ENABLE_CLUSTERING,
6743 .shost_attrs = ipr_ioa_attrs,
6744 .sdev_attrs = ipr_dev_attrs,
54b2b50c 6745 .proc_name = IPR_NAME,
1da177e4
LT
6746};
6747
35a39691
BK
6748/**
6749 * ipr_ata_phy_reset - libata phy_reset handler
6750 * @ap: ata port to reset
6751 *
6752 **/
6753static void ipr_ata_phy_reset(struct ata_port *ap)
6754{
6755 unsigned long flags;
6756 struct ipr_sata_port *sata_port = ap->private_data;
6757 struct ipr_resource_entry *res = sata_port->res;
6758 struct ipr_ioa_cfg *ioa_cfg = sata_port->ioa_cfg;
6759 int rc;
6760
6761 ENTER;
6762 spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
203fa3fe 6763 while (ioa_cfg->in_reset_reload) {
35a39691
BK
6764 spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
6765 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
6766 spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
6767 }
6768
56d6aa33 6769 if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].allow_cmds)
35a39691
BK
6770 goto out_unlock;
6771
6772 rc = ipr_device_reset(ioa_cfg, res);
6773
6774 if (rc) {
3e4ec344 6775 ap->link.device[0].class = ATA_DEV_NONE;
35a39691
BK
6776 goto out_unlock;
6777 }
6778
3e7ebdfa
WB
6779 ap->link.device[0].class = res->ata_class;
6780 if (ap->link.device[0].class == ATA_DEV_UNKNOWN)
3e4ec344 6781 ap->link.device[0].class = ATA_DEV_NONE;
35a39691
BK
6782
6783out_unlock:
6784 spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
6785 LEAVE;
6786}
6787
6788/**
6789 * ipr_ata_post_internal - Cleanup after an internal command
6790 * @qc: ATA queued command
6791 *
6792 * Return value:
6793 * none
6794 **/
6795static void ipr_ata_post_internal(struct ata_queued_cmd *qc)
6796{
6797 struct ipr_sata_port *sata_port = qc->ap->private_data;
6798 struct ipr_ioa_cfg *ioa_cfg = sata_port->ioa_cfg;
6799 struct ipr_cmnd *ipr_cmd;
05a6538a 6800 struct ipr_hrr_queue *hrrq;
35a39691
BK
6801 unsigned long flags;
6802
6803 spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
203fa3fe 6804 while (ioa_cfg->in_reset_reload) {
73d98ff0
BK
6805 spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
6806 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
6807 spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
6808 }
6809
05a6538a 6810 for_each_hrrq(hrrq, ioa_cfg) {
56d6aa33 6811 spin_lock(&hrrq->_lock);
05a6538a 6812 list_for_each_entry(ipr_cmd, &hrrq->hrrq_pending_q, queue) {
6813 if (ipr_cmd->qc == qc) {
6814 ipr_device_reset(ioa_cfg, sata_port->res);
6815 break;
6816 }
35a39691 6817 }
56d6aa33 6818 spin_unlock(&hrrq->_lock);
35a39691
BK
6819 }
6820 spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
6821}
6822
35a39691
BK
6823/**
6824 * ipr_copy_sata_tf - Copy a SATA taskfile to an IOA data structure
6825 * @regs: destination
6826 * @tf: source ATA taskfile
6827 *
6828 * Return value:
6829 * none
6830 **/
6831static void ipr_copy_sata_tf(struct ipr_ioarcb_ata_regs *regs,
6832 struct ata_taskfile *tf)
6833{
6834 regs->feature = tf->feature;
6835 regs->nsect = tf->nsect;
6836 regs->lbal = tf->lbal;
6837 regs->lbam = tf->lbam;
6838 regs->lbah = tf->lbah;
6839 regs->device = tf->device;
6840 regs->command = tf->command;
6841 regs->hob_feature = tf->hob_feature;
6842 regs->hob_nsect = tf->hob_nsect;
6843 regs->hob_lbal = tf->hob_lbal;
6844 regs->hob_lbam = tf->hob_lbam;
6845 regs->hob_lbah = tf->hob_lbah;
6846 regs->ctl = tf->ctl;
6847}
6848
6849/**
6850 * ipr_sata_done - done function for SATA commands
6851 * @ipr_cmd: ipr command struct
6852 *
6853 * This function is invoked by the interrupt handler for
6854 * ops generated by the SCSI mid-layer to SATA devices
6855 *
6856 * Return value:
6857 * none
6858 **/
6859static void ipr_sata_done(struct ipr_cmnd *ipr_cmd)
6860{
6861 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6862 struct ata_queued_cmd *qc = ipr_cmd->qc;
6863 struct ipr_sata_port *sata_port = qc->ap->private_data;
6864 struct ipr_resource_entry *res = sata_port->res;
96d21f00 6865 u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
35a39691 6866
56d6aa33 6867 spin_lock(&ipr_cmd->hrrq->_lock);
96d21f00
WB
6868 if (ipr_cmd->ioa_cfg->sis64)
6869 memcpy(&sata_port->ioasa, &ipr_cmd->s.ioasa64.u.gata,
6870 sizeof(struct ipr_ioasa_gata));
6871 else
6872 memcpy(&sata_port->ioasa, &ipr_cmd->s.ioasa.u.gata,
6873 sizeof(struct ipr_ioasa_gata));
35a39691
BK
6874 ipr_dump_ioasa(ioa_cfg, ipr_cmd, res);
6875
96d21f00 6876 if (be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc_specific) & IPR_ATA_DEVICE_WAS_RESET)
3e7ebdfa 6877 scsi_report_device_reset(ioa_cfg->host, res->bus, res->target);
35a39691
BK
6878
6879 if (IPR_IOASC_SENSE_KEY(ioasc) > RECOVERED_ERROR)
96d21f00 6880 qc->err_mask |= __ac_err_mask(sata_port->ioasa.status);
35a39691 6881 else
96d21f00 6882 qc->err_mask |= ac_err_mask(sata_port->ioasa.status);
05a6538a 6883 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
56d6aa33 6884 spin_unlock(&ipr_cmd->hrrq->_lock);
35a39691
BK
6885 ata_qc_complete(qc);
6886}
6887
a32c055f
WB
6888/**
6889 * ipr_build_ata_ioadl64 - Build an ATA scatter/gather list
6890 * @ipr_cmd: ipr command struct
6891 * @qc: ATA queued command
6892 *
6893 **/
6894static void ipr_build_ata_ioadl64(struct ipr_cmnd *ipr_cmd,
6895 struct ata_queued_cmd *qc)
6896{
6897 u32 ioadl_flags = 0;
6898 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
1ac7c26d 6899 struct ipr_ioadl64_desc *ioadl64 = ipr_cmd->i.ata_ioadl.ioadl64;
a32c055f
WB
6900 struct ipr_ioadl64_desc *last_ioadl64 = NULL;
6901 int len = qc->nbytes;
6902 struct scatterlist *sg;
6903 unsigned int si;
6904 dma_addr_t dma_addr = ipr_cmd->dma_addr;
6905
6906 if (len == 0)
6907 return;
6908
6909 if (qc->dma_dir == DMA_TO_DEVICE) {
6910 ioadl_flags = IPR_IOADL_FLAGS_WRITE;
6911 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
6912 } else if (qc->dma_dir == DMA_FROM_DEVICE)
6913 ioadl_flags = IPR_IOADL_FLAGS_READ;
6914
6915 ioarcb->data_transfer_length = cpu_to_be32(len);
6916 ioarcb->ioadl_len =
6917 cpu_to_be32(sizeof(struct ipr_ioadl64_desc) * ipr_cmd->dma_use_sg);
6918 ioarcb->u.sis64_addr_data.data_ioadl_addr =
1ac7c26d 6919 cpu_to_be64(dma_addr + offsetof(struct ipr_cmnd, i.ata_ioadl.ioadl64));
a32c055f
WB
6920
6921 for_each_sg(qc->sg, sg, qc->n_elem, si) {
6922 ioadl64->flags = cpu_to_be32(ioadl_flags);
6923 ioadl64->data_len = cpu_to_be32(sg_dma_len(sg));
6924 ioadl64->address = cpu_to_be64(sg_dma_address(sg));
6925
6926 last_ioadl64 = ioadl64;
6927 ioadl64++;
6928 }
6929
6930 if (likely(last_ioadl64))
6931 last_ioadl64->flags |= cpu_to_be32(IPR_IOADL_FLAGS_LAST);
6932}
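
/*
 * Editor's sketch (user-space C, not part of ipr.c): the idiom used by
 * ipr_build_ata_ioadl64() above -- emit one descriptor per S/G element,
 * remember the most recent one, and OR a LAST flag into it after the
 * loop. Struct layout and the flag value are invented for illustration.
 */
#include <stdint.h>
#include <stdio.h>

#define SK_FLAGS_LAST 0x80000000u	/* stand-in for IPR_IOADL_FLAGS_LAST */

struct sk_desc {
	uint32_t flags;
	uint32_t len;
	uint64_t addr;
};

static void sk_build(struct sk_desc *d, const uint64_t *addr,
		     const uint32_t *len, int n, uint32_t dir_flags)
{
	struct sk_desc *last = NULL;

	for (int i = 0; i < n; i++) {
		d[i].flags = dir_flags;
		d[i].len = len[i];
		d[i].addr = addr[i];
		last = &d[i];
	}
	if (last)	/* only the final element carries the LAST marker */
		last->flags |= SK_FLAGS_LAST;
}

int main(void)
{
	uint64_t addr[2] = { 0x1000, 0x2000 };
	uint32_t len[2] = { 512, 4096 };
	struct sk_desc d[2];

	sk_build(d, addr, len, 2, 0x1);
	printf("last flags: 0x%08x\n", d[1].flags);
	return 0;
}
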
6933
35a39691
BK
6934/**
6935 * ipr_build_ata_ioadl - Build an ATA scatter/gather list
6936 * @ipr_cmd: ipr command struct
6937 * @qc: ATA queued command
6938 *
6939 **/
6940static void ipr_build_ata_ioadl(struct ipr_cmnd *ipr_cmd,
6941 struct ata_queued_cmd *qc)
6942{
6943 u32 ioadl_flags = 0;
6944 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
a32c055f 6945 struct ipr_ioadl_desc *ioadl = ipr_cmd->i.ioadl;
3be6cbd7 6946 struct ipr_ioadl_desc *last_ioadl = NULL;
dde20207 6947 int len = qc->nbytes;
35a39691 6948 struct scatterlist *sg;
ff2aeb1e 6949 unsigned int si;
35a39691
BK
6950
6951 if (len == 0)
6952 return;
6953
6954 if (qc->dma_dir == DMA_TO_DEVICE) {
6955 ioadl_flags = IPR_IOADL_FLAGS_WRITE;
6956 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
a32c055f
WB
6957 ioarcb->data_transfer_length = cpu_to_be32(len);
6958 ioarcb->ioadl_len =
35a39691
BK
6959 cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);
6960 } else if (qc->dma_dir == DMA_FROM_DEVICE) {
6961 ioadl_flags = IPR_IOADL_FLAGS_READ;
6962 ioarcb->read_data_transfer_length = cpu_to_be32(len);
6963 ioarcb->read_ioadl_len =
6964 cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);
6965 }
6966
ff2aeb1e 6967 for_each_sg(qc->sg, sg, qc->n_elem, si) {
35a39691
BK
6968 ioadl->flags_and_data_len = cpu_to_be32(ioadl_flags | sg_dma_len(sg));
6969 ioadl->address = cpu_to_be32(sg_dma_address(sg));
3be6cbd7
JG
6970
6971 last_ioadl = ioadl;
6972 ioadl++;
35a39691 6973 }
3be6cbd7
JG
6974
6975 if (likely(last_ioadl))
6976 last_ioadl->flags_and_data_len |= cpu_to_be32(IPR_IOADL_FLAGS_LAST);
35a39691
BK
6977}
6978
56d6aa33 6979/**
6980 * ipr_qc_defer - Get a free ipr_cmd
6981 * @qc: queued command
6982 *
6983 * Return value:
6984 * 0 on success / ATA_DEFER_LINK if no command block is available
6985 **/
6986static int ipr_qc_defer(struct ata_queued_cmd *qc)
6987{
6988 struct ata_port *ap = qc->ap;
6989 struct ipr_sata_port *sata_port = ap->private_data;
6990 struct ipr_ioa_cfg *ioa_cfg = sata_port->ioa_cfg;
6991 struct ipr_cmnd *ipr_cmd;
6992 struct ipr_hrr_queue *hrrq;
6993 int hrrq_id;
6994
6995 hrrq_id = ipr_get_hrrq_index(ioa_cfg);
6996 hrrq = &ioa_cfg->hrrq[hrrq_id];
6997
6998 qc->lldd_task = NULL;
6999 spin_lock(&hrrq->_lock);
7000 if (unlikely(hrrq->ioa_is_dead)) {
7001 spin_unlock(&hrrq->_lock);
7002 return 0;
7003 }
7004
7005 if (unlikely(!hrrq->allow_cmds)) {
7006 spin_unlock(&hrrq->_lock);
7007 return ATA_DEFER_LINK;
7008 }
7009
7010 ipr_cmd = __ipr_get_free_ipr_cmnd(hrrq);
7011 if (ipr_cmd == NULL) {
7012 spin_unlock(&hrrq->_lock);
7013 return ATA_DEFER_LINK;
7014 }
7015
7016 qc->lldd_task = ipr_cmd;
7017 spin_unlock(&hrrq->_lock);
7018 return 0;
7019}
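
/*
 * Editor's sketch (user-space C with pthreads, not part of ipr.c): the
 * shape of ipr_qc_defer() above -- take the queue lock, bail out early on
 * adapter state, otherwise reserve a free command block for the issue
 * path. All sk_* names are invented; a single pointer stands in for the
 * driver's hrrq_free_q list.
 */
#include <pthread.h>
#include <stddef.h>

enum { SK_OK = 0, SK_DEFER = 1 };

struct sk_queue {
	pthread_mutex_t lock;
	int ioa_is_dead;
	int allow_cmds;
	void *free_slot;	/* one-entry "free list" for the sketch */
};

static int sk_defer(struct sk_queue *q, void **lldd_task)
{
	void *cmd = NULL;

	*lldd_task = NULL;
	pthread_mutex_lock(&q->lock);
	if (q->ioa_is_dead) {
		/* report success; the issue path then fails the op itself */
		pthread_mutex_unlock(&q->lock);
		return SK_OK;
	}
	if (q->allow_cmds) {
		cmd = q->free_slot;	/* pop from the free list */
		q->free_slot = NULL;
	}
	pthread_mutex_unlock(&q->lock);
	if (!cmd)
		return SK_DEFER;	/* like returning ATA_DEFER_LINK */
	*lldd_task = cmd;
	return SK_OK;
}

int main(void)
{
	int block;
	struct sk_queue q = { PTHREAD_MUTEX_INITIALIZER, 0, 1, &block };
	void *task;

	return (sk_defer(&q, &task) == SK_OK && task) ? 0 : 1;
}
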
7020
35a39691
BK
7021/**
7022 * ipr_qc_issue - Issue a SATA qc to a device
7023 * @qc: queued command
7024 *
7025 * Return value:
7026 * 0 on success / AC_ERR_SYSTEM or AC_ERR_INVALID on failure
7027 **/
7028static unsigned int ipr_qc_issue(struct ata_queued_cmd *qc)
7029{
7030 struct ata_port *ap = qc->ap;
7031 struct ipr_sata_port *sata_port = ap->private_data;
7032 struct ipr_resource_entry *res = sata_port->res;
7033 struct ipr_ioa_cfg *ioa_cfg = sata_port->ioa_cfg;
7034 struct ipr_cmnd *ipr_cmd;
7035 struct ipr_ioarcb *ioarcb;
7036 struct ipr_ioarcb_ata_regs *regs;
7037
56d6aa33 7038 if (qc->lldd_task == NULL)
7039 ipr_qc_defer(qc);
7040
7041 ipr_cmd = qc->lldd_task;
7042 if (ipr_cmd == NULL)
0feeed82 7043 return AC_ERR_SYSTEM;
35a39691 7044
56d6aa33 7045 qc->lldd_task = NULL;
7046 spin_lock(&ipr_cmd->hrrq->_lock);
7047 if (unlikely(!ipr_cmd->hrrq->allow_cmds ||
7048 ipr_cmd->hrrq->ioa_is_dead)) {
7049 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
7050 spin_unlock(&ipr_cmd->hrrq->_lock);
7051 return AC_ERR_SYSTEM;
7052 }
7053
05a6538a 7054 ipr_init_ipr_cmnd(ipr_cmd, ipr_lock_and_done);
35a39691 7055 ioarcb = &ipr_cmd->ioarcb;
35a39691 7056
a32c055f
WB
7057 if (ioa_cfg->sis64) {
7058 regs = &ipr_cmd->i.ata_ioadl.regs;
7059 ioarcb->add_cmd_parms_offset = cpu_to_be16(sizeof(*ioarcb));
7060 } else
7061 regs = &ioarcb->u.add_data.u.regs;
7062
7063 memset(regs, 0, sizeof(*regs));
7064 ioarcb->add_cmd_parms_len = cpu_to_be16(sizeof(*regs));
35a39691 7065
56d6aa33 7066 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_pending_q);
35a39691
BK
7067 ipr_cmd->qc = qc;
7068 ipr_cmd->done = ipr_sata_done;
3e7ebdfa 7069 ipr_cmd->ioarcb.res_handle = res->res_handle;
35a39691
BK
7070 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_ATA_PASSTHRU;
7071 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_NO_LINK_DESC;
7072 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_NO_ULEN_CHK;
dde20207 7073 ipr_cmd->dma_use_sg = qc->n_elem;
35a39691 7074
a32c055f
WB
7075 if (ioa_cfg->sis64)
7076 ipr_build_ata_ioadl64(ipr_cmd, qc);
7077 else
7078 ipr_build_ata_ioadl(ipr_cmd, qc);
7079
35a39691
BK
7080 regs->flags |= IPR_ATA_FLAG_STATUS_ON_GOOD_COMPLETION;
7081 ipr_copy_sata_tf(regs, &qc->tf);
7082 memcpy(ioarcb->cmd_pkt.cdb, qc->cdb, IPR_MAX_CDB_LEN);
3e7ebdfa 7083 ipr_trc_hook(ipr_cmd, IPR_TRACE_START, IPR_GET_RES_PHYS_LOC(res));
35a39691
BK
7084
7085 switch (qc->tf.protocol) {
7086 case ATA_PROT_NODATA:
7087 case ATA_PROT_PIO:
7088 break;
7089
7090 case ATA_PROT_DMA:
7091 regs->flags |= IPR_ATA_FLAG_XFER_TYPE_DMA;
7092 break;
7093
0dc36888
TH
7094 case ATAPI_PROT_PIO:
7095 case ATAPI_PROT_NODATA:
35a39691
BK
7096 regs->flags |= IPR_ATA_FLAG_PACKET_CMD;
7097 break;
7098
0dc36888 7099 case ATAPI_PROT_DMA:
35a39691
BK
7100 regs->flags |= IPR_ATA_FLAG_PACKET_CMD;
7101 regs->flags |= IPR_ATA_FLAG_XFER_TYPE_DMA;
7102 break;
7103
7104 default:
7105 WARN_ON(1);
56d6aa33 7106 spin_unlock(&ipr_cmd->hrrq->_lock);
0feeed82 7107 return AC_ERR_INVALID;
35a39691
BK
7108 }
7109
a32c055f 7110 ipr_send_command(ipr_cmd);
56d6aa33 7111 spin_unlock(&ipr_cmd->hrrq->_lock);
a32c055f 7112
35a39691
BK
7113 return 0;
7114}
7115
4c9bf4e7
TH
7116/**
7117 * ipr_qc_fill_rtf - Read result TF
7118 * @qc: ATA queued command
7119 *
7120 * Return value:
7121 * true
7122 **/
7123static bool ipr_qc_fill_rtf(struct ata_queued_cmd *qc)
7124{
7125 struct ipr_sata_port *sata_port = qc->ap->private_data;
7126 struct ipr_ioasa_gata *g = &sata_port->ioasa;
7127 struct ata_taskfile *tf = &qc->result_tf;
7128
7129 tf->feature = g->error;
7130 tf->nsect = g->nsect;
7131 tf->lbal = g->lbal;
7132 tf->lbam = g->lbam;
7133 tf->lbah = g->lbah;
7134 tf->device = g->device;
7135 tf->command = g->status;
7136 tf->hob_nsect = g->hob_nsect;
7137 tf->hob_lbal = g->hob_lbal;
7138 tf->hob_lbam = g->hob_lbam;
7139 tf->hob_lbah = g->hob_lbah;
4c9bf4e7
TH
7140
7141 return true;
7142}
7143
35a39691 7144static struct ata_port_operations ipr_sata_ops = {
35a39691 7145 .phy_reset = ipr_ata_phy_reset,
a1efdaba 7146 .hardreset = ipr_sata_reset,
35a39691 7147 .post_internal_cmd = ipr_ata_post_internal,
35a39691 7148 .qc_prep = ata_noop_qc_prep,
56d6aa33 7149 .qc_defer = ipr_qc_defer,
35a39691 7150 .qc_issue = ipr_qc_issue,
4c9bf4e7 7151 .qc_fill_rtf = ipr_qc_fill_rtf,
35a39691
BK
7152 .port_start = ata_sas_port_start,
7153 .port_stop = ata_sas_port_stop
7154};
7155
7156static struct ata_port_info sata_port_info = {
5067c046
SL
7157 .flags = ATA_FLAG_SATA | ATA_FLAG_PIO_DMA |
7158 ATA_FLAG_SAS_HOST,
0f2e0330
SS
7159 .pio_mask = ATA_PIO4_ONLY,
7160 .mwdma_mask = ATA_MWDMA2,
7161 .udma_mask = ATA_UDMA6,
35a39691
BK
7162 .port_ops = &ipr_sata_ops
7163};
7164
1da177e4
LT
7165#ifdef CONFIG_PPC_PSERIES
7166static const u16 ipr_blocked_processors[] = {
d3dbeef6
ME
7167 PVR_NORTHSTAR,
7168 PVR_PULSAR,
7169 PVR_POWER4,
7170 PVR_ICESTAR,
7171 PVR_SSTAR,
7172 PVR_POWER4p,
7173 PVR_630,
7174 PVR_630p
1da177e4
LT
7175};
7176
7177/**
7178 * ipr_invalid_adapter - Determine if this adapter is supported on this hardware
7179 * @ioa_cfg: ioa cfg struct
7180 *
7181 * Adapters that use Gemstone revision < 3.1 do not work reliably on
7182 * certain pSeries hardware. This function determines if the given
7183 * adapter is in one of these configurations or not.
7184 *
7185 * Return value:
7186 * 1 if adapter is not supported / 0 if adapter is supported
7187 **/
7188static int ipr_invalid_adapter(struct ipr_ioa_cfg *ioa_cfg)
7189{
1da177e4
LT
7190 int i;
7191
44c10138 7192 if ((ioa_cfg->type == 0x5702) && (ioa_cfg->pdev->revision < 4)) {
203fa3fe 7193 for (i = 0; i < ARRAY_SIZE(ipr_blocked_processors); i++) {
d3dbeef6 7194 if (pvr_version_is(ipr_blocked_processors[i]))
44c10138 7195 return 1;
1da177e4
LT
7196 }
7197 }
7198 return 0;
7199}
7200#else
7201#define ipr_invalid_adapter(ioa_cfg) 0
7202#endif
7203
7204/**
7205 * ipr_ioa_bringdown_done - IOA bring down completion.
7206 * @ipr_cmd: ipr command struct
7207 *
7208 * This function processes the completion of an adapter bring down.
7209 * It wakes any reset sleepers.
7210 *
7211 * Return value:
7212 * IPR_RC_JOB_RETURN
7213 **/
7214static int ipr_ioa_bringdown_done(struct ipr_cmnd *ipr_cmd)
7215{
7216 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
96b04db9 7217 int i;
1da177e4
LT
7218
7219 ENTER;
bfae7820
BK
7220 if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].removing_ioa) {
7221 ipr_trace;
b0e17a9b
BK
7222 ioa_cfg->scsi_unblock = 1;
7223 schedule_work(&ioa_cfg->work_q);
bfae7820
BK
7224 }
7225
1da177e4
LT
7226 ioa_cfg->in_reset_reload = 0;
7227 ioa_cfg->reset_retries = 0;
96b04db9 7228 for (i = 0; i < ioa_cfg->hrrq_num; i++) {
7229 spin_lock(&ioa_cfg->hrrq[i]._lock);
7230 ioa_cfg->hrrq[i].ioa_is_dead = 1;
7231 spin_unlock(&ioa_cfg->hrrq[i]._lock);
7232 }
7233 wmb();
7234
05a6538a 7235 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
1da177e4 7236 wake_up_all(&ioa_cfg->reset_wait_q);
1da177e4
LT
7237 LEAVE;
7238
7239 return IPR_RC_JOB_RETURN;
7240}
7241
7242/**
7243 * ipr_ioa_reset_done - IOA reset completion.
7244 * @ipr_cmd: ipr command struct
7245 *
7246 * This function processes the completion of an adapter reset.
7247 * It schedules any necessary mid-layer add/removes and
7248 * wakes any reset sleepers.
7249 *
7250 * Return value:
7251 * IPR_RC_JOB_RETURN
7252 **/
7253static int ipr_ioa_reset_done(struct ipr_cmnd *ipr_cmd)
7254{
7255 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7256 struct ipr_resource_entry *res;
afc3f83c 7257 int j;
1da177e4
LT
7258
7259 ENTER;
7260 ioa_cfg->in_reset_reload = 0;
56d6aa33 7261 for (j = 0; j < ioa_cfg->hrrq_num; j++) {
7262 spin_lock(&ioa_cfg->hrrq[j]._lock);
7263 ioa_cfg->hrrq[j].allow_cmds = 1;
7264 spin_unlock(&ioa_cfg->hrrq[j]._lock);
7265 }
7266 wmb();
1da177e4 7267 ioa_cfg->reset_cmd = NULL;
3d1d0da6 7268 ioa_cfg->doorbell |= IPR_RUNTIME_RESET;
1da177e4
LT
7269
7270 list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
f688f96d 7271 if (res->add_to_ml || res->del_from_ml) {
1da177e4
LT
7272 ipr_trace;
7273 break;
7274 }
7275 }
7276 schedule_work(&ioa_cfg->work_q);
7277
afc3f83c
BK
7278 for (j = 0; j < IPR_NUM_HCAMS; j++) {
7279 list_del_init(&ioa_cfg->hostrcb[j]->queue);
7280 if (j < IPR_NUM_LOG_HCAMS)
7281 ipr_send_hcam(ioa_cfg,
7282 IPR_HCAM_CDB_OP_CODE_LOG_DATA,
7283 ioa_cfg->hostrcb[j]);
1da177e4 7284 else
afc3f83c
BK
7285 ipr_send_hcam(ioa_cfg,
7286 IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE,
7287 ioa_cfg->hostrcb[j]);
1da177e4
LT
7288 }
7289
6bb04170 7290 scsi_report_bus_reset(ioa_cfg->host, IPR_VSET_BUS);
1da177e4
LT
7291 dev_info(&ioa_cfg->pdev->dev, "IOA initialized.\n");
7292
7293 ioa_cfg->reset_retries = 0;
05a6538a 7294 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
1da177e4
LT
7295 wake_up_all(&ioa_cfg->reset_wait_q);
7296
b0e17a9b 7297 ioa_cfg->scsi_unblock = 1;
f688f96d 7298 schedule_work(&ioa_cfg->work_q);
1da177e4
LT
7299 LEAVE;
7300 return IPR_RC_JOB_RETURN;
7301}
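
/*
 * Editor's sketch (user-space C, not part of ipr.c): the job_step state
 * machine the two completion handlers above participate in. Each step
 * either finishes synchronously (CONTINUE -> run the next step now) or
 * fires an async request and unwinds (RETURN -> the completion interrupt
 * re-enters the job). The driving loop is my guess at the shape of
 * ipr_reset_ioa_job(), which falls outside this excerpt.
 */
#include <stdio.h>

enum { SK_JOB_CONTINUE, SK_JOB_RETURN };

struct sk_cmd;
typedef int (*sk_step_fn)(struct sk_cmd *);

struct sk_cmd {
	sk_step_fn job_step;
};

static int sk_step_b(struct sk_cmd *c)
{
	(void)c;
	printf("step B: kicked off async work\n");
	return SK_JOB_RETURN;
}

static int sk_step_a(struct sk_cmd *c)
{
	printf("step A: done, chaining to B\n");
	c->job_step = sk_step_b;
	return SK_JOB_CONTINUE;
}

static void sk_run_job(struct sk_cmd *c)
{
	while (c->job_step(c) == SK_JOB_CONTINUE)
		;	/* keep stepping until a step goes asynchronous */
}

int main(void)
{
	struct sk_cmd c = { .job_step = sk_step_a };

	sk_run_job(&c);
	return 0;
}
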
7302
7303/**
7304 * ipr_set_sup_dev_dflt - Initialize a Set Supported Device buffer
7305 * @supported_dev: supported device struct
7306 * @vpids: vendor product id struct
7307 *
7308 * Return value:
7309 * none
7310 **/
7311static void ipr_set_sup_dev_dflt(struct ipr_supported_device *supported_dev,
7312 struct ipr_std_inq_vpids *vpids)
7313{
7314 memset(supported_dev, 0, sizeof(struct ipr_supported_device));
7315 memcpy(&supported_dev->vpids, vpids, sizeof(struct ipr_std_inq_vpids));
7316 supported_dev->num_records = 1;
7317 supported_dev->data_length =
7318 cpu_to_be16(sizeof(struct ipr_supported_device));
7319 supported_dev->reserved = 0;
7320}
7321
7322/**
7323 * ipr_set_supported_devs - Send Set Supported Devices for a device
7324 * @ipr_cmd: ipr command struct
7325 *
a32c055f 7326 * This function sends a Set Supported Devices command to the adapter.
1da177e4
LT
7327 *
7328 * Return value:
7329 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
7330 **/
7331static int ipr_set_supported_devs(struct ipr_cmnd *ipr_cmd)
7332{
7333 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7334 struct ipr_supported_device *supp_dev = &ioa_cfg->vpd_cbs->supp_dev;
1da177e4
LT
7335 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
7336 struct ipr_resource_entry *res = ipr_cmd->u.res;
7337
7338 ipr_cmd->job_step = ipr_ioa_reset_done;
7339
7340 list_for_each_entry_continue(res, &ioa_cfg->used_res_q, queue) {
e4fbf44e 7341 if (!ipr_is_scsi_disk(res))
1da177e4
LT
7342 continue;
7343
7344 ipr_cmd->u.res = res;
3e7ebdfa 7345 ipr_set_sup_dev_dflt(supp_dev, &res->std_inq_data.vpids);
1da177e4
LT
7346
7347 ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
7348 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
7349 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
7350
7351 ioarcb->cmd_pkt.cdb[0] = IPR_SET_SUPPORTED_DEVICES;
3e7ebdfa 7352 ioarcb->cmd_pkt.cdb[1] = IPR_SET_ALL_SUPPORTED_DEVICES;
1da177e4
LT
7353 ioarcb->cmd_pkt.cdb[7] = (sizeof(struct ipr_supported_device) >> 8) & 0xff;
7354 ioarcb->cmd_pkt.cdb[8] = sizeof(struct ipr_supported_device) & 0xff;
7355
a32c055f
WB
7356 ipr_init_ioadl(ipr_cmd,
7357 ioa_cfg->vpd_cbs_dma +
7358 offsetof(struct ipr_misc_cbs, supp_dev),
7359 sizeof(struct ipr_supported_device),
7360 IPR_IOADL_FLAGS_WRITE_LAST);
1da177e4
LT
7361
7362 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout,
7363 IPR_SET_SUP_DEVICE_TIMEOUT);
7364
3e7ebdfa
WB
7365 if (!ioa_cfg->sis64)
7366 ipr_cmd->job_step = ipr_set_supported_devs;
05a6538a 7367 LEAVE;
1da177e4
LT
7368 return IPR_RC_JOB_RETURN;
7369 }
7370
05a6538a 7371 LEAVE;
1da177e4
LT
7372 return IPR_RC_JOB_CONTINUE;
7373}
7374
7375/**
7376 * ipr_get_mode_page - Locate specified mode page
7377 * @mode_pages: mode page buffer
7378 * @page_code: page code to find
7379 * @len: minimum required length for mode page
7380 *
7381 * Return value:
7382 * pointer to mode page / NULL on failure
7383 **/
7384static void *ipr_get_mode_page(struct ipr_mode_pages *mode_pages,
7385 u32 page_code, u32 len)
7386{
7387 struct ipr_mode_page_hdr *mode_hdr;
7388 u32 page_length;
7389 u32 length;
7390
7391 if (!mode_pages || (mode_pages->hdr.length == 0))
7392 return NULL;
7393
7394 length = (mode_pages->hdr.length + 1) - 4 - mode_pages->hdr.block_desc_len;
7395 mode_hdr = (struct ipr_mode_page_hdr *)
7396 (mode_pages->data + mode_pages->hdr.block_desc_len);
7397
7398 while (length) {
7399 if (IPR_GET_MODE_PAGE_CODE(mode_hdr) == page_code) {
7400 if (mode_hdr->page_length >= (len - sizeof(struct ipr_mode_page_hdr)))
7401 return mode_hdr;
7402 break;
7403 } else {
7404 page_length = (sizeof(struct ipr_mode_page_hdr) +
7405 mode_hdr->page_length);
7406 length -= page_length;
7407 mode_hdr = (struct ipr_mode_page_hdr *)
7408 ((unsigned long)mode_hdr + page_length);
7409 }
7410 }
7411 return NULL;
7412}
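
/*
 * Editor's sketch (user-space C, not part of ipr.c): the walk performed
 * by ipr_get_mode_page() above, redone over a raw MODE SENSE(6) buffer --
 * skip the 4-byte parameter header plus block descriptors, then step page
 * by page using each page's own length field. Offsets follow my reading
 * of the SCSI spec; verify before reuse.
 */
#include <stdint.h>
#include <stdio.h>

static const uint8_t *sk_find_page(const uint8_t *buf, size_t buf_len,
				   uint8_t code)
{
	size_t off, end;

	if (buf_len < 4 || buf[0] == 0)
		return NULL;
	end = (size_t)buf[0] + 1;	/* byte 0: length of data following it */
	if (end > buf_len)
		end = buf_len;
	off = 4 + buf[3];		/* byte 3: block descriptor length */
	while (off + 2 <= end) {
		if ((buf[off] & 0x3f) == code)
			return &buf[off];
		off += 2 + buf[off + 1];	/* page header + page_length */
	}
	return NULL;
}

int main(void)
{
	/* hand-built: header (9 bytes follow), no block descriptors, page 0x28 */
	uint8_t buf[16] = { 9, 0, 0, 0, 0x28, 4, 1, 2, 3, 4 };

	printf("page 0x28 %sfound\n",
	       sk_find_page(buf, sizeof(buf), 0x28) ? "" : "not ");
	return 0;
}
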
7413
7414/**
7415 * ipr_check_term_power - Check for term power errors
7416 * @ioa_cfg: ioa config struct
7417 * @mode_pages: IOAFP mode pages buffer
7418 *
7419 * Check the IOAFP's mode page 28 for term power errors
7420 *
7421 * Return value:
7422 * nothing
7423 **/
7424static void ipr_check_term_power(struct ipr_ioa_cfg *ioa_cfg,
7425 struct ipr_mode_pages *mode_pages)
7426{
7427 int i;
7428 int entry_length;
7429 struct ipr_dev_bus_entry *bus;
7430 struct ipr_mode_page28 *mode_page;
7431
7432 mode_page = ipr_get_mode_page(mode_pages, 0x28,
7433 sizeof(struct ipr_mode_page28));
7434
7435 entry_length = mode_page->entry_length;
7436
7437 bus = mode_page->bus;
7438
7439 for (i = 0; i < mode_page->num_entries; i++) {
7440 if (bus->flags & IPR_SCSI_ATTR_NO_TERM_PWR) {
7441 dev_err(&ioa_cfg->pdev->dev,
7442 "Term power is absent on scsi bus %d\n",
7443 bus->res_addr.bus);
7444 }
7445
7446 bus = (struct ipr_dev_bus_entry *)((char *)bus + entry_length);
7447 }
7448}
7449
7450/**
7451 * ipr_scsi_bus_speed_limit - Limit the SCSI speed based on SES table
7452 * @ioa_cfg: ioa config struct
7453 *
7454 * Looks through the config table checking for SES devices. If
7455 * the SES device is in the SES table indicating a maximum SCSI
7456 * bus speed, the speed is limited for the bus.
7457 *
7458 * Return value:
7459 * none
7460 **/
7461static void ipr_scsi_bus_speed_limit(struct ipr_ioa_cfg *ioa_cfg)
7462{
7463 u32 max_xfer_rate;
7464 int i;
7465
7466 for (i = 0; i < IPR_MAX_NUM_BUSES; i++) {
7467 max_xfer_rate = ipr_get_max_scsi_speed(ioa_cfg, i,
7468 ioa_cfg->bus_attr[i].bus_width);
7469
7470 if (max_xfer_rate < ioa_cfg->bus_attr[i].max_xfer_rate)
7471 ioa_cfg->bus_attr[i].max_xfer_rate = max_xfer_rate;
7472 }
7473}
7474
7475/**
7476 * ipr_modify_ioafp_mode_page_28 - Modify IOAFP Mode Page 28
7477 * @ioa_cfg: ioa config struct
7478 * @mode_pages: mode page 28 buffer
7479 *
7480 * Updates mode page 28 based on driver configuration
7481 *
7482 * Return value:
7483 * none
7484 **/
7485static void ipr_modify_ioafp_mode_page_28(struct ipr_ioa_cfg *ioa_cfg,
203fa3fe 7486 struct ipr_mode_pages *mode_pages)
1da177e4
LT
7487{
7488 int i, entry_length;
7489 struct ipr_dev_bus_entry *bus;
7490 struct ipr_bus_attributes *bus_attr;
7491 struct ipr_mode_page28 *mode_page;
7492
7493 mode_page = ipr_get_mode_page(mode_pages, 0x28,
7494 sizeof(struct ipr_mode_page28));
7495
7496 entry_length = mode_page->entry_length;
7497
7498 /* Loop for each device bus entry */
7499 for (i = 0, bus = mode_page->bus;
7500 i < mode_page->num_entries;
7501 i++, bus = (struct ipr_dev_bus_entry *)((u8 *)bus + entry_length)) {
7502 if (bus->res_addr.bus > IPR_MAX_NUM_BUSES) {
7503 dev_err(&ioa_cfg->pdev->dev,
7504 "Invalid resource address reported: 0x%08X\n",
7505 IPR_GET_PHYS_LOC(bus->res_addr));
7506 continue;
7507 }
7508
7509 bus_attr = &ioa_cfg->bus_attr[i];
7510 bus->extended_reset_delay = IPR_EXTENDED_RESET_DELAY;
7511 bus->bus_width = bus_attr->bus_width;
7512 bus->max_xfer_rate = cpu_to_be32(bus_attr->max_xfer_rate);
7513 bus->flags &= ~IPR_SCSI_ATTR_QAS_MASK;
7514 if (bus_attr->qas_enabled)
7515 bus->flags |= IPR_SCSI_ATTR_ENABLE_QAS;
7516 else
7517 bus->flags |= IPR_SCSI_ATTR_DISABLE_QAS;
7518 }
7519}
7520
7521/**
7522 * ipr_build_mode_select - Build a mode select command
7523 * @ipr_cmd: ipr command struct
7524 * @res_handle: resource handle to send command to
7525 * @parm: Byte 2 of Mode Sense command
7526 * @dma_addr: DMA buffer address
7527 * @xfer_len: data transfer length
7528 *
7529 * Return value:
7530 * none
7531 **/
7532static void ipr_build_mode_select(struct ipr_cmnd *ipr_cmd,
a32c055f
WB
7533 __be32 res_handle, u8 parm,
7534 dma_addr_t dma_addr, u8 xfer_len)
1da177e4 7535{
1da177e4
LT
7536 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
7537
7538 ioarcb->res_handle = res_handle;
7539 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_SCSICDB;
7540 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
7541 ioarcb->cmd_pkt.cdb[0] = MODE_SELECT;
7542 ioarcb->cmd_pkt.cdb[1] = parm;
7543 ioarcb->cmd_pkt.cdb[4] = xfer_len;
7544
a32c055f 7545 ipr_init_ioadl(ipr_cmd, dma_addr, xfer_len, IPR_IOADL_FLAGS_WRITE_LAST);
1da177e4
LT
7546}
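
/*
 * Editor's sketch (user-space C, not part of ipr.c): the MODE SELECT(6)
 * CDB assembled by ipr_build_mode_select() above. The 0x11 its callers
 * pass as "parm" reads, per the standard bit layout, as PF (0x10, page
 * format) plus SP (0x01, save pages) -- noted here only as a reading aid.
 */
#include <stdint.h>
#include <stdio.h>

#define SK_MODE_SELECT6 0x15	/* SCSI opcode; matches the kernel's MODE_SELECT */

static void sk_mode_select6(uint8_t cdb[6], uint8_t parm, uint8_t len)
{
	cdb[0] = SK_MODE_SELECT6;
	cdb[1] = parm;	/* PF/SP bits */
	cdb[2] = 0;
	cdb[3] = 0;
	cdb[4] = len;	/* parameter list length */
	cdb[5] = 0;	/* control byte */
}

int main(void)
{
	uint8_t cdb[6];

	sk_mode_select6(cdb, 0x11, 24);
	printf("cdb[0]=0x%02x cdb[1]=0x%02x cdb[4]=%u\n", cdb[0], cdb[1], cdb[4]);
	return 0;
}
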
7547
7548/**
7549 * ipr_ioafp_mode_select_page28 - Issue Mode Select Page 28 to IOA
7550 * @ipr_cmd: ipr command struct
7551 *
7552 * This function sets up the SCSI bus attributes and sends
7553 * a Mode Select for Page 28 to activate them.
7554 *
7555 * Return value:
7556 * IPR_RC_JOB_RETURN
7557 **/
7558static int ipr_ioafp_mode_select_page28(struct ipr_cmnd *ipr_cmd)
7559{
7560 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7561 struct ipr_mode_pages *mode_pages = &ioa_cfg->vpd_cbs->mode_pages;
7562 int length;
7563
7564 ENTER;
4733804c
BK
7565 ipr_scsi_bus_speed_limit(ioa_cfg);
7566 ipr_check_term_power(ioa_cfg, mode_pages);
7567 ipr_modify_ioafp_mode_page_28(ioa_cfg, mode_pages);
7568 length = mode_pages->hdr.length + 1;
7569 mode_pages->hdr.length = 0;
1da177e4
LT
7570
7571 ipr_build_mode_select(ipr_cmd, cpu_to_be32(IPR_IOA_RES_HANDLE), 0x11,
7572 ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, mode_pages),
7573 length);
7574
f72919ec
WB
7575 ipr_cmd->job_step = ipr_set_supported_devs;
7576 ipr_cmd->u.res = list_entry(ioa_cfg->used_res_q.next,
7577 struct ipr_resource_entry, queue);
1da177e4
LT
7578 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
7579
7580 LEAVE;
7581 return IPR_RC_JOB_RETURN;
7582}
7583
7584/**
7585 * ipr_build_mode_sense - Builds a mode sense command
7586 * @ipr_cmd: ipr command struct
7587 * @res_handle: resource handle to send command to
7588 * @parm: Byte 2 of mode sense command
7589 * @dma_addr: DMA address of mode sense buffer
7590 * @xfer_len: Size of DMA buffer
7591 *
7592 * Return value:
7593 * none
7594 **/
7595static void ipr_build_mode_sense(struct ipr_cmnd *ipr_cmd,
7596 __be32 res_handle,
a32c055f 7597 u8 parm, dma_addr_t dma_addr, u8 xfer_len)
1da177e4 7598{
1da177e4
LT
7599 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
7600
7601 ioarcb->res_handle = res_handle;
7602 ioarcb->cmd_pkt.cdb[0] = MODE_SENSE;
7603 ioarcb->cmd_pkt.cdb[2] = parm;
7604 ioarcb->cmd_pkt.cdb[4] = xfer_len;
7605 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_SCSICDB;
7606
a32c055f 7607 ipr_init_ioadl(ipr_cmd, dma_addr, xfer_len, IPR_IOADL_FLAGS_READ_LAST);
1da177e4
LT
7608}
7609
dfed823e
BK
7610/**
7611 * ipr_reset_cmd_failed - Handle failure of IOA reset command
7612 * @ipr_cmd: ipr command struct
7613 *
7614 * This function handles the failure of an IOA bringup command.
7615 *
7616 * Return value:
7617 * IPR_RC_JOB_RETURN
7618 **/
7619static int ipr_reset_cmd_failed(struct ipr_cmnd *ipr_cmd)
7620{
7621 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
96d21f00 7622 u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
dfed823e
BK
7623
7624 dev_err(&ioa_cfg->pdev->dev,
7625 "0x%02X failed with IOASC: 0x%08X\n",
7626 ipr_cmd->ioarcb.cmd_pkt.cdb[0], ioasc);
7627
7628 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
05a6538a 7629 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
dfed823e
BK
7630 return IPR_RC_JOB_RETURN;
7631}
7632
7633/**
7634 * ipr_reset_mode_sense_failed - Handle failure of IOAFP mode sense
7635 * @ipr_cmd: ipr command struct
7636 *
7637 * This function handles the failure of a Mode Sense to the IOAFP.
7638 * Some adapters do not handle all mode pages.
7639 *
7640 * Return value:
7641 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
7642 **/
7643static int ipr_reset_mode_sense_failed(struct ipr_cmnd *ipr_cmd)
7644{
f72919ec 7645 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
96d21f00 7646 u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
dfed823e
BK
7647
7648 if (ioasc == IPR_IOASC_IR_INVALID_REQ_TYPE_OR_PKT) {
f72919ec
WB
7649 ipr_cmd->job_step = ipr_set_supported_devs;
7650 ipr_cmd->u.res = list_entry(ioa_cfg->used_res_q.next,
7651 struct ipr_resource_entry, queue);
dfed823e
BK
7652 return IPR_RC_JOB_CONTINUE;
7653 }
7654
7655 return ipr_reset_cmd_failed(ipr_cmd);
7656}
7657
1da177e4
LT
7658/**
7659 * ipr_ioafp_mode_sense_page28 - Issue Mode Sense Page 28 to IOA
7660 * @ipr_cmd: ipr command struct
7661 *
7662 * This function sends a Page 28 mode sense to the IOA to
7663 * retrieve SCSI bus attributes.
7664 *
7665 * Return value:
7666 * IPR_RC_JOB_RETURN
7667 **/
7668static int ipr_ioafp_mode_sense_page28(struct ipr_cmnd *ipr_cmd)
7669{
7670 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7671
7672 ENTER;
7673 ipr_build_mode_sense(ipr_cmd, cpu_to_be32(IPR_IOA_RES_HANDLE),
7674 0x28, ioa_cfg->vpd_cbs_dma +
7675 offsetof(struct ipr_misc_cbs, mode_pages),
7676 sizeof(struct ipr_mode_pages));
7677
7678 ipr_cmd->job_step = ipr_ioafp_mode_select_page28;
dfed823e 7679 ipr_cmd->job_step_failed = ipr_reset_mode_sense_failed;
1da177e4
LT
7680
7681 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
7682
7683 LEAVE;
7684 return IPR_RC_JOB_RETURN;
7685}
7686
ac09c349
BK
7687/**
7688 * ipr_ioafp_mode_select_page24 - Issue Mode Select to IOA
7689 * @ipr_cmd: ipr command struct
7690 *
7691 * This function enables dual IOA RAID support if possible.
7692 *
7693 * Return value:
7694 * IPR_RC_JOB_RETURN
7695 **/
7696static int ipr_ioafp_mode_select_page24(struct ipr_cmnd *ipr_cmd)
7697{
7698 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7699 struct ipr_mode_pages *mode_pages = &ioa_cfg->vpd_cbs->mode_pages;
7700 struct ipr_mode_page24 *mode_page;
7701 int length;
7702
7703 ENTER;
7704 mode_page = ipr_get_mode_page(mode_pages, 0x24,
7705 sizeof(struct ipr_mode_page24));
7706
7707 if (mode_page)
7708 mode_page->flags |= IPR_ENABLE_DUAL_IOA_AF;
7709
7710 length = mode_pages->hdr.length + 1;
7711 mode_pages->hdr.length = 0;
7712
7713 ipr_build_mode_select(ipr_cmd, cpu_to_be32(IPR_IOA_RES_HANDLE), 0x11,
7714 ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, mode_pages),
7715 length);
7716
7717 ipr_cmd->job_step = ipr_ioafp_mode_sense_page28;
7718 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
7719
7720 LEAVE;
7721 return IPR_RC_JOB_RETURN;
7722}
7723
7724/**
7725 * ipr_reset_mode_sense_page24_failed - Handle failure of IOAFP mode sense
7726 * @ipr_cmd: ipr command struct
7727 *
7728 * This function handles the failure of a Mode Sense to the IOAFP.
7729 * Some adapters do not handle all mode pages.
7730 *
7731 * Return value:
7732 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
7733 **/
7734static int ipr_reset_mode_sense_page24_failed(struct ipr_cmnd *ipr_cmd)
7735{
96d21f00 7736 u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
ac09c349
BK
7737
7738 if (ioasc == IPR_IOASC_IR_INVALID_REQ_TYPE_OR_PKT) {
7739 ipr_cmd->job_step = ipr_ioafp_mode_sense_page28;
7740 return IPR_RC_JOB_CONTINUE;
7741 }
7742
7743 return ipr_reset_cmd_failed(ipr_cmd);
7744}
7745
7746/**
7747 * ipr_ioafp_mode_sense_page24 - Issue Page 24 Mode Sense to IOA
7748 * @ipr_cmd: ipr command struct
7749 *
7750 * This function sends a mode sense to the IOA to retrieve
7751 * the IOA Advanced Function Control mode page.
7752 *
7753 * Return value:
7754 * IPR_RC_JOB_RETURN
7755 **/
7756static int ipr_ioafp_mode_sense_page24(struct ipr_cmnd *ipr_cmd)
7757{
7758 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7759
7760 ENTER;
7761 ipr_build_mode_sense(ipr_cmd, cpu_to_be32(IPR_IOA_RES_HANDLE),
7762 0x24, ioa_cfg->vpd_cbs_dma +
7763 offsetof(struct ipr_misc_cbs, mode_pages),
7764 sizeof(struct ipr_mode_pages));
7765
7766 ipr_cmd->job_step = ipr_ioafp_mode_select_page24;
7767 ipr_cmd->job_step_failed = ipr_reset_mode_sense_page24_failed;
7768
7769 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
7770
7771 LEAVE;
7772 return IPR_RC_JOB_RETURN;
7773}
7774
1da177e4
LT
7775/**
7776 * ipr_init_res_table - Initialize the resource table
7777 * @ipr_cmd: ipr command struct
7778 *
7779 * This function looks through the existing resource table, comparing
7780 * it with the config table. This function will take care of old/new
7781 * devices and schedule adding/removing them from the mid-layer
7782 * as appropriate.
7783 *
7784 * Return value:
7785 * IPR_RC_JOB_CONTINUE
7786 **/
7787static int ipr_init_res_table(struct ipr_cmnd *ipr_cmd)
7788{
7789 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7790 struct ipr_resource_entry *res, *temp;
3e7ebdfa
WB
7791 struct ipr_config_table_entry_wrapper cfgtew;
7792 int entries, found, flag, i;
1da177e4
LT
7793 LIST_HEAD(old_res);
7794
7795 ENTER;
3e7ebdfa
WB
7796 if (ioa_cfg->sis64)
7797 flag = ioa_cfg->u.cfg_table64->hdr64.flags;
7798 else
7799 flag = ioa_cfg->u.cfg_table->hdr.flags;
7800
7801 if (flag & IPR_UCODE_DOWNLOAD_REQ)
1da177e4
LT
7802 dev_err(&ioa_cfg->pdev->dev, "Microcode download required\n");
7803
7804 list_for_each_entry_safe(res, temp, &ioa_cfg->used_res_q, queue)
7805 list_move_tail(&res->queue, &old_res);
7806
3e7ebdfa 7807 if (ioa_cfg->sis64)
438b0331 7808 entries = be16_to_cpu(ioa_cfg->u.cfg_table64->hdr64.num_entries);
3e7ebdfa
WB
7809 else
7810 entries = ioa_cfg->u.cfg_table->hdr.num_entries;
7811
7812 for (i = 0; i < entries; i++) {
7813 if (ioa_cfg->sis64)
7814 cfgtew.u.cfgte64 = &ioa_cfg->u.cfg_table64->dev[i];
7815 else
7816 cfgtew.u.cfgte = &ioa_cfg->u.cfg_table->dev[i];
1da177e4
LT
7817 found = 0;
7818
7819 list_for_each_entry_safe(res, temp, &old_res, queue) {
3e7ebdfa 7820 if (ipr_is_same_device(res, &cfgtew)) {
1da177e4
LT
7821 list_move_tail(&res->queue, &ioa_cfg->used_res_q);
7822 found = 1;
7823 break;
7824 }
7825 }
7826
7827 if (!found) {
7828 if (list_empty(&ioa_cfg->free_res_q)) {
7829 dev_err(&ioa_cfg->pdev->dev, "Too many devices attached\n");
7830 break;
7831 }
7832
7833 found = 1;
7834 res = list_entry(ioa_cfg->free_res_q.next,
7835 struct ipr_resource_entry, queue);
7836 list_move_tail(&res->queue, &ioa_cfg->used_res_q);
3e7ebdfa 7837 ipr_init_res_entry(res, &cfgtew);
1da177e4 7838 res->add_to_ml = 1;
56115598
WB
7839 } else if (res->sdev && (ipr_is_vset_device(res) || ipr_is_scsi_disk(res)))
7840 res->sdev->allow_restart = 1;
1da177e4
LT
7841
7842 if (found)
3e7ebdfa 7843 ipr_update_res_entry(res, &cfgtew);
1da177e4
LT
7844 }
7845
7846 list_for_each_entry_safe(res, temp, &old_res, queue) {
7847 if (res->sdev) {
7848 res->del_from_ml = 1;
3e7ebdfa 7849 res->res_handle = IPR_INVALID_RES_HANDLE;
1da177e4 7850 list_move_tail(&res->queue, &ioa_cfg->used_res_q);
1da177e4
LT
7851 }
7852 }
7853
3e7ebdfa
WB
7854 list_for_each_entry_safe(res, temp, &old_res, queue) {
7855 ipr_clear_res_target(res);
7856 list_move_tail(&res->queue, &ioa_cfg->free_res_q);
7857 }
7858
ac09c349
BK
7859 if (ioa_cfg->dual_raid && ipr_dual_ioa_raid)
7860 ipr_cmd->job_step = ipr_ioafp_mode_sense_page24;
7861 else
7862 ipr_cmd->job_step = ipr_ioafp_mode_sense_page28;
1da177e4
LT
7863
7864 LEAVE;
7865 return IPR_RC_JOB_CONTINUE;
7866}
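
/*
 * Editor's sketch (user-space C, not part of ipr.c): the reconcile
 * pattern ipr_init_res_table() above applies -- park every known device
 * in an "old" set, walk the freshly fetched config table matching
 * entries back out, flag unmatched table entries as new, and whatever is
 * still left in "old" is gone and gets flagged for removal. A flat array
 * stands in for the driver's list_head queues.
 */
#include <stdio.h>

#define SK_MAX 8

struct sk_res {
	int id;		/* stand-in for the ipr_is_same_device() comparison */
	int in_old;
};

static struct sk_res *sk_match(struct sk_res *old, int n, int id)
{
	for (int i = 0; i < n; i++)
		if (old[i].in_old && old[i].id == id)
			return &old[i];
	return NULL;
}

int main(void)
{
	struct sk_res old[SK_MAX] = {
		{ .id = 1, .in_old = 1 },	/* still present */
		{ .id = 2, .in_old = 1 },	/* vanished from the new table */
	};
	int cfg_table[] = { 1, 3 };		/* device 3 is brand new */
	int n = sizeof(cfg_table) / sizeof(cfg_table[0]);

	for (int i = 0; i < n; i++) {
		struct sk_res *r = sk_match(old, SK_MAX, cfg_table[i]);

		if (r)
			r->in_old = 0;	/* matched: moved back to used set */
		else
			printf("device %d: add_to_ml\n", cfg_table[i]);
	}
	for (int i = 0; i < SK_MAX; i++)
		if (old[i].in_old)
			printf("device %d: del_from_ml\n", old[i].id);
	return 0;
}
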
7867
7868/**
7869 * ipr_ioafp_query_ioa_cfg - Send a Query IOA Config to the adapter.
7870 * @ipr_cmd: ipr command struct
7871 *
7872 * This function sends a Query IOA Configuration command
7873 * to the adapter to retrieve the IOA configuration table.
7874 *
7875 * Return value:
7876 * IPR_RC_JOB_RETURN
7877 **/
7878static int ipr_ioafp_query_ioa_cfg(struct ipr_cmnd *ipr_cmd)
7879{
7880 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7881 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
1da177e4 7882 struct ipr_inquiry_page3 *ucode_vpd = &ioa_cfg->vpd_cbs->page3_data;
ac09c349 7883 struct ipr_inquiry_cap *cap = &ioa_cfg->vpd_cbs->cap;
1da177e4
LT
7884
7885 ENTER;
ac09c349
BK
7886 if (cap->cap & IPR_CAP_DUAL_IOA_RAID)
7887 ioa_cfg->dual_raid = 1;
1da177e4
LT
7888 dev_info(&ioa_cfg->pdev->dev, "Adapter firmware version: %02X%02X%02X%02X\n",
7889 ucode_vpd->major_release, ucode_vpd->card_type,
7890 ucode_vpd->minor_release[0], ucode_vpd->minor_release[1]);
7891 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
7892 ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
7893
7894 ioarcb->cmd_pkt.cdb[0] = IPR_QUERY_IOA_CONFIG;
438b0331 7895 ioarcb->cmd_pkt.cdb[6] = (ioa_cfg->cfg_table_size >> 16) & 0xff;
3e7ebdfa
WB
7896 ioarcb->cmd_pkt.cdb[7] = (ioa_cfg->cfg_table_size >> 8) & 0xff;
7897 ioarcb->cmd_pkt.cdb[8] = ioa_cfg->cfg_table_size & 0xff;
1da177e4 7898
3e7ebdfa 7899 ipr_init_ioadl(ipr_cmd, ioa_cfg->cfg_table_dma, ioa_cfg->cfg_table_size,
a32c055f 7900 IPR_IOADL_FLAGS_READ_LAST);
1da177e4
LT
7901
7902 ipr_cmd->job_step = ipr_init_res_table;
7903
7904 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
7905
7906 LEAVE;
7907 return IPR_RC_JOB_RETURN;
7908}
7909
1a47af26
GKB
7910static int ipr_ioa_service_action_failed(struct ipr_cmnd *ipr_cmd)
7911{
7912 u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
7913
7914 if (ioasc == IPR_IOASC_IR_INVALID_REQ_TYPE_OR_PKT)
7915 return IPR_RC_JOB_CONTINUE;
7916
7917 return ipr_reset_cmd_failed(ipr_cmd);
7918}
7919
7920static void ipr_build_ioa_service_action(struct ipr_cmnd *ipr_cmd,
7921 __be32 res_handle, u8 sa_code)
7922{
7923 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
7924
7925 ioarcb->res_handle = res_handle;
7926 ioarcb->cmd_pkt.cdb[0] = IPR_IOA_SERVICE_ACTION;
7927 ioarcb->cmd_pkt.cdb[1] = sa_code;
7928 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
7929}
7930
7931/**
7932 * ipr_ioafp_set_caching_parameters - Issue Set Cache parameters service action
7933 * @ipr_cmd: ipr command struct
7934 *
7935 * Return value:
7936 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
7937 **/
7938static int ipr_ioafp_set_caching_parameters(struct ipr_cmnd *ipr_cmd)
7939{
7940 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
7941 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7942 struct ipr_inquiry_pageC4 *pageC4 = &ioa_cfg->vpd_cbs->pageC4_data;
7943
7944 ENTER;
7945
7946 ipr_cmd->job_step = ipr_ioafp_query_ioa_cfg;
7947
7948 if (pageC4->cache_cap[0] & IPR_CAP_SYNC_CACHE) {
7949 ipr_build_ioa_service_action(ipr_cmd,
7950 cpu_to_be32(IPR_IOA_RES_HANDLE),
7951 IPR_IOA_SA_CHANGE_CACHE_PARAMS);
7952
7953 ioarcb->cmd_pkt.cdb[2] = 0x40;
7954
7955 ipr_cmd->job_step_failed = ipr_ioa_service_action_failed;
7956 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout,
7957 IPR_SET_SUP_DEVICE_TIMEOUT);
7958
7959 LEAVE;
7960 return IPR_RC_JOB_RETURN;
7961 }
7962
7963 LEAVE;
7964 return IPR_RC_JOB_CONTINUE;
7965}
7966
1da177e4
LT
7967/**
7968 * ipr_ioafp_inquiry - Send an Inquiry to the adapter.
7969 * @ipr_cmd: ipr command struct
 * @flags: inquiry flags (CDB byte 1)
 * @page: page code
 * @dma_addr: DMA address of the inquiry buffer
 * @xfer_len: transfer length
7970 *
7971 * This utility function sends an inquiry to the adapter.
7972 *
7973 * Return value:
7974 * none
7975 **/
7976static void ipr_ioafp_inquiry(struct ipr_cmnd *ipr_cmd, u8 flags, u8 page,
a32c055f 7977 dma_addr_t dma_addr, u8 xfer_len)
1da177e4
LT
7978{
7979 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
1da177e4
LT
7980
7981 ENTER;
7982 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_SCSICDB;
7983 ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
7984
7985 ioarcb->cmd_pkt.cdb[0] = INQUIRY;
7986 ioarcb->cmd_pkt.cdb[1] = flags;
7987 ioarcb->cmd_pkt.cdb[2] = page;
7988 ioarcb->cmd_pkt.cdb[4] = xfer_len;
7989
a32c055f 7990 ipr_init_ioadl(ipr_cmd, dma_addr, xfer_len, IPR_IOADL_FLAGS_READ_LAST);
1da177e4
LT
7991
7992 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
7993 LEAVE;
7994}
7995
62275040
BK
7996/**
7997 * ipr_inquiry_page_supported - Is the given inquiry page supported
7998 * @page0: inquiry page 0 buffer
7999 * @page: page code.
8000 *
8001 * This function determines if the specified inquiry page is supported.
8002 *
8003 * Return value:
8004 * 1 if page is supported / 0 if not
8005 **/
8006static int ipr_inquiry_page_supported(struct ipr_inquiry_page0 *page0, u8 page)
8007{
8008 int i;
8009
8010 for (i = 0; i < min_t(u8, page0->len, IPR_INQUIRY_PAGE0_ENTRIES); i++)
8011 if (page0->page[i] == page)
8012 return 1;
8013
8014 return 0;
8015}
8016
1021b3ff
GKB
8017/**
8018 * ipr_ioafp_pageC4_inquiry - Send a Page 0xC4 Inquiry to the adapter.
8019 * @ipr_cmd: ipr command struct
8020 *
8021 * This function sends a Page 0xC4 inquiry to the adapter
8022 * to retrieve software VPD information.
8023 *
8024 * Return value:
8025 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
8026 **/
8027static int ipr_ioafp_pageC4_inquiry(struct ipr_cmnd *ipr_cmd)
8028{
8029 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8030 struct ipr_inquiry_page0 *page0 = &ioa_cfg->vpd_cbs->page0_data;
8031 struct ipr_inquiry_pageC4 *pageC4 = &ioa_cfg->vpd_cbs->pageC4_data;
8032
8033 ENTER;
1a47af26 8034 ipr_cmd->job_step = ipr_ioafp_set_caching_parameters;
1021b3ff
GKB
8035 memset(pageC4, 0, sizeof(*pageC4));
8036
8037 if (ipr_inquiry_page_supported(page0, 0xC4)) {
8038 ipr_ioafp_inquiry(ipr_cmd, 1, 0xC4,
8039 (ioa_cfg->vpd_cbs_dma
8040 + offsetof(struct ipr_misc_cbs,
8041 pageC4_data)),
8042 sizeof(struct ipr_inquiry_pageC4));
8043 return IPR_RC_JOB_RETURN;
8044 }
8045
8046 LEAVE;
8047 return IPR_RC_JOB_CONTINUE;
8048}
8049
ac09c349
BK
8050/**
8051 * ipr_ioafp_cap_inquiry - Send a Page 0xD0 Inquiry to the adapter.
8052 * @ipr_cmd: ipr command struct
8053 *
8054 * This function sends a Page 0xD0 inquiry to the adapter
8055 * to retrieve adapter capabilities.
8056 *
8057 * Return value:
8058 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
8059 **/
8060static int ipr_ioafp_cap_inquiry(struct ipr_cmnd *ipr_cmd)
8061{
8062 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8063 struct ipr_inquiry_page0 *page0 = &ioa_cfg->vpd_cbs->page0_data;
8064 struct ipr_inquiry_cap *cap = &ioa_cfg->vpd_cbs->cap;
8065
8066 ENTER;
1021b3ff 8067 ipr_cmd->job_step = ipr_ioafp_pageC4_inquiry;
ac09c349
BK
8068 memset(cap, 0, sizeof(*cap));
8069
8070 if (ipr_inquiry_page_supported(page0, 0xD0)) {
8071 ipr_ioafp_inquiry(ipr_cmd, 1, 0xD0,
8072 ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, cap),
8073 sizeof(struct ipr_inquiry_cap));
8074 return IPR_RC_JOB_RETURN;
8075 }
8076
8077 LEAVE;
8078 return IPR_RC_JOB_CONTINUE;
8079}
8080
1da177e4
LT
8081/**
8082 * ipr_ioafp_page3_inquiry - Send a Page 3 Inquiry to the adapter.
8083 * @ipr_cmd: ipr command struct
8084 *
8085 * This function sends a Page 3 inquiry to the adapter
8086 * to retrieve software VPD information.
8087 *
8088 * Return value:
8089 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
8090 **/
8091static int ipr_ioafp_page3_inquiry(struct ipr_cmnd *ipr_cmd)
62275040
BK
8092{
8093 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
62275040
BK
8094
8095 ENTER;
8096
ac09c349 8097 ipr_cmd->job_step = ipr_ioafp_cap_inquiry;
62275040
BK
8098
8099 ipr_ioafp_inquiry(ipr_cmd, 1, 3,
8100 ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, page3_data),
8101 sizeof(struct ipr_inquiry_page3));
8102
8103 LEAVE;
8104 return IPR_RC_JOB_RETURN;
8105}
8106
8107/**
8108 * ipr_ioafp_page0_inquiry - Send a Page 0 Inquiry to the adapter.
8109 * @ipr_cmd: ipr command struct
8110 *
8111 * This function sends a Page 0 inquiry to the adapter
8112 * to retrieve supported inquiry pages.
8113 *
8114 * Return value:
8115 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
8116 **/
8117static int ipr_ioafp_page0_inquiry(struct ipr_cmnd *ipr_cmd)
1da177e4
LT
8118{
8119 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8120 char type[5];
8121
8122 ENTER;
8123
8124 /* Grab the type out of the VPD and store it away */
8125 memcpy(type, ioa_cfg->vpd_cbs->ioa_vpd.std_inq_data.vpids.product_id, 4);
8126 type[4] = '\0';
8127 ioa_cfg->type = simple_strtoul((char *)type, NULL, 16);
8128
f688f96d
BK
8129 if (ipr_invalid_adapter(ioa_cfg)) {
8130 dev_err(&ioa_cfg->pdev->dev,
8131 "Adapter not supported in this hardware configuration.\n");
8132
8133 if (!ipr_testmode) {
8134 ioa_cfg->reset_retries += IPR_NUM_RESET_RELOAD_RETRIES;
8135 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
8136 list_add_tail(&ipr_cmd->queue,
8137 &ioa_cfg->hrrq->hrrq_free_q);
8138 return IPR_RC_JOB_RETURN;
8139 }
8140 }
8141
62275040 8142 ipr_cmd->job_step = ipr_ioafp_page3_inquiry;
1da177e4 8143
62275040
BK
8144 ipr_ioafp_inquiry(ipr_cmd, 1, 0,
8145 ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, page0_data),
8146 sizeof(struct ipr_inquiry_page0));
1da177e4
LT
8147
8148 LEAVE;
8149 return IPR_RC_JOB_RETURN;
8150}
8151
8152/**
8153 * ipr_ioafp_std_inquiry - Send a Standard Inquiry to the adapter.
8154 * @ipr_cmd: ipr command struct
8155 *
8156 * This function sends a standard inquiry to the adapter.
8157 *
8158 * Return value:
8159 * IPR_RC_JOB_RETURN
8160 **/
8161static int ipr_ioafp_std_inquiry(struct ipr_cmnd *ipr_cmd)
8162{
8163 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8164
8165 ENTER;
62275040 8166 ipr_cmd->job_step = ipr_ioafp_page0_inquiry;
1da177e4
LT
8167
8168 ipr_ioafp_inquiry(ipr_cmd, 0, 0,
8169 ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, ioa_vpd),
8170 sizeof(struct ipr_ioa_vpd));
8171
8172 LEAVE;
8173 return IPR_RC_JOB_RETURN;
8174}
8175
8176/**
214777ba 8177 * ipr_ioafp_identify_hrrq - Send Identify Host RRQ.
1da177e4
LT
8178 * @ipr_cmd: ipr command struct
8179 *
8180 * This function sends an Identify Host Request Response Queue
8181 * command to establish the HRRQ with the adapter.
8182 *
8183 * Return value:
8184 * IPR_RC_JOB_RETURN
8185 **/
214777ba 8186static int ipr_ioafp_identify_hrrq(struct ipr_cmnd *ipr_cmd)
1da177e4
LT
8187{
8188 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8189 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
05a6538a 8190 struct ipr_hrr_queue *hrrq;
1da177e4
LT
8191
8192 ENTER;
05a6538a 8193 ipr_cmd->job_step = ipr_ioafp_std_inquiry;
87adbe08
BK
8194 if (ioa_cfg->identify_hrrq_index == 0)
8195 dev_info(&ioa_cfg->pdev->dev, "Starting IOA initialization sequence.\n");
1da177e4 8196
56d6aa33 8197 if (ioa_cfg->identify_hrrq_index < ioa_cfg->hrrq_num) {
8198 hrrq = &ioa_cfg->hrrq[ioa_cfg->identify_hrrq_index];
1da177e4 8199
05a6538a 8200 ioarcb->cmd_pkt.cdb[0] = IPR_ID_HOST_RR_Q;
8201 ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
1da177e4 8202
05a6538a 8203 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
8204 if (ioa_cfg->sis64)
8205 ioarcb->cmd_pkt.cdb[1] = 0x1;
214777ba 8206
05a6538a 8207 if (ioa_cfg->nvectors == 1)
8208 ioarcb->cmd_pkt.cdb[1] &= ~IPR_ID_HRRQ_SELE_ENABLE;
8209 else
8210 ioarcb->cmd_pkt.cdb[1] |= IPR_ID_HRRQ_SELE_ENABLE;
8211
8212 ioarcb->cmd_pkt.cdb[2] =
8213 ((u64) hrrq->host_rrq_dma >> 24) & 0xff;
8214 ioarcb->cmd_pkt.cdb[3] =
8215 ((u64) hrrq->host_rrq_dma >> 16) & 0xff;
8216 ioarcb->cmd_pkt.cdb[4] =
8217 ((u64) hrrq->host_rrq_dma >> 8) & 0xff;
8218 ioarcb->cmd_pkt.cdb[5] =
8219 ((u64) hrrq->host_rrq_dma) & 0xff;
8220 ioarcb->cmd_pkt.cdb[7] =
8221 ((sizeof(u32) * hrrq->size) >> 8) & 0xff;
8222 ioarcb->cmd_pkt.cdb[8] =
8223 (sizeof(u32) * hrrq->size) & 0xff;
8224
8225 if (ioarcb->cmd_pkt.cdb[1] & IPR_ID_HRRQ_SELE_ENABLE)
56d6aa33 8226 ioarcb->cmd_pkt.cdb[9] =
8227 ioa_cfg->identify_hrrq_index;
1da177e4 8228
05a6538a 8229 if (ioa_cfg->sis64) {
8230 ioarcb->cmd_pkt.cdb[10] =
8231 ((u64) hrrq->host_rrq_dma >> 56) & 0xff;
8232 ioarcb->cmd_pkt.cdb[11] =
8233 ((u64) hrrq->host_rrq_dma >> 48) & 0xff;
8234 ioarcb->cmd_pkt.cdb[12] =
8235 ((u64) hrrq->host_rrq_dma >> 40) & 0xff;
8236 ioarcb->cmd_pkt.cdb[13] =
8237 ((u64) hrrq->host_rrq_dma >> 32) & 0xff;
8238 }
8239
8240 if (ioarcb->cmd_pkt.cdb[1] & IPR_ID_HRRQ_SELE_ENABLE)
56d6aa33 8241 ioarcb->cmd_pkt.cdb[14] =
8242 ioa_cfg->identify_hrrq_index;
05a6538a 8243
8244 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout,
8245 IPR_INTERNAL_TIMEOUT);
8246
56d6aa33 8247 if (++ioa_cfg->identify_hrrq_index < ioa_cfg->hrrq_num)
8248 ipr_cmd->job_step = ipr_ioafp_identify_hrrq;
05a6538a 8249
8250 LEAVE;
8251 return IPR_RC_JOB_RETURN;
05a6538a 8252 }
8253
1da177e4 8254 LEAVE;
05a6538a 8255 return IPR_RC_JOB_CONTINUE;
1da177e4
LT
8256}
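
/*
 * Editor's sketch (user-space C, not part of ipr.c): the byte packing in
 * ipr_ioafp_identify_hrrq() above, which splits the host RRQ DMA address
 * big-endian across CDB bytes 2..5 (low 32 bits) and, for SIS-64
 * adapters, bytes 10..13 (high 32 bits).
 */
#include <stdint.h>
#include <stdio.h>

static void sk_pack_hrrq_addr(uint8_t cdb[16], uint64_t dma)
{
	cdb[2] = (dma >> 24) & 0xff;
	cdb[3] = (dma >> 16) & 0xff;
	cdb[4] = (dma >> 8) & 0xff;
	cdb[5] = dma & 0xff;
	/* high word, consumed only when the adapter speaks 64-bit SIS */
	cdb[10] = (dma >> 56) & 0xff;
	cdb[11] = (dma >> 48) & 0xff;
	cdb[12] = (dma >> 40) & 0xff;
	cdb[13] = (dma >> 32) & 0xff;
}

int main(void)
{
	uint8_t cdb[16] = { 0 };

	sk_pack_hrrq_addr(cdb, 0x1122334455667788ULL);
	printf("cdb[2..5]:   %02x %02x %02x %02x\n",
	       cdb[2], cdb[3], cdb[4], cdb[5]);
	printf("cdb[10..13]: %02x %02x %02x %02x\n",
	       cdb[10], cdb[11], cdb[12], cdb[13]);
	return 0;
}
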
8257
8258/**
8259 * ipr_reset_timer_done - Adapter reset timer function
8260 * @ipr_cmd: ipr command struct
8261 *
8262 * Description: This function is used in adapter reset processing
8263 * for timing events. If the reset_cmd pointer in the IOA
8264 * config struct is not this adapter's, we are doing nested
8265 * resets and fail_all_ops will take care of freeing the
8266 * command block.
8267 *
8268 * Return value:
8269 * none
8270 **/
738c6ec5 8271static void ipr_reset_timer_done(struct timer_list *t)
1da177e4 8272{
738c6ec5 8273 struct ipr_cmnd *ipr_cmd = from_timer(ipr_cmd, t, timer);
1da177e4
LT
8274 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8275 unsigned long lock_flags = 0;
8276
8277 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
8278
8279 if (ioa_cfg->reset_cmd == ipr_cmd) {
8280 list_del(&ipr_cmd->queue);
8281 ipr_cmd->done(ipr_cmd);
8282 }
8283
8284 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
8285}
8286
8287/**
8288 * ipr_reset_start_timer - Start a timer for adapter reset job
8289 * @ipr_cmd: ipr command struct
8290 * @timeout: timeout value
8291 *
8292 * Description: This function is used in adapter reset processing
8293 * for timing events. If the reset_cmd pointer in the IOA
8294 * config struct is not this adapter's, we are doing nested
8295 * resets and fail_all_ops will take care of freeing the
8296 * command block.
8297 *
8298 * Return value:
8299 * none
8300 **/
8301static void ipr_reset_start_timer(struct ipr_cmnd *ipr_cmd,
8302 unsigned long timeout)
8303{
05a6538a 8304
8305 ENTER;
8306 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_pending_q);
1da177e4
LT
8307 ipr_cmd->done = ipr_reset_ioa_job;
8308
1da177e4 8309 ipr_cmd->timer.expires = jiffies + timeout;
841b86f3 8310 ipr_cmd->timer.function = ipr_reset_timer_done;
1da177e4
LT
8311 add_timer(&ipr_cmd->timer);
8312}
8313
8314/**
8315 * ipr_init_ioa_mem - Initialize ioa_cfg control block
8316 * @ioa_cfg: ioa cfg struct
8317 *
8318 * Return value:
8319 * nothing
8320 **/
8321static void ipr_init_ioa_mem(struct ipr_ioa_cfg *ioa_cfg)
8322{
05a6538a 8323 struct ipr_hrr_queue *hrrq;
1da177e4 8324
05a6538a 8325 for_each_hrrq(hrrq, ioa_cfg) {
56d6aa33 8326 spin_lock(&hrrq->_lock);
05a6538a 8327 memset(hrrq->host_rrq, 0, sizeof(u32) * hrrq->size);
8328
8329 /* Initialize Host RRQ pointers */
8330 hrrq->hrrq_start = hrrq->host_rrq;
8331 hrrq->hrrq_end = &hrrq->host_rrq[hrrq->size - 1];
8332 hrrq->hrrq_curr = hrrq->hrrq_start;
8333 hrrq->toggle_bit = 1;
56d6aa33 8334 spin_unlock(&hrrq->_lock);
05a6538a 8335 }
56d6aa33 8336 wmb();
05a6538a 8337
56d6aa33 8338 ioa_cfg->identify_hrrq_index = 0;
8339 if (ioa_cfg->hrrq_num == 1)
8340 atomic_set(&ioa_cfg->hrrq_index, 0);
8341 else
8342 atomic_set(&ioa_cfg->hrrq_index, 1);
1da177e4
LT
8343
8344 /* Zero out config table */
3e7ebdfa 8345 memset(ioa_cfg->u.cfg_table, 0, ioa_cfg->cfg_table_size);
1da177e4
LT
8346}
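
/*
 * Editor's sketch (user-space C, not part of ipr.c): why
 * ipr_init_ioa_mem() above resets hrrq_curr and toggle_bit together. A
 * response-queue consumer accepts an entry only when its ownership bit
 * matches the expected toggle, and flips the expectation on wrap-around,
 * so stale entries from the previous lap are never re-consumed. The
 * consume loop is a generic rendition; the driver's interrupt-path code
 * is outside this excerpt.
 */
#include <stdint.h>
#include <stdio.h>

#define SK_RING 4
#define SK_TOGGLE 0x1u

struct sk_ring {
	uint32_t slot[SK_RING];
	uint32_t *start, *end, *curr;
	uint32_t toggle;
};

static void sk_ring_init(struct sk_ring *r)
{
	r->start = r->slot;
	r->end = &r->slot[SK_RING - 1];
	r->curr = r->start;
	r->toggle = 1;
}

static int sk_ring_consume(struct sk_ring *r, uint32_t *out)
{
	if ((*r->curr & SK_TOGGLE) != r->toggle)
		return 0;		/* nothing new posted here yet */
	*out = *r->curr;
	if (r->curr == r->end) {	/* wrap: expect the flipped bit */
		r->curr = r->start;
		r->toggle ^= 1u;
	} else {
		r->curr++;
	}
	return 1;
}

int main(void)
{
	static struct sk_ring r;	/* static: slots start zeroed */
	uint32_t v;

	sk_ring_init(&r);
	r.slot[0] = 0x10 | 1;		/* producer posts one entry */
	while (sk_ring_consume(&r, &v))
		printf("consumed 0x%x\n", v);
	return 0;
}
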
8347
214777ba
WB
8348/**
8349 * ipr_reset_next_stage - Process IPL stage change based on feedback register.
8350 * @ipr_cmd: ipr command struct
8351 *
8352 * Return value:
8353 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
8354 **/
8355static int ipr_reset_next_stage(struct ipr_cmnd *ipr_cmd)
8356{
8357 unsigned long stage, stage_time;
8358 u32 feedback;
8359 volatile u32 int_reg;
8360 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8361 u64 maskval = 0;
8362
8363 feedback = readl(ioa_cfg->regs.init_feedback_reg);
8364 stage = feedback & IPR_IPL_INIT_STAGE_MASK;
8365 stage_time = feedback & IPR_IPL_INIT_STAGE_TIME_MASK;
8366
8367 ipr_dbg("IPL stage = 0x%lx, IPL stage time = %ld\n", stage, stage_time);
8368
8369 /* sanity check the stage_time value */
438b0331
WB
8370 if (stage_time == 0)
8371 stage_time = IPR_IPL_INIT_DEFAULT_STAGE_TIME;
8372 else if (stage_time < IPR_IPL_INIT_MIN_STAGE_TIME)
214777ba
WB
8373 stage_time = IPR_IPL_INIT_MIN_STAGE_TIME;
8374 else if (stage_time > IPR_LONG_OPERATIONAL_TIMEOUT)
8375 stage_time = IPR_LONG_OPERATIONAL_TIMEOUT;
8376
8377 if (stage == IPR_IPL_INIT_STAGE_UNKNOWN) {
8378 writel(IPR_PCII_IPL_STAGE_CHANGE, ioa_cfg->regs.set_interrupt_mask_reg);
8379 int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
8380 stage_time = ioa_cfg->transop_timeout;
8381 ipr_cmd->job_step = ipr_ioafp_identify_hrrq;
8382 } else if (stage == IPR_IPL_INIT_STAGE_TRANSOP) {
1df79ca4
WB
8383 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg32);
8384 if (int_reg & IPR_PCII_IOA_TRANS_TO_OPER) {
8385 ipr_cmd->job_step = ipr_ioafp_identify_hrrq;
8386 maskval = IPR_PCII_IPL_STAGE_CHANGE;
8387 maskval = (maskval << 32) | IPR_PCII_IOA_TRANS_TO_OPER;
8388 writeq(maskval, ioa_cfg->regs.set_interrupt_mask_reg);
8389 int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
8390 return IPR_RC_JOB_CONTINUE;
8391 }
214777ba
WB
8392 }
8393
214777ba 8394 ipr_cmd->timer.expires = jiffies + stage_time * HZ;
841b86f3 8395 ipr_cmd->timer.function = ipr_oper_timeout;
214777ba
WB
8396 ipr_cmd->done = ipr_reset_ioa_job;
8397 add_timer(&ipr_cmd->timer);
05a6538a 8398
8399 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_pending_q);
214777ba
WB
8400
8401 return IPR_RC_JOB_RETURN;
8402}
8403
1da177e4
LT
8404/**
8405 * ipr_reset_enable_ioa - Enable the IOA following a reset.
8406 * @ipr_cmd: ipr command struct
8407 *
8408 * This function reinitializes some control blocks and
8409 * enables destructive diagnostics on the adapter.
8410 *
8411 * Return value:
 8412 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
8413 **/
8414static int ipr_reset_enable_ioa(struct ipr_cmnd *ipr_cmd)
8415{
8416 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8417 volatile u32 int_reg;
7be96900 8418 volatile u64 maskval;
56d6aa33 8419 int i;
1da177e4
LT
8420
8421 ENTER;
214777ba 8422 ipr_cmd->job_step = ipr_ioafp_identify_hrrq;
1da177e4
LT
8423 ipr_init_ioa_mem(ioa_cfg);
8424
56d6aa33 8425 for (i = 0; i < ioa_cfg->hrrq_num; i++) {
8426 spin_lock(&ioa_cfg->hrrq[i]._lock);
8427 ioa_cfg->hrrq[i].allow_interrupts = 1;
8428 spin_unlock(&ioa_cfg->hrrq[i]._lock);
8429 }
8701f185
WB
8430 if (ioa_cfg->sis64) {
8431 /* Set the adapter to the correct endian mode. */
8432 writel(IPR_ENDIAN_SWAP_KEY, ioa_cfg->regs.endian_swap_reg);
8433 int_reg = readl(ioa_cfg->regs.endian_swap_reg);
8434 }
8435
7be96900 8436 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg32);
1da177e4
LT
8437
8438 if (int_reg & IPR_PCII_IOA_TRANS_TO_OPER) {
8439 writel((IPR_PCII_ERROR_INTERRUPTS | IPR_PCII_HRRQ_UPDATED),
214777ba 8440 ioa_cfg->regs.clr_interrupt_mask_reg32);
1da177e4
LT
8441 int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
8442 return IPR_RC_JOB_CONTINUE;
8443 }
8444
8445 /* Enable destructive diagnostics on IOA */
214777ba
WB
8446 writel(ioa_cfg->doorbell, ioa_cfg->regs.set_uproc_interrupt_reg32);
8447
7be96900
WB
8448 if (ioa_cfg->sis64) {
8449 maskval = IPR_PCII_IPL_STAGE_CHANGE;
8450 maskval = (maskval << 32) | IPR_PCII_OPER_INTERRUPTS;
8451 writeq(maskval, ioa_cfg->regs.clr_interrupt_mask_reg);
8452 } else
8453 writel(IPR_PCII_OPER_INTERRUPTS, ioa_cfg->regs.clr_interrupt_mask_reg32);
1da177e4 8454
1da177e4
LT
8455 int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
8456
8457 dev_info(&ioa_cfg->pdev->dev, "Initializing IOA.\n");
8458
214777ba
WB
8459 if (ioa_cfg->sis64) {
8460 ipr_cmd->job_step = ipr_reset_next_stage;
8461 return IPR_RC_JOB_CONTINUE;
8462 }
8463
5469cb5b 8464 ipr_cmd->timer.expires = jiffies + (ioa_cfg->transop_timeout * HZ);
841b86f3 8465 ipr_cmd->timer.function = ipr_oper_timeout;
1da177e4
LT
8466 ipr_cmd->done = ipr_reset_ioa_job;
8467 add_timer(&ipr_cmd->timer);
05a6538a 8468 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_pending_q);
1da177e4
LT
8469
8470 LEAVE;
8471 return IPR_RC_JOB_RETURN;
8472}
8473
8474/**
8475 * ipr_reset_wait_for_dump - Wait for a dump to timeout.
8476 * @ipr_cmd: ipr command struct
8477 *
8478 * This function is invoked when an adapter dump has run out
8479 * of processing time.
8480 *
8481 * Return value:
8482 * IPR_RC_JOB_CONTINUE
8483 **/
8484static int ipr_reset_wait_for_dump(struct ipr_cmnd *ipr_cmd)
8485{
8486 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8487
8488 if (ioa_cfg->sdt_state == GET_DUMP)
41e9a696
BK
8489 ioa_cfg->sdt_state = WAIT_FOR_DUMP;
8490 else if (ioa_cfg->sdt_state == READ_DUMP)
1da177e4
LT
8491 ioa_cfg->sdt_state = ABORT_DUMP;
8492
4c647e90 8493 ioa_cfg->dump_timeout = 1;
1da177e4
LT
8494 ipr_cmd->job_step = ipr_reset_alert;
8495
8496 return IPR_RC_JOB_CONTINUE;
8497}
8498
8499/**
 8500 * ipr_unit_check_no_data - Log a unit check/no data error
8501 * @ioa_cfg: ioa config struct
8502 *
8503 * Logs an error indicating the adapter unit checked, but for some
8504 * reason, we were unable to fetch the unit check buffer.
8505 *
8506 * Return value:
8507 * nothing
8508 **/
8509static void ipr_unit_check_no_data(struct ipr_ioa_cfg *ioa_cfg)
8510{
8511 ioa_cfg->errors_logged++;
8512 dev_err(&ioa_cfg->pdev->dev, "IOA unit check with no data\n");
8513}
8514
8515/**
8516 * ipr_get_unit_check_buffer - Get the unit check buffer from the IOA
8517 * @ioa_cfg: ioa config struct
8518 *
8519 * Fetches the unit check buffer from the adapter by clocking the data
8520 * through the mailbox register.
8521 *
8522 * Return value:
8523 * nothing
8524 **/
8525static void ipr_get_unit_check_buffer(struct ipr_ioa_cfg *ioa_cfg)
8526{
8527 unsigned long mailbox;
8528 struct ipr_hostrcb *hostrcb;
8529 struct ipr_uc_sdt sdt;
8530 int rc, length;
65f56475 8531 u32 ioasc;
1da177e4
LT
8532
8533 mailbox = readl(ioa_cfg->ioa_mailbox);
8534
dcbad00e 8535 if (!ioa_cfg->sis64 && !ipr_sdt_is_fmt2(mailbox)) {
1da177e4
LT
8536 ipr_unit_check_no_data(ioa_cfg);
8537 return;
8538 }
8539
8540 memset(&sdt, 0, sizeof(struct ipr_uc_sdt));
8541 rc = ipr_get_ldump_data_section(ioa_cfg, mailbox, (__be32 *) &sdt,
8542 (sizeof(struct ipr_uc_sdt)) / sizeof(__be32));
8543
dcbad00e
WB
8544 if (rc || !(sdt.entry[0].flags & IPR_SDT_VALID_ENTRY) ||
8545 ((be32_to_cpu(sdt.hdr.state) != IPR_FMT3_SDT_READY_TO_USE) &&
8546 (be32_to_cpu(sdt.hdr.state) != IPR_FMT2_SDT_READY_TO_USE))) {
1da177e4
LT
8547 ipr_unit_check_no_data(ioa_cfg);
8548 return;
8549 }
8550
8551 /* Find length of the first sdt entry (UC buffer) */
dcbad00e
WB
8552 if (be32_to_cpu(sdt.hdr.state) == IPR_FMT3_SDT_READY_TO_USE)
8553 length = be32_to_cpu(sdt.entry[0].end_token);
8554 else
8555 length = (be32_to_cpu(sdt.entry[0].end_token) -
8556 be32_to_cpu(sdt.entry[0].start_token)) &
8557 IPR_FMT2_MBX_ADDR_MASK;
1da177e4
LT
8558
8559 hostrcb = list_entry(ioa_cfg->hostrcb_free_q.next,
8560 struct ipr_hostrcb, queue);
afc3f83c 8561 list_del_init(&hostrcb->queue);
1da177e4
LT
8562 memset(&hostrcb->hcam, 0, sizeof(hostrcb->hcam));
8563
8564 rc = ipr_get_ldump_data_section(ioa_cfg,
dcbad00e 8565 be32_to_cpu(sdt.entry[0].start_token),
1da177e4
LT
8566 (__be32 *)&hostrcb->hcam,
8567 min(length, (int)sizeof(hostrcb->hcam)) / sizeof(__be32));
8568
65f56475 8569 if (!rc) {
1da177e4 8570 ipr_handle_log_data(ioa_cfg, hostrcb);
4565e370 8571 ioasc = be32_to_cpu(hostrcb->hcam.u.error.fd_ioasc);
65f56475
BK
8572 if (ioasc == IPR_IOASC_NR_IOA_RESET_REQUIRED &&
8573 ioa_cfg->sdt_state == GET_DUMP)
8574 ioa_cfg->sdt_state = WAIT_FOR_DUMP;
8575 } else
1da177e4
LT
8576 ipr_unit_check_no_data(ioa_cfg);
8577
8578 list_add_tail(&hostrcb->queue, &ioa_cfg->hostrcb_free_q);
8579}
8580
110def85
WB
8581/**
8582 * ipr_reset_get_unit_check_job - Call to get the unit check buffer.
8583 * @ipr_cmd: ipr command struct
8584 *
 8585 * Description: This function fetches the unit check buffer from the adapter.
8586 *
8587 * Return value:
8588 * IPR_RC_JOB_RETURN
8589 **/
8590static int ipr_reset_get_unit_check_job(struct ipr_cmnd *ipr_cmd)
8591{
8592 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8593
8594 ENTER;
8595 ioa_cfg->ioa_unit_checked = 0;
8596 ipr_get_unit_check_buffer(ioa_cfg);
8597 ipr_cmd->job_step = ipr_reset_alert;
8598 ipr_reset_start_timer(ipr_cmd, 0);
8599
8600 LEAVE;
8601 return IPR_RC_JOB_RETURN;
8602}
8603
f41f1d99
GKB
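/**
 * ipr_dump_mailbox_wait - Wait for the mailbox register to become stable
 * @ipr_cmd: ipr command struct
 *
 * Description: Polls, on a timer, for IPR_PCII_MAILBOX_STABLE on SIS64
 * adapters before moving the dump state machine to READ_DUMP; older
 * adapters, or an exhausted time budget, proceed immediately.
 *
 * Return value:
 * 	IPR_RC_JOB_RETURN
 **/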
8604static int ipr_dump_mailbox_wait(struct ipr_cmnd *ipr_cmd)
8605{
8606 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8607
8608 ENTER;
8609
8610 if (ioa_cfg->sdt_state != GET_DUMP)
8611 return IPR_RC_JOB_RETURN;
8612
8613 if (!ioa_cfg->sis64 || !ipr_cmd->u.time_left ||
8614 (readl(ioa_cfg->regs.sense_interrupt_reg) &
8615 IPR_PCII_MAILBOX_STABLE)) {
8616
8617 if (!ipr_cmd->u.time_left)
8618 dev_err(&ioa_cfg->pdev->dev,
8619 "Timed out waiting for Mailbox register.\n");
8620
8621 ioa_cfg->sdt_state = READ_DUMP;
8622 ioa_cfg->dump_timeout = 0;
8623 if (ioa_cfg->sis64)
8624 ipr_reset_start_timer(ipr_cmd, IPR_SIS64_DUMP_TIMEOUT);
8625 else
8626 ipr_reset_start_timer(ipr_cmd, IPR_SIS32_DUMP_TIMEOUT);
8627 ipr_cmd->job_step = ipr_reset_wait_for_dump;
8628 schedule_work(&ioa_cfg->work_q);
8629
8630 } else {
8631 ipr_cmd->u.time_left -= IPR_CHECK_FOR_RESET_TIMEOUT;
8632 ipr_reset_start_timer(ipr_cmd,
8633 IPR_CHECK_FOR_RESET_TIMEOUT);
8634 }
8635
8636 LEAVE;
8637 return IPR_RC_JOB_RETURN;
8638}
8639
1da177e4
LT
8640/**
8641 * ipr_reset_restore_cfg_space - Restore PCI config space.
8642 * @ipr_cmd: ipr command struct
8643 *
8644 * Description: This function restores the saved PCI config space of
8645 * the adapter, fails all outstanding ops back to the callers, and
8646 * fetches the dump/unit check if applicable to this reset.
8647 *
8648 * Return value:
8649 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
8650 **/
8651static int ipr_reset_restore_cfg_space(struct ipr_cmnd *ipr_cmd)
8652{
8653 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
630ad831 8654 u32 int_reg;
1da177e4
LT
8655
8656 ENTER;
99c965dd 8657 ioa_cfg->pdev->state_saved = true;
1d3c16a8 8658 pci_restore_state(ioa_cfg->pdev);
1da177e4
LT
8659
8660 if (ipr_set_pcix_cmd_reg(ioa_cfg)) {
96d21f00 8661 ipr_cmd->s.ioasa.hdr.ioasc = cpu_to_be32(IPR_IOASC_PCI_ACCESS_ERROR);
1da177e4
LT
8662 return IPR_RC_JOB_CONTINUE;
8663 }
8664
8665 ipr_fail_all_ops(ioa_cfg);
8666
8701f185
WB
8667 if (ioa_cfg->sis64) {
8668 /* Set the adapter to the correct endian mode. */
8669 writel(IPR_ENDIAN_SWAP_KEY, ioa_cfg->regs.endian_swap_reg);
8670 int_reg = readl(ioa_cfg->regs.endian_swap_reg);
8671 }
8672
1da177e4 8673 if (ioa_cfg->ioa_unit_checked) {
110def85
WB
8674 if (ioa_cfg->sis64) {
8675 ipr_cmd->job_step = ipr_reset_get_unit_check_job;
8676 ipr_reset_start_timer(ipr_cmd, IPR_DUMP_DELAY_TIMEOUT);
8677 return IPR_RC_JOB_RETURN;
8678 } else {
8679 ioa_cfg->ioa_unit_checked = 0;
8680 ipr_get_unit_check_buffer(ioa_cfg);
8681 ipr_cmd->job_step = ipr_reset_alert;
8682 ipr_reset_start_timer(ipr_cmd, 0);
8683 return IPR_RC_JOB_RETURN;
8684 }
1da177e4
LT
8685 }
8686
8687 if (ioa_cfg->in_ioa_bringdown) {
8688 ipr_cmd->job_step = ipr_ioa_bringdown_done;
f41f1d99
GKB
8689 } else if (ioa_cfg->sdt_state == GET_DUMP) {
8690 ipr_cmd->job_step = ipr_dump_mailbox_wait;
8691 ipr_cmd->u.time_left = IPR_WAIT_FOR_MAILBOX;
1da177e4
LT
8692 } else {
8693 ipr_cmd->job_step = ipr_reset_enable_ioa;
1da177e4
LT
8694 }
8695
438b0331 8696 LEAVE;
1da177e4
LT
8697 return IPR_RC_JOB_CONTINUE;
8698}
8699
e619e1a7
BK
8700/**
8701 * ipr_reset_bist_done - BIST has completed on the adapter.
8702 * @ipr_cmd: ipr command struct
8703 *
8704 * Description: Unblock config space and resume the reset process.
8705 *
8706 * Return value:
8707 * IPR_RC_JOB_CONTINUE
8708 **/
8709static int ipr_reset_bist_done(struct ipr_cmnd *ipr_cmd)
8710{
fb51ccbf
JK
8711 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8712
e619e1a7 8713 ENTER;
fb51ccbf
JK
8714 if (ioa_cfg->cfg_locked)
8715 pci_cfg_access_unlock(ioa_cfg->pdev);
8716 ioa_cfg->cfg_locked = 0;
e619e1a7
BK
8717 ipr_cmd->job_step = ipr_reset_restore_cfg_space;
8718 LEAVE;
8719 return IPR_RC_JOB_CONTINUE;
8720}
8721
1da177e4
LT
8722/**
8723 * ipr_reset_start_bist - Run BIST on the adapter.
8724 * @ipr_cmd: ipr command struct
8725 *
8726 * Description: This function runs BIST on the adapter, then delays 2 seconds.
8727 *
8728 * Return value:
8729 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
8730 **/
8731static int ipr_reset_start_bist(struct ipr_cmnd *ipr_cmd)
8732{
8733 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
cb237ef7 8734 int rc = PCIBIOS_SUCCESSFUL;
1da177e4
LT
8735
8736 ENTER;
cb237ef7
WB
8737 if (ioa_cfg->ipr_chip->bist_method == IPR_MMIO)
8738 writel(IPR_UPROCI_SIS64_START_BIST,
8739 ioa_cfg->regs.set_uproc_interrupt_reg32);
8740 else
8741 rc = pci_write_config_byte(ioa_cfg->pdev, PCI_BIST, PCI_BIST_START);
8742
8743 if (rc == PCIBIOS_SUCCESSFUL) {
e619e1a7 8744 ipr_cmd->job_step = ipr_reset_bist_done;
1da177e4
LT
8745 ipr_reset_start_timer(ipr_cmd, IPR_WAIT_FOR_BIST_TIMEOUT);
8746 rc = IPR_RC_JOB_RETURN;
cb237ef7 8747 } else {
fb51ccbf
JK
8748 if (ioa_cfg->cfg_locked)
8749 pci_cfg_access_unlock(ipr_cmd->ioa_cfg->pdev);
8750 ioa_cfg->cfg_locked = 0;
cb237ef7
WB
8751 ipr_cmd->s.ioasa.hdr.ioasc = cpu_to_be32(IPR_IOASC_PCI_ACCESS_ERROR);
8752 rc = IPR_RC_JOB_CONTINUE;
1da177e4
LT
8753 }
8754
8755 LEAVE;
8756 return rc;
8757}
8758
463fc696
BK
8759/**
8760 * ipr_reset_slot_reset_done - Clear PCI reset to the adapter
8761 * @ipr_cmd: ipr command struct
8762 *
8763 * Description: This clears PCI reset to the adapter and delays two seconds.
8764 *
8765 * Return value:
8766 * IPR_RC_JOB_RETURN
8767 **/
8768static int ipr_reset_slot_reset_done(struct ipr_cmnd *ipr_cmd)
8769{
8770 ENTER;
463fc696
BK
8771 ipr_cmd->job_step = ipr_reset_bist_done;
8772 ipr_reset_start_timer(ipr_cmd, IPR_WAIT_FOR_BIST_TIMEOUT);
8773 LEAVE;
8774 return IPR_RC_JOB_RETURN;
8775}
8776
2796ca5e
BK
8777/**
 8778 * ipr_reset_reset_work - Pulse a PCIe warm reset
8779 * @work: work struct
8780 *
 8781 * Description: This pulses a warm reset to the adapter's slot.
8782 *
8783 **/
8784static void ipr_reset_reset_work(struct work_struct *work)
8785{
8786 struct ipr_cmnd *ipr_cmd = container_of(work, struct ipr_cmnd, work);
8787 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8788 struct pci_dev *pdev = ioa_cfg->pdev;
8789 unsigned long lock_flags = 0;
8790
8791 ENTER;
8792 pci_set_pcie_reset_state(pdev, pcie_warm_reset);
8793 msleep(jiffies_to_msecs(IPR_PCI_RESET_TIMEOUT));
8794 pci_set_pcie_reset_state(pdev, pcie_deassert_reset);
8795
8796 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
8797 if (ioa_cfg->reset_cmd == ipr_cmd)
8798 ipr_reset_ioa_job(ipr_cmd);
8799 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
8800 LEAVE;
8801}
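/*
 * The reset is pulsed from a workqueue because the msleep() between
 * asserting and deasserting pcie_warm_reset cannot be done under the
 * host lock; the lock is retaken only to resume the job, and only if
 * this command is still the active reset command.
 */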
8802
463fc696
BK
8803/**
8804 * ipr_reset_slot_reset - Reset the PCI slot of the adapter.
8805 * @ipr_cmd: ipr command struct
8806 *
8807 * Description: This asserts PCI reset to the adapter.
8808 *
8809 * Return value:
8810 * IPR_RC_JOB_RETURN
8811 **/
8812static int ipr_reset_slot_reset(struct ipr_cmnd *ipr_cmd)
8813{
8814 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
463fc696
BK
8815
8816 ENTER;
2796ca5e
BK
8817 INIT_WORK(&ipr_cmd->work, ipr_reset_reset_work);
8818 queue_work(ioa_cfg->reset_work_q, &ipr_cmd->work);
463fc696 8819 ipr_cmd->job_step = ipr_reset_slot_reset_done;
463fc696
BK
8820 LEAVE;
8821 return IPR_RC_JOB_RETURN;
8822}
8823
fb51ccbf
JK
8824/**
8825 * ipr_reset_block_config_access_wait - Wait for permission to block config access
8826 * @ipr_cmd: ipr command struct
8827 *
8828 * Description: This attempts to block config access to the IOA.
8829 *
8830 * Return value:
8831 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
8832 **/
8833static int ipr_reset_block_config_access_wait(struct ipr_cmnd *ipr_cmd)
8834{
8835 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8836 int rc = IPR_RC_JOB_CONTINUE;
8837
8838 if (pci_cfg_access_trylock(ioa_cfg->pdev)) {
8839 ioa_cfg->cfg_locked = 1;
8840 ipr_cmd->job_step = ioa_cfg->reset;
8841 } else {
8842 if (ipr_cmd->u.time_left) {
8843 rc = IPR_RC_JOB_RETURN;
8844 ipr_cmd->u.time_left -= IPR_CHECK_FOR_RESET_TIMEOUT;
8845 ipr_reset_start_timer(ipr_cmd,
8846 IPR_CHECK_FOR_RESET_TIMEOUT);
8847 } else {
8848 ipr_cmd->job_step = ioa_cfg->reset;
8849 dev_err(&ioa_cfg->pdev->dev,
8850 "Timed out waiting to lock config access. Resetting anyway.\n");
8851 }
8852 }
8853
8854 return rc;
8855}
8856
8857/**
8858 * ipr_reset_block_config_access - Block config access to the IOA
8859 * @ipr_cmd: ipr command struct
8860 *
8861 * Description: This attempts to block config access to the IOA
8862 *
8863 * Return value:
8864 * IPR_RC_JOB_CONTINUE
8865 **/
8866static int ipr_reset_block_config_access(struct ipr_cmnd *ipr_cmd)
8867{
8868 ipr_cmd->ioa_cfg->cfg_locked = 0;
8869 ipr_cmd->job_step = ipr_reset_block_config_access_wait;
8870 ipr_cmd->u.time_left = IPR_WAIT_FOR_RESET_TIMEOUT;
8871 return IPR_RC_JOB_CONTINUE;
8872}
8873
1da177e4
LT
8874/**
8875 * ipr_reset_allowed - Query whether or not IOA can be reset
8876 * @ioa_cfg: ioa config struct
8877 *
8878 * Return value:
8879 * 0 if reset not allowed / non-zero if reset is allowed
8880 **/
8881static int ipr_reset_allowed(struct ipr_ioa_cfg *ioa_cfg)
8882{
8883 volatile u32 temp_reg;
8884
8885 temp_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
8886 return ((temp_reg & IPR_PCII_CRITICAL_OPERATION) == 0);
8887}
8888
8889/**
8890 * ipr_reset_wait_to_start_bist - Wait for permission to reset IOA.
8891 * @ipr_cmd: ipr command struct
8892 *
8893 * Description: This function waits for adapter permission to run BIST,
8894 * then runs BIST. If the adapter does not give permission after a
 8895 * reasonable time, we will reset the adapter anyway. Resetting the
 8896 * adapter without warning it risks losing the persistent error log:
 8897 * if the adapter is reset while it is writing to its flash, the
 8898 * affected flash segment will have bad ECC and be zeroed.
8900 *
8901 * Return value:
8902 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
8903 **/
8904static int ipr_reset_wait_to_start_bist(struct ipr_cmnd *ipr_cmd)
8905{
8906 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8907 int rc = IPR_RC_JOB_RETURN;
8908
8909 if (!ipr_reset_allowed(ioa_cfg) && ipr_cmd->u.time_left) {
8910 ipr_cmd->u.time_left -= IPR_CHECK_FOR_RESET_TIMEOUT;
8911 ipr_reset_start_timer(ipr_cmd, IPR_CHECK_FOR_RESET_TIMEOUT);
8912 } else {
fb51ccbf 8913 ipr_cmd->job_step = ipr_reset_block_config_access;
1da177e4
LT
8914 rc = IPR_RC_JOB_CONTINUE;
8915 }
8916
8917 return rc;
8918}
8919
8920/**
8701f185 8921 * ipr_reset_alert - Alert the adapter of a pending reset
1da177e4
LT
8922 * @ipr_cmd: ipr command struct
8923 *
8924 * Description: This function alerts the adapter that it will be reset.
8925 * If memory space is not currently enabled, proceed directly
8926 * to running BIST on the adapter. The timer must always be started
8927 * so we guarantee we do not run BIST from ipr_isr.
8928 *
8929 * Return value:
8930 * IPR_RC_JOB_RETURN
8931 **/
8932static int ipr_reset_alert(struct ipr_cmnd *ipr_cmd)
8933{
8934 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8935 u16 cmd_reg;
8936 int rc;
8937
8938 ENTER;
8939 rc = pci_read_config_word(ioa_cfg->pdev, PCI_COMMAND, &cmd_reg);
8940
8941 if ((rc == PCIBIOS_SUCCESSFUL) && (cmd_reg & PCI_COMMAND_MEMORY)) {
8942 ipr_mask_and_clear_interrupts(ioa_cfg, ~0);
214777ba 8943 writel(IPR_UPROCI_RESET_ALERT, ioa_cfg->regs.set_uproc_interrupt_reg32);
1da177e4
LT
8944 ipr_cmd->job_step = ipr_reset_wait_to_start_bist;
8945 } else {
fb51ccbf 8946 ipr_cmd->job_step = ipr_reset_block_config_access;
1da177e4
LT
8947 }
8948
8949 ipr_cmd->u.time_left = IPR_WAIT_FOR_RESET_TIMEOUT;
8950 ipr_reset_start_timer(ipr_cmd, IPR_CHECK_FOR_RESET_TIMEOUT);
8951
8952 LEAVE;
8953 return IPR_RC_JOB_RETURN;
8954}
8955
4fdd7c7a
BK
8956/**
8957 * ipr_reset_quiesce_done - Complete IOA disconnect
8958 * @ipr_cmd: ipr command struct
8959 *
8960 * Description: Freeze the adapter to complete quiesce processing
8961 *
8962 * Return value:
8963 * IPR_RC_JOB_CONTINUE
8964 **/
8965static int ipr_reset_quiesce_done(struct ipr_cmnd *ipr_cmd)
8966{
8967 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8968
8969 ENTER;
8970 ipr_cmd->job_step = ipr_ioa_bringdown_done;
8971 ipr_mask_and_clear_interrupts(ioa_cfg, ~IPR_PCII_IOA_TRANS_TO_OPER);
8972 LEAVE;
8973 return IPR_RC_JOB_CONTINUE;
8974}
8975
8976/**
8977 * ipr_reset_cancel_hcam_done - Check for outstanding commands
8978 * @ipr_cmd: ipr command struct
8979 *
8980 * Description: Ensure nothing is outstanding to the IOA and
8981 * proceed with IOA disconnect. Otherwise reset the IOA.
8982 *
8983 * Return value:
8984 * IPR_RC_JOB_RETURN / IPR_RC_JOB_CONTINUE
8985 **/
8986static int ipr_reset_cancel_hcam_done(struct ipr_cmnd *ipr_cmd)
8987{
8988 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8989 struct ipr_cmnd *loop_cmd;
8990 struct ipr_hrr_queue *hrrq;
8991 int rc = IPR_RC_JOB_CONTINUE;
8992 int count = 0;
8993
8994 ENTER;
8995 ipr_cmd->job_step = ipr_reset_quiesce_done;
8996
8997 for_each_hrrq(hrrq, ioa_cfg) {
8998 spin_lock(&hrrq->_lock);
8999 list_for_each_entry(loop_cmd, &hrrq->hrrq_pending_q, queue) {
9000 count++;
9001 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
9002 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
9003 rc = IPR_RC_JOB_RETURN;
9004 break;
9005 }
9006 spin_unlock(&hrrq->_lock);
9007
9008 if (count)
9009 break;
9010 }
9011
9012 LEAVE;
9013 return rc;
9014}
9015
9016/**
9017 * ipr_reset_cancel_hcam - Cancel outstanding HCAMs
9018 * @ipr_cmd: ipr command struct
9019 *
 9020 * Description: Cancel any outstanding HCAMs to the IOA.
9021 *
9022 * Return value:
9023 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
9024 **/
9025static int ipr_reset_cancel_hcam(struct ipr_cmnd *ipr_cmd)
9026{
9027 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
9028 int rc = IPR_RC_JOB_CONTINUE;
9029 struct ipr_cmd_pkt *cmd_pkt;
9030 struct ipr_cmnd *hcam_cmd;
9031 struct ipr_hrr_queue *hrrq = &ioa_cfg->hrrq[IPR_INIT_HRRQ];
9032
9033 ENTER;
9034 ipr_cmd->job_step = ipr_reset_cancel_hcam_done;
9035
9036 if (!hrrq->ioa_is_dead) {
9037 if (!list_empty(&ioa_cfg->hostrcb_pending_q)) {
9038 list_for_each_entry(hcam_cmd, &hrrq->hrrq_pending_q, queue) {
9039 if (hcam_cmd->ioarcb.cmd_pkt.cdb[0] != IPR_HOST_CONTROLLED_ASYNC)
9040 continue;
9041
9042 ipr_cmd->ioarcb.res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
9044 cmd_pkt = &ipr_cmd->ioarcb.cmd_pkt;
9045 cmd_pkt->request_type = IPR_RQTYPE_IOACMD;
9046 cmd_pkt->cdb[0] = IPR_CANCEL_REQUEST;
9047 cmd_pkt->cdb[1] = IPR_CANCEL_64BIT_IOARCB;
9048 cmd_pkt->cdb[10] = ((u64) hcam_cmd->dma_addr >> 56) & 0xff;
9049 cmd_pkt->cdb[11] = ((u64) hcam_cmd->dma_addr >> 48) & 0xff;
9050 cmd_pkt->cdb[12] = ((u64) hcam_cmd->dma_addr >> 40) & 0xff;
9051 cmd_pkt->cdb[13] = ((u64) hcam_cmd->dma_addr >> 32) & 0xff;
9052 cmd_pkt->cdb[2] = ((u64) hcam_cmd->dma_addr >> 24) & 0xff;
9053 cmd_pkt->cdb[3] = ((u64) hcam_cmd->dma_addr >> 16) & 0xff;
9054 cmd_pkt->cdb[4] = ((u64) hcam_cmd->dma_addr >> 8) & 0xff;
9055 cmd_pkt->cdb[5] = ((u64) hcam_cmd->dma_addr) & 0xff;
9056
9057 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout,
9058 IPR_CANCEL_TIMEOUT);
9059
9060 rc = IPR_RC_JOB_RETURN;
9061 ipr_cmd->job_step = ipr_reset_cancel_hcam;
9062 break;
9063 }
9064 }
9065 } else
9066 ipr_cmd->job_step = ipr_reset_alert;
9067
9068 LEAVE;
9069 return rc;
9070}
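/*
 * Byte layout of the cancel CDB built above: cdb[10..13] carry bits
 * 63:32 of the IOARCB DMA address and cdb[2..5] carry bits 31:0, most
 * significant byte first. For an illustrative (hypothetical) address
 * of 0x0000000123456780, that gives cdb[10..13] = 00 00 00 01 and
 * cdb[2..5] = 23 45 67 80.
 */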
9071
1da177e4
LT
9072/**
9073 * ipr_reset_ucode_download_done - Microcode download completion
9074 * @ipr_cmd: ipr command struct
9075 *
9076 * Description: This function unmaps the microcode download buffer.
9077 *
9078 * Return value:
9079 * IPR_RC_JOB_CONTINUE
9080 **/
9081static int ipr_reset_ucode_download_done(struct ipr_cmnd *ipr_cmd)
9082{
9083 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
9084 struct ipr_sglist *sglist = ioa_cfg->ucode_sglist;
9085
d73341bf 9086 dma_unmap_sg(&ioa_cfg->pdev->dev, sglist->scatterlist,
1da177e4
LT
9087 sglist->num_sg, DMA_TO_DEVICE);
9088
9089 ipr_cmd->job_step = ipr_reset_alert;
9090 return IPR_RC_JOB_CONTINUE;
9091}
9092
9093/**
9094 * ipr_reset_ucode_download - Download microcode to the adapter
9095 * @ipr_cmd: ipr command struct
9096 *
 9097 * Description: This function checks to see if there is microcode
9098 * to download to the adapter. If there is, a download is performed.
9099 *
9100 * Return value:
9101 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
9102 **/
9103static int ipr_reset_ucode_download(struct ipr_cmnd *ipr_cmd)
9104{
9105 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
9106 struct ipr_sglist *sglist = ioa_cfg->ucode_sglist;
9107
9108 ENTER;
9109 ipr_cmd->job_step = ipr_reset_alert;
9110
9111 if (!sglist)
9112 return IPR_RC_JOB_CONTINUE;
9113
9114 ipr_cmd->ioarcb.res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
9115 ipr_cmd->ioarcb.cmd_pkt.request_type = IPR_RQTYPE_SCSICDB;
9116 ipr_cmd->ioarcb.cmd_pkt.cdb[0] = WRITE_BUFFER;
9117 ipr_cmd->ioarcb.cmd_pkt.cdb[1] = IPR_WR_BUF_DOWNLOAD_AND_SAVE;
9118 ipr_cmd->ioarcb.cmd_pkt.cdb[6] = (sglist->buffer_len & 0xff0000) >> 16;
9119 ipr_cmd->ioarcb.cmd_pkt.cdb[7] = (sglist->buffer_len & 0x00ff00) >> 8;
9120 ipr_cmd->ioarcb.cmd_pkt.cdb[8] = sglist->buffer_len & 0x0000ff;
9121
a32c055f
WB
9122 if (ioa_cfg->sis64)
9123 ipr_build_ucode_ioadl64(ipr_cmd, sglist);
9124 else
9125 ipr_build_ucode_ioadl(ipr_cmd, sglist);
1da177e4
LT
9126 ipr_cmd->job_step = ipr_reset_ucode_download_done;
9127
9128 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout,
9129 IPR_WRITE_BUFFER_TIMEOUT);
9130
9131 LEAVE;
9132 return IPR_RC_JOB_RETURN;
9133}
9134
9135/**
9136 * ipr_reset_shutdown_ioa - Shutdown the adapter
9137 * @ipr_cmd: ipr command struct
9138 *
9139 * Description: This function issues an adapter shutdown of the
9140 * specified type to the specified adapter as part of the
9141 * adapter reset job.
9142 *
9143 * Return value:
9144 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
9145 **/
9146static int ipr_reset_shutdown_ioa(struct ipr_cmnd *ipr_cmd)
9147{
9148 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
9149 enum ipr_shutdown_type shutdown_type = ipr_cmd->u.shutdown_type;
9150 unsigned long timeout;
9151 int rc = IPR_RC_JOB_CONTINUE;
9152
9153 ENTER;
4fdd7c7a
BK
9154 if (shutdown_type == IPR_SHUTDOWN_QUIESCE)
9155 ipr_cmd->job_step = ipr_reset_cancel_hcam;
9156 else if (shutdown_type != IPR_SHUTDOWN_NONE &&
56d6aa33 9157 !ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead) {
1da177e4
LT
9158 ipr_cmd->ioarcb.res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
9159 ipr_cmd->ioarcb.cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
9160 ipr_cmd->ioarcb.cmd_pkt.cdb[0] = IPR_IOA_SHUTDOWN;
9161 ipr_cmd->ioarcb.cmd_pkt.cdb[1] = shutdown_type;
9162
ac09c349
BK
9163 if (shutdown_type == IPR_SHUTDOWN_NORMAL)
9164 timeout = IPR_SHUTDOWN_TIMEOUT;
1da177e4
LT
9165 else if (shutdown_type == IPR_SHUTDOWN_PREPARE_FOR_NORMAL)
9166 timeout = IPR_INTERNAL_TIMEOUT;
ac09c349
BK
9167 else if (ioa_cfg->dual_raid && ipr_dual_ioa_raid)
9168 timeout = IPR_DUAL_IOA_ABBR_SHUTDOWN_TO;
1da177e4 9169 else
ac09c349 9170 timeout = IPR_ABBREV_SHUTDOWN_TIMEOUT;
1da177e4
LT
9171
9172 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, timeout);
9173
9174 rc = IPR_RC_JOB_RETURN;
9175 ipr_cmd->job_step = ipr_reset_ucode_download;
9176 } else
9177 ipr_cmd->job_step = ipr_reset_alert;
9178
9179 LEAVE;
9180 return rc;
9181}
9182
9183/**
9184 * ipr_reset_ioa_job - Adapter reset job
9185 * @ipr_cmd: ipr command struct
9186 *
9187 * Description: This function is the job router for the adapter reset job.
9188 *
9189 * Return value:
9190 * none
9191 **/
9192static void ipr_reset_ioa_job(struct ipr_cmnd *ipr_cmd)
9193{
9194 u32 rc, ioasc;
1da177e4
LT
9195 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
9196
9197 do {
96d21f00 9198 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
1da177e4
LT
9199
9200 if (ioa_cfg->reset_cmd != ipr_cmd) {
9201 /*
9202 * We are doing nested adapter resets and this is
9203 * not the current reset job.
9204 */
05a6538a 9205 list_add_tail(&ipr_cmd->queue,
9206 &ipr_cmd->hrrq->hrrq_free_q);
1da177e4
LT
9207 return;
9208 }
9209
9210 if (IPR_IOASC_SENSE_KEY(ioasc)) {
dfed823e
BK
9211 rc = ipr_cmd->job_step_failed(ipr_cmd);
9212 if (rc == IPR_RC_JOB_RETURN)
9213 return;
1da177e4
LT
9214 }
9215
9216 ipr_reinit_ipr_cmnd(ipr_cmd);
dfed823e 9217 ipr_cmd->job_step_failed = ipr_reset_cmd_failed;
1da177e4 9218 rc = ipr_cmd->job_step(ipr_cmd);
203fa3fe 9219 } while (rc == IPR_RC_JOB_CONTINUE);
1da177e4
LT
9220}
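/*
 * The job-step protocol, restated as a minimal sketch (the example_*
 * names are hypothetical; ipr_do_req() and the IPR_RC_* codes are the
 * driver's own): IPR_RC_JOB_CONTINUE runs the next step synchronously
 * in the loop above, while IPR_RC_JOB_RETURN hands control back and
 * re-enters this router when the asynchronous work completes.
 *
 *	static int example_sync_step(struct ipr_cmnd *ipr_cmd)
 *	{
 *		ipr_cmd->job_step = example_async_step;
 *		return IPR_RC_JOB_CONTINUE;
 *	}
 *
 *	static int example_async_step(struct ipr_cmnd *ipr_cmd)
 *	{
 *		ipr_cmd->job_step = example_sync_step;
 *		ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout,
 *			   IPR_INTERNAL_TIMEOUT);
 *		return IPR_RC_JOB_RETURN;
 *	}
 */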
9221
9222/**
9223 * _ipr_initiate_ioa_reset - Initiate an adapter reset
9224 * @ioa_cfg: ioa config struct
9225 * @job_step: first job step of reset job
9226 * @shutdown_type: shutdown type
9227 *
9228 * Description: This function will initiate the reset of the given adapter
9229 * starting at the selected job step.
9230 * If the caller needs to wait on the completion of the reset,
9231 * the caller must sleep on the reset_wait_q.
9232 *
9233 * Return value:
9234 * none
9235 **/
9236static void _ipr_initiate_ioa_reset(struct ipr_ioa_cfg *ioa_cfg,
9237 int (*job_step) (struct ipr_cmnd *),
9238 enum ipr_shutdown_type shutdown_type)
9239{
9240 struct ipr_cmnd *ipr_cmd;
56d6aa33 9241 int i;
1da177e4
LT
9242
9243 ioa_cfg->in_reset_reload = 1;
56d6aa33 9244 for (i = 0; i < ioa_cfg->hrrq_num; i++) {
9245 spin_lock(&ioa_cfg->hrrq[i]._lock);
9246 ioa_cfg->hrrq[i].allow_cmds = 0;
9247 spin_unlock(&ioa_cfg->hrrq[i]._lock);
9248 }
9249 wmb();
b0e17a9b
BK
9250 if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].removing_ioa) {
9251 ioa_cfg->scsi_unblock = 0;
9252 ioa_cfg->scsi_blocked = 1;
bfae7820 9253 scsi_block_requests(ioa_cfg->host);
b0e17a9b 9254 }
1da177e4
LT
9255
9256 ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
9257 ioa_cfg->reset_cmd = ipr_cmd;
9258 ipr_cmd->job_step = job_step;
9259 ipr_cmd->u.shutdown_type = shutdown_type;
9260
9261 ipr_reset_ioa_job(ipr_cmd);
9262}
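/*
 * A minimal sketch of a caller that must wait for the reset to finish,
 * per the description above (the same pattern the driver's error
 * handlers use; the host lock must be dropped before sleeping):
 *
 *	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
 *	ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NORMAL);
 *	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
 *	wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
 */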
9263
9264/**
9265 * ipr_initiate_ioa_reset - Initiate an adapter reset
9266 * @ioa_cfg: ioa config struct
9267 * @shutdown_type: shutdown type
9268 *
9269 * Description: This function will initiate the reset of the given adapter.
9270 * If the caller needs to wait on the completion of the reset,
9271 * the caller must sleep on the reset_wait_q.
9272 *
9273 * Return value:
9274 * none
9275 **/
9276static void ipr_initiate_ioa_reset(struct ipr_ioa_cfg *ioa_cfg,
9277 enum ipr_shutdown_type shutdown_type)
9278{
56d6aa33 9279 int i;
9280
9281 if (ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead)
1da177e4
LT
9282 return;
9283
41e9a696
BK
9284 if (ioa_cfg->in_reset_reload) {
9285 if (ioa_cfg->sdt_state == GET_DUMP)
9286 ioa_cfg->sdt_state = WAIT_FOR_DUMP;
9287 else if (ioa_cfg->sdt_state == READ_DUMP)
9288 ioa_cfg->sdt_state = ABORT_DUMP;
9289 }
1da177e4
LT
9290
9291 if (ioa_cfg->reset_retries++ >= IPR_NUM_RESET_RELOAD_RETRIES) {
9292 dev_err(&ioa_cfg->pdev->dev,
9293 "IOA taken offline - error recovery failed\n");
9294
9295 ioa_cfg->reset_retries = 0;
56d6aa33 9296 for (i = 0; i < ioa_cfg->hrrq_num; i++) {
9297 spin_lock(&ioa_cfg->hrrq[i]._lock);
9298 ioa_cfg->hrrq[i].ioa_is_dead = 1;
9299 spin_unlock(&ioa_cfg->hrrq[i]._lock);
9300 }
9301 wmb();
1da177e4
LT
9302
9303 if (ioa_cfg->in_ioa_bringdown) {
9304 ioa_cfg->reset_cmd = NULL;
9305 ioa_cfg->in_reset_reload = 0;
9306 ipr_fail_all_ops(ioa_cfg);
9307 wake_up_all(&ioa_cfg->reset_wait_q);
9308
bfae7820 9309 if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].removing_ioa) {
b0e17a9b
BK
9310 ioa_cfg->scsi_unblock = 1;
9311 schedule_work(&ioa_cfg->work_q);
bfae7820 9312 }
1da177e4
LT
9313 return;
9314 } else {
9315 ioa_cfg->in_ioa_bringdown = 1;
9316 shutdown_type = IPR_SHUTDOWN_NONE;
9317 }
9318 }
9319
9320 _ipr_initiate_ioa_reset(ioa_cfg, ipr_reset_shutdown_ioa,
9321 shutdown_type);
9322}
9323
f8a88b19
LV
9324/**
9325 * ipr_reset_freeze - Hold off all I/O activity
9326 * @ipr_cmd: ipr command struct
9327 *
9328 * Description: If the PCI slot is frozen, hold off all I/O
9329 * activity; then, as soon as the slot is available again,
9330 * initiate an adapter reset.
9331 */
9332static int ipr_reset_freeze(struct ipr_cmnd *ipr_cmd)
9333{
56d6aa33 9334 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
9335 int i;
9336
f8a88b19 9337 /* Disallow new interrupts, avoid loop */
56d6aa33 9338 for (i = 0; i < ioa_cfg->hrrq_num; i++) {
9339 spin_lock(&ioa_cfg->hrrq[i]._lock);
9340 ioa_cfg->hrrq[i].allow_interrupts = 0;
9341 spin_unlock(&ioa_cfg->hrrq[i]._lock);
9342 }
9343 wmb();
05a6538a 9344 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_pending_q);
f8a88b19
LV
9345 ipr_cmd->done = ipr_reset_ioa_job;
9346 return IPR_RC_JOB_RETURN;
9347}
9348
6270e593
BK
9349/**
9350 * ipr_pci_mmio_enabled - Called when MMIO has been re-enabled
9351 * @pdev: PCI device struct
9352 *
9353 * Description: This routine is called to tell us that the MMIO
9354 * access to the IOA has been restored
9355 */
9356static pci_ers_result_t ipr_pci_mmio_enabled(struct pci_dev *pdev)
9357{
9358 unsigned long flags = 0;
9359 struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
9360
9361 spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
9362 if (!ioa_cfg->probe_done)
9363 pci_save_state(pdev);
9364 spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
9365 return PCI_ERS_RESULT_NEED_RESET;
9366}
9367
f8a88b19
LV
9368/**
9369 * ipr_pci_frozen - Called when slot has experienced a PCI bus error.
9370 * @pdev: PCI device struct
9371 *
9372 * Description: This routine is called to tell us that the PCI bus
9373 * is down. Can't do anything here, except put the device driver
9374 * into a holding pattern, waiting for the PCI bus to come back.
9375 */
9376static void ipr_pci_frozen(struct pci_dev *pdev)
9377{
9378 unsigned long flags = 0;
9379 struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
9380
9381 spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
6270e593
BK
9382 if (ioa_cfg->probe_done)
9383 _ipr_initiate_ioa_reset(ioa_cfg, ipr_reset_freeze, IPR_SHUTDOWN_NONE);
f8a88b19
LV
9384 spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
9385}
9386
9387/**
9388 * ipr_pci_slot_reset - Called when PCI slot has been reset.
9389 * @pdev: PCI device struct
9390 *
9391 * Description: This routine is called by the pci error recovery
9392 * code after the PCI slot has been reset, just before we
9393 * should resume normal operations.
9394 */
9395static pci_ers_result_t ipr_pci_slot_reset(struct pci_dev *pdev)
9396{
9397 unsigned long flags = 0;
9398 struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
9399
9400 spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
6270e593
BK
9401 if (ioa_cfg->probe_done) {
9402 if (ioa_cfg->needs_warm_reset)
9403 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
9404 else
9405 _ipr_initiate_ioa_reset(ioa_cfg, ipr_reset_restore_cfg_space,
9406 IPR_SHUTDOWN_NONE);
9407 } else
9408 wake_up_all(&ioa_cfg->eeh_wait_q);
f8a88b19
LV
9409 spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
9410 return PCI_ERS_RESULT_RECOVERED;
9411}
9412
9413/**
9414 * ipr_pci_perm_failure - Called when PCI slot is dead for good.
9415 * @pdev: PCI device struct
9416 *
9417 * Description: This routine is called when the PCI bus has
9418 * permanently failed.
9419 */
9420static void ipr_pci_perm_failure(struct pci_dev *pdev)
9421{
9422 unsigned long flags = 0;
9423 struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
56d6aa33 9424 int i;
f8a88b19
LV
9425
9426 spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
6270e593
BK
9427 if (ioa_cfg->probe_done) {
9428 if (ioa_cfg->sdt_state == WAIT_FOR_DUMP)
9429 ioa_cfg->sdt_state = ABORT_DUMP;
9430 ioa_cfg->reset_retries = IPR_NUM_RESET_RELOAD_RETRIES - 1;
9431 ioa_cfg->in_ioa_bringdown = 1;
9432 for (i = 0; i < ioa_cfg->hrrq_num; i++) {
9433 spin_lock(&ioa_cfg->hrrq[i]._lock);
9434 ioa_cfg->hrrq[i].allow_cmds = 0;
9435 spin_unlock(&ioa_cfg->hrrq[i]._lock);
9436 }
9437 wmb();
9438 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
9439 } else
9440 wake_up_all(&ioa_cfg->eeh_wait_q);
f8a88b19
LV
9441 spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
9442}
9443
9444/**
9445 * ipr_pci_error_detected - Called when a PCI error is detected.
9446 * @pdev: PCI device struct
9447 * @state: PCI channel state
9448 *
9449 * Description: Called when a PCI error is detected.
9450 *
9451 * Return value:
 9452 * PCI_ERS_RESULT_CAN_RECOVER / PCI_ERS_RESULT_NEED_RESET / PCI_ERS_RESULT_DISCONNECT
9453 */
9454static pci_ers_result_t ipr_pci_error_detected(struct pci_dev *pdev,
9455 pci_channel_state_t state)
9456{
9457 switch (state) {
9458 case pci_channel_io_frozen:
9459 ipr_pci_frozen(pdev);
6270e593 9460 return PCI_ERS_RESULT_CAN_RECOVER;
f8a88b19
LV
9461 case pci_channel_io_perm_failure:
9462 ipr_pci_perm_failure(pdev);
9463 return PCI_ERS_RESULT_DISCONNECT;
9465 default:
9466 break;
9467 }
9468 return PCI_ERS_RESULT_NEED_RESET;
9469}
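/*
 * These callbacks reach the PCI core through a struct
 * pci_error_handlers hung off the pci_driver's .err_handler field;
 * a minimal sketch of the wiring (instance name hypothetical):
 *
 *	static const struct pci_error_handlers example_err_handler = {
 *		.error_detected	= ipr_pci_error_detected,
 *		.mmio_enabled	= ipr_pci_mmio_enabled,
 *		.slot_reset	= ipr_pci_slot_reset,
 *	};
 */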
9470
1da177e4
LT
9471/**
9472 * ipr_probe_ioa_part2 - Initializes IOAs found in ipr_probe_ioa(..)
9473 * @ioa_cfg: ioa cfg struct
9474 *
183b8021 9475 * Description: This is the second phase of adapter initialization.
1da177e4
LT
 9476 * This function takes care of initializing the adapter to the point
 9477 * where it can accept new commands.
 9478 *
 9479 * Return value:
b1c11812 9480 * 0 on success / -EIO on failure
1da177e4 9481 **/
6f039790 9482static int ipr_probe_ioa_part2(struct ipr_ioa_cfg *ioa_cfg)
1da177e4
LT
9483{
9484 int rc = 0;
9485 unsigned long host_lock_flags = 0;
9486
9487 ENTER;
9488 spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);
9489 dev_dbg(&ioa_cfg->pdev->dev, "ioa_cfg adx: 0x%p\n", ioa_cfg);
6270e593 9490 ioa_cfg->probe_done = 1;
ce155cce
BK
9491 if (ioa_cfg->needs_hard_reset) {
9492 ioa_cfg->needs_hard_reset = 0;
9493 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
9494 } else
9495 _ipr_initiate_ioa_reset(ioa_cfg, ipr_reset_enable_ioa,
9496 IPR_SHUTDOWN_NONE);
1da177e4 9497 spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);
1da177e4
LT
9498
9499 LEAVE;
9500 return rc;
9501}
9502
9503/**
9504 * ipr_free_cmd_blks - Frees command blocks allocated for an adapter
9505 * @ioa_cfg: ioa config struct
9506 *
9507 * Return value:
9508 * none
9509 **/
9510static void ipr_free_cmd_blks(struct ipr_ioa_cfg *ioa_cfg)
9511{
9512 int i;
9513
a65e8f12
BK
9514 if (ioa_cfg->ipr_cmnd_list) {
9515 for (i = 0; i < IPR_NUM_CMD_BLKS; i++) {
9516 if (ioa_cfg->ipr_cmnd_list[i])
9517 dma_pool_free(ioa_cfg->ipr_cmd_pool,
9518 ioa_cfg->ipr_cmnd_list[i],
9519 ioa_cfg->ipr_cmnd_list_dma[i]);
1da177e4 9520
a65e8f12
BK
9521 ioa_cfg->ipr_cmnd_list[i] = NULL;
9522 }
1da177e4
LT
9523 }
9524
d73341bf 9526 dma_pool_destroy(ioa_cfg->ipr_cmd_pool);
1da177e4 9527
89aad428
BK
9528 kfree(ioa_cfg->ipr_cmnd_list);
9529 kfree(ioa_cfg->ipr_cmnd_list_dma);
9530 ioa_cfg->ipr_cmnd_list = NULL;
9531 ioa_cfg->ipr_cmnd_list_dma = NULL;
1da177e4
LT
9532 ioa_cfg->ipr_cmd_pool = NULL;
9533}
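/*
 * The calls above are the teardown half of the dma_pool lifecycle used
 * by ipr_alloc_cmd_blks(); in minimal form:
 *
 *	pool = dma_pool_create(IPR_NAME, &pdev->dev, size, align, 0);
 *	blk  = dma_pool_zalloc(pool, GFP_KERNEL, &dma_addr);
 *	...
 *	dma_pool_free(pool, blk, dma_addr);	// every block, then:
 *	dma_pool_destroy(pool);
 */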
9534
9535/**
9536 * ipr_free_mem - Frees memory allocated for an adapter
9537 * @ioa_cfg: ioa cfg struct
9538 *
9539 * Return value:
9540 * nothing
9541 **/
9542static void ipr_free_mem(struct ipr_ioa_cfg *ioa_cfg)
9543{
9544 int i;
9545
9546 kfree(ioa_cfg->res_entries);
d73341bf
AB
9547 dma_free_coherent(&ioa_cfg->pdev->dev, sizeof(struct ipr_misc_cbs),
9548 ioa_cfg->vpd_cbs, ioa_cfg->vpd_cbs_dma);
1da177e4 9549 ipr_free_cmd_blks(ioa_cfg);
05a6538a 9550
9551 for (i = 0; i < ioa_cfg->hrrq_num; i++)
d73341bf
AB
9552 dma_free_coherent(&ioa_cfg->pdev->dev,
9553 sizeof(u32) * ioa_cfg->hrrq[i].size,
9554 ioa_cfg->hrrq[i].host_rrq,
9555 ioa_cfg->hrrq[i].host_rrq_dma);
05a6538a 9556
d73341bf
AB
9557 dma_free_coherent(&ioa_cfg->pdev->dev, ioa_cfg->cfg_table_size,
9558 ioa_cfg->u.cfg_table, ioa_cfg->cfg_table_dma);
1da177e4 9559
afc3f83c 9560 for (i = 0; i < IPR_MAX_HCAMS; i++) {
d73341bf
AB
9561 dma_free_coherent(&ioa_cfg->pdev->dev,
9562 sizeof(struct ipr_hostrcb),
9563 ioa_cfg->hostrcb[i],
9564 ioa_cfg->hostrcb_dma[i]);
1da177e4
LT
9565 }
9566
9567 ipr_free_dump(ioa_cfg);
1da177e4
LT
9568 kfree(ioa_cfg->trace);
9569}
9570
9571/**
2796ca5e
BK
9572 * ipr_free_irqs - Free all allocated IRQs for the adapter.
 9573 * @ioa_cfg: ioa config struct
1da177e4 9574 *
2796ca5e 9575 * This function frees all allocated IRQs for the
1da177e4
LT
9576 * specified adapter.
9577 *
9578 * Return value:
9579 * none
9580 **/
2796ca5e 9581static void ipr_free_irqs(struct ipr_ioa_cfg *ioa_cfg)
1da177e4
LT
9582{
9583 struct pci_dev *pdev = ioa_cfg->pdev;
a299ee62 9584 int i;
1da177e4 9585
a299ee62
CH
9586 for (i = 0; i < ioa_cfg->nvectors; i++)
9587 free_irq(pci_irq_vector(pdev, i), &ioa_cfg->hrrq[i]);
9588 pci_free_irq_vectors(pdev);
2796ca5e 9589}
05a6538a 9590
2796ca5e
BK
9591/**
9592 * ipr_free_all_resources - Free all allocated resources for an adapter.
 9593 * @ioa_cfg: ioa config struct
9594 *
9595 * This function frees all allocated resources for the
9596 * specified adapter.
9597 *
9598 * Return value:
9599 * none
9600 **/
9601static void ipr_free_all_resources(struct ipr_ioa_cfg *ioa_cfg)
9602{
9603 struct pci_dev *pdev = ioa_cfg->pdev;
05a6538a 9604
2796ca5e
BK
9605 ENTER;
9606 ipr_free_irqs(ioa_cfg);
9607 if (ioa_cfg->reset_work_q)
9608 destroy_workqueue(ioa_cfg->reset_work_q);
1da177e4
LT
9609 iounmap(ioa_cfg->hdw_dma_regs);
9610 pci_release_regions(pdev);
9611 ipr_free_mem(ioa_cfg);
9612 scsi_host_put(ioa_cfg->host);
9613 pci_disable_device(pdev);
9614 LEAVE;
9615}
9616
9617/**
9618 * ipr_alloc_cmd_blks - Allocate command blocks for an adapter
9619 * @ioa_cfg: ioa config struct
9620 *
9621 * Return value:
9622 * 0 on success / -ENOMEM on allocation failure
9623 **/
6f039790 9624static int ipr_alloc_cmd_blks(struct ipr_ioa_cfg *ioa_cfg)
1da177e4
LT
9625{
9626 struct ipr_cmnd *ipr_cmd;
9627 struct ipr_ioarcb *ioarcb;
9628 dma_addr_t dma_addr;
05a6538a 9629 int i, entries_each_hrrq, hrrq_id = 0;
1da177e4 9630
d73341bf 9631 ioa_cfg->ipr_cmd_pool = dma_pool_create(IPR_NAME, &ioa_cfg->pdev->dev,
203fa3fe 9632 sizeof(struct ipr_cmnd), 512, 0);
1da177e4
LT
9633
9634 if (!ioa_cfg->ipr_cmd_pool)
9635 return -ENOMEM;
9636
89aad428
BK
9637 ioa_cfg->ipr_cmnd_list = kcalloc(IPR_NUM_CMD_BLKS, sizeof(struct ipr_cmnd *), GFP_KERNEL);
9638 ioa_cfg->ipr_cmnd_list_dma = kcalloc(IPR_NUM_CMD_BLKS, sizeof(dma_addr_t), GFP_KERNEL);
9639
9640 if (!ioa_cfg->ipr_cmnd_list || !ioa_cfg->ipr_cmnd_list_dma) {
9641 ipr_free_cmd_blks(ioa_cfg);
9642 return -ENOMEM;
9643 }
9644
05a6538a 9645 for (i = 0; i < ioa_cfg->hrrq_num; i++) {
9646 if (ioa_cfg->hrrq_num > 1) {
9647 if (i == 0) {
9648 entries_each_hrrq = IPR_NUM_INTERNAL_CMD_BLKS;
9649 ioa_cfg->hrrq[i].min_cmd_id = 0;
b82378e6
CIK
9650 ioa_cfg->hrrq[i].max_cmd_id =
9651 (entries_each_hrrq - 1);
05a6538a 9652 } else {
9653 entries_each_hrrq =
9654 IPR_NUM_BASE_CMD_BLKS/
9655 (ioa_cfg->hrrq_num - 1);
9656 ioa_cfg->hrrq[i].min_cmd_id =
9657 IPR_NUM_INTERNAL_CMD_BLKS +
9658 (i - 1) * entries_each_hrrq;
9659 ioa_cfg->hrrq[i].max_cmd_id =
9660 (IPR_NUM_INTERNAL_CMD_BLKS +
9661 i * entries_each_hrrq - 1);
9662 }
9663 } else {
9664 entries_each_hrrq = IPR_NUM_CMD_BLKS;
9665 ioa_cfg->hrrq[i].min_cmd_id = 0;
9666 ioa_cfg->hrrq[i].max_cmd_id = (entries_each_hrrq - 1);
9667 }
9668 ioa_cfg->hrrq[i].size = entries_each_hrrq;
9669 }
9670
9671 BUG_ON(ioa_cfg->hrrq_num == 0);
9672
9673 i = IPR_NUM_CMD_BLKS -
9674 ioa_cfg->hrrq[ioa_cfg->hrrq_num - 1].max_cmd_id - 1;
9675 if (i > 0) {
9676 ioa_cfg->hrrq[ioa_cfg->hrrq_num - 1].size += i;
9677 ioa_cfg->hrrq[ioa_cfg->hrrq_num - 1].max_cmd_id += i;
9678 }
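/*
 * Worked example of the split above, with illustrative values only
 * (the real IPR_NUM_* constants differ): if IPR_NUM_INTERNAL_CMD_BLKS
 * were 5, IPR_NUM_BASE_CMD_BLKS were 100 and hrrq_num were 3, hrrq[0]
 * would own command ids 0-4, hrrq[1] ids 5-54 and hrrq[2] ids 55-104;
 * any remainder of IPR_NUM_CMD_BLKS is folded into the last hrrq here.
 */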
9679
1da177e4 9680 for (i = 0; i < IPR_NUM_CMD_BLKS; i++) {
8b1bb6dc
SJ
9681 ipr_cmd = dma_pool_zalloc(ioa_cfg->ipr_cmd_pool,
9682 GFP_KERNEL, &dma_addr);
1da177e4
LT
9683
9684 if (!ipr_cmd) {
9685 ipr_free_cmd_blks(ioa_cfg);
9686 return -ENOMEM;
9687 }
9688
1da177e4
LT
9689 ioa_cfg->ipr_cmnd_list[i] = ipr_cmd;
9690 ioa_cfg->ipr_cmnd_list_dma[i] = dma_addr;
9691
9692 ioarcb = &ipr_cmd->ioarcb;
a32c055f
WB
9693 ipr_cmd->dma_addr = dma_addr;
9694 if (ioa_cfg->sis64)
9695 ioarcb->a.ioarcb_host_pci_addr64 = cpu_to_be64(dma_addr);
9696 else
9697 ioarcb->a.ioarcb_host_pci_addr = cpu_to_be32(dma_addr);
9698
1da177e4 9699 ioarcb->host_response_handle = cpu_to_be32(i << 2);
a32c055f
WB
9700 if (ioa_cfg->sis64) {
9701 ioarcb->u.sis64_addr_data.data_ioadl_addr =
9702 cpu_to_be64(dma_addr + offsetof(struct ipr_cmnd, i.ioadl64));
9703 ioarcb->u.sis64_addr_data.ioasa_host_pci_addr =
96d21f00 9704 cpu_to_be64(dma_addr + offsetof(struct ipr_cmnd, s.ioasa64));
a32c055f
WB
9705 } else {
9706 ioarcb->write_ioadl_addr =
9707 cpu_to_be32(dma_addr + offsetof(struct ipr_cmnd, i.ioadl));
9708 ioarcb->read_ioadl_addr = ioarcb->write_ioadl_addr;
9709 ioarcb->ioasa_host_pci_addr =
96d21f00 9710 cpu_to_be32(dma_addr + offsetof(struct ipr_cmnd, s.ioasa));
a32c055f 9711 }
1da177e4
LT
9712 ioarcb->ioasa_len = cpu_to_be16(sizeof(struct ipr_ioasa));
9713 ipr_cmd->cmd_index = i;
9714 ipr_cmd->ioa_cfg = ioa_cfg;
9715 ipr_cmd->sense_buffer_dma = dma_addr +
9716 offsetof(struct ipr_cmnd, sense_buffer);
9717
05a6538a 9718 ipr_cmd->ioarcb.cmd_pkt.hrrq_id = hrrq_id;
9719 ipr_cmd->hrrq = &ioa_cfg->hrrq[hrrq_id];
9720 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
9721 if (i >= ioa_cfg->hrrq[hrrq_id].max_cmd_id)
9722 hrrq_id++;
1da177e4
LT
9723 }
9724
9725 return 0;
9726}
9727
9728/**
9729 * ipr_alloc_mem - Allocate memory for an adapter
9730 * @ioa_cfg: ioa config struct
9731 *
9732 * Return value:
9733 * 0 on success / non-zero for error
9734 **/
6f039790 9735static int ipr_alloc_mem(struct ipr_ioa_cfg *ioa_cfg)
1da177e4
LT
9736{
9737 struct pci_dev *pdev = ioa_cfg->pdev;
9738 int i, rc = -ENOMEM;
9739
9740 ENTER;
6396bb22
KC
9741 ioa_cfg->res_entries = kcalloc(ioa_cfg->max_devs_supported,
9742 sizeof(struct ipr_resource_entry),
9743 GFP_KERNEL);
1da177e4
LT
9744
9745 if (!ioa_cfg->res_entries)
9746 goto out;
9747
3e7ebdfa 9748 for (i = 0; i < ioa_cfg->max_devs_supported; i++) {
1da177e4 9749 list_add_tail(&ioa_cfg->res_entries[i].queue, &ioa_cfg->free_res_q);
3e7ebdfa
WB
9750 ioa_cfg->res_entries[i].ioa_cfg = ioa_cfg;
9751 }
1da177e4 9752
d73341bf
AB
9753 ioa_cfg->vpd_cbs = dma_alloc_coherent(&pdev->dev,
9754 sizeof(struct ipr_misc_cbs),
9755 &ioa_cfg->vpd_cbs_dma,
9756 GFP_KERNEL);
1da177e4
LT
9757
9758 if (!ioa_cfg->vpd_cbs)
9759 goto out_free_res_entries;
9760
9761 if (ipr_alloc_cmd_blks(ioa_cfg))
9762 goto out_free_vpd_cbs;
9763
05a6538a 9764 for (i = 0; i < ioa_cfg->hrrq_num; i++) {
d73341bf 9765 ioa_cfg->hrrq[i].host_rrq = dma_alloc_coherent(&pdev->dev,
05a6538a 9766 sizeof(u32) * ioa_cfg->hrrq[i].size,
d73341bf
AB
9767 &ioa_cfg->hrrq[i].host_rrq_dma,
9768 GFP_KERNEL);
05a6538a 9769
9770 if (!ioa_cfg->hrrq[i].host_rrq) {
 9771 while (--i >= 0)
d73341bf 9772 dma_free_coherent(&pdev->dev,
05a6538a 9773 sizeof(u32) * ioa_cfg->hrrq[i].size,
9774 ioa_cfg->hrrq[i].host_rrq,
9775 ioa_cfg->hrrq[i].host_rrq_dma);
9776 goto out_ipr_free_cmd_blocks;
9777 }
9778 ioa_cfg->hrrq[i].ioa_cfg = ioa_cfg;
9779 }
1da177e4 9780
d73341bf
AB
9781 ioa_cfg->u.cfg_table = dma_alloc_coherent(&pdev->dev,
9782 ioa_cfg->cfg_table_size,
9783 &ioa_cfg->cfg_table_dma,
9784 GFP_KERNEL);
1da177e4 9785
3e7ebdfa 9786 if (!ioa_cfg->u.cfg_table)
1da177e4
LT
9787 goto out_free_host_rrq;
9788
afc3f83c 9789 for (i = 0; i < IPR_MAX_HCAMS; i++) {
d73341bf
AB
9790 ioa_cfg->hostrcb[i] = dma_alloc_coherent(&pdev->dev,
9791 sizeof(struct ipr_hostrcb),
9792 &ioa_cfg->hostrcb_dma[i],
9793 GFP_KERNEL);
1da177e4
LT
9794
9795 if (!ioa_cfg->hostrcb[i])
9796 goto out_free_hostrcb_dma;
9797
9798 ioa_cfg->hostrcb[i]->hostrcb_dma =
9799 ioa_cfg->hostrcb_dma[i] + offsetof(struct ipr_hostrcb, hcam);
49dc6a18 9800 ioa_cfg->hostrcb[i]->ioa_cfg = ioa_cfg;
1da177e4
LT
9801 list_add_tail(&ioa_cfg->hostrcb[i]->queue, &ioa_cfg->hostrcb_free_q);
9802 }
9803
6396bb22
KC
9804 ioa_cfg->trace = kcalloc(IPR_NUM_TRACE_ENTRIES,
9805 sizeof(struct ipr_trace_entry),
9806 GFP_KERNEL);
1da177e4
LT
9807
9808 if (!ioa_cfg->trace)
9809 goto out_free_hostrcb_dma;
9810
1da177e4
LT
9811 rc = 0;
9812out:
9813 LEAVE;
9814 return rc;
9815
9816out_free_hostrcb_dma:
9817 while (i-- > 0) {
d73341bf
AB
9818 dma_free_coherent(&pdev->dev, sizeof(struct ipr_hostrcb),
9819 ioa_cfg->hostrcb[i],
9820 ioa_cfg->hostrcb_dma[i]);
1da177e4 9821 }
d73341bf
AB
9822 dma_free_coherent(&pdev->dev, ioa_cfg->cfg_table_size,
9823 ioa_cfg->u.cfg_table, ioa_cfg->cfg_table_dma);
1da177e4 9824out_free_host_rrq:
05a6538a 9825 for (i = 0; i < ioa_cfg->hrrq_num; i++) {
d73341bf
AB
9826 dma_free_coherent(&pdev->dev,
9827 sizeof(u32) * ioa_cfg->hrrq[i].size,
9828 ioa_cfg->hrrq[i].host_rrq,
9829 ioa_cfg->hrrq[i].host_rrq_dma);
05a6538a 9830 }
1da177e4
LT
9831out_ipr_free_cmd_blocks:
9832 ipr_free_cmd_blks(ioa_cfg);
9833out_free_vpd_cbs:
d73341bf
AB
9834 dma_free_coherent(&pdev->dev, sizeof(struct ipr_misc_cbs),
9835 ioa_cfg->vpd_cbs, ioa_cfg->vpd_cbs_dma);
1da177e4
LT
9836out_free_res_entries:
9837 kfree(ioa_cfg->res_entries);
9838 goto out;
9839}
9840
9841/**
9842 * ipr_initialize_bus_attr - Initialize SCSI bus attributes to default values
9843 * @ioa_cfg: ioa config struct
9844 *
9845 * Return value:
9846 * none
9847 **/
6f039790 9848static void ipr_initialize_bus_attr(struct ipr_ioa_cfg *ioa_cfg)
1da177e4
LT
9849{
9850 int i;
9851
9852 for (i = 0; i < IPR_MAX_NUM_BUSES; i++) {
9853 ioa_cfg->bus_attr[i].bus = i;
9854 ioa_cfg->bus_attr[i].qas_enabled = 0;
9855 ioa_cfg->bus_attr[i].bus_width = IPR_DEFAULT_BUS_WIDTH;
9856 if (ipr_max_speed < ARRAY_SIZE(ipr_max_bus_speeds))
9857 ioa_cfg->bus_attr[i].max_xfer_rate = ipr_max_bus_speeds[ipr_max_speed];
9858 else
9859 ioa_cfg->bus_attr[i].max_xfer_rate = IPR_U160_SCSI_RATE;
9860 }
9861}
9862
6270e593
BK
9863/**
9864 * ipr_init_regs - Initialize IOA registers
9865 * @ioa_cfg: ioa config struct
9866 *
9867 * Return value:
9868 * none
9869 **/
9870static void ipr_init_regs(struct ipr_ioa_cfg *ioa_cfg)
9871{
9872 const struct ipr_interrupt_offsets *p;
9873 struct ipr_interrupts *t;
9874 void __iomem *base;
9875
9876 p = &ioa_cfg->chip_cfg->regs;
9877 t = &ioa_cfg->regs;
9878 base = ioa_cfg->hdw_dma_regs;
9879
9880 t->set_interrupt_mask_reg = base + p->set_interrupt_mask_reg;
9881 t->clr_interrupt_mask_reg = base + p->clr_interrupt_mask_reg;
9882 t->clr_interrupt_mask_reg32 = base + p->clr_interrupt_mask_reg32;
9883 t->sense_interrupt_mask_reg = base + p->sense_interrupt_mask_reg;
9884 t->sense_interrupt_mask_reg32 = base + p->sense_interrupt_mask_reg32;
9885 t->clr_interrupt_reg = base + p->clr_interrupt_reg;
9886 t->clr_interrupt_reg32 = base + p->clr_interrupt_reg32;
9887 t->sense_interrupt_reg = base + p->sense_interrupt_reg;
9888 t->sense_interrupt_reg32 = base + p->sense_interrupt_reg32;
9889 t->ioarrin_reg = base + p->ioarrin_reg;
9890 t->sense_uproc_interrupt_reg = base + p->sense_uproc_interrupt_reg;
9891 t->sense_uproc_interrupt_reg32 = base + p->sense_uproc_interrupt_reg32;
9892 t->set_uproc_interrupt_reg = base + p->set_uproc_interrupt_reg;
9893 t->set_uproc_interrupt_reg32 = base + p->set_uproc_interrupt_reg32;
9894 t->clr_uproc_interrupt_reg = base + p->clr_uproc_interrupt_reg;
9895 t->clr_uproc_interrupt_reg32 = base + p->clr_uproc_interrupt_reg32;
9896
9897 if (ioa_cfg->sis64) {
9898 t->init_feedback_reg = base + p->init_feedback_reg;
9899 t->dump_addr_reg = base + p->dump_addr_reg;
9900 t->dump_data_reg = base + p->dump_data_reg;
9901 t->endian_swap_reg = base + p->endian_swap_reg;
9902 }
9903}
9904
1da177e4
LT
9905/**
9906 * ipr_init_ioa_cfg - Initialize IOA config struct
9907 * @ioa_cfg: ioa config struct
9908 * @host: scsi host struct
9909 * @pdev: PCI dev struct
9910 *
9911 * Return value:
9912 * none
9913 **/
6f039790
GKH
9914static void ipr_init_ioa_cfg(struct ipr_ioa_cfg *ioa_cfg,
9915 struct Scsi_Host *host, struct pci_dev *pdev)
1da177e4 9916{
6270e593 9917 int i;
1da177e4
LT
9918
9919 ioa_cfg->host = host;
9920 ioa_cfg->pdev = pdev;
9921 ioa_cfg->log_level = ipr_log_level;
3d1d0da6 9922 ioa_cfg->doorbell = IPR_DOORBELL;
1da177e4
LT
9923 sprintf(ioa_cfg->eye_catcher, IPR_EYECATCHER);
9924 sprintf(ioa_cfg->trace_start, IPR_TRACE_START_LABEL);
1da177e4
LT
9925 sprintf(ioa_cfg->cfg_table_start, IPR_CFG_TBL_START);
9926 sprintf(ioa_cfg->resource_table_label, IPR_RES_TABLE_LABEL);
9927 sprintf(ioa_cfg->ipr_hcam_label, IPR_HCAM_LABEL);
9928 sprintf(ioa_cfg->ipr_cmd_label, IPR_CMD_LABEL);
9929
1da177e4
LT
9930 INIT_LIST_HEAD(&ioa_cfg->hostrcb_free_q);
9931 INIT_LIST_HEAD(&ioa_cfg->hostrcb_pending_q);
afc3f83c 9932 INIT_LIST_HEAD(&ioa_cfg->hostrcb_report_q);
1da177e4
LT
9933 INIT_LIST_HEAD(&ioa_cfg->free_res_q);
9934 INIT_LIST_HEAD(&ioa_cfg->used_res_q);
c4028958 9935 INIT_WORK(&ioa_cfg->work_q, ipr_worker_thread);
1da177e4 9936 init_waitqueue_head(&ioa_cfg->reset_wait_q);
95fecd90 9937 init_waitqueue_head(&ioa_cfg->msi_wait_q);
6270e593 9938 init_waitqueue_head(&ioa_cfg->eeh_wait_q);
1da177e4
LT
9939 ioa_cfg->sdt_state = INACTIVE;
9940
9941 ipr_initialize_bus_attr(ioa_cfg);
3e7ebdfa 9942 ioa_cfg->max_devs_supported = ipr_max_devs;
1da177e4 9943
3e7ebdfa
WB
9944 if (ioa_cfg->sis64) {
9945 host->max_id = IPR_MAX_SIS64_TARGETS_PER_BUS;
9946 host->max_lun = IPR_MAX_SIS64_LUNS_PER_TARGET;
9947 if (ipr_max_devs > IPR_MAX_SIS64_DEVS)
9948 ioa_cfg->max_devs_supported = IPR_MAX_SIS64_DEVS;
6270e593
BK
9949 ioa_cfg->cfg_table_size = (sizeof(struct ipr_config_table_hdr64)
9950 + ((sizeof(struct ipr_config_table_entry64)
9951 * ioa_cfg->max_devs_supported)));
3e7ebdfa
WB
9952 } else {
9953 host->max_id = IPR_MAX_NUM_TARGETS_PER_BUS;
9954 host->max_lun = IPR_MAX_NUM_LUNS_PER_TARGET;
9955 if (ipr_max_devs > IPR_MAX_PHYSICAL_DEVS)
9956 ioa_cfg->max_devs_supported = IPR_MAX_PHYSICAL_DEVS;
6270e593
BK
9957 ioa_cfg->cfg_table_size = (sizeof(struct ipr_config_table_hdr)
9958 + ((sizeof(struct ipr_config_table_entry)
9959 * ioa_cfg->max_devs_supported)));
3e7ebdfa 9960 }
6270e593 9961
f688f96d 9962 host->max_channel = IPR_VSET_BUS;
1da177e4
LT
9963 host->unique_id = host->host_no;
9964 host->max_cmd_len = IPR_MAX_CDB_LEN;
89aad428 9965 host->can_queue = ioa_cfg->max_cmds;
1da177e4
LT
9966 pci_set_drvdata(pdev, ioa_cfg);
9967
6270e593
BK
9968 for (i = 0; i < ARRAY_SIZE(ioa_cfg->hrrq); i++) {
9969 INIT_LIST_HEAD(&ioa_cfg->hrrq[i].hrrq_free_q);
9970 INIT_LIST_HEAD(&ioa_cfg->hrrq[i].hrrq_pending_q);
9971 spin_lock_init(&ioa_cfg->hrrq[i]._lock);
9972 if (i == 0)
9973 ioa_cfg->hrrq[i].lock = ioa_cfg->host->host_lock;
9974 else
9975 ioa_cfg->hrrq[i].lock = &ioa_cfg->hrrq[i]._lock;
dcbad00e 9976 }
1da177e4
LT
9977}
9978
9979/**
1be7bd82 9980 * ipr_get_chip_info - Find adapter chip information
1da177e4
LT
9981 * @dev_id: PCI device id struct
9982 *
9983 * Return value:
1be7bd82 9984 * ptr to chip information on success / NULL on failure
1da177e4 9985 **/
6f039790 9986static const struct ipr_chip_t *
1be7bd82 9987ipr_get_chip_info(const struct pci_device_id *dev_id)
1da177e4
LT
9988{
9989 int i;
9990
1da177e4
LT
9991 for (i = 0; i < ARRAY_SIZE(ipr_chip); i++)
9992 if (ipr_chip[i].vendor == dev_id->vendor &&
9993 ipr_chip[i].device == dev_id->device)
1be7bd82 9994 return &ipr_chip[i];
1da177e4
LT
9995 return NULL;
9996}
9997
6270e593
BK
9998/**
9999 * ipr_wait_for_pci_err_recovery - Wait for any PCI error recovery to complete
10000 * during probe time
10001 * @ioa_cfg: ioa config struct
10002 *
10003 * Return value:
10004 * None
10005 **/
10006static void ipr_wait_for_pci_err_recovery(struct ipr_ioa_cfg *ioa_cfg)
10007{
10008 struct pci_dev *pdev = ioa_cfg->pdev;
10009
10010 if (pci_channel_offline(pdev)) {
10011 wait_event_timeout(ioa_cfg->eeh_wait_q,
10012 !pci_channel_offline(pdev),
10013 IPR_PCI_ERROR_RECOVERY_TIMEOUT);
10014 pci_restore_state(pdev);
10015 }
10016}
10017
05a6538a 10018static void name_msi_vectors(struct ipr_ioa_cfg *ioa_cfg)
10019{
10020 int vec_idx, n = sizeof(ioa_cfg->vectors_info[0].desc) - 1;
10021
10022 for (vec_idx = 0; vec_idx < ioa_cfg->nvectors; vec_idx++) {
10023 snprintf(ioa_cfg->vectors_info[vec_idx].desc, n,
10024 "host%d-%d", ioa_cfg->host->host_no, vec_idx);
10025 ioa_cfg->vectors_info[vec_idx].
10026 desc[strlen(ioa_cfg->vectors_info[vec_idx].desc)] = 0;
10027 }
10028}
10029
a299ee62
CH
10030static int ipr_request_other_msi_irqs(struct ipr_ioa_cfg *ioa_cfg,
10031 struct pci_dev *pdev)
05a6538a 10032{
10033 int i, rc;
10034
10035 for (i = 1; i < ioa_cfg->nvectors; i++) {
a299ee62 10036 rc = request_irq(pci_irq_vector(pdev, i),
05a6538a 10037 ipr_isr_mhrrq,
10038 0,
10039 ioa_cfg->vectors_info[i].desc,
10040 &ioa_cfg->hrrq[i]);
10041 if (rc) {
10042 while (--i >= 0)
a299ee62 10043 free_irq(pci_irq_vector(pdev, i),
05a6538a 10044 &ioa_cfg->hrrq[i]);
10045 return rc;
10046 }
10047 }
10048 return 0;
10049}
10050
95fecd90
WB
10051/**
10052 * ipr_test_intr - Handle the interrupt generated in ipr_test_msi().
 10053 * @irq: interrupt number
 * @devp: ioa config struct pointer
10054 *
 10055 * Description: Simply sets the msi_received flag to 1, indicating that
10056 * Message Signaled Interrupts are supported.
10057 *
10058 * Return value:
 10059 * IRQ_HANDLED
10060 **/
6f039790 10061static irqreturn_t ipr_test_intr(int irq, void *devp)
95fecd90
WB
10062{
10063 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)devp;
10064 unsigned long lock_flags = 0;
10065 irqreturn_t rc = IRQ_HANDLED;
10066
05a6538a 10067 dev_info(&ioa_cfg->pdev->dev, "Received IRQ : %d\n", irq);
95fecd90
WB
10068 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
10069
10070 ioa_cfg->msi_received = 1;
10071 wake_up(&ioa_cfg->msi_wait_q);
10072
10073 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
10074 return rc;
10075}
10076
10077/**
10078 * ipr_test_msi - Test for Message Signaled Interrupt (MSI) support.
 10079 * @ioa_cfg: ioa config struct
 * @pdev: PCI device struct
10080 *
a299ee62 10081 * Description: This routine sets up and initiates a test interrupt to determine
95fecd90
WB
10082 * if the interrupt is received via the ipr_test_intr() service routine.
10083 * If the tests fails, the driver will fall back to LSI.
10084 *
10085 * Return value:
10086 * 0 on success / non-zero on failure
10087 **/
static int ipr_test_msi(struct ipr_ioa_cfg *ioa_cfg, struct pci_dev *pdev)
{
	int rc;
	volatile u32 int_reg;
	unsigned long lock_flags = 0;
	int irq = pci_irq_vector(pdev, 0);

	ENTER;

	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	init_waitqueue_head(&ioa_cfg->msi_wait_q);
	ioa_cfg->msi_received = 0;
	ipr_mask_and_clear_interrupts(ioa_cfg, ~IPR_PCII_IOA_TRANS_TO_OPER);
	writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE, ioa_cfg->regs.clr_interrupt_mask_reg32);
	int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);

	rc = request_irq(irq, ipr_test_intr, 0, IPR_NAME, ioa_cfg);
	if (rc) {
		dev_err(&pdev->dev, "Can not assign irq %d\n", irq);
		return rc;
	} else if (ipr_debug)
		dev_info(&pdev->dev, "IRQ assigned: %d\n", irq);

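	/* Raise the IO debug acknowledge interrupt. If MSI delivery works,
	 * ipr_test_intr() fires and sets msi_received before the one second
	 * wait below times out. */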
	writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE, ioa_cfg->regs.sense_interrupt_reg32);
	int_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
	wait_event_timeout(ioa_cfg->msi_wait_q, ioa_cfg->msi_received, HZ);
	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	ipr_mask_and_clear_interrupts(ioa_cfg, ~IPR_PCII_IOA_TRANS_TO_OPER);

	if (!ioa_cfg->msi_received) {
		/* MSI test failed */
		dev_info(&pdev->dev, "MSI test failed. Falling back to LSI.\n");
		rc = -EOPNOTSUPP;
	} else if (ipr_debug)
		dev_info(&pdev->dev, "MSI test succeeded.\n");

	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);

	free_irq(irq, ioa_cfg);

	LEAVE;

	return rc;
}

/**
 * ipr_probe_ioa - Allocates memory and does first stage of initialization
 * @pdev:	PCI device struct
 * @dev_id:	PCI device id struct
 *
 * Return value:
 *	0 on success / non-zero on failure
 **/
static int ipr_probe_ioa(struct pci_dev *pdev,
			 const struct pci_device_id *dev_id)
{
	struct ipr_ioa_cfg *ioa_cfg;
	struct Scsi_Host *host;
	unsigned long ipr_regs_pci;
	void __iomem *ipr_regs;
	int rc = PCIBIOS_SUCCESSFUL;
	volatile u32 mask, uproc, interrupts;
	unsigned long lock_flags, driver_lock_flags;
	unsigned int irq_flag;

	ENTER;

	dev_info(&pdev->dev, "Found IOA with IRQ: %d\n", pdev->irq);
	host = scsi_host_alloc(&driver_template, sizeof(*ioa_cfg));

	if (!host) {
		dev_err(&pdev->dev, "call to scsi_host_alloc failed!\n");
		rc = -ENOMEM;
		goto out;
	}

	ioa_cfg = (struct ipr_ioa_cfg *)host->hostdata;
	memset(ioa_cfg, 0, sizeof(struct ipr_ioa_cfg));
	ata_host_init(&ioa_cfg->ata_host, &pdev->dev, &ipr_sata_ops);

	ioa_cfg->ipr_chip = ipr_get_chip_info(dev_id);

	if (!ioa_cfg->ipr_chip) {
		dev_err(&pdev->dev, "Unknown adapter chipset 0x%04X 0x%04X\n",
			dev_id->vendor, dev_id->device);
		goto out_scsi_host_put;
	}

	/* set SIS 32 or SIS 64 */
	ioa_cfg->sis64 = ioa_cfg->ipr_chip->sis_type == IPR_SIS64 ? 1 : 0;
	ioa_cfg->chip_cfg = ioa_cfg->ipr_chip->cfg;
	ioa_cfg->clear_isr = ioa_cfg->chip_cfg->clear_isr;
	ioa_cfg->max_cmds = ioa_cfg->chip_cfg->max_cmds;

	if (ipr_transop_timeout)
		ioa_cfg->transop_timeout = ipr_transop_timeout;
	else if (dev_id->driver_data & IPR_USE_LONG_TRANSOP_TIMEOUT)
		ioa_cfg->transop_timeout = IPR_LONG_OPERATIONAL_TIMEOUT;
	else
		ioa_cfg->transop_timeout = IPR_OPERATIONAL_TIMEOUT;

	ioa_cfg->revid = pdev->revision;

	ipr_init_ioa_cfg(ioa_cfg, host, pdev);

	ipr_regs_pci = pci_resource_start(pdev, 0);

	rc = pci_request_regions(pdev, IPR_NAME);
	if (rc < 0) {
		dev_err(&pdev->dev,
			"Couldn't register memory range of registers\n");
		goto out_scsi_host_put;
	}

	rc = pci_enable_device(pdev);

	if (rc || pci_channel_offline(pdev)) {
		if (pci_channel_offline(pdev)) {
			ipr_wait_for_pci_err_recovery(ioa_cfg);
			rc = pci_enable_device(pdev);
		}

		if (rc) {
			dev_err(&pdev->dev, "Cannot enable adapter\n");
			ipr_wait_for_pci_err_recovery(ioa_cfg);
			goto out_release_regions;
		}
	}

	ipr_regs = pci_ioremap_bar(pdev, 0);

	if (!ipr_regs) {
		dev_err(&pdev->dev,
			"Couldn't map memory range of registers\n");
		rc = -ENOMEM;
		goto out_disable;
	}

	ioa_cfg->hdw_dma_regs = ipr_regs;
	ioa_cfg->hdw_dma_regs_pci = ipr_regs_pci;
	ioa_cfg->ioa_mailbox = ioa_cfg->chip_cfg->mailbox + ipr_regs;

	ipr_init_regs(ioa_cfg);

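	/* SIS-64 adapters support 64-bit DMA; fall back to a 32-bit mask if
	 * the 64-bit mask cannot be set. */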
	if (ioa_cfg->sis64) {
		rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
		if (rc < 0) {
			dev_dbg(&pdev->dev, "Failed to set 64 bit DMA mask\n");
			rc = dma_set_mask_and_coherent(&pdev->dev,
						       DMA_BIT_MASK(32));
		}
	} else
		rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));

	if (rc < 0) {
		dev_err(&pdev->dev, "Failed to set DMA mask\n");
		goto cleanup_nomem;
	}

	rc = pci_write_config_byte(pdev, PCI_CACHE_LINE_SIZE,
				   ioa_cfg->chip_cfg->cache_line_size);

	if (rc != PCIBIOS_SUCCESSFUL) {
		dev_err(&pdev->dev, "Write of cache line size failed\n");
		ipr_wait_for_pci_err_recovery(ioa_cfg);
		rc = -EIO;
		goto cleanup_nomem;
	}

	/* Issue MMIO read to ensure card is not in EEH */
	interrupts = readl(ioa_cfg->regs.sense_interrupt_reg);
	ipr_wait_for_pci_err_recovery(ioa_cfg);

	if (ipr_number_of_msix > IPR_MAX_MSIX_VECTORS) {
		dev_err(&pdev->dev, "The max number of MSIX is %d\n",
			IPR_MAX_MSIX_VECTORS);
		ipr_number_of_msix = IPR_MAX_MSIX_VECTORS;
	}

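	/* Prefer MSI-X/MSI when the chip supports it, falling back to a
	 * single legacy INTx vector; pci_alloc_irq_vectors() returns the
	 * number of vectors actually allocated. */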
	irq_flag = PCI_IRQ_LEGACY;
	if (ioa_cfg->ipr_chip->has_msi)
		irq_flag |= PCI_IRQ_MSI | PCI_IRQ_MSIX;
	rc = pci_alloc_irq_vectors(pdev, 1, ipr_number_of_msix, irq_flag);
	if (rc < 0) {
		ipr_wait_for_pci_err_recovery(ioa_cfg);
		goto cleanup_nomem;
	}
	ioa_cfg->nvectors = rc;

	if (!pdev->msi_enabled && !pdev->msix_enabled)
		ioa_cfg->clear_isr = 1;

	pci_set_master(pdev);

	if (pci_channel_offline(pdev)) {
		ipr_wait_for_pci_err_recovery(ioa_cfg);
		pci_set_master(pdev);
		if (pci_channel_offline(pdev)) {
			rc = -EIO;
			goto out_msi_disable;
		}
	}

	if (pdev->msi_enabled || pdev->msix_enabled) {
		rc = ipr_test_msi(ioa_cfg, pdev);
		switch (rc) {
		case 0:
			dev_info(&pdev->dev,
				 "Request for %d MSI%ss succeeded.", ioa_cfg->nvectors,
				 pdev->msix_enabled ? "-X" : "");
			break;
		case -EOPNOTSUPP:
			ipr_wait_for_pci_err_recovery(ioa_cfg);
			pci_free_irq_vectors(pdev);

			ioa_cfg->nvectors = 1;
			ioa_cfg->clear_isr = 1;
			break;
		default:
			goto out_msi_disable;
		}
	}

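	/* Use no more HRRQs than interrupt vectors, online CPUs, or the
	 * IPR_MAX_HRRQ_NUM limit allow. */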
	ioa_cfg->hrrq_num = min3(ioa_cfg->nvectors,
				 (unsigned int)num_online_cpus(),
				 (unsigned int)IPR_MAX_HRRQ_NUM);

	if ((rc = ipr_save_pcix_cmd_reg(ioa_cfg)))
		goto out_msi_disable;

	if ((rc = ipr_set_pcix_cmd_reg(ioa_cfg)))
		goto out_msi_disable;

	rc = ipr_alloc_mem(ioa_cfg);
	if (rc < 0) {
		dev_err(&pdev->dev,
			"Couldn't allocate enough memory for device driver!\n");
		goto out_msi_disable;
	}

	/* Save away PCI config space for use following IOA reset */
	rc = pci_save_state(pdev);

	if (rc != PCIBIOS_SUCCESSFUL) {
		dev_err(&pdev->dev, "Failed to save PCI config space\n");
		rc = -EIO;
		goto cleanup_nolog;
	}

	/*
	 * If HRRQ updated interrupt is not masked, or reset alert is set,
	 * the card is in an unknown state and needs a hard reset
	 */
	mask = readl(ioa_cfg->regs.sense_interrupt_mask_reg32);
	interrupts = readl(ioa_cfg->regs.sense_interrupt_reg32);
	uproc = readl(ioa_cfg->regs.sense_uproc_interrupt_reg32);
	if ((mask & IPR_PCII_HRRQ_UPDATED) == 0 || (uproc & IPR_UPROCI_RESET_ALERT))
		ioa_cfg->needs_hard_reset = 1;
	if ((interrupts & IPR_PCII_ERROR_INTERRUPTS) || reset_devices)
		ioa_cfg->needs_hard_reset = 1;
	if (interrupts & IPR_PCII_IOA_UNIT_CHECKED)
		ioa_cfg->ioa_unit_checked = 1;

	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	ipr_mask_and_clear_interrupts(ioa_cfg, ~IPR_PCII_IOA_TRANS_TO_OPER);
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);

	if (pdev->msi_enabled || pdev->msix_enabled) {
		name_msi_vectors(ioa_cfg);
		rc = request_irq(pci_irq_vector(pdev, 0), ipr_isr, 0,
				 ioa_cfg->vectors_info[0].desc,
				 &ioa_cfg->hrrq[0]);
		if (!rc)
			rc = ipr_request_other_msi_irqs(ioa_cfg, pdev);
	} else {
		rc = request_irq(pdev->irq, ipr_isr,
				 IRQF_SHARED,
				 IPR_NAME, &ioa_cfg->hrrq[0]);
	}
	if (rc) {
		dev_err(&pdev->dev, "Couldn't register IRQ %d! rc=%d\n",
			pdev->irq, rc);
		goto cleanup_nolog;
	}

	if ((dev_id->driver_data & IPR_USE_PCI_WARM_RESET) ||
	    (dev_id->device == PCI_DEVICE_ID_IBM_OBSIDIAN_E && !ioa_cfg->revid)) {
		ioa_cfg->needs_warm_reset = 1;
		ioa_cfg->reset = ipr_reset_slot_reset;

		ioa_cfg->reset_work_q = alloc_ordered_workqueue("ipr_reset_%d",
								WQ_MEM_RECLAIM, host->host_no);

		if (!ioa_cfg->reset_work_q) {
			dev_err(&pdev->dev, "Couldn't register reset workqueue\n");
			rc = -ENOMEM;
			goto out_free_irq;
		}
	} else
		ioa_cfg->reset = ipr_reset_start_bist;

	spin_lock_irqsave(&ipr_driver_lock, driver_lock_flags);
	list_add_tail(&ioa_cfg->queue, &ipr_ioa_head);
	spin_unlock_irqrestore(&ipr_driver_lock, driver_lock_flags);

	LEAVE;
out:
	return rc;

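	/* Error unwind: each label below releases what was acquired after
	 * the previous one, in reverse order of acquisition. */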
out_free_irq:
	ipr_free_irqs(ioa_cfg);
cleanup_nolog:
	ipr_free_mem(ioa_cfg);
out_msi_disable:
	ipr_wait_for_pci_err_recovery(ioa_cfg);
	pci_free_irq_vectors(pdev);
cleanup_nomem:
	iounmap(ipr_regs);
out_disable:
	pci_disable_device(pdev);
out_release_regions:
	pci_release_regions(pdev);
out_scsi_host_put:
	scsi_host_put(host);
	goto out;
}

/**
 * ipr_initiate_ioa_bringdown - Bring down an adapter
 * @ioa_cfg:		ioa config struct
 * @shutdown_type:	shutdown type
 *
 * Description: This function will initiate bringing down the adapter.
 * This consists of issuing an IOA shutdown to the adapter
 * to flush the cache, and running BIST.
 * If the caller needs to wait on the completion of the reset,
 * the caller must sleep on the reset_wait_q.
 *
 * Return value:
 *	none
 **/
static void ipr_initiate_ioa_bringdown(struct ipr_ioa_cfg *ioa_cfg,
				       enum ipr_shutdown_type shutdown_type)
{
	ENTER;
	if (ioa_cfg->sdt_state == WAIT_FOR_DUMP)
		ioa_cfg->sdt_state = ABORT_DUMP;
	ioa_cfg->reset_retries = 0;
	ioa_cfg->in_ioa_bringdown = 1;
	ipr_initiate_ioa_reset(ioa_cfg, shutdown_type);
	LEAVE;
}

/**
 * __ipr_remove - Remove a single adapter
 * @pdev:	pci device struct
 *
 * Adapter hot plug remove entry point.
 *
 * Return value:
 *	none
 **/
static void __ipr_remove(struct pci_dev *pdev)
{
	unsigned long host_lock_flags = 0;
	struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
	int i;
	unsigned long driver_lock_flags;
	ENTER;

	spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);
	while (ioa_cfg->in_reset_reload) {
		spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);
		wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
		spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);
	}

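	/* Mark every HRRQ as being removed so no new commands are queued,
	 * and make the flags visible before initiating the bringdown. */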
	for (i = 0; i < ioa_cfg->hrrq_num; i++) {
		spin_lock(&ioa_cfg->hrrq[i]._lock);
		ioa_cfg->hrrq[i].removing_ioa = 1;
		spin_unlock(&ioa_cfg->hrrq[i]._lock);
	}
	wmb();
	ipr_initiate_ioa_bringdown(ioa_cfg, IPR_SHUTDOWN_NORMAL);

	spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);
	wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
	flush_work(&ioa_cfg->work_q);
	if (ioa_cfg->reset_work_q)
		flush_workqueue(ioa_cfg->reset_work_q);
	INIT_LIST_HEAD(&ioa_cfg->used_res_q);
	spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);

	spin_lock_irqsave(&ipr_driver_lock, driver_lock_flags);
	list_del(&ioa_cfg->queue);
	spin_unlock_irqrestore(&ipr_driver_lock, driver_lock_flags);

	if (ioa_cfg->sdt_state == ABORT_DUMP)
		ioa_cfg->sdt_state = WAIT_FOR_DUMP;
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);

	ipr_free_all_resources(ioa_cfg);

	LEAVE;
}

/**
 * ipr_remove - IOA hot plug remove entry point
 * @pdev:	pci device struct
 *
 * Adapter hot plug remove entry point.
 *
 * Return value:
 *	none
 **/
static void ipr_remove(struct pci_dev *pdev)
{
	struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);

	ENTER;

	ipr_remove_trace_file(&ioa_cfg->host->shost_dev.kobj,
			      &ipr_trace_attr);
	ipr_remove_dump_file(&ioa_cfg->host->shost_dev.kobj,
			     &ipr_dump_attr);
	sysfs_remove_bin_file(&ioa_cfg->host->shost_dev.kobj,
			      &ipr_ioa_async_err_log);
	scsi_remove_host(ioa_cfg->host);

	__ipr_remove(pdev);

	LEAVE;
}

/**
 * ipr_probe - Adapter hot plug add entry point
 * @pdev:	pci device struct
 * @dev_id:	pci device id struct
 *
 * Return value:
 *	0 on success / non-zero on failure
 **/
static int ipr_probe(struct pci_dev *pdev, const struct pci_device_id *dev_id)
{
	struct ipr_ioa_cfg *ioa_cfg;
	unsigned long flags;
	int rc, i;

	rc = ipr_probe_ioa(pdev, dev_id);

	if (rc)
		return rc;

	ioa_cfg = pci_get_drvdata(pdev);
	rc = ipr_probe_ioa_part2(ioa_cfg);

	if (rc) {
		__ipr_remove(pdev);
		return rc;
	}

	rc = scsi_add_host(ioa_cfg->host, &pdev->dev);

	if (rc) {
		__ipr_remove(pdev);
		return rc;
	}

	rc = ipr_create_trace_file(&ioa_cfg->host->shost_dev.kobj,
				   &ipr_trace_attr);

	if (rc) {
		scsi_remove_host(ioa_cfg->host);
		__ipr_remove(pdev);
		return rc;
	}

	rc = sysfs_create_bin_file(&ioa_cfg->host->shost_dev.kobj,
				   &ipr_ioa_async_err_log);

	if (rc) {
		/* The dump file has not been created yet at this point, so
		 * only the trace file needs to be backed out here. */
		ipr_remove_trace_file(&ioa_cfg->host->shost_dev.kobj,
				      &ipr_trace_attr);
		scsi_remove_host(ioa_cfg->host);
		__ipr_remove(pdev);
		return rc;
	}

	rc = ipr_create_dump_file(&ioa_cfg->host->shost_dev.kobj,
				  &ipr_dump_attr);

	if (rc) {
		sysfs_remove_bin_file(&ioa_cfg->host->shost_dev.kobj,
				      &ipr_ioa_async_err_log);
		ipr_remove_trace_file(&ioa_cfg->host->shost_dev.kobj,
				      &ipr_trace_attr);
		scsi_remove_host(ioa_cfg->host);
		__ipr_remove(pdev);
		return rc;
	}
	spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
	ioa_cfg->scan_enabled = 1;
	schedule_work(&ioa_cfg->work_q);
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);

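	/* On SIS-64 adapters with multiple vectors, completions on the
	 * secondary HRRQs are processed through irq_poll. */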
	ioa_cfg->iopoll_weight = ioa_cfg->chip_cfg->iopoll_weight;

	if (ioa_cfg->iopoll_weight && ioa_cfg->sis64 && ioa_cfg->nvectors > 1) {
		for (i = 1; i < ioa_cfg->hrrq_num; i++) {
			irq_poll_init(&ioa_cfg->hrrq[i].iopoll,
				      ioa_cfg->iopoll_weight, ipr_iopoll);
		}
	}

	scsi_scan_host(ioa_cfg->host);

	return 0;
}

/**
 * ipr_shutdown - Shutdown handler.
 * @pdev:	pci device struct
 *
 * This function is invoked upon system shutdown/reboot. It will issue
 * an adapter shutdown to the adapter to flush the write cache.
 *
 * Return value:
 *	none
 **/
static void ipr_shutdown(struct pci_dev *pdev)
{
	struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
	unsigned long lock_flags = 0;
	enum ipr_shutdown_type shutdown_type = IPR_SHUTDOWN_NORMAL;
	int i;

	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	if (ioa_cfg->iopoll_weight && ioa_cfg->sis64 && ioa_cfg->nvectors > 1) {
		ioa_cfg->iopoll_weight = 0;
		for (i = 1; i < ioa_cfg->hrrq_num; i++)
			irq_poll_disable(&ioa_cfg->hrrq[i].iopoll);
	}

	while (ioa_cfg->in_reset_reload) {
		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
		wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
		spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	}

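	/* For a fast reboot on SIS-64 adapters, quiesce the adapter instead
	 * of issuing a normal shutdown. */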
	if (ipr_fast_reboot && system_state == SYSTEM_RESTART && ioa_cfg->sis64)
		shutdown_type = IPR_SHUTDOWN_QUIESCE;

	ipr_initiate_ioa_bringdown(ioa_cfg, shutdown_type);
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
	wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
	if (ipr_fast_reboot && system_state == SYSTEM_RESTART && ioa_cfg->sis64) {
		ipr_free_irqs(ioa_cfg);
		pci_disable_device(ioa_cfg->pdev);
	}
}

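/*
 * PCI IDs this driver binds to. The driver_data field carries IPR_USE_*
 * flags (IPR_USE_LONG_TRANSOP_TIMEOUT, IPR_USE_PCI_WARM_RESET) that
 * ipr_probe_ioa() consults when configuring the adapter.
 */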
static struct pci_device_id ipr_pci_table[] = {
	{ PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_5702, 0, 0, 0 },
	{ PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_5703, 0, 0, 0 },
	{ PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_573D, 0, 0, 0 },
	{ PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_573E, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_571B, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572E, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_571A, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_575B, 0, 0,
		IPR_USE_LONG_TRANSOP_TIMEOUT },
	{ PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_OBSIDIAN,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572A, 0, 0, 0 },
	{ PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_OBSIDIAN,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572B, 0, 0,
		IPR_USE_LONG_TRANSOP_TIMEOUT },
	{ PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_OBSIDIAN,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_575C, 0, 0,
		IPR_USE_LONG_TRANSOP_TIMEOUT },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572A, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572B, 0, 0,
		IPR_USE_LONG_TRANSOP_TIMEOUT },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_575C, 0, 0,
		IPR_USE_LONG_TRANSOP_TIMEOUT },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_574E, 0, 0,
		IPR_USE_LONG_TRANSOP_TIMEOUT },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B3, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57CC, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B7, 0, 0,
		IPR_USE_LONG_TRANSOP_TIMEOUT | IPR_USE_PCI_WARM_RESET },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_SNIPE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_2780, 0, 0, 0 },
	{ PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_SCAMP,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_571E, 0, 0, 0 },
	{ PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_SCAMP,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_571F, 0, 0,
		IPR_USE_LONG_TRANSOP_TIMEOUT },
	{ PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_SCAMP,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572F, 0, 0,
		IPR_USE_LONG_TRANSOP_TIMEOUT },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B5, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_574D, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B2, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57C0, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57C3, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57C4, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B4, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B1, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57C6, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57C8, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57CE, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57D5, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57D6, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57D7, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57D8, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57D9, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57DA, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57EB, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57EC, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57ED, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57EE, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57EF, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57F0, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_2CCA, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_2CD2, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_2CCD, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_RATTLESNAKE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_580A, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_RATTLESNAKE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_580B, 0, 0, 0 },
	{ }
};
MODULE_DEVICE_TABLE(pci, ipr_pci_table);

static const struct pci_error_handlers ipr_err_handler = {
	.error_detected = ipr_pci_error_detected,
	.mmio_enabled = ipr_pci_mmio_enabled,
	.slot_reset = ipr_pci_slot_reset,
};

static struct pci_driver ipr_driver = {
	.name = IPR_NAME,
	.id_table = ipr_pci_table,
	.probe = ipr_probe,
	.remove = ipr_remove,
	.shutdown = ipr_shutdown,
	.err_handler = &ipr_err_handler,
};

/**
 * ipr_halt_done - Shutdown prepare completion
 * @ipr_cmd:	ipr command struct
 *
 * Return value:
 *	none
 **/
static void ipr_halt_done(struct ipr_cmnd *ipr_cmd)
{
	list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
}

/**
 * ipr_halt - Issue shutdown prepare to all adapters
 * @nb:		notifier block
 * @event:	system event (SYS_RESTART, SYS_HALT, or SYS_POWER_OFF)
 * @buf:	unused
 *
 * Return value:
 *	NOTIFY_OK on success / NOTIFY_DONE on failure
 **/
static int ipr_halt(struct notifier_block *nb, ulong event, void *buf)
{
	struct ipr_cmnd *ipr_cmd;
	struct ipr_ioa_cfg *ioa_cfg;
	unsigned long flags = 0, driver_lock_flags;

	if (event != SYS_RESTART && event != SYS_HALT && event != SYS_POWER_OFF)
		return NOTIFY_DONE;

	spin_lock_irqsave(&ipr_driver_lock, driver_lock_flags);

	list_for_each_entry(ioa_cfg, &ipr_ioa_head, queue) {
		spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
		if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].allow_cmds ||
		    (ipr_fast_reboot && event == SYS_RESTART && ioa_cfg->sis64)) {
			spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
			continue;
		}

		ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
		ipr_cmd->ioarcb.res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
		ipr_cmd->ioarcb.cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
		ipr_cmd->ioarcb.cmd_pkt.cdb[0] = IPR_IOA_SHUTDOWN;
		ipr_cmd->ioarcb.cmd_pkt.cdb[1] = IPR_SHUTDOWN_PREPARE_FOR_NORMAL;

		ipr_do_req(ipr_cmd, ipr_halt_done, ipr_timeout, IPR_DEVICE_RESET_TIMEOUT);
		spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
	}
	spin_unlock_irqrestore(&ipr_driver_lock, driver_lock_flags);

	return NOTIFY_OK;
}

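/* Reboot notifier used to send shutdown prepare to the adapters on halt,
 * power off, or restart. */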
static struct notifier_block ipr_notifier = {
	.notifier_call = ipr_halt,
};

/**
 * ipr_init - Module entry point
 *
 * Return value:
 *	0 on success / negative value on failure
 **/
static int __init ipr_init(void)
{
	ipr_info("IBM Power RAID SCSI Device Driver version: %s %s\n",
		 IPR_DRIVER_VERSION, IPR_DRIVER_DATE);

	register_reboot_notifier(&ipr_notifier);
	return pci_register_driver(&ipr_driver);
}

/**
 * ipr_exit - Module unload
 *
 * Module unload entry point.
 *
 * Return value:
 *	none
 **/
static void __exit ipr_exit(void)
{
	unregister_reboot_notifier(&ipr_notifier);
	pci_unregister_driver(&ipr_driver);
}

module_init(ipr_init);
module_exit(ipr_exit);