/*
 * ipr.c -- driver for IBM Power Linux RAID adapters
 *
 * Written By: Brian King <brking@us.ibm.com>, IBM Corporation
 *
 * Copyright (C) 2003, 2004 IBM Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 *
 */

/*
 * Notes:
 *
 * This driver is used to control the following SCSI adapters:
 *
 * IBM iSeries: 5702, 5703, 2780, 5709, 570A, 570B
 *
 * IBM pSeries: PCI-X Dual Channel Ultra 320 SCSI RAID Adapter
 *              PCI-X Dual Channel Ultra 320 SCSI Adapter
 *              PCI-X Dual Channel Ultra 320 SCSI RAID Enablement Card
 *              Embedded SCSI adapter on p615 and p655 systems
 *
 * Supported Hardware Features:
 *	- Ultra 320 SCSI controller
 *	- PCI-X host interface
 *	- Embedded PowerPC RISC Processor and Hardware XOR DMA Engine
 *	- Non-Volatile Write Cache
 *	- Supports attachment of non-RAID disks, tape, and optical devices
 *	- RAID Levels 0, 5, 10
 *	- Hot spare
 *	- Background Parity Checking
 *	- Background Data Scrubbing
 *	- Ability to increase the capacity of an existing RAID 5 disk array
 *	  by adding disks
 *
 * Driver Features:
 *	- Tagged command queuing
 *	- Adapter microcode download
 *	- PCI hot plug
 *	- SCSI device hot plug
 *
 */

#include <linux/fs.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/ioport.h>
#include <linux/delay.h>
#include <linux/pci.h>
#include <linux/wait.h>
#include <linux/spinlock.h>
#include <linux/sched.h>
#include <linux/interrupt.h>
#include <linux/blkdev.h>
#include <linux/firmware.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/libata.h>
#include <linux/hdreg.h>
#include <linux/reboot.h>
#include <linux/stringify.h>
#include <asm/io.h>
#include <asm/irq.h>
#include <asm/processor.h>
#include <scsi/scsi.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_tcq.h>
#include <scsi/scsi_eh.h>
#include <scsi/scsi_cmnd.h>
#include "ipr.h"

/*
 * Global Data
 */
static LIST_HEAD(ipr_ioa_head);
static unsigned int ipr_log_level = IPR_DEFAULT_LOG_LEVEL;
static unsigned int ipr_max_speed = 1;
static int ipr_testmode = 0;
static unsigned int ipr_fastfail = 0;
static unsigned int ipr_transop_timeout = 0;
static unsigned int ipr_debug = 0;
static unsigned int ipr_max_devs = IPR_DEFAULT_SIS64_DEVS;
static unsigned int ipr_dual_ioa_raid = 1;
static unsigned int ipr_number_of_msix = 2;
static DEFINE_SPINLOCK(ipr_driver_lock);

/* This table describes the differences between DMA controller chips */
static const struct ipr_chip_cfg_t ipr_chip_cfg[] = {
	{ /* Gemstone, Citrine, Obsidian, and Obsidian-E */
		.mailbox = 0x0042C,
		.max_cmds = 100,
		.cache_line_size = 0x20,
		.clear_isr = 1,
		{
			.set_interrupt_mask_reg = 0x0022C,
			.clr_interrupt_mask_reg = 0x00230,
			.clr_interrupt_mask_reg32 = 0x00230,
			.sense_interrupt_mask_reg = 0x0022C,
			.sense_interrupt_mask_reg32 = 0x0022C,
			.clr_interrupt_reg = 0x00228,
			.clr_interrupt_reg32 = 0x00228,
			.sense_interrupt_reg = 0x00224,
			.sense_interrupt_reg32 = 0x00224,
			.ioarrin_reg = 0x00404,
			.sense_uproc_interrupt_reg = 0x00214,
			.sense_uproc_interrupt_reg32 = 0x00214,
			.set_uproc_interrupt_reg = 0x00214,
			.set_uproc_interrupt_reg32 = 0x00214,
			.clr_uproc_interrupt_reg = 0x00218,
			.clr_uproc_interrupt_reg32 = 0x00218
		}
	},
	{ /* Snipe and Scamp */
		.mailbox = 0x0052C,
		.max_cmds = 100,
		.cache_line_size = 0x20,
		.clear_isr = 1,
		{
			.set_interrupt_mask_reg = 0x00288,
			.clr_interrupt_mask_reg = 0x0028C,
			.clr_interrupt_mask_reg32 = 0x0028C,
			.sense_interrupt_mask_reg = 0x00288,
			.sense_interrupt_mask_reg32 = 0x00288,
			.clr_interrupt_reg = 0x00284,
			.clr_interrupt_reg32 = 0x00284,
			.sense_interrupt_reg = 0x00280,
			.sense_interrupt_reg32 = 0x00280,
			.ioarrin_reg = 0x00504,
			.sense_uproc_interrupt_reg = 0x00290,
			.sense_uproc_interrupt_reg32 = 0x00290,
			.set_uproc_interrupt_reg = 0x00290,
			.set_uproc_interrupt_reg32 = 0x00290,
			.clr_uproc_interrupt_reg = 0x00294,
			.clr_uproc_interrupt_reg32 = 0x00294
		}
	},
	{ /* CRoC */
		.mailbox = 0x00044,
		.max_cmds = 1000,
		.cache_line_size = 0x20,
		.clear_isr = 0,
		{
			.set_interrupt_mask_reg = 0x00010,
			.clr_interrupt_mask_reg = 0x00018,
			.clr_interrupt_mask_reg32 = 0x0001C,
			.sense_interrupt_mask_reg = 0x00010,
			.sense_interrupt_mask_reg32 = 0x00014,
			.clr_interrupt_reg = 0x00008,
			.clr_interrupt_reg32 = 0x0000C,
			.sense_interrupt_reg = 0x00000,
			.sense_interrupt_reg32 = 0x00004,
			.ioarrin_reg = 0x00070,
			.sense_uproc_interrupt_reg = 0x00020,
			.sense_uproc_interrupt_reg32 = 0x00024,
			.set_uproc_interrupt_reg = 0x00020,
			.set_uproc_interrupt_reg32 = 0x00024,
			.clr_uproc_interrupt_reg = 0x00028,
			.clr_uproc_interrupt_reg32 = 0x0002C,
			.init_feedback_reg = 0x0005C,
			.dump_addr_reg = 0x00064,
			.dump_data_reg = 0x00068,
			.endian_swap_reg = 0x00084
		}
	},
};

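/*
 * Each entry below pairs a PCI vendor/device ID with the interrupt type the
 * driver requests for it (legacy LSI vs. MSI), the SIS interface level
 * (32 vs. 64 bit), the access method used when driving adapter reset
 * (PCI config space vs. MMIO), and a pointer into ipr_chip_cfg[] for that
 * chip's register layout. (Field meanings per struct ipr_chip_t in ipr.h.)
 */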
static const struct ipr_chip_t ipr_chip[] = {
	{ PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE, IPR_USE_LSI, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[0] },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE, IPR_USE_LSI, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[0] },
	{ PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_OBSIDIAN, IPR_USE_LSI, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[0] },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN, IPR_USE_LSI, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[0] },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E, IPR_USE_MSI, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[0] },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_SNIPE, IPR_USE_LSI, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[1] },
	{ PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_SCAMP, IPR_USE_LSI, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[1] },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2, IPR_USE_MSI, IPR_SIS64, IPR_MMIO, &ipr_chip_cfg[2] },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE, IPR_USE_MSI, IPR_SIS64, IPR_MMIO, &ipr_chip_cfg[2] }
};

static int ipr_max_bus_speeds[] = {
	IPR_80MBs_SCSI_RATE, IPR_U160_SCSI_RATE, IPR_U320_SCSI_RATE
};

MODULE_AUTHOR("Brian King <brking@us.ibm.com>");
MODULE_DESCRIPTION("IBM Power RAID SCSI Adapter Driver");
module_param_named(max_speed, ipr_max_speed, uint, 0);
MODULE_PARM_DESC(max_speed, "Maximum bus speed (0-2). Default: 1=U160. Speeds: 0=80 MB/s, 1=U160, 2=U320");
module_param_named(log_level, ipr_log_level, uint, 0);
MODULE_PARM_DESC(log_level, "Set to 0 - 4 for increasing verbosity of device driver");
module_param_named(testmode, ipr_testmode, int, 0);
MODULE_PARM_DESC(testmode, "DANGEROUS!!! Allows unsupported configurations");
module_param_named(fastfail, ipr_fastfail, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(fastfail, "Reduce timeouts and retries");
module_param_named(transop_timeout, ipr_transop_timeout, int, 0);
MODULE_PARM_DESC(transop_timeout, "Time in seconds to wait for adapter to come operational (default: 300)");
module_param_named(debug, ipr_debug, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(debug, "Enable device driver debugging logging. Set to 1 to enable. (default: 0)");
module_param_named(dual_ioa_raid, ipr_dual_ioa_raid, int, 0);
MODULE_PARM_DESC(dual_ioa_raid, "Enable dual adapter RAID support. Set to 1 to enable. (default: 1)");
module_param_named(max_devs, ipr_max_devs, int, 0);
MODULE_PARM_DESC(max_devs, "Specify the maximum number of physical devices. "
		 "[Default=" __stringify(IPR_DEFAULT_SIS64_DEVS) "]");
module_param_named(number_of_msix, ipr_number_of_msix, int, 0);
MODULE_PARM_DESC(number_of_msix, "Specify the number of MSIX interrupts to use on capable adapters (1 - 5). (default:2)");
MODULE_LICENSE("GPL");
MODULE_VERSION(IPR_DRIVER_VERSION);
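
/*
 * Example module load (a sketch, assuming the driver is built as a module):
 *
 *	modprobe ipr number_of_msix=4 log_level=2
 *
 * would request four MSI-X vectors on capable adapters and raise logging
 * verbosity; the parameter names map to the module_param_named()
 * declarations above.
 */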

/*  A constant array of IOASCs/URCs/Error Messages */
static const
struct ipr_error_table_t ipr_error_table[] = {
	{0x00000000, 1, IPR_DEFAULT_LOG_LEVEL,
	"8155: An unknown error was received"},
	{0x00330000, 0, 0,
	"Soft underlength error"},
	{0x005A0000, 0, 0,
	"Command to be cancelled not found"},
	{0x00808000, 0, 0,
	"Qualified success"},
	{0x01080000, 1, IPR_DEFAULT_LOG_LEVEL,
	"FFFE: Soft device bus error recovered by the IOA"},
	{0x01088100, 0, IPR_DEFAULT_LOG_LEVEL,
	"4101: Soft device bus fabric error"},
	{0x01100100, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFFC: Logical block guard error recovered by the device"},
	{0x01100300, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFFC: Logical block reference tag error recovered by the device"},
	{0x01108300, 0, IPR_DEFAULT_LOG_LEVEL,
	"4171: Recovered scatter list tag / sequence number error"},
	{0x01109000, 0, IPR_DEFAULT_LOG_LEVEL,
	"FF3D: Recovered logical block CRC error on IOA to Host transfer"},
	{0x01109200, 0, IPR_DEFAULT_LOG_LEVEL,
	"4171: Recovered logical block sequence number error on IOA to Host transfer"},
	{0x0110A000, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFFD: Recovered logical block reference tag error detected by the IOA"},
	{0x0110A100, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFFD: Logical block guard error recovered by the IOA"},
	{0x01170600, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFF9: Device sector reassign successful"},
	{0x01170900, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFF7: Media error recovered by device rewrite procedures"},
	{0x01180200, 0, IPR_DEFAULT_LOG_LEVEL,
	"7001: IOA sector reassignment successful"},
	{0x01180500, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFF9: Soft media error. Sector reassignment recommended"},
	{0x01180600, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFF7: Media error recovered by IOA rewrite procedures"},
	{0x01418000, 0, IPR_DEFAULT_LOG_LEVEL,
	"FF3D: Soft PCI bus error recovered by the IOA"},
	{0x01440000, 1, IPR_DEFAULT_LOG_LEVEL,
	"FFF6: Device hardware error recovered by the IOA"},
	{0x01448100, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFF6: Device hardware error recovered by the device"},
	{0x01448200, 1, IPR_DEFAULT_LOG_LEVEL,
	"FF3D: Soft IOA error recovered by the IOA"},
	{0x01448300, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFFA: Undefined device response recovered by the IOA"},
	{0x014A0000, 1, IPR_DEFAULT_LOG_LEVEL,
	"FFF6: Device bus error, message or command phase"},
	{0x014A8000, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFFE: Task Management Function failed"},
	{0x015D0000, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFF6: Failure prediction threshold exceeded"},
	{0x015D9200, 0, IPR_DEFAULT_LOG_LEVEL,
	"8009: Impending cache battery pack failure"},
	{0x02040400, 0, 0,
	"34FF: Disk device format in progress"},
	{0x02048000, 0, IPR_DEFAULT_LOG_LEVEL,
	"9070: IOA requested reset"},
	{0x023F0000, 0, 0,
	"Synchronization required"},
	{0x024E0000, 0, 0,
	"No ready, IOA shutdown"},
	{0x025A0000, 0, 0,
	"Not ready, IOA has been shutdown"},
	{0x02670100, 0, IPR_DEFAULT_LOG_LEVEL,
	"3020: Storage subsystem configuration error"},
	{0x03110B00, 0, 0,
	"FFF5: Medium error, data unreadable, recommend reassign"},
	{0x03110C00, 0, 0,
	"7000: Medium error, data unreadable, do not reassign"},
	{0x03310000, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFF3: Disk media format bad"},
	{0x04050000, 0, IPR_DEFAULT_LOG_LEVEL,
	"3002: Addressed device failed to respond to selection"},
	{0x04080000, 1, IPR_DEFAULT_LOG_LEVEL,
	"3100: Device bus error"},
	{0x04080100, 0, IPR_DEFAULT_LOG_LEVEL,
	"3109: IOA timed out a device command"},
	{0x04088000, 0, 0,
	"3120: SCSI bus is not operational"},
	{0x04088100, 0, IPR_DEFAULT_LOG_LEVEL,
	"4100: Hard device bus fabric error"},
	{0x04100100, 0, IPR_DEFAULT_LOG_LEVEL,
	"310C: Logical block guard error detected by the device"},
	{0x04100300, 0, IPR_DEFAULT_LOG_LEVEL,
	"310C: Logical block reference tag error detected by the device"},
	{0x04108300, 1, IPR_DEFAULT_LOG_LEVEL,
	"4170: Scatter list tag / sequence number error"},
	{0x04109000, 1, IPR_DEFAULT_LOG_LEVEL,
	"8150: Logical block CRC error on IOA to Host transfer"},
	{0x04109200, 1, IPR_DEFAULT_LOG_LEVEL,
	"4170: Logical block sequence number error on IOA to Host transfer"},
	{0x0410A000, 0, IPR_DEFAULT_LOG_LEVEL,
	"310D: Logical block reference tag error detected by the IOA"},
	{0x0410A100, 0, IPR_DEFAULT_LOG_LEVEL,
	"310D: Logical block guard error detected by the IOA"},
	{0x04118000, 0, IPR_DEFAULT_LOG_LEVEL,
	"9000: IOA reserved area data check"},
	{0x04118100, 0, IPR_DEFAULT_LOG_LEVEL,
	"9001: IOA reserved area invalid data pattern"},
	{0x04118200, 0, IPR_DEFAULT_LOG_LEVEL,
	"9002: IOA reserved area LRC error"},
	{0x04118300, 1, IPR_DEFAULT_LOG_LEVEL,
	"Hardware Error, IOA metadata access error"},
	{0x04320000, 0, IPR_DEFAULT_LOG_LEVEL,
	"102E: Out of alternate sectors for disk storage"},
	{0x04330000, 1, IPR_DEFAULT_LOG_LEVEL,
	"FFF4: Data transfer underlength error"},
	{0x04338000, 1, IPR_DEFAULT_LOG_LEVEL,
	"FFF4: Data transfer overlength error"},
	{0x043E0100, 0, IPR_DEFAULT_LOG_LEVEL,
	"3400: Logical unit failure"},
	{0x04408500, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFF4: Device microcode is corrupt"},
	{0x04418000, 1, IPR_DEFAULT_LOG_LEVEL,
	"8150: PCI bus error"},
	{0x04430000, 1, 0,
	"Unsupported device bus message received"},
	{0x04440000, 1, IPR_DEFAULT_LOG_LEVEL,
	"FFF4: Disk device problem"},
	{0x04448200, 1, IPR_DEFAULT_LOG_LEVEL,
	"8150: Permanent IOA failure"},
	{0x04448300, 0, IPR_DEFAULT_LOG_LEVEL,
	"3010: Disk device returned wrong response to IOA"},
	{0x04448400, 0, IPR_DEFAULT_LOG_LEVEL,
	"8151: IOA microcode error"},
	{0x04448500, 0, 0,
	"Device bus status error"},
	{0x04448600, 0, IPR_DEFAULT_LOG_LEVEL,
	"8157: IOA error requiring IOA reset to recover"},
	{0x04448700, 0, 0,
	"ATA device status error"},
	{0x04490000, 0, 0,
	"Message reject received from the device"},
	{0x04449200, 0, IPR_DEFAULT_LOG_LEVEL,
	"8008: A permanent cache battery pack failure occurred"},
	{0x0444A000, 0, IPR_DEFAULT_LOG_LEVEL,
	"9090: Disk unit has been modified after the last known status"},
	{0x0444A200, 0, IPR_DEFAULT_LOG_LEVEL,
	"9081: IOA detected device error"},
	{0x0444A300, 0, IPR_DEFAULT_LOG_LEVEL,
	"9082: IOA detected device error"},
	{0x044A0000, 1, IPR_DEFAULT_LOG_LEVEL,
	"3110: Device bus error, message or command phase"},
	{0x044A8000, 1, IPR_DEFAULT_LOG_LEVEL,
	"3110: SAS Command / Task Management Function failed"},
	{0x04670400, 0, IPR_DEFAULT_LOG_LEVEL,
	"9091: Incorrect hardware configuration change has been detected"},
	{0x04678000, 0, IPR_DEFAULT_LOG_LEVEL,
	"9073: Invalid multi-adapter configuration"},
	{0x04678100, 0, IPR_DEFAULT_LOG_LEVEL,
	"4010: Incorrect connection between cascaded expanders"},
	{0x04678200, 0, IPR_DEFAULT_LOG_LEVEL,
	"4020: Connections exceed IOA design limits"},
	{0x04678300, 0, IPR_DEFAULT_LOG_LEVEL,
	"4030: Incorrect multipath connection"},
	{0x04679000, 0, IPR_DEFAULT_LOG_LEVEL,
	"4110: Unsupported enclosure function"},
	{0x046E0000, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFF4: Command to logical unit failed"},
	{0x05240000, 1, 0,
	"Illegal request, invalid request type or request packet"},
	{0x05250000, 0, 0,
	"Illegal request, invalid resource handle"},
	{0x05258000, 0, 0,
	"Illegal request, commands not allowed to this device"},
	{0x05258100, 0, 0,
	"Illegal request, command not allowed to a secondary adapter"},
	{0x05258200, 0, 0,
	"Illegal request, command not allowed to a non-optimized resource"},
	{0x05260000, 0, 0,
	"Illegal request, invalid field in parameter list"},
	{0x05260100, 0, 0,
	"Illegal request, parameter not supported"},
	{0x05260200, 0, 0,
	"Illegal request, parameter value invalid"},
	{0x052C0000, 0, 0,
	"Illegal request, command sequence error"},
	{0x052C8000, 1, 0,
	"Illegal request, dual adapter support not enabled"},
	{0x06040500, 0, IPR_DEFAULT_LOG_LEVEL,
	"9031: Array protection temporarily suspended, protection resuming"},
	{0x06040600, 0, IPR_DEFAULT_LOG_LEVEL,
	"9040: Array protection temporarily suspended, protection resuming"},
	{0x06288000, 0, IPR_DEFAULT_LOG_LEVEL,
	"3140: Device bus not ready to ready transition"},
	{0x06290000, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFFB: SCSI bus was reset"},
	{0x06290500, 0, 0,
	"FFFE: SCSI bus transition to single ended"},
	{0x06290600, 0, 0,
	"FFFE: SCSI bus transition to LVD"},
	{0x06298000, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFFB: SCSI bus was reset by another initiator"},
	{0x063F0300, 0, IPR_DEFAULT_LOG_LEVEL,
	"3029: A device replacement has occurred"},
	{0x064C8000, 0, IPR_DEFAULT_LOG_LEVEL,
	"9051: IOA cache data exists for a missing or failed device"},
	{0x064C8100, 0, IPR_DEFAULT_LOG_LEVEL,
	"9055: Auxiliary cache IOA contains cache data needed by the primary IOA"},
	{0x06670100, 0, IPR_DEFAULT_LOG_LEVEL,
	"9025: Disk unit is not supported at its physical location"},
	{0x06670600, 0, IPR_DEFAULT_LOG_LEVEL,
	"3020: IOA detected a SCSI bus configuration error"},
	{0x06678000, 0, IPR_DEFAULT_LOG_LEVEL,
	"3150: SCSI bus configuration error"},
	{0x06678100, 0, IPR_DEFAULT_LOG_LEVEL,
	"9074: Asymmetric advanced function disk configuration"},
	{0x06678300, 0, IPR_DEFAULT_LOG_LEVEL,
	"4040: Incomplete multipath connection between IOA and enclosure"},
	{0x06678400, 0, IPR_DEFAULT_LOG_LEVEL,
	"4041: Incomplete multipath connection between enclosure and device"},
	{0x06678500, 0, IPR_DEFAULT_LOG_LEVEL,
	"9075: Incomplete multipath connection between IOA and remote IOA"},
	{0x06678600, 0, IPR_DEFAULT_LOG_LEVEL,
	"9076: Configuration error, missing remote IOA"},
	{0x06679100, 0, IPR_DEFAULT_LOG_LEVEL,
	"4050: Enclosure does not support a required multipath function"},
	{0x06690000, 0, IPR_DEFAULT_LOG_LEVEL,
	"4070: Logically bad block written on device"},
	{0x06690200, 0, IPR_DEFAULT_LOG_LEVEL,
	"9041: Array protection temporarily suspended"},
	{0x06698200, 0, IPR_DEFAULT_LOG_LEVEL,
	"9042: Corrupt array parity detected on specified device"},
	{0x066B0200, 0, IPR_DEFAULT_LOG_LEVEL,
	"9030: Array no longer protected due to missing or failed disk unit"},
	{0x066B8000, 0, IPR_DEFAULT_LOG_LEVEL,
	"9071: Link operational transition"},
	{0x066B8100, 0, IPR_DEFAULT_LOG_LEVEL,
	"9072: Link not operational transition"},
	{0x066B8200, 0, IPR_DEFAULT_LOG_LEVEL,
	"9032: Array exposed but still protected"},
	{0x066B8300, 0, IPR_DEFAULT_LOG_LEVEL + 1,
	"70DD: Device forced failed by disrupt device command"},
	{0x066B9100, 0, IPR_DEFAULT_LOG_LEVEL,
	"4061: Multipath redundancy level got better"},
	{0x066B9200, 0, IPR_DEFAULT_LOG_LEVEL,
	"4060: Multipath redundancy level got worse"},
	{0x07270000, 0, 0,
	"Failure due to other device"},
	{0x07278000, 0, IPR_DEFAULT_LOG_LEVEL,
	"9008: IOA does not support functions expected by devices"},
	{0x07278100, 0, IPR_DEFAULT_LOG_LEVEL,
	"9010: Cache data associated with attached devices cannot be found"},
	{0x07278200, 0, IPR_DEFAULT_LOG_LEVEL,
	"9011: Cache data belongs to devices other than those attached"},
	{0x07278400, 0, IPR_DEFAULT_LOG_LEVEL,
	"9020: Array missing 2 or more devices with only 1 device present"},
	{0x07278500, 0, IPR_DEFAULT_LOG_LEVEL,
	"9021: Array missing 2 or more devices with 2 or more devices present"},
	{0x07278600, 0, IPR_DEFAULT_LOG_LEVEL,
	"9022: Exposed array is missing a required device"},
	{0x07278700, 0, IPR_DEFAULT_LOG_LEVEL,
	"9023: Array member(s) not at required physical locations"},
	{0x07278800, 0, IPR_DEFAULT_LOG_LEVEL,
	"9024: Array not functional due to present hardware configuration"},
	{0x07278900, 0, IPR_DEFAULT_LOG_LEVEL,
	"9026: Array not functional due to present hardware configuration"},
	{0x07278A00, 0, IPR_DEFAULT_LOG_LEVEL,
	"9027: Array is missing a device and parity is out of sync"},
	{0x07278B00, 0, IPR_DEFAULT_LOG_LEVEL,
	"9028: Maximum number of arrays already exist"},
	{0x07278C00, 0, IPR_DEFAULT_LOG_LEVEL,
	"9050: Required cache data cannot be located for a disk unit"},
	{0x07278D00, 0, IPR_DEFAULT_LOG_LEVEL,
	"9052: Cache data exists for a device that has been modified"},
	{0x07278F00, 0, IPR_DEFAULT_LOG_LEVEL,
	"9054: IOA resources not available due to previous problems"},
	{0x07279100, 0, IPR_DEFAULT_LOG_LEVEL,
	"9092: Disk unit requires initialization before use"},
	{0x07279200, 0, IPR_DEFAULT_LOG_LEVEL,
	"9029: Incorrect hardware configuration change has been detected"},
	{0x07279600, 0, IPR_DEFAULT_LOG_LEVEL,
	"9060: One or more disk pairs are missing from an array"},
	{0x07279700, 0, IPR_DEFAULT_LOG_LEVEL,
	"9061: One or more disks are missing from an array"},
	{0x07279800, 0, IPR_DEFAULT_LOG_LEVEL,
	"9062: One or more disks are missing from an array"},
	{0x07279900, 0, IPR_DEFAULT_LOG_LEVEL,
	"9063: Maximum number of functional arrays has been exceeded"},
	{0x0B260000, 0, 0,
	"Aborted command, invalid descriptor"},
	{0x0B5A0000, 0, 0,
	"Command terminated by host"}
};

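/*
 * Known SES backplanes and their maximum supported bus speeds (MB/s).
 * The second string is a per-byte compare mask applied against the
 * enclosure's product ID: bytes marked 'X' must match, while other
 * positions (e.g. '*') are ignored when matching a table entry.
 */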
static const struct ipr_ses_table_entry ipr_ses_table[] = {
	{ "2104-DL1        ", "XXXXXXXXXXXXXXXX", 80 },
	{ "2104-TL1        ", "XXXXXXXXXXXXXXXX", 80 },
	{ "HSBP07M P U2SCSI", "XXXXXXXXXXXXXXXX", 80 }, /* Hidive 7 slot */
	{ "HSBP05M P U2SCSI", "XXXXXXXXXXXXXXXX", 80 }, /* Hidive 5 slot */
	{ "HSBP05M S U2SCSI", "XXXXXXXXXXXXXXXX", 80 }, /* Bowtie */
	{ "HSBP06E ASU2SCSI", "XXXXXXXXXXXXXXXX", 80 }, /* MartinFenning */
	{ "2104-DU3        ", "XXXXXXXXXXXXXXXX", 160 },
	{ "2104-TU3        ", "XXXXXXXXXXXXXXXX", 160 },
	{ "HSBP04C RSU2SCSI", "XXXXXXX*XXXXXXXX", 160 },
	{ "HSBP06E RSU2SCSI", "XXXXXXX*XXXXXXXX", 160 },
	{ "St  V1S2        ", "XXXXXXXXXXXXXXXX", 160 },
	{ "HSBPD4M  PU3SCSI", "XXXXXXX*XXXXXXXX", 160 },
	{ "VSBPD1H   U3SCSI", "XXXXXXX*XXXXXXXX", 160 }
};

/*
 *  Function Prototypes
 */
static int ipr_reset_alert(struct ipr_cmnd *);
static void ipr_process_ccn(struct ipr_cmnd *);
static void ipr_process_error(struct ipr_cmnd *);
static void ipr_reset_ioa_job(struct ipr_cmnd *);
static void ipr_initiate_ioa_reset(struct ipr_ioa_cfg *,
				   enum ipr_shutdown_type);

#ifdef CONFIG_SCSI_IPR_TRACE
/**
 * ipr_trc_hook - Add a trace entry to the driver trace
 * @ipr_cmd:	ipr command struct
 * @type:	trace type
 * @add_data:	additional data
 *
 * Return value:
 * 	none
 **/
static void ipr_trc_hook(struct ipr_cmnd *ipr_cmd,
			 u8 type, u32 add_data)
{
	struct ipr_trace_entry *trace_entry;
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;

	trace_entry = &ioa_cfg->trace[ioa_cfg->trace_index++];
	trace_entry->time = jiffies;
	trace_entry->op_code = ipr_cmd->ioarcb.cmd_pkt.cdb[0];
	trace_entry->type = type;
	if (ipr_cmd->ioa_cfg->sis64)
		trace_entry->ata_op_code = ipr_cmd->i.ata_ioadl.regs.command;
	else
		trace_entry->ata_op_code = ipr_cmd->ioarcb.u.add_data.u.regs.command;
	trace_entry->cmd_index = ipr_cmd->cmd_index & 0xff;
	trace_entry->res_handle = ipr_cmd->ioarcb.res_handle;
	trace_entry->u.add_data = add_data;
}
#else
#define ipr_trc_hook(ipr_cmd, type, add_data) do { } while (0)
#endif

/**
 * ipr_lock_and_done - Acquire lock and complete command
 * @ipr_cmd:	ipr command struct
 *
 * Return value:
 *	none
 **/
static void ipr_lock_and_done(struct ipr_cmnd *ipr_cmd)
{
	unsigned long lock_flags;
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;

	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	ipr_cmd->done(ipr_cmd);
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
}

/**
 * ipr_reinit_ipr_cmnd - Re-initialize an IPR Cmnd block for reuse
 * @ipr_cmd:	ipr command struct
 *
 * Return value:
 * 	none
 **/
static void ipr_reinit_ipr_cmnd(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
	struct ipr_ioasa *ioasa = &ipr_cmd->s.ioasa;
	struct ipr_ioasa64 *ioasa64 = &ipr_cmd->s.ioasa64;
	dma_addr_t dma_addr = ipr_cmd->dma_addr;
	int hrrq_id;

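	/* The command packet is zeroed below; save the command's host RRQ
	 * routing id first so it can be restored after the memset. */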
	hrrq_id = ioarcb->cmd_pkt.hrrq_id;
	memset(&ioarcb->cmd_pkt, 0, sizeof(struct ipr_cmd_pkt));
	ioarcb->cmd_pkt.hrrq_id = hrrq_id;
	ioarcb->data_transfer_length = 0;
	ioarcb->read_data_transfer_length = 0;
	ioarcb->ioadl_len = 0;
	ioarcb->read_ioadl_len = 0;

	if (ipr_cmd->ioa_cfg->sis64) {
		ioarcb->u.sis64_addr_data.data_ioadl_addr =
			cpu_to_be64(dma_addr + offsetof(struct ipr_cmnd, i.ioadl64));
		ioasa64->u.gata.status = 0;
	} else {
		ioarcb->write_ioadl_addr =
			cpu_to_be32(dma_addr + offsetof(struct ipr_cmnd, i.ioadl));
		ioarcb->read_ioadl_addr = ioarcb->write_ioadl_addr;
		ioasa->u.gata.status = 0;
	}

	ioasa->hdr.ioasc = 0;
	ioasa->hdr.residual_data_len = 0;
	ipr_cmd->scsi_cmd = NULL;
	ipr_cmd->qc = NULL;
	ipr_cmd->sense_buffer[0] = 0;
	ipr_cmd->dma_use_sg = 0;
}

/**
 * ipr_init_ipr_cmnd - Initialize an IPR Cmnd block
 * @ipr_cmd:	ipr command struct
 * @fast_done:	fast done function call-back
 *
 * Return value:
 * 	none
 **/
static void ipr_init_ipr_cmnd(struct ipr_cmnd *ipr_cmd,
			      void (*fast_done) (struct ipr_cmnd *))
{
	ipr_reinit_ipr_cmnd(ipr_cmd);
	ipr_cmd->u.scratch = 0;
	ipr_cmd->sibling = NULL;
	ipr_cmd->fast_done = fast_done;
	init_timer(&ipr_cmd->timer);
}

/**
 * __ipr_get_free_ipr_cmnd - Get a free IPR Cmnd block
 * @hrrq:	hrr queue
 *
 * Return value:
 * 	pointer to ipr command struct
 **/
static
struct ipr_cmnd *__ipr_get_free_ipr_cmnd(struct ipr_hrr_queue *hrrq)
{
	struct ipr_cmnd *ipr_cmd = NULL;

	if (likely(!list_empty(&hrrq->hrrq_free_q))) {
		ipr_cmd = list_entry(hrrq->hrrq_free_q.next,
			struct ipr_cmnd, queue);
		list_del(&ipr_cmd->queue);
	}

	return ipr_cmd;
}

/**
 * ipr_get_free_ipr_cmnd - Get a free IPR Cmnd block and initialize it
 * @ioa_cfg:	ioa config struct
 *
 * Return value:
 *	pointer to ipr command struct
 **/
static
struct ipr_cmnd *ipr_get_free_ipr_cmnd(struct ipr_ioa_cfg *ioa_cfg)
{
	struct ipr_cmnd *ipr_cmd =
		__ipr_get_free_ipr_cmnd(&ioa_cfg->hrrq[IPR_INIT_HRRQ]);
	ipr_init_ipr_cmnd(ipr_cmd, ipr_lock_and_done);
	return ipr_cmd;
}

/**
 * ipr_mask_and_clear_interrupts - Mask all and clear specified interrupts
 * @ioa_cfg:	ioa config struct
 * @clr_ints:	interrupts to clear
 *
 * This function masks all interrupts on the adapter, then clears the
 * interrupts specified in the mask
 *
 * Return value:
 * 	none
 **/
static void ipr_mask_and_clear_interrupts(struct ipr_ioa_cfg *ioa_cfg,
					  u32 clr_ints)
{
	volatile u32 int_reg;

	/* Stop new interrupts */
	ioa_cfg->allow_interrupts = 0;

	/* Set interrupt mask to stop all new interrupts */
	if (ioa_cfg->sis64)
		writeq(~0, ioa_cfg->regs.set_interrupt_mask_reg);
	else
		writel(~0, ioa_cfg->regs.set_interrupt_mask_reg);

	/* Clear any pending interrupts */
	if (ioa_cfg->sis64)
		writel(~0, ioa_cfg->regs.clr_interrupt_reg);
	writel(clr_ints, ioa_cfg->regs.clr_interrupt_reg32);
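	/* The readl below is intended to flush the posted MMIO writes above
	 * before returning; the value read back is not otherwise used. */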
	int_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
}

/**
 * ipr_save_pcix_cmd_reg - Save PCI-X command register
 * @ioa_cfg:	ioa config struct
 *
 * Return value:
 * 	0 on success / -EIO on failure
 **/
static int ipr_save_pcix_cmd_reg(struct ipr_ioa_cfg *ioa_cfg)
{
	int pcix_cmd_reg = pci_find_capability(ioa_cfg->pdev, PCI_CAP_ID_PCIX);

	if (pcix_cmd_reg == 0)
		return 0;

	if (pci_read_config_word(ioa_cfg->pdev, pcix_cmd_reg + PCI_X_CMD,
				 &ioa_cfg->saved_pcix_cmd_reg) != PCIBIOS_SUCCESSFUL) {
		dev_err(&ioa_cfg->pdev->dev, "Failed to save PCI-X command register\n");
		return -EIO;
	}

	ioa_cfg->saved_pcix_cmd_reg |= PCI_X_CMD_DPERR_E | PCI_X_CMD_ERO;
	return 0;
}

/**
 * ipr_set_pcix_cmd_reg - Setup PCI-X command register
 * @ioa_cfg:	ioa config struct
 *
 * Return value:
 * 	0 on success / -EIO on failure
 **/
static int ipr_set_pcix_cmd_reg(struct ipr_ioa_cfg *ioa_cfg)
{
	int pcix_cmd_reg = pci_find_capability(ioa_cfg->pdev, PCI_CAP_ID_PCIX);

	if (pcix_cmd_reg) {
		if (pci_write_config_word(ioa_cfg->pdev, pcix_cmd_reg + PCI_X_CMD,
					  ioa_cfg->saved_pcix_cmd_reg) != PCIBIOS_SUCCESSFUL) {
			dev_err(&ioa_cfg->pdev->dev, "Failed to setup PCI-X command register\n");
			return -EIO;
		}
	}

	return 0;
}

/**
 * ipr_sata_eh_done - done function for aborted SATA commands
 * @ipr_cmd:	ipr command struct
 *
 * This function is invoked for ops generated to SATA
 * devices which are being aborted.
 *
 * Return value:
 * 	none
 **/
static void ipr_sata_eh_done(struct ipr_cmnd *ipr_cmd)
{
	struct ata_queued_cmd *qc = ipr_cmd->qc;
	struct ipr_sata_port *sata_port = qc->ap->private_data;

	qc->err_mask |= AC_ERR_OTHER;
	sata_port->ioasa.status |= ATA_BUSY;
	list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
	ata_qc_complete(qc);
}

/**
 * ipr_scsi_eh_done - mid-layer done function for aborted ops
 * @ipr_cmd:	ipr command struct
 *
 * This function is invoked by the interrupt handler for
 * ops generated by the SCSI mid-layer which are being aborted.
 *
 * Return value:
 * 	none
 **/
static void ipr_scsi_eh_done(struct ipr_cmnd *ipr_cmd)
{
	struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;

	scsi_cmd->result |= (DID_ERROR << 16);

	scsi_dma_unmap(ipr_cmd->scsi_cmd);
	scsi_cmd->scsi_done(scsi_cmd);
	list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
}

/**
 * ipr_fail_all_ops - Fails all outstanding ops.
 * @ioa_cfg:	ioa config struct
 *
 * This function fails all outstanding ops.
 *
 * Return value:
 * 	none
 **/
static void ipr_fail_all_ops(struct ipr_ioa_cfg *ioa_cfg)
{
	struct ipr_cmnd *ipr_cmd, *temp;
	struct ipr_hrr_queue *hrrq;

	ENTER;
	for_each_hrrq(hrrq, ioa_cfg) {
		list_for_each_entry_safe(ipr_cmd,
					temp, &hrrq->hrrq_pending_q, queue) {
			list_del(&ipr_cmd->queue);

			ipr_cmd->s.ioasa.hdr.ioasc =
				cpu_to_be32(IPR_IOASC_IOA_WAS_RESET);
			ipr_cmd->s.ioasa.hdr.ilid =
				cpu_to_be32(IPR_DRIVER_ILID);

			if (ipr_cmd->scsi_cmd)
				ipr_cmd->done = ipr_scsi_eh_done;
			else if (ipr_cmd->qc)
				ipr_cmd->done = ipr_sata_eh_done;

			ipr_trc_hook(ipr_cmd, IPR_TRACE_FINISH,
				     IPR_IOASC_IOA_WAS_RESET);
			del_timer(&ipr_cmd->timer);
			ipr_cmd->done(ipr_cmd);
		}
	}
	LEAVE;
}

/**
 * ipr_send_command -  Send driver initiated requests.
 * @ipr_cmd:	ipr command struct
 *
 * This function sends a command to the adapter using the correct write call.
 * In the case of sis64, calculate the ioarcb size required. Then or in the
 * appropriate bits.
 *
 * Return value:
 * 	none
 **/
static void ipr_send_command(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
	dma_addr_t send_dma_addr = ipr_cmd->dma_addr;

	if (ioa_cfg->sis64) {
		/* The default size is 256 bytes */
		send_dma_addr |= 0x1;

		/* If the number of ioadls * size of ioadl > 128 bytes,
		   then use a 512 byte ioarcb */
		if (ipr_cmd->dma_use_sg * sizeof(struct ipr_ioadl64_desc) > 128)
			send_dma_addr |= 0x4;
		writeq(send_dma_addr, ioa_cfg->regs.ioarrin_reg);
	} else
		writel(send_dma_addr, ioa_cfg->regs.ioarrin_reg);
}

/**
 * ipr_do_req -  Send driver initiated requests.
 * @ipr_cmd:	ipr command struct
 * @done:	done function
 * @timeout_func:	timeout function
 * @timeout:	timeout value
 *
 * This function sends the specified command to the adapter with the
 * timeout given. The done function is invoked on command completion.
 *
 * Return value:
 * 	none
 **/
static void ipr_do_req(struct ipr_cmnd *ipr_cmd,
		       void (*done) (struct ipr_cmnd *),
		       void (*timeout_func) (struct ipr_cmnd *), u32 timeout)
{
	list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_pending_q);

	ipr_cmd->done = done;

	ipr_cmd->timer.data = (unsigned long) ipr_cmd;
	ipr_cmd->timer.expires = jiffies + timeout;
	ipr_cmd->timer.function = (void (*)(unsigned long))timeout_func;

	add_timer(&ipr_cmd->timer);

	ipr_trc_hook(ipr_cmd, IPR_TRACE_START, 0);

	ipr_send_command(ipr_cmd);
}

/**
 * ipr_internal_cmd_done - Op done function for an internally generated op.
 * @ipr_cmd:	ipr command struct
 *
 * This function is the op done function for an internally generated,
 * blocking op. It simply wakes the sleeping thread.
 *
 * Return value:
 * 	none
 **/
static void ipr_internal_cmd_done(struct ipr_cmnd *ipr_cmd)
{
	if (ipr_cmd->sibling)
		ipr_cmd->sibling = NULL;
	else
		complete(&ipr_cmd->completion);
}

/**
 * ipr_init_ioadl - initialize the ioadl for the correct SIS type
 * @ipr_cmd:	ipr command struct
 * @dma_addr:	dma address
 * @len:	transfer length
 * @flags:	ioadl flag value
 *
 * This function initializes an ioadl in the case where there is only a single
 * descriptor.
 *
 * Return value:
 * 	nothing
 **/
static void ipr_init_ioadl(struct ipr_cmnd *ipr_cmd, dma_addr_t dma_addr,
			   u32 len, int flags)
{
	struct ipr_ioadl_desc *ioadl = ipr_cmd->i.ioadl;
	struct ipr_ioadl64_desc *ioadl64 = ipr_cmd->i.ioadl64;

	ipr_cmd->dma_use_sg = 1;

	if (ipr_cmd->ioa_cfg->sis64) {
		ioadl64->flags = cpu_to_be32(flags);
		ioadl64->data_len = cpu_to_be32(len);
		ioadl64->address = cpu_to_be64(dma_addr);

		ipr_cmd->ioarcb.ioadl_len =
			cpu_to_be32(sizeof(struct ipr_ioadl64_desc));
		ipr_cmd->ioarcb.data_transfer_length = cpu_to_be32(len);
	} else {
		ioadl->flags_and_data_len = cpu_to_be32(flags | len);
		ioadl->address = cpu_to_be32(dma_addr);

		if (flags == IPR_IOADL_FLAGS_READ_LAST) {
			ipr_cmd->ioarcb.read_ioadl_len =
				cpu_to_be32(sizeof(struct ipr_ioadl_desc));
			ipr_cmd->ioarcb.read_data_transfer_length = cpu_to_be32(len);
		} else {
			ipr_cmd->ioarcb.ioadl_len =
				cpu_to_be32(sizeof(struct ipr_ioadl_desc));
			ipr_cmd->ioarcb.data_transfer_length = cpu_to_be32(len);
		}
	}
}

/**
 * ipr_send_blocking_cmd - Send command and sleep on its completion.
 * @ipr_cmd:	ipr command struct
 * @timeout_func:	function to invoke if command times out
 * @timeout:	timeout
 *
 * Return value:
 * 	none
 **/
static void ipr_send_blocking_cmd(struct ipr_cmnd *ipr_cmd,
				  void (*timeout_func) (struct ipr_cmnd *ipr_cmd),
				  u32 timeout)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;

	init_completion(&ipr_cmd->completion);
	ipr_do_req(ipr_cmd, ipr_internal_cmd_done, timeout_func, timeout);

	spin_unlock_irq(ioa_cfg->host->host_lock);
	wait_for_completion(&ipr_cmd->completion);
	spin_lock_irq(ioa_cfg->host->host_lock);
}

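/**
 * ipr_get_hrrq_index - Select the next HRR queue to use
 * @ioa_cfg:	ioa config struct
 *
 * With a single host RRQ, index 0 is always used. With multiple queues,
 * this round-robins through indexes 1 .. hrrq_num - 1, leaving queue 0
 * (IPR_INIT_HRRQ) for internal commands.
 *
 * Return value:
 * 	the selected HRRQ index
 **/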
static int ipr_get_hrrq_index(struct ipr_ioa_cfg *ioa_cfg)
{
	if (ioa_cfg->hrrq_num == 1)
		ioa_cfg->hrrq_index = 0;
	else {
		if (++ioa_cfg->hrrq_index >= ioa_cfg->hrrq_num)
			ioa_cfg->hrrq_index = 1;
	}
	return ioa_cfg->hrrq_index;
}

/**
 * ipr_send_hcam - Send an HCAM to the adapter.
 * @ioa_cfg:	ioa config struct
 * @type:	HCAM type
 * @hostrcb:	hostrcb struct
 *
 * This function will send a Host Controlled Async command to the adapter.
 * If HCAMs are currently not allowed to be issued to the adapter, it will
 * place the hostrcb on the free queue.
 *
 * Return value:
 * 	none
 **/
static void ipr_send_hcam(struct ipr_ioa_cfg *ioa_cfg, u8 type,
			  struct ipr_hostrcb *hostrcb)
{
	struct ipr_cmnd *ipr_cmd;
	struct ipr_ioarcb *ioarcb;

	if (ioa_cfg->allow_cmds) {
		ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
		list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_pending_q);
		list_add_tail(&hostrcb->queue, &ioa_cfg->hostrcb_pending_q);

		ipr_cmd->u.hostrcb = hostrcb;
		ioarcb = &ipr_cmd->ioarcb;

		ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
		ioarcb->cmd_pkt.request_type = IPR_RQTYPE_HCAM;
		ioarcb->cmd_pkt.cdb[0] = IPR_HOST_CONTROLLED_ASYNC;
		ioarcb->cmd_pkt.cdb[1] = type;
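		/* The HCAM buffer length is carried in CDB bytes 7-8, MSB first */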
		ioarcb->cmd_pkt.cdb[7] = (sizeof(hostrcb->hcam) >> 8) & 0xff;
		ioarcb->cmd_pkt.cdb[8] = sizeof(hostrcb->hcam) & 0xff;

		ipr_init_ioadl(ipr_cmd, hostrcb->hostrcb_dma,
			       sizeof(hostrcb->hcam), IPR_IOADL_FLAGS_READ_LAST);

		if (type == IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE)
			ipr_cmd->done = ipr_process_ccn;
		else
			ipr_cmd->done = ipr_process_error;

		ipr_trc_hook(ipr_cmd, IPR_TRACE_START, IPR_IOA_RES_ADDR);

		ipr_send_command(ipr_cmd);
	} else {
		list_add_tail(&hostrcb->queue, &ioa_cfg->hostrcb_free_q);
	}
}

/**
 * ipr_update_ata_class - Update the ata class in the resource entry
 * @res:	resource entry struct
 * @proto:	cfgte device bus protocol value
 *
 * Return value:
 * 	none
 **/
static void ipr_update_ata_class(struct ipr_resource_entry *res, unsigned int proto)
{
	switch (proto) {
	case IPR_PROTO_SATA:
	case IPR_PROTO_SAS_STP:
		res->ata_class = ATA_DEV_ATA;
		break;
	case IPR_PROTO_SATA_ATAPI:
	case IPR_PROTO_SAS_STP_ATAPI:
		res->ata_class = ATA_DEV_ATAPI;
		break;
	default:
		res->ata_class = ATA_DEV_UNKNOWN;
		break;
	};
}

/**
 * ipr_init_res_entry - Initialize a resource entry struct.
 * @res:	resource entry struct
 * @cfgtew:	config table entry wrapper struct
 *
 * Return value:
 * 	none
 **/
static void ipr_init_res_entry(struct ipr_resource_entry *res,
			       struct ipr_config_table_entry_wrapper *cfgtew)
{
	int found = 0;
	unsigned int proto;
	struct ipr_ioa_cfg *ioa_cfg = res->ioa_cfg;
	struct ipr_resource_entry *gscsi_res = NULL;

	res->needs_sync_complete = 0;
	res->in_erp = 0;
	res->add_to_ml = 0;
	res->del_from_ml = 0;
	res->resetting_device = 0;
	res->sdev = NULL;
	res->sata_port = NULL;

	if (ioa_cfg->sis64) {
		proto = cfgtew->u.cfgte64->proto;
		res->res_flags = cfgtew->u.cfgte64->res_flags;
		res->qmodel = IPR_QUEUEING_MODEL64(res);
		res->type = cfgtew->u.cfgte64->res_type;

		memcpy(res->res_path, &cfgtew->u.cfgte64->res_path,
		       sizeof(res->res_path));

		res->bus = 0;
		memcpy(&res->dev_lun.scsi_lun, &cfgtew->u.cfgte64->lun,
		       sizeof(res->dev_lun.scsi_lun));
		res->lun = scsilun_to_int(&res->dev_lun);

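		/*
		 * Assign a virtual target id from the per-class bitmaps:
		 * generic SCSI devices sharing a dev_id reuse an existing
		 * target id, while arrays and volume sets take the first
		 * free bit on their own virtual buses.
		 */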
		if (res->type == IPR_RES_TYPE_GENERIC_SCSI) {
			list_for_each_entry(gscsi_res, &ioa_cfg->used_res_q, queue) {
				if (gscsi_res->dev_id == cfgtew->u.cfgte64->dev_id) {
					found = 1;
					res->target = gscsi_res->target;
					break;
				}
			}
			if (!found) {
				res->target = find_first_zero_bit(ioa_cfg->target_ids,
								  ioa_cfg->max_devs_supported);
				set_bit(res->target, ioa_cfg->target_ids);
			}
		} else if (res->type == IPR_RES_TYPE_IOAFP) {
			res->bus = IPR_IOAFP_VIRTUAL_BUS;
			res->target = 0;
		} else if (res->type == IPR_RES_TYPE_ARRAY) {
			res->bus = IPR_ARRAY_VIRTUAL_BUS;
			res->target = find_first_zero_bit(ioa_cfg->array_ids,
							  ioa_cfg->max_devs_supported);
			set_bit(res->target, ioa_cfg->array_ids);
		} else if (res->type == IPR_RES_TYPE_VOLUME_SET) {
			res->bus = IPR_VSET_VIRTUAL_BUS;
			res->target = find_first_zero_bit(ioa_cfg->vset_ids,
							  ioa_cfg->max_devs_supported);
			set_bit(res->target, ioa_cfg->vset_ids);
		} else {
			res->target = find_first_zero_bit(ioa_cfg->target_ids,
							  ioa_cfg->max_devs_supported);
			set_bit(res->target, ioa_cfg->target_ids);
		}
	} else {
		proto = cfgtew->u.cfgte->proto;
		res->qmodel = IPR_QUEUEING_MODEL(res);
		res->flags = cfgtew->u.cfgte->flags;
		if (res->flags & IPR_IS_IOA_RESOURCE)
			res->type = IPR_RES_TYPE_IOAFP;
		else
			res->type = cfgtew->u.cfgte->rsvd_subtype & 0x0f;

		res->bus = cfgtew->u.cfgte->res_addr.bus;
		res->target = cfgtew->u.cfgte->res_addr.target;
		res->lun = cfgtew->u.cfgte->res_addr.lun;
		res->lun_wwn = get_unaligned_be64(cfgtew->u.cfgte->lun_wwn);
	}

	ipr_update_ata_class(res, proto);
}

/**
 * ipr_is_same_device - Determine if two devices are the same.
 * @res:	resource entry struct
 * @cfgtew:	config table entry wrapper struct
 *
 * Return value:
 * 	1 if the devices are the same / 0 otherwise
 **/
static int ipr_is_same_device(struct ipr_resource_entry *res,
			      struct ipr_config_table_entry_wrapper *cfgtew)
{
	if (res->ioa_cfg->sis64) {
		if (!memcmp(&res->dev_id, &cfgtew->u.cfgte64->dev_id,
			    sizeof(cfgtew->u.cfgte64->dev_id)) &&
		    !memcmp(&res->dev_lun.scsi_lun, &cfgtew->u.cfgte64->lun,
			    sizeof(cfgtew->u.cfgte64->lun))) {
			return 1;
		}
	} else {
		if (res->bus == cfgtew->u.cfgte->res_addr.bus &&
		    res->target == cfgtew->u.cfgte->res_addr.target &&
		    res->lun == cfgtew->u.cfgte->res_addr.lun)
			return 1;
	}

	return 0;
}

/**
 * __ipr_format_res_path - Format the resource path for printing.
 * @res_path:	resource path
 * @buffer:	buffer
 * @len:	length of buffer provided
 *
 * Return value:
 * 	pointer to buffer
 **/
static char *__ipr_format_res_path(u8 *res_path, char *buffer, int len)
{
	int i;
	char *p = buffer;

	*p = '\0';
	p += snprintf(p, buffer + len - p, "%02X", res_path[0]);
	for (i = 1; res_path[i] != 0xff && ((i * 3) < len); i++)
		p += snprintf(p, buffer + len - p, "-%02X", res_path[i]);

	return buffer;
}

/**
 * ipr_format_res_path - Format the resource path for printing.
 * @ioa_cfg:	ioa config struct
 * @res_path:	resource path
 * @buffer:	buffer
 * @len:	length of buffer provided
 *
 * Return value:
 *	pointer to buffer
 **/
static char *ipr_format_res_path(struct ipr_ioa_cfg *ioa_cfg,
				 u8 *res_path, char *buffer, int len)
{
	char *p = buffer;

	*p = '\0';
	p += snprintf(p, buffer + len - p, "%d/", ioa_cfg->host->host_no);
	__ipr_format_res_path(res_path, p, len - (buffer - p));
	return buffer;
}

3e7ebdfa
WB
1235/**
1236 * ipr_update_res_entry - Update the resource entry.
1237 * @res: resource entry struct
1238 * @cfgtew: config table entry wrapper struct
1239 *
1240 * Return value:
1241 * none
1242 **/
1243static void ipr_update_res_entry(struct ipr_resource_entry *res,
1244 struct ipr_config_table_entry_wrapper *cfgtew)
1245{
1246 char buffer[IPR_MAX_RES_PATH_LENGTH];
1247 unsigned int proto;
1248 int new_path = 0;
1249
1250 if (res->ioa_cfg->sis64) {
1251 res->flags = cfgtew->u.cfgte64->flags;
1252 res->res_flags = cfgtew->u.cfgte64->res_flags;
75576bb9 1253 res->type = cfgtew->u.cfgte64->res_type;
3e7ebdfa
WB
1254
1255 memcpy(&res->std_inq_data, &cfgtew->u.cfgte64->std_inq_data,
1256 sizeof(struct ipr_std_inq_data));
1257
1258 res->qmodel = IPR_QUEUEING_MODEL64(res);
1259 proto = cfgtew->u.cfgte64->proto;
1260 res->res_handle = cfgtew->u.cfgte64->res_handle;
1261 res->dev_id = cfgtew->u.cfgte64->dev_id;
1262
1263 memcpy(&res->dev_lun.scsi_lun, &cfgtew->u.cfgte64->lun,
1264 sizeof(res->dev_lun.scsi_lun));
1265
1266 if (memcmp(res->res_path, &cfgtew->u.cfgte64->res_path,
1267 sizeof(res->res_path))) {
1268 memcpy(res->res_path, &cfgtew->u.cfgte64->res_path,
1269 sizeof(res->res_path));
1270 new_path = 1;
1271 }
1272
1273 if (res->sdev && new_path)
1274 sdev_printk(KERN_INFO, res->sdev, "Resource path: %s\n",
b3b3b407
BK
1275 ipr_format_res_path(res->ioa_cfg,
1276 res->res_path, buffer, sizeof(buffer)));
3e7ebdfa
WB
1277 } else {
1278 res->flags = cfgtew->u.cfgte->flags;
1279 if (res->flags & IPR_IS_IOA_RESOURCE)
1280 res->type = IPR_RES_TYPE_IOAFP;
1281 else
1282 res->type = cfgtew->u.cfgte->rsvd_subtype & 0x0f;
1283
1284 memcpy(&res->std_inq_data, &cfgtew->u.cfgte->std_inq_data,
1285 sizeof(struct ipr_std_inq_data));
1286
1287 res->qmodel = IPR_QUEUEING_MODEL(res);
1288 proto = cfgtew->u.cfgte->proto;
1289 res->res_handle = cfgtew->u.cfgte->res_handle;
1290 }
1291
1292 ipr_update_ata_class(res, proto);
1293}
1294
1295/**
1296 * ipr_clear_res_target - Clear the bit in the bit map representing the target
1297 * for the resource.
1298 * @res: resource entry struct
1299 * @cfgtew: config table entry wrapper struct
1300 *
1301 * Return value:
1302 * none
1303 **/
1304static void ipr_clear_res_target(struct ipr_resource_entry *res)
1305{
1306 struct ipr_resource_entry *gscsi_res = NULL;
1307 struct ipr_ioa_cfg *ioa_cfg = res->ioa_cfg;
1308
1309 if (!ioa_cfg->sis64)
1310 return;
1311
1312 if (res->bus == IPR_ARRAY_VIRTUAL_BUS)
1313 clear_bit(res->target, ioa_cfg->array_ids);
1314 else if (res->bus == IPR_VSET_VIRTUAL_BUS)
1315 clear_bit(res->target, ioa_cfg->vset_ids);
1316 else if (res->bus == 0 && res->type == IPR_RES_TYPE_GENERIC_SCSI) {
1317 list_for_each_entry(gscsi_res, &ioa_cfg->used_res_q, queue)
1318 if (gscsi_res->dev_id == res->dev_id && gscsi_res != res)
1319 return;
1320 clear_bit(res->target, ioa_cfg->target_ids);
1321
1322 } else if (res->bus == 0)
1323 clear_bit(res->target, ioa_cfg->target_ids);
1da177e4
LT
1324}
1325
1326/**
1327 * ipr_handle_config_change - Handle a config change from the adapter
1328 * @ioa_cfg: ioa config struct
1329 * @hostrcb: hostrcb
1330 *
1331 * Return value:
1332 * none
1333 **/
1334static void ipr_handle_config_change(struct ipr_ioa_cfg *ioa_cfg,
3e7ebdfa 1335 struct ipr_hostrcb *hostrcb)
1da177e4
LT
1336{
1337 struct ipr_resource_entry *res = NULL;
3e7ebdfa
WB
1338 struct ipr_config_table_entry_wrapper cfgtew;
1339 __be32 cc_res_handle;
1340
1da177e4
LT
1341 u32 is_ndn = 1;
1342
3e7ebdfa
WB
1343 if (ioa_cfg->sis64) {
1344 cfgtew.u.cfgte64 = &hostrcb->hcam.u.ccn.u.cfgte64;
1345 cc_res_handle = cfgtew.u.cfgte64->res_handle;
1346 } else {
1347 cfgtew.u.cfgte = &hostrcb->hcam.u.ccn.u.cfgte;
1348 cc_res_handle = cfgtew.u.cfgte->res_handle;
1349 }
1da177e4
LT
1350
1351 list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
3e7ebdfa 1352 if (res->res_handle == cc_res_handle) {
1da177e4
LT
1353 is_ndn = 0;
1354 break;
1355 }
1356 }
1357
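	/*
	 * No configured resource matched the handle, so this notification
	 * describes a new device: pull an entry from the free list if one
	 * is available, otherwise hand the hostrcb straight back to the
	 * adapter and bail out.
	 */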
	if (is_ndn) {
		if (list_empty(&ioa_cfg->free_res_q)) {
			ipr_send_hcam(ioa_cfg,
				      IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE,
				      hostrcb);
			return;
		}

		res = list_entry(ioa_cfg->free_res_q.next,
				 struct ipr_resource_entry, queue);

		list_del(&res->queue);
		ipr_init_res_entry(res, &cfgtew);
		list_add_tail(&res->queue, &ioa_cfg->used_res_q);
	}

	ipr_update_res_entry(res, &cfgtew);

	if (hostrcb->hcam.notify_type == IPR_HOST_RCB_NOTIF_TYPE_REM_ENTRY) {
		if (res->sdev) {
			res->del_from_ml = 1;
			res->res_handle = IPR_INVALID_RES_HANDLE;
			if (ioa_cfg->allow_ml_add_del)
				schedule_work(&ioa_cfg->work_q);
		} else {
			ipr_clear_res_target(res);
			list_move_tail(&res->queue, &ioa_cfg->free_res_q);
		}
	} else if (!res->sdev || res->del_from_ml) {
		res->add_to_ml = 1;
		if (ioa_cfg->allow_ml_add_del)
			schedule_work(&ioa_cfg->work_q);
	}

	ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE, hostrcb);
}

/**
 * ipr_process_ccn - Op done function for a CCN.
 * @ipr_cmd:	ipr command struct
 *
 * This function is the op done function for a configuration
 * change notification host controlled async from the adapter.
 *
 * Return value:
 * 	none
 **/
static void ipr_process_ccn(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
	struct ipr_hostrcb *hostrcb = ipr_cmd->u.hostrcb;
	u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);

	list_del(&hostrcb->queue);
	list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);

	if (ioasc) {
		if (ioasc != IPR_IOASC_IOA_WAS_RESET)
			dev_err(&ioa_cfg->pdev->dev,
				"Host RCB failed with IOASC: 0x%08X\n", ioasc);

		ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE, hostrcb);
	} else {
		ipr_handle_config_change(ioa_cfg, hostrcb);
	}
}

8cf093e2
BK
1425/**
1426 * strip_and_pad_whitespace - Strip and pad trailing whitespace.
1427 * @i: index into buffer
1428 * @buf: string to modify
1429 *
1430 * This function will strip all trailing whitespace, pad the end
1431 * of the string with a single space, and NULL terminate the string.
1432 *
1433 * Return value:
1434 * new length of string
1435 **/
1436static int strip_and_pad_whitespace(int i, char *buf)
1437{
1438 while (i && buf[i] == ' ')
1439 i--;
1440 buf[i+1] = ' ';
1441 buf[i+2] = '\0';
1442 return i + 2;
1443}
1444
1445/**
1446 * ipr_log_vpd_compact - Log the passed extended VPD compactly.
1447 * @prefix: string to print at start of printk
1448 * @hostrcb: hostrcb pointer
1449 * @vpd: vendor/product id/sn struct
1450 *
1451 * Return value:
1452 * none
1453 **/
1454static void ipr_log_vpd_compact(char *prefix, struct ipr_hostrcb *hostrcb,
1455 struct ipr_vpd *vpd)
1456{
1457 char buffer[IPR_VENDOR_ID_LEN + IPR_PROD_ID_LEN + IPR_SERIAL_NUM_LEN + 3];
1458 int i = 0;
1459
1460 memcpy(buffer, vpd->vpids.vendor_id, IPR_VENDOR_ID_LEN);
1461 i = strip_and_pad_whitespace(IPR_VENDOR_ID_LEN - 1, buffer);
1462
1463 memcpy(&buffer[i], vpd->vpids.product_id, IPR_PROD_ID_LEN);
1464 i = strip_and_pad_whitespace(i + IPR_PROD_ID_LEN - 1, buffer);
1465
1466 memcpy(&buffer[i], vpd->sn, IPR_SERIAL_NUM_LEN);
1467 buffer[IPR_SERIAL_NUM_LEN + i] = '\0';
1468
1469 ipr_hcam_err(hostrcb, "%s VPID/SN: %s\n", prefix, buffer);
1470}
1471
1da177e4
LT
1472/**
1473 * ipr_log_vpd - Log the passed VPD to the error log.
cfc32139 1474 * @vpd: vendor/product id/sn struct
1da177e4
LT
1475 *
1476 * Return value:
1477 * none
1478 **/
cfc32139 1479static void ipr_log_vpd(struct ipr_vpd *vpd)
1da177e4
LT
1480{
1481 char buffer[IPR_VENDOR_ID_LEN + IPR_PROD_ID_LEN
1482 + IPR_SERIAL_NUM_LEN];
1483
cfc32139
BK
1484 memcpy(buffer, vpd->vpids.vendor_id, IPR_VENDOR_ID_LEN);
1485 memcpy(buffer + IPR_VENDOR_ID_LEN, vpd->vpids.product_id,
1da177e4
LT
1486 IPR_PROD_ID_LEN);
1487 buffer[IPR_VENDOR_ID_LEN + IPR_PROD_ID_LEN] = '\0';
1488 ipr_err("Vendor/Product ID: %s\n", buffer);
1489
cfc32139 1490 memcpy(buffer, vpd->sn, IPR_SERIAL_NUM_LEN);
1da177e4
LT
1491 buffer[IPR_SERIAL_NUM_LEN] = '\0';
1492 ipr_err(" Serial Number: %s\n", buffer);
1493}
1494
8cf093e2
BK
1495/**
1496 * ipr_log_ext_vpd_compact - Log the passed extended VPD compactly.
1497 * @prefix: string to print at start of printk
1498 * @hostrcb: hostrcb pointer
1499 * @vpd: vendor/product id/sn/wwn struct
1500 *
1501 * Return value:
1502 * none
1503 **/
1504static void ipr_log_ext_vpd_compact(char *prefix, struct ipr_hostrcb *hostrcb,
1505 struct ipr_ext_vpd *vpd)
1506{
1507 ipr_log_vpd_compact(prefix, hostrcb, &vpd->vpd);
1508 ipr_hcam_err(hostrcb, "%s WWN: %08X%08X\n", prefix,
1509 be32_to_cpu(vpd->wwid[0]), be32_to_cpu(vpd->wwid[1]));
1510}
1511
1512/**
1513 * ipr_log_ext_vpd - Log the passed extended VPD to the error log.
1514 * @vpd: vendor/product id/sn/wwn struct
1515 *
1516 * Return value:
1517 * none
1518 **/
1519static void ipr_log_ext_vpd(struct ipr_ext_vpd *vpd)
1520{
1521 ipr_log_vpd(&vpd->vpd);
1522 ipr_err(" WWN: %08X%08X\n", be32_to_cpu(vpd->wwid[0]),
1523 be32_to_cpu(vpd->wwid[1]));
1524}
1525
1526/**
1527 * ipr_log_enhanced_cache_error - Log a cache error.
1528 * @ioa_cfg: ioa config struct
1529 * @hostrcb: hostrcb struct
1530 *
1531 * Return value:
1532 * none
1533 **/
1534static void ipr_log_enhanced_cache_error(struct ipr_ioa_cfg *ioa_cfg,
1535 struct ipr_hostrcb *hostrcb)
1536{
1537 struct ipr_hostrcb_type_12_error *error;
1538
1539 if (ioa_cfg->sis64)
1540 error = &hostrcb->hcam.u.error64.u.type_12_error;
1541 else
1542 error = &hostrcb->hcam.u.error.u.type_12_error;
1543
1544 ipr_err("-----Current Configuration-----\n");
1545 ipr_err("Cache Directory Card Information:\n");
1546 ipr_log_ext_vpd(&error->ioa_vpd);
1547 ipr_err("Adapter Card Information:\n");
1548 ipr_log_ext_vpd(&error->cfc_vpd);
1549
1550 ipr_err("-----Expected Configuration-----\n");
1551 ipr_err("Cache Directory Card Information:\n");
1552 ipr_log_ext_vpd(&error->ioa_last_attached_to_cfc_vpd);
1553 ipr_err("Adapter Card Information:\n");
1554 ipr_log_ext_vpd(&error->cfc_last_attached_to_ioa_vpd);
1555
1556 ipr_err("Additional IOA Data: %08X %08X %08X\n",
1557 be32_to_cpu(error->ioa_data[0]),
1558 be32_to_cpu(error->ioa_data[1]),
1559 be32_to_cpu(error->ioa_data[2]));
1560}
1561
1562/**
1563 * ipr_log_cache_error - Log a cache error.
1564 * @ioa_cfg: ioa config struct
1565 * @hostrcb: hostrcb struct
1566 *
1567 * Return value:
1568 * none
1569 **/
1570static void ipr_log_cache_error(struct ipr_ioa_cfg *ioa_cfg,
1571 struct ipr_hostrcb *hostrcb)
1572{
1573 struct ipr_hostrcb_type_02_error *error =
1574 &hostrcb->hcam.u.error.u.type_02_error;
1575
1576 ipr_err("-----Current Configuration-----\n");
1577 ipr_err("Cache Directory Card Information:\n");
1578 ipr_log_vpd(&error->ioa_vpd);
1579 ipr_err("Adapter Card Information:\n");
1580 ipr_log_vpd(&error->cfc_vpd);
1581
1582 ipr_err("-----Expected Configuration-----\n");
1583 ipr_err("Cache Directory Card Information:\n");
1584 ipr_log_vpd(&error->ioa_last_attached_to_cfc_vpd);
1585 ipr_err("Adapter Card Information:\n");
1586 ipr_log_vpd(&error->cfc_last_attached_to_ioa_vpd);
1587
1588 ipr_err("Additional IOA Data: %08X %08X %08X\n",
1589 be32_to_cpu(error->ioa_data[0]),
1590 be32_to_cpu(error->ioa_data[1]),
1591 be32_to_cpu(error->ioa_data[2]));
1592}
1593
1594/**
1595 * ipr_log_enhanced_config_error - Log a configuration error.
1596 * @ioa_cfg: ioa config struct
1597 * @hostrcb: hostrcb struct
1598 *
1599 * Return value:
1600 * none
1601 **/
1602static void ipr_log_enhanced_config_error(struct ipr_ioa_cfg *ioa_cfg,
1603 struct ipr_hostrcb *hostrcb)
1604{
1605 int errors_logged, i;
1606 struct ipr_hostrcb_device_data_entry_enhanced *dev_entry;
1607 struct ipr_hostrcb_type_13_error *error;
1608
1609 error = &hostrcb->hcam.u.error.u.type_13_error;
1610 errors_logged = be32_to_cpu(error->errors_logged);
1611
1612 ipr_err("Device Errors Detected/Logged: %d/%d\n",
1613 be32_to_cpu(error->errors_detected), errors_logged);
1614
1615 dev_entry = error->dev;
1616
1617 for (i = 0; i < errors_logged; i++, dev_entry++) {
1618 ipr_err_separator;
1619
1620 ipr_phys_res_err(ioa_cfg, dev_entry->dev_res_addr, "Device %d", i + 1);
1621 ipr_log_ext_vpd(&dev_entry->vpd);
1622
1623 ipr_err("-----New Device Information-----\n");
1624 ipr_log_ext_vpd(&dev_entry->new_vpd);
1625
1626 ipr_err("Cache Directory Card Information:\n");
1627 ipr_log_ext_vpd(&dev_entry->ioa_last_with_dev_vpd);
1628
1629 ipr_err("Adapter Card Information:\n");
1630 ipr_log_ext_vpd(&dev_entry->cfc_last_with_dev_vpd);
1631 }
1632}
1633
1634/**
1635 * ipr_log_sis64_config_error - Log a device error.
1636 * @ioa_cfg: ioa config struct
1637 * @hostrcb: hostrcb struct
1638 *
1639 * Return value:
1640 * none
1641 **/
1642static void ipr_log_sis64_config_error(struct ipr_ioa_cfg *ioa_cfg,
1643 struct ipr_hostrcb *hostrcb)
1644{
1645 int errors_logged, i;
1646 struct ipr_hostrcb64_device_data_entry_enhanced *dev_entry;
1647 struct ipr_hostrcb_type_23_error *error;
1648 char buffer[IPR_MAX_RES_PATH_LENGTH];
1649
1650 error = &hostrcb->hcam.u.error64.u.type_23_error;
1651 errors_logged = be32_to_cpu(error->errors_logged);
1652
1653 ipr_err("Device Errors Detected/Logged: %d/%d\n",
1654 be32_to_cpu(error->errors_detected), errors_logged);
1655
1656 dev_entry = error->dev;
1657
1658 for (i = 0; i < errors_logged; i++, dev_entry++) {
1659 ipr_err_separator;
1660
1661 ipr_err("Device %d : %s", i + 1,
1662 __ipr_format_res_path(dev_entry->res_path,
1663 buffer, sizeof(buffer)));
1664 ipr_log_ext_vpd(&dev_entry->vpd);
1665
1666 ipr_err("-----New Device Information-----\n");
1667 ipr_log_ext_vpd(&dev_entry->new_vpd);
1668
1669 ipr_err("Cache Directory Card Information:\n");
1670 ipr_log_ext_vpd(&dev_entry->ioa_last_with_dev_vpd);
1671
1672 ipr_err("Adapter Card Information:\n");
1673 ipr_log_ext_vpd(&dev_entry->cfc_last_with_dev_vpd);
1674 }
1675}
1676
1677/**
1678 * ipr_log_config_error - Log a configuration error.
1679 * @ioa_cfg: ioa config struct
1680 * @hostrcb: hostrcb struct
1681 *
1682 * Return value:
1683 * none
1684 **/
1685static void ipr_log_config_error(struct ipr_ioa_cfg *ioa_cfg,
1686 struct ipr_hostrcb *hostrcb)
1687{
1688 int errors_logged, i;
1689 struct ipr_hostrcb_device_data_entry *dev_entry;
1690 struct ipr_hostrcb_type_03_error *error;
1691
1692 error = &hostrcb->hcam.u.error.u.type_03_error;
1693 errors_logged = be32_to_cpu(error->errors_logged);
1694
1695 ipr_err("Device Errors Detected/Logged: %d/%d\n",
1696 be32_to_cpu(error->errors_detected), errors_logged);
1697
1698 dev_entry = error->dev;
1699
1700 for (i = 0; i < errors_logged; i++, dev_entry++) {
1701 ipr_err_separator;
1702
1703 ipr_phys_res_err(ioa_cfg, dev_entry->dev_res_addr, "Device %d", i + 1);
1704 ipr_log_vpd(&dev_entry->vpd);
1705
1706 ipr_err("-----New Device Information-----\n");
1707 ipr_log_vpd(&dev_entry->new_vpd);
1708
1709 ipr_err("Cache Directory Card Information:\n");
1710 ipr_log_vpd(&dev_entry->ioa_last_with_dev_vpd);
1711
1712 ipr_err("Adapter Card Information:\n");
1713 ipr_log_vpd(&dev_entry->cfc_last_with_dev_vpd);
1714
1715 ipr_err("Additional IOA Data: %08X %08X %08X %08X %08X\n",
1716 be32_to_cpu(dev_entry->ioa_data[0]),
1717 be32_to_cpu(dev_entry->ioa_data[1]),
1718 be32_to_cpu(dev_entry->ioa_data[2]),
1719 be32_to_cpu(dev_entry->ioa_data[3]),
1720 be32_to_cpu(dev_entry->ioa_data[4]));
1721 }
1722}
1723
1724/**
1725 * ipr_log_enhanced_array_error - Log an array configuration error.
1726 * @ioa_cfg: ioa config struct
1727 * @hostrcb: hostrcb struct
1728 *
1729 * Return value:
1730 * none
1731 **/
1732static void ipr_log_enhanced_array_error(struct ipr_ioa_cfg *ioa_cfg,
1733 struct ipr_hostrcb *hostrcb)
1734{
1735 int i, num_entries;
1736 struct ipr_hostrcb_type_14_error *error;
1737 struct ipr_hostrcb_array_data_entry_enhanced *array_entry;
1738 const u8 zero_sn[IPR_SERIAL_NUM_LEN] = { [0 ... IPR_SERIAL_NUM_LEN-1] = '0' };
1739
1740 error = &hostrcb->hcam.u.error.u.type_14_error;
1741
1742 ipr_err_separator;
1743
1744 ipr_err("RAID %s Array Configuration: %d:%d:%d:%d\n",
1745 error->protection_level,
1746 ioa_cfg->host->host_no,
1747 error->last_func_vset_res_addr.bus,
1748 error->last_func_vset_res_addr.target,
1749 error->last_func_vset_res_addr.lun);
1750
1751 ipr_err_separator;
1752
1753 array_entry = error->array_member;
1754 num_entries = min_t(u32, be32_to_cpu(error->num_entries),
1755 ARRAY_SIZE(error->array_member));
1756
1757 for (i = 0; i < num_entries; i++, array_entry++) {
1758 if (!memcmp(array_entry->vpd.vpd.sn, zero_sn, IPR_SERIAL_NUM_LEN))
1759 continue;
1760
1761 if (be32_to_cpu(error->exposed_mode_adn) == i)
1762 ipr_err("Exposed Array Member %d:\n", i);
1763 else
1764 ipr_err("Array Member %d:\n", i);
1765
1766 ipr_log_ext_vpd(&array_entry->vpd);
1767 ipr_phys_res_err(ioa_cfg, array_entry->dev_res_addr, "Current Location");
1768 ipr_phys_res_err(ioa_cfg, array_entry->expected_dev_res_addr,
1769 "Expected Location");
1770
1771 ipr_err_separator;
1772 }
1773}
1774
1775/**
1776 * ipr_log_array_error - Log an array configuration error.
1777 * @ioa_cfg: ioa config struct
1778 * @hostrcb: hostrcb struct
1779 *
1780 * Return value:
1781 * none
1782 **/
1783static void ipr_log_array_error(struct ipr_ioa_cfg *ioa_cfg,
1784 struct ipr_hostrcb *hostrcb)
1785{
1786 int i;
1787 struct ipr_hostrcb_type_04_error *error;
1788 struct ipr_hostrcb_array_data_entry *array_entry;
1789 const u8 zero_sn[IPR_SERIAL_NUM_LEN] = { [0 ... IPR_SERIAL_NUM_LEN-1] = '0' };
1790
1791 error = &hostrcb->hcam.u.error.u.type_04_error;
1792
1793 ipr_err_separator;
1794
1795 ipr_err("RAID %s Array Configuration: %d:%d:%d:%d\n",
1796 error->protection_level,
1797 ioa_cfg->host->host_no,
1798 error->last_func_vset_res_addr.bus,
1799 error->last_func_vset_res_addr.target,
1800 error->last_func_vset_res_addr.lun);
1801
1802 ipr_err_separator;
1803
1804 array_entry = error->array_member;
1805
1806 for (i = 0; i < 18; i++) {
1807 if (!memcmp(array_entry->vpd.sn, zero_sn, IPR_SERIAL_NUM_LEN))
1808 continue;
1809
1810 if (be32_to_cpu(error->exposed_mode_adn) == i)
1811 ipr_err("Exposed Array Member %d:\n", i);
1812 else
1813 ipr_err("Array Member %d:\n", i);
1814
1815 ipr_log_vpd(&array_entry->vpd);
1816
1817 ipr_phys_res_err(ioa_cfg, array_entry->dev_res_addr, "Current Location");
1818 ipr_phys_res_err(ioa_cfg, array_entry->expected_dev_res_addr,
1819 "Expected Location");
1820
1821 ipr_err_separator;
1822
1823 if (i == 9)
1824 array_entry = error->array_member2;
1825 else
1826 array_entry++;
1827 }
1828}
1829
1830/**
1831 * ipr_log_hex_data - Log additional hex IOA error data.
1832 * @ioa_cfg: ioa config struct
1833 * @data: IOA error data
1834 * @len: data length
1835 *
1836 * Return value:
1837 * none
1838 **/
1839static void ipr_log_hex_data(struct ipr_ioa_cfg *ioa_cfg, u32 *data, int len)
1840{
1841 int i;
1842
1843 if (len == 0)
1844 return;
1845
1846 if (ioa_cfg->log_level <= IPR_DEFAULT_LOG_LEVEL)
1847 len = min_t(int, len, IPR_DEFAULT_MAX_ERROR_DUMP);
1848
1849 for (i = 0; i < len / 4; i += 4) {
1850 ipr_err("%08X: %08X %08X %08X %08X\n", i*4,
1851 be32_to_cpu(data[i]),
1852 be32_to_cpu(data[i+1]),
1853 be32_to_cpu(data[i+2]),
1854 be32_to_cpu(data[i+3]));
1855 }
1856}
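/*
 * Illustrative output (values invented): i is a word index stepped four
 * words per line, so i*4 is the byte offset printed at the left, e.g.
 *
 *	00000000: 04448500 00000000 00FF0000 00000000
 *	00000010: 00000000 00000000 00000000 00000000
 *
 * and the dump is truncated to IPR_DEFAULT_MAX_ERROR_DUMP bytes unless
 * a raised log_level asks for the full buffer.
 */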
1857
1858/**
1859 * ipr_log_enhanced_dual_ioa_error - Log an enhanced dual adapter error.
1860 * @ioa_cfg: ioa config struct
1861 * @hostrcb: hostrcb struct
1862 *
1863 * Return value:
1864 * none
1865 **/
1866static void ipr_log_enhanced_dual_ioa_error(struct ipr_ioa_cfg *ioa_cfg,
1867 struct ipr_hostrcb *hostrcb)
1868{
1869 struct ipr_hostrcb_type_17_error *error;
1870
1871 if (ioa_cfg->sis64)
1872 error = &hostrcb->hcam.u.error64.u.type_17_error;
1873 else
1874 error = &hostrcb->hcam.u.error.u.type_17_error;
1875
1876 error->failure_reason[sizeof(error->failure_reason) - 1] = '\0';
1877 strim(error->failure_reason);
1878
1879 ipr_hcam_err(hostrcb, "%s [PRC: %08X]\n", error->failure_reason,
1880 be32_to_cpu(hostrcb->hcam.u.error.prc));
1881 ipr_log_ext_vpd_compact("Remote IOA", hostrcb, &error->vpd);
1882 ipr_log_hex_data(ioa_cfg, error->data,
1883 be32_to_cpu(hostrcb->hcam.length) -
1884 (offsetof(struct ipr_hostrcb_error, u) +
1885 offsetof(struct ipr_hostrcb_type_17_error, data)));
1886}
1887
1888/**
1889 * ipr_log_dual_ioa_error - Log a dual adapter error.
1890 * @ioa_cfg: ioa config struct
1891 * @hostrcb: hostrcb struct
1892 *
1893 * Return value:
1894 * none
1895 **/
1896static void ipr_log_dual_ioa_error(struct ipr_ioa_cfg *ioa_cfg,
1897 struct ipr_hostrcb *hostrcb)
1898{
1899 struct ipr_hostrcb_type_07_error *error;
1900
1901 error = &hostrcb->hcam.u.error.u.type_07_error;
1902 error->failure_reason[sizeof(error->failure_reason) - 1] = '\0';
1903 strim(error->failure_reason);
1904
1905 ipr_hcam_err(hostrcb, "%s [PRC: %08X]\n", error->failure_reason,
1906 be32_to_cpu(hostrcb->hcam.u.error.prc));
1907 ipr_log_vpd_compact("Remote IOA", hostrcb, &error->vpd);
1908 ipr_log_hex_data(ioa_cfg, error->data,
1909 be32_to_cpu(hostrcb->hcam.length) -
1910 (offsetof(struct ipr_hostrcb_error, u) +
1911 offsetof(struct ipr_hostrcb_type_07_error, data)));
1912}
1913
1914static const struct {
1915 u8 active;
1916 char *desc;
1917} path_active_desc[] = {
1918 { IPR_PATH_NO_INFO, "Path" },
1919 { IPR_PATH_ACTIVE, "Active path" },
1920 { IPR_PATH_NOT_ACTIVE, "Inactive path" }
1921};
1922
1923static const struct {
1924 u8 state;
1925 char *desc;
1926} path_state_desc[] = {
1927 { IPR_PATH_STATE_NO_INFO, "has no path state information available" },
1928 { IPR_PATH_HEALTHY, "is healthy" },
1929 { IPR_PATH_DEGRADED, "is degraded" },
1930 { IPR_PATH_FAILED, "is failed" }
1931};
1932
1933/**
1934 * ipr_log_fabric_path - Log a fabric path error
1935 * @hostrcb: hostrcb struct
1936 * @fabric: fabric descriptor
1937 *
1938 * Return value:
1939 * none
1940 **/
1941static void ipr_log_fabric_path(struct ipr_hostrcb *hostrcb,
1942 struct ipr_hostrcb_fabric_desc *fabric)
1943{
1944 int i, j;
1945 u8 path_state = fabric->path_state;
1946 u8 active = path_state & IPR_PATH_ACTIVE_MASK;
1947 u8 state = path_state & IPR_PATH_STATE_MASK;
1948
1949 for (i = 0; i < ARRAY_SIZE(path_active_desc); i++) {
1950 if (path_active_desc[i].active != active)
1951 continue;
1952
1953 for (j = 0; j < ARRAY_SIZE(path_state_desc); j++) {
1954 if (path_state_desc[j].state != state)
1955 continue;
1956
1957 if (fabric->cascaded_expander == 0xff && fabric->phy == 0xff) {
1958 ipr_hcam_err(hostrcb, "%s %s: IOA Port=%d\n",
1959 path_active_desc[i].desc, path_state_desc[j].desc,
1960 fabric->ioa_port);
1961 } else if (fabric->cascaded_expander == 0xff) {
1962 ipr_hcam_err(hostrcb, "%s %s: IOA Port=%d, Phy=%d\n",
1963 path_active_desc[i].desc, path_state_desc[j].desc,
1964 fabric->ioa_port, fabric->phy);
1965 } else if (fabric->phy == 0xff) {
1966 ipr_hcam_err(hostrcb, "%s %s: IOA Port=%d, Cascade=%d\n",
1967 path_active_desc[i].desc, path_state_desc[j].desc,
1968 fabric->ioa_port, fabric->cascaded_expander);
1969 } else {
1970 ipr_hcam_err(hostrcb, "%s %s: IOA Port=%d, Cascade=%d, Phy=%d\n",
1971 path_active_desc[i].desc, path_state_desc[j].desc,
1972 fabric->ioa_port, fabric->cascaded_expander, fabric->phy);
1973 }
1974 return;
1975 }
1976 }
1977
1978 ipr_err("Path state=%02X IOA Port=%d Cascade=%d Phy=%d\n", path_state,
1979 fabric->ioa_port, fabric->cascaded_expander, fabric->phy);
1980}
1981
1982/**
1983 * ipr_log64_fabric_path - Log a fabric path error
1984 * @hostrcb: hostrcb struct
1985 * @fabric: fabric descriptor
1986 *
1987 * Return value:
1988 * none
1989 **/
1990static void ipr_log64_fabric_path(struct ipr_hostrcb *hostrcb,
1991 struct ipr_hostrcb64_fabric_desc *fabric)
1992{
1993 int i, j;
1994 u8 path_state = fabric->path_state;
1995 u8 active = path_state & IPR_PATH_ACTIVE_MASK;
1996 u8 state = path_state & IPR_PATH_STATE_MASK;
1997 char buffer[IPR_MAX_RES_PATH_LENGTH];
1998
1999 for (i = 0; i < ARRAY_SIZE(path_active_desc); i++) {
2000 if (path_active_desc[i].active != active)
2001 continue;
2002
2003 for (j = 0; j < ARRAY_SIZE(path_state_desc); j++) {
2004 if (path_state_desc[j].state != state)
2005 continue;
2006
2007 ipr_hcam_err(hostrcb, "%s %s: Resource Path=%s\n",
2008 path_active_desc[i].desc, path_state_desc[j].desc,
2009 ipr_format_res_path(hostrcb->ioa_cfg,
2010 fabric->res_path,
2011 buffer, sizeof(buffer)));
2012 return;
2013 }
2014 }
2015
2016 ipr_err("Path state=%02X Resource Path=%s\n", path_state,
2017 ipr_format_res_path(hostrcb->ioa_cfg, fabric->res_path,
2018 buffer, sizeof(buffer)));
2019}
2020
2021static const struct {
2022 u8 type;
2023 char *desc;
2024} path_type_desc[] = {
2025 { IPR_PATH_CFG_IOA_PORT, "IOA port" },
2026 { IPR_PATH_CFG_EXP_PORT, "Expander port" },
2027 { IPR_PATH_CFG_DEVICE_PORT, "Device port" },
2028 { IPR_PATH_CFG_DEVICE_LUN, "Device LUN" }
2029};
2030
2031static const struct {
2032 u8 status;
2033 char *desc;
2034} path_status_desc[] = {
2035 { IPR_PATH_CFG_NO_PROB, "Functional" },
2036 { IPR_PATH_CFG_DEGRADED, "Degraded" },
2037 { IPR_PATH_CFG_FAILED, "Failed" },
2038 { IPR_PATH_CFG_SUSPECT, "Suspect" },
2039 { IPR_PATH_NOT_DETECTED, "Missing" },
2040 { IPR_PATH_INCORRECT_CONN, "Incorrectly connected" }
2041};
2042
2043static const char *link_rate[] = {
2044 "unknown",
2045 "disabled",
2046 "phy reset problem",
2047 "spinup hold",
2048 "port selector",
2049 "unknown",
2050 "unknown",
2051 "unknown",
2052 "1.5Gbps",
2053 "3.0Gbps",
2054 "unknown",
2055 "unknown",
2056 "unknown",
2057 "unknown",
2058 "unknown",
2059 "unknown"
2060};
2061
2062/**
2063 * ipr_log_path_elem - Log a fabric path element.
2064 * @hostrcb: hostrcb struct
2065 * @cfg: fabric path element struct
2066 *
2067 * Return value:
2068 * none
2069 **/
2070static void ipr_log_path_elem(struct ipr_hostrcb *hostrcb,
2071 struct ipr_hostrcb_config_element *cfg)
2072{
2073 int i, j;
2074 u8 type = cfg->type_status & IPR_PATH_CFG_TYPE_MASK;
2075 u8 status = cfg->type_status & IPR_PATH_CFG_STATUS_MASK;
2076
2077 if (type == IPR_PATH_CFG_NOT_EXIST)
2078 return;
2079
2080 for (i = 0; i < ARRAY_SIZE(path_type_desc); i++) {
2081 if (path_type_desc[i].type != type)
2082 continue;
2083
2084 for (j = 0; j < ARRAY_SIZE(path_status_desc); j++) {
2085 if (path_status_desc[j].status != status)
2086 continue;
2087
2088 if (type == IPR_PATH_CFG_IOA_PORT) {
2089 ipr_hcam_err(hostrcb, "%s %s: Phy=%d, Link rate=%s, WWN=%08X%08X\n",
2090 path_status_desc[j].desc, path_type_desc[i].desc,
2091 cfg->phy, link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
2092 be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
2093 } else {
2094 if (cfg->cascaded_expander == 0xff && cfg->phy == 0xff) {
2095 ipr_hcam_err(hostrcb, "%s %s: Link rate=%s, WWN=%08X%08X\n",
2096 path_status_desc[j].desc, path_type_desc[i].desc,
2097 link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
2098 be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
2099 } else if (cfg->cascaded_expander == 0xff) {
2100 ipr_hcam_err(hostrcb, "%s %s: Phy=%d, Link rate=%s, "
2101 "WWN=%08X%08X\n", path_status_desc[j].desc,
2102 path_type_desc[i].desc, cfg->phy,
2103 link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
2104 be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
2105 } else if (cfg->phy == 0xff) {
2106 ipr_hcam_err(hostrcb, "%s %s: Cascade=%d, Link rate=%s, "
2107 "WWN=%08X%08X\n", path_status_desc[j].desc,
2108 path_type_desc[i].desc, cfg->cascaded_expander,
2109 link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
2110 be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
2111 } else {
2112 ipr_hcam_err(hostrcb, "%s %s: Cascade=%d, Phy=%d, Link rate=%s "
2113 "WWN=%08X%08X\n", path_status_desc[j].desc,
2114 path_type_desc[i].desc, cfg->cascaded_expander, cfg->phy,
2115 link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
2116 be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
2117 }
2118 }
2119 return;
2120 }
2121 }
2122
2123 ipr_hcam_err(hostrcb, "Path element=%02X: Cascade=%d Phy=%d Link rate=%s "
2124 "WWN=%08X%08X\n", cfg->type_status, cfg->cascaded_expander, cfg->phy,
2125 link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
2126 be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
2127}
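/*
 * Sketch of the link-rate decode used above: the low nibble of
 * cfg->link_rate indexes the 16-entry link_rate[] table, which is what
 * IPR_PHY_LINK_RATE_MASK keeps:
 *
 *	const char *rate =
 *		link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK];
 *	// a raw value of 0x09 would decode to "3.0Gbps"
 */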
2128
2129/**
2130 * ipr_log64_path_elem - Log a fabric path element.
2131 * @hostrcb: hostrcb struct
2132 * @cfg: fabric path element struct
2133 *
2134 * Return value:
2135 * none
2136 **/
2137static void ipr_log64_path_elem(struct ipr_hostrcb *hostrcb,
2138 struct ipr_hostrcb64_config_element *cfg)
2139{
2140 int i, j;
2141 u8 desc_id = cfg->descriptor_id & IPR_DESCRIPTOR_MASK;
2142 u8 type = cfg->type_status & IPR_PATH_CFG_TYPE_MASK;
2143 u8 status = cfg->type_status & IPR_PATH_CFG_STATUS_MASK;
2144 char buffer[IPR_MAX_RES_PATH_LENGTH];
2145
2146 if (type == IPR_PATH_CFG_NOT_EXIST || desc_id != IPR_DESCRIPTOR_SIS64)
2147 return;
2148
2149 for (i = 0; i < ARRAY_SIZE(path_type_desc); i++) {
2150 if (path_type_desc[i].type != type)
2151 continue;
2152
2153 for (j = 0; j < ARRAY_SIZE(path_status_desc); j++) {
2154 if (path_status_desc[j].status != status)
2155 continue;
2156
2157 ipr_hcam_err(hostrcb, "%s %s: Resource Path=%s, Link rate=%s, WWN=%08X%08X\n",
2158 path_status_desc[j].desc, path_type_desc[i].desc,
2159 ipr_format_res_path(hostrcb->ioa_cfg,
2160 cfg->res_path, buffer, sizeof(buffer)),
2161 link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
2162 be32_to_cpu(cfg->wwid[0]),
2163 be32_to_cpu(cfg->wwid[1]));
2164 return;
2165 }
2166 }
2167 ipr_hcam_err(hostrcb, "Path element=%02X: Resource Path=%s, Link rate=%s "
2168 "WWN=%08X%08X\n", cfg->type_status,
2169 ipr_format_res_path(hostrcb->ioa_cfg,
2170 cfg->res_path, buffer, sizeof(buffer)),
2171 link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
2172 be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
2173}
2174
2175/**
2176 * ipr_log_fabric_error - Log a fabric error.
2177 * @ioa_cfg: ioa config struct
2178 * @hostrcb: hostrcb struct
2179 *
2180 * Return value:
2181 * none
2182 **/
2183static void ipr_log_fabric_error(struct ipr_ioa_cfg *ioa_cfg,
2184 struct ipr_hostrcb *hostrcb)
2185{
2186 struct ipr_hostrcb_type_20_error *error;
2187 struct ipr_hostrcb_fabric_desc *fabric;
2188 struct ipr_hostrcb_config_element *cfg;
2189 int i, add_len;
2190
2191 error = &hostrcb->hcam.u.error.u.type_20_error;
2192 error->failure_reason[sizeof(error->failure_reason) - 1] = '\0';
2193 ipr_hcam_err(hostrcb, "%s\n", error->failure_reason);
2194
2195 add_len = be32_to_cpu(hostrcb->hcam.length) -
2196 (offsetof(struct ipr_hostrcb_error, u) +
2197 offsetof(struct ipr_hostrcb_type_20_error, desc));
2198
2199 for (i = 0, fabric = error->desc; i < error->num_entries; i++) {
2200 ipr_log_fabric_path(hostrcb, fabric);
2201 for_each_fabric_cfg(fabric, cfg)
2202 ipr_log_path_elem(hostrcb, cfg);
2203
2204 add_len -= be16_to_cpu(fabric->length);
2205 fabric = (struct ipr_hostrcb_fabric_desc *)
2206 ((unsigned long)fabric + be16_to_cpu(fabric->length));
2207 }
2208
2209 ipr_log_hex_data(ioa_cfg, (u32 *)fabric, add_len);
2210}
2211
2212/**
2213 * ipr_log_sis64_array_error - Log a sis64 array error.
2214 * @ioa_cfg: ioa config struct
2215 * @hostrcb: hostrcb struct
2216 *
2217 * Return value:
2218 * none
2219 **/
2220static void ipr_log_sis64_array_error(struct ipr_ioa_cfg *ioa_cfg,
2221 struct ipr_hostrcb *hostrcb)
2222{
2223 int i, num_entries;
2224 struct ipr_hostrcb_type_24_error *error;
2225 struct ipr_hostrcb64_array_data_entry *array_entry;
2226 char buffer[IPR_MAX_RES_PATH_LENGTH];
2227 const u8 zero_sn[IPR_SERIAL_NUM_LEN] = { [0 ... IPR_SERIAL_NUM_LEN-1] = '0' };
2228
2229 error = &hostrcb->hcam.u.error64.u.type_24_error;
2230
2231 ipr_err_separator;
2232
2233 ipr_err("RAID %s Array Configuration: %s\n",
2234 error->protection_level,
2235 ipr_format_res_path(ioa_cfg, error->last_res_path,
2236 buffer, sizeof(buffer)));
2237
2238 ipr_err_separator;
2239
2240 array_entry = error->array_member;
2241 num_entries = min_t(u32, error->num_entries,
2242 ARRAY_SIZE(error->array_member));
2243
2244 for (i = 0; i < num_entries; i++, array_entry++) {
2245
2246 if (!memcmp(array_entry->vpd.vpd.sn, zero_sn, IPR_SERIAL_NUM_LEN))
2247 continue;
2248
2249 if (error->exposed_mode_adn == i)
2250 ipr_err("Exposed Array Member %d:\n", i);
2251 else
2252 ipr_err("Array Member %d:\n", i);
2253
2255 ipr_log_ext_vpd(&array_entry->vpd);
2256 ipr_err("Current Location: %s\n",
2257 ipr_format_res_path(ioa_cfg, array_entry->res_path,
2258 buffer, sizeof(buffer)));
2259 ipr_err("Expected Location: %s\n",
2260 ipr_format_res_path(ioa_cfg,
2261 array_entry->expected_res_path,
2262 buffer, sizeof(buffer)));
2263
2264 ipr_err_separator;
2265 }
2266}
2267
2268/**
2269 * ipr_log_sis64_fabric_error - Log a sis64 fabric error.
2270 * @ioa_cfg: ioa config struct
2271 * @hostrcb: hostrcb struct
2272 *
2273 * Return value:
2274 * none
2275 **/
2276static void ipr_log_sis64_fabric_error(struct ipr_ioa_cfg *ioa_cfg,
2277 struct ipr_hostrcb *hostrcb)
2278{
2279 struct ipr_hostrcb_type_30_error *error;
2280 struct ipr_hostrcb64_fabric_desc *fabric;
2281 struct ipr_hostrcb64_config_element *cfg;
2282 int i, add_len;
2283
2284 error = &hostrcb->hcam.u.error64.u.type_30_error;
2285
2286 error->failure_reason[sizeof(error->failure_reason) - 1] = '\0';
2287 ipr_hcam_err(hostrcb, "%s\n", error->failure_reason);
2288
2289 add_len = be32_to_cpu(hostrcb->hcam.length) -
2290 (offsetof(struct ipr_hostrcb64_error, u) +
2291 offsetof(struct ipr_hostrcb_type_30_error, desc));
2292
2293 for (i = 0, fabric = error->desc; i < error->num_entries; i++) {
2294 ipr_log64_fabric_path(hostrcb, fabric);
2295 for_each_fabric_cfg(fabric, cfg)
2296 ipr_log64_path_elem(hostrcb, cfg);
2297
2298 add_len -= be16_to_cpu(fabric->length);
2299 fabric = (struct ipr_hostrcb64_fabric_desc *)
2300 ((unsigned long)fabric + be16_to_cpu(fabric->length));
2301 }
2302
2303 ipr_log_hex_data(ioa_cfg, (u32 *)fabric, add_len);
2304}
2305
2306/**
2307 * ipr_log_generic_error - Log an adapter error.
2308 * @ioa_cfg: ioa config struct
2309 * @hostrcb: hostrcb struct
2310 *
2311 * Return value:
2312 * none
2313 **/
2314static void ipr_log_generic_error(struct ipr_ioa_cfg *ioa_cfg,
2315 struct ipr_hostrcb *hostrcb)
2316{
2317 ipr_log_hex_data(ioa_cfg, hostrcb->hcam.u.raw.data,
2318 be32_to_cpu(hostrcb->hcam.length));
2319}
2320
2321/**
2322 * ipr_get_error - Find the specified IOASC in the ipr_error_table.
2323 * @ioasc: IOASC
2324 *
2325 * This function will return the index into the ipr_error_table
2326 * for the specified IOASC. If the IOASC is not in the table,
2327 * 0 will be returned, which points to the entry used for unknown errors.
2328 *
2329 * Return value:
2330 * index into the ipr_error_table
2331 **/
2332static u32 ipr_get_error(u32 ioasc)
2333{
2334 int i;
2335
2336 for (i = 0; i < ARRAY_SIZE(ipr_error_table); i++)
2337 if (ipr_error_table[i].ioasc == (ioasc & IPR_IOASC_IOASC_MASK))
2338 return i;
2339
2340 return 0;
2341}
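/*
 * Usage sketch, mirroring ipr_handle_log_data() below: the IOASC is
 * masked with IPR_IOASC_IOASC_MASK before the compare, so variants of
 * the same error share one table entry, and an unrecognized IOASC falls
 * back to index 0, the catch-all entry:
 *
 *	error_index = ipr_get_error(ioasc);
 *	ipr_hcam_err(hostrcb, "%s\n", ipr_error_table[error_index].error);
 */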
2342
2343/**
2344 * ipr_handle_log_data - Log an adapter error.
2345 * @ioa_cfg: ioa config struct
2346 * @hostrcb: hostrcb struct
2347 *
2348 * This function logs an adapter error to the system.
2349 *
2350 * Return value:
2351 * none
2352 **/
2353static void ipr_handle_log_data(struct ipr_ioa_cfg *ioa_cfg,
2354 struct ipr_hostrcb *hostrcb)
2355{
2356 u32 ioasc;
2357 int error_index;
2358
2359 if (hostrcb->hcam.notify_type != IPR_HOST_RCB_NOTIF_TYPE_ERROR_LOG_ENTRY)
2360 return;
2361
2362 if (hostrcb->hcam.notifications_lost == IPR_HOST_RCB_NOTIFICATIONS_LOST)
2363 dev_err(&ioa_cfg->pdev->dev, "Error notifications lost\n");
2364
2365 if (ioa_cfg->sis64)
2366 ioasc = be32_to_cpu(hostrcb->hcam.u.error64.fd_ioasc);
2367 else
2368 ioasc = be32_to_cpu(hostrcb->hcam.u.error.fd_ioasc);
2369
2370 if (!ioa_cfg->sis64 && (ioasc == IPR_IOASC_BUS_WAS_RESET ||
2371 ioasc == IPR_IOASC_BUS_WAS_RESET_BY_OTHER)) {
2372 /* Tell the midlayer we had a bus reset so it will handle the UA properly */
2373 scsi_report_bus_reset(ioa_cfg->host,
2374 hostrcb->hcam.u.error.fd_res_addr.bus);
2375 }
2376
2377 error_index = ipr_get_error(ioasc);
2378
2379 if (!ipr_error_table[error_index].log_hcam)
2380 return;
2381
2382 ipr_hcam_err(hostrcb, "%s\n", ipr_error_table[error_index].error);
2383
2384 /* Set indication we have logged an error */
2385 ioa_cfg->errors_logged++;
2386
2387 if (ioa_cfg->log_level < ipr_error_table[error_index].log_hcam)
2388 return;
2389 if (be32_to_cpu(hostrcb->hcam.length) > sizeof(hostrcb->hcam.u.raw))
2390 hostrcb->hcam.length = cpu_to_be32(sizeof(hostrcb->hcam.u.raw));
2391
2392 switch (hostrcb->hcam.overlay_id) {
2393 case IPR_HOST_RCB_OVERLAY_ID_2:
2394 ipr_log_cache_error(ioa_cfg, hostrcb);
2395 break;
2396 case IPR_HOST_RCB_OVERLAY_ID_3:
2397 ipr_log_config_error(ioa_cfg, hostrcb);
2398 break;
2399 case IPR_HOST_RCB_OVERLAY_ID_4:
2400 case IPR_HOST_RCB_OVERLAY_ID_6:
2401 ipr_log_array_error(ioa_cfg, hostrcb);
2402 break;
2403 case IPR_HOST_RCB_OVERLAY_ID_7:
2404 ipr_log_dual_ioa_error(ioa_cfg, hostrcb);
2405 break;
2406 case IPR_HOST_RCB_OVERLAY_ID_12:
2407 ipr_log_enhanced_cache_error(ioa_cfg, hostrcb);
2408 break;
2409 case IPR_HOST_RCB_OVERLAY_ID_13:
2410 ipr_log_enhanced_config_error(ioa_cfg, hostrcb);
2411 break;
2412 case IPR_HOST_RCB_OVERLAY_ID_14:
2413 case IPR_HOST_RCB_OVERLAY_ID_16:
2414 ipr_log_enhanced_array_error(ioa_cfg, hostrcb);
2415 break;
2416 case IPR_HOST_RCB_OVERLAY_ID_17:
2417 ipr_log_enhanced_dual_ioa_error(ioa_cfg, hostrcb);
2418 break;
2419 case IPR_HOST_RCB_OVERLAY_ID_20:
2420 ipr_log_fabric_error(ioa_cfg, hostrcb);
2421 break;
2422 case IPR_HOST_RCB_OVERLAY_ID_23:
2423 ipr_log_sis64_config_error(ioa_cfg, hostrcb);
2424 break;
2425 case IPR_HOST_RCB_OVERLAY_ID_24:
2426 case IPR_HOST_RCB_OVERLAY_ID_26:
2427 ipr_log_sis64_array_error(ioa_cfg, hostrcb);
2428 break;
2429 case IPR_HOST_RCB_OVERLAY_ID_30:
2430 ipr_log_sis64_fabric_error(ioa_cfg, hostrcb);
2431 break;
2432 case IPR_HOST_RCB_OVERLAY_ID_1:
2433 case IPR_HOST_RCB_OVERLAY_ID_DEFAULT:
2434 default:
2435 ipr_log_generic_error(ioa_cfg, hostrcb);
2436 break;
2437 }
2438}
2439
2440/**
2441 * ipr_process_error - Op done function for an adapter error log.
2442 * @ipr_cmd: ipr command struct
2443 *
2444 * This function is the op done function for an error log host
2445 * controlled async from the adapter. It will log the error and
2446 * send the HCAM back to the adapter.
2447 *
2448 * Return value:
2449 * none
2450 **/
2451static void ipr_process_error(struct ipr_cmnd *ipr_cmd)
2452{
2453 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
2454 struct ipr_hostrcb *hostrcb = ipr_cmd->u.hostrcb;
2455 u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
2456 u32 fd_ioasc;
2457
2458 if (ioa_cfg->sis64)
2459 fd_ioasc = be32_to_cpu(hostrcb->hcam.u.error64.fd_ioasc);
2460 else
2461 fd_ioasc = be32_to_cpu(hostrcb->hcam.u.error.fd_ioasc);
2462
2463 list_del(&hostrcb->queue);
2464 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
2465
2466 if (!ioasc) {
2467 ipr_handle_log_data(ioa_cfg, hostrcb);
2468 if (fd_ioasc == IPR_IOASC_NR_IOA_RESET_REQUIRED)
2469 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_ABBREV);
2470 } else if (ioasc != IPR_IOASC_IOA_WAS_RESET) {
2471 dev_err(&ioa_cfg->pdev->dev,
2472 "Host RCB failed with IOASC: 0x%08X\n", ioasc);
2473 }
2474
2475 ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_LOG_DATA, hostrcb);
2476}
2477
2478/**
2479 * ipr_timeout - An internally generated op has timed out.
2480 * @ipr_cmd: ipr command struct
2481 *
2482 * This function blocks host requests and initiates an
2483 * adapter reset.
2484 *
2485 * Return value:
2486 * none
2487 **/
2488static void ipr_timeout(struct ipr_cmnd *ipr_cmd)
2489{
2490 unsigned long lock_flags = 0;
2491 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
2492
2493 ENTER;
2494 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2495
2496 ioa_cfg->errors_logged++;
2497 dev_err(&ioa_cfg->pdev->dev,
2498 "Adapter being reset due to command timeout.\n");
2499
2500 if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
2501 ioa_cfg->sdt_state = GET_DUMP;
2502
2503 if (!ioa_cfg->in_reset_reload || ioa_cfg->reset_cmd == ipr_cmd)
2504 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
2505
2506 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2507 LEAVE;
2508}
2509
2510/**
2511 * ipr_oper_timeout - Adapter timed out transitioning to operational
2512 * @ipr_cmd: ipr command struct
2513 *
2514 * This function blocks host requests and initiates an
2515 * adapter reset.
2516 *
2517 * Return value:
2518 * none
2519 **/
2520static void ipr_oper_timeout(struct ipr_cmnd *ipr_cmd)
2521{
2522 unsigned long lock_flags = 0;
2523 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
2524
2525 ENTER;
2526 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2527
2528 ioa_cfg->errors_logged++;
2529 dev_err(&ioa_cfg->pdev->dev,
2530 "Adapter timed out transitioning to operational.\n");
2531
2532 if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
2533 ioa_cfg->sdt_state = GET_DUMP;
2534
2535 if (!ioa_cfg->in_reset_reload || ioa_cfg->reset_cmd == ipr_cmd) {
2536 if (ipr_fastfail)
2537 ioa_cfg->reset_retries += IPR_NUM_RESET_RELOAD_RETRIES;
2538 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
2539 }
2540
2541 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2542 LEAVE;
2543}
2544
2545/**
2546 * ipr_reset_reload - Reset/Reload the IOA
2547 * @ioa_cfg: ioa config struct
2548 * @shutdown_type: shutdown type
2549 *
2550 * This function resets the adapter and re-initializes it.
2551 * This function assumes that all new host commands have been stopped.
2552 * Return value:
2553 * SUCCESS / FAILED
2554 **/
2555static int ipr_reset_reload(struct ipr_ioa_cfg *ioa_cfg,
2556 enum ipr_shutdown_type shutdown_type)
2557{
2558 if (!ioa_cfg->in_reset_reload)
2559 ipr_initiate_ioa_reset(ioa_cfg, shutdown_type);
2560
2561 spin_unlock_irq(ioa_cfg->host->host_lock);
2562 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
2563 spin_lock_irq(ioa_cfg->host->host_lock);
2564
2565 /* If we got hit with a host reset while we were already resetting
2566 the adapter for some reason, and that reset failed, report FAILED */
2567 if (ioa_cfg->ioa_is_dead) {
2568 ipr_trace;
2569 return FAILED;
2570 }
2571
2572 return SUCCESS;
2573}
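/*
 * Caller contract sketch (how the error-handling paths use this): the
 * host lock must be held on entry; it is dropped only around the wait
 * for the reset to complete. A hypothetical caller:
 *
 *	spin_lock_irq(ioa_cfg->host->host_lock);
 *	rc = ipr_reset_reload(ioa_cfg, IPR_SHUTDOWN_ABBREV);
 *	spin_unlock_irq(ioa_cfg->host->host_lock);
 */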
2574
2575/**
2576 * ipr_find_ses_entry - Find matching SES in SES table
2577 * @res: resource entry struct of SES
2578 *
2579 * Return value:
2580 * pointer to SES table entry / NULL on failure
2581 **/
2582static const struct ipr_ses_table_entry *
2583ipr_find_ses_entry(struct ipr_resource_entry *res)
2584{
2585 int i, j, matches;
2586 struct ipr_std_inq_vpids *vpids;
2587 const struct ipr_ses_table_entry *ste = ipr_ses_table;
2588
2589 for (i = 0; i < ARRAY_SIZE(ipr_ses_table); i++, ste++) {
2590 for (j = 0, matches = 0; j < IPR_PROD_ID_LEN; j++) {
2591 if (ste->compare_product_id_byte[j] == 'X') {
2592 vpids = &res->std_inq_data.vpids;
2593 if (vpids->product_id[j] == ste->product_id[j])
2594 matches++;
2595 else
2596 break;
2597 } else
2598 matches++;
2599 }
2600
2601 if (matches == IPR_PROD_ID_LEN)
2602 return ste;
2603 }
2604
2605 return NULL;
2606}
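/*
 * Note on the matching rule above, which is easy to invert in one's
 * head: only the product ID bytes whose compare_product_id_byte is 'X'
 * are compared; every other position matches unconditionally. A sketch
 * of a hypothetical table entry (field layout assumed to follow struct
 * ipr_ses_table_entry) that keys on the first four bytes only:
 *
 *	{ "HSBP            ", "XXXX            ", 80 }
 */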
2607
2608/**
2609 * ipr_get_max_scsi_speed - Determine max SCSI speed for a given bus
2610 * @ioa_cfg: ioa config struct
2611 * @bus: SCSI bus
2612 * @bus_width: bus width
2613 *
2614 * Return value:
2615 * SCSI bus speed in units of 100KHz, 1600 is 160 MHz
2616 * For a 2-byte wide SCSI bus, the maximum transfer speed is
2617 * twice the maximum transfer rate (e.g. for a wide enabled bus,
2618 * max 160MHz = max 320MB/sec).
2619 **/
2620static u32 ipr_get_max_scsi_speed(struct ipr_ioa_cfg *ioa_cfg, u8 bus, u8 bus_width)
2621{
2622 struct ipr_resource_entry *res;
2623 const struct ipr_ses_table_entry *ste;
2624 u32 max_xfer_rate = IPR_MAX_SCSI_RATE(bus_width);
2625
2626 /* Loop through each config table entry in the config table buffer */
2627 list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
2628 if (!(IPR_IS_SES_DEVICE(res->std_inq_data)))
2629 continue;
2630
2631 if (bus != res->bus)
2632 continue;
2633
2634 if (!(ste = ipr_find_ses_entry(res)))
2635 continue;
2636
2637 max_xfer_rate = (ste->max_bus_speed_limit * 10) / (bus_width / 8);
2638 }
2639
2640 return max_xfer_rate;
2641}
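/*
 * Worked example of the final computation (numbers hypothetical): with
 * ste->max_bus_speed_limit == 80 and a 16-bit wide bus,
 *
 *	max_xfer_rate = (80 * 10) / (16 / 8) = 400
 *
 * i.e. 40 MHz in the 100KHz units this function returns.
 */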
2642
2643/**
2644 * ipr_wait_iodbg_ack - Wait for an IODEBUG ACK from the IOA
2645 * @ioa_cfg: ioa config struct
2646 * @max_delay: max delay in micro-seconds to wait
2647 *
2648 * Waits for an IODEBUG ACK from the IOA, doing busy looping.
2649 *
2650 * Return value:
2651 * 0 on success / other on failure
2652 **/
2653static int ipr_wait_iodbg_ack(struct ipr_ioa_cfg *ioa_cfg, int max_delay)
2654{
2655 volatile u32 pcii_reg;
2656 int delay = 1;
2657
2658 /* Read interrupt reg until IOA signals IO Debug Acknowledge */
2659 while (delay < max_delay) {
2660 pcii_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
2661
2662 if (pcii_reg & IPR_PCII_IO_DEBUG_ACKNOWLEDGE)
2663 return 0;
2664
2665 /* udelay cannot be used if delay is more than a few milliseconds */
2666 if ((delay / 1000) > MAX_UDELAY_MS)
2667 mdelay(delay / 1000);
2668 else
2669 udelay(delay);
2670
2671 delay += delay;
2672 }
2673 return -EIO;
2674}
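/*
 * The busy-wait interval doubles every iteration (delay += delay), so
 * the total time spent is bounded by roughly twice max_delay: the
 * sequence runs 1, 2, 4, 8, ... microseconds via udelay(), switching
 * to mdelay() once delay / 1000 exceeds MAX_UDELAY_MS.
 */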
2675
2676/**
2677 * ipr_get_sis64_dump_data_section - Dump IOA memory
2678 * @ioa_cfg: ioa config struct
2679 * @start_addr: adapter address to dump
2680 * @dest: destination kernel buffer
2681 * @length_in_words: length to dump in 4 byte words
2682 *
2683 * Return value:
2684 * 0 on success
2685 **/
2686static int ipr_get_sis64_dump_data_section(struct ipr_ioa_cfg *ioa_cfg,
2687 u32 start_addr,
2688 __be32 *dest, u32 length_in_words)
2689{
2690 int i;
2691
2692 for (i = 0; i < length_in_words; i++) {
2693 writel(start_addr+(i*4), ioa_cfg->regs.dump_addr_reg);
2694 *dest = cpu_to_be32(readl(ioa_cfg->regs.dump_data_reg));
2695 dest++;
2696 }
2697
2698 return 0;
2699}
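/*
 * Sketch of the SIS64 indirect-access pattern used above: the dump
 * address register selects a word of IOA memory and the data register
 * returns it, so one word is fetched as
 *
 *	writel(addr, ioa_cfg->regs.dump_addr_reg);
 *	word = cpu_to_be32(readl(ioa_cfg->regs.dump_data_reg));
 *
 * with no mailbox handshake, unlike the SIS32 path that follows.
 */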
2700
2701/**
2702 * ipr_get_ldump_data_section - Dump IOA memory
2703 * @ioa_cfg: ioa config struct
2704 * @start_addr: adapter address to dump
2705 * @dest: destination kernel buffer
2706 * @length_in_words: length to dump in 4 byte words
2707 *
2708 * Return value:
2709 * 0 on success / -EIO on failure
2710 **/
2711static int ipr_get_ldump_data_section(struct ipr_ioa_cfg *ioa_cfg,
2712 u32 start_addr,
2713 __be32 *dest, u32 length_in_words)
2714{
2715 volatile u32 temp_pcii_reg;
2716 int i, delay = 0;
2717
2718 if (ioa_cfg->sis64)
2719 return ipr_get_sis64_dump_data_section(ioa_cfg, start_addr,
2720 dest, length_in_words);
2721
2722 /* Write IOA interrupt reg starting LDUMP state */
2723 writel((IPR_UPROCI_RESET_ALERT | IPR_UPROCI_IO_DEBUG_ALERT),
2724 ioa_cfg->regs.set_uproc_interrupt_reg32);
2725
2726 /* Wait for IO debug acknowledge */
2727 if (ipr_wait_iodbg_ack(ioa_cfg,
2728 IPR_LDUMP_MAX_LONG_ACK_DELAY_IN_USEC)) {
2729 dev_err(&ioa_cfg->pdev->dev,
2730 "IOA dump long data transfer timeout\n");
2731 return -EIO;
2732 }
2733
2734 /* Signal LDUMP interlocked - clear IO debug ack */
2735 writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE,
2736 ioa_cfg->regs.clr_interrupt_reg);
2737
2738 /* Write Mailbox with starting address */
2739 writel(start_addr, ioa_cfg->ioa_mailbox);
2740
2741 /* Signal address valid - clear IOA Reset alert */
2742 writel(IPR_UPROCI_RESET_ALERT,
2743 ioa_cfg->regs.clr_uproc_interrupt_reg32);
2744
2745 for (i = 0; i < length_in_words; i++) {
2746 /* Wait for IO debug acknowledge */
2747 if (ipr_wait_iodbg_ack(ioa_cfg,
2748 IPR_LDUMP_MAX_SHORT_ACK_DELAY_IN_USEC)) {
2749 dev_err(&ioa_cfg->pdev->dev,
2750 "IOA dump short data transfer timeout\n");
2751 return -EIO;
2752 }
2753
2754 /* Read data from mailbox and increment destination pointer */
2755 *dest = cpu_to_be32(readl(ioa_cfg->ioa_mailbox));
2756 dest++;
2757
2758 /* For all but the last word of data, signal data received */
2759 if (i < (length_in_words - 1)) {
2760 /* Signal dump data received - Clear IO debug Ack */
2761 writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE,
2762 ioa_cfg->regs.clr_interrupt_reg);
2763 }
2764 }
2765
2766 /* Signal end of block transfer. Set reset alert then clear IO debug ack */
2767 writel(IPR_UPROCI_RESET_ALERT,
2768 ioa_cfg->regs.set_uproc_interrupt_reg32);
2769
2770 writel(IPR_UPROCI_IO_DEBUG_ALERT,
2771 ioa_cfg->regs.clr_uproc_interrupt_reg32);
2772
2773 /* Signal dump data received - Clear IO debug Ack */
2774 writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE,
2775 ioa_cfg->regs.clr_interrupt_reg);
2776
2777 /* Wait for IOA to signal LDUMP exit - IOA reset alert will be cleared */
2778 while (delay < IPR_LDUMP_MAX_SHORT_ACK_DELAY_IN_USEC) {
2779 temp_pcii_reg =
2780 readl(ioa_cfg->regs.sense_uproc_interrupt_reg32);
2781
2782 if (!(temp_pcii_reg & IPR_UPROCI_RESET_ALERT))
2783 return 0;
2784
2785 udelay(10);
2786 delay += 10;
2787 }
2788
2789 return 0;
2790}
2791
2792#ifdef CONFIG_SCSI_IPR_DUMP
2793/**
2794 * ipr_sdt_copy - Copy Smart Dump Table to kernel buffer
2795 * @ioa_cfg: ioa config struct
2796 * @pci_address: adapter address
2797 * @length: length of data to copy
2798 *
2799 * Copy data from PCI adapter to kernel buffer.
2800 * Note: length MUST be a 4 byte multiple
2801 * Return value:
2802 * 0 on success / other on failure
2803 **/
2804static int ipr_sdt_copy(struct ipr_ioa_cfg *ioa_cfg,
2805 unsigned long pci_address, u32 length)
2806{
2807 int bytes_copied = 0;
2808 int cur_len, rc, rem_len, rem_page_len, max_dump_size;
2809 __be32 *page;
2810 unsigned long lock_flags = 0;
2811 struct ipr_ioa_dump *ioa_dump = &ioa_cfg->dump->ioa_dump;
2812
2813 if (ioa_cfg->sis64)
2814 max_dump_size = IPR_FMT3_MAX_IOA_DUMP_SIZE;
2815 else
2816 max_dump_size = IPR_FMT2_MAX_IOA_DUMP_SIZE;
2817
2818 while (bytes_copied < length &&
2819 (ioa_dump->hdr.len + bytes_copied) < max_dump_size) {
2820 if (ioa_dump->page_offset >= PAGE_SIZE ||
2821 ioa_dump->page_offset == 0) {
2822 page = (__be32 *)__get_free_page(GFP_ATOMIC);
2823
2824 if (!page) {
2825 ipr_trace;
2826 return bytes_copied;
2827 }
2828
2829 ioa_dump->page_offset = 0;
2830 ioa_dump->ioa_data[ioa_dump->next_page_index] = page;
2831 ioa_dump->next_page_index++;
2832 } else
2833 page = ioa_dump->ioa_data[ioa_dump->next_page_index - 1];
2834
2835 rem_len = length - bytes_copied;
2836 rem_page_len = PAGE_SIZE - ioa_dump->page_offset;
2837 cur_len = min(rem_len, rem_page_len);
2838
2839 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2840 if (ioa_cfg->sdt_state == ABORT_DUMP) {
2841 rc = -EIO;
2842 } else {
2843 rc = ipr_get_ldump_data_section(ioa_cfg,
2844 pci_address + bytes_copied,
2845 &page[ioa_dump->page_offset / 4],
2846 (cur_len / sizeof(u32)));
2847 }
2848 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2849
2850 if (!rc) {
2851 ioa_dump->page_offset += cur_len;
2852 bytes_copied += cur_len;
2853 } else {
2854 ipr_trace;
2855 break;
2856 }
2857 schedule();
2858 }
2859
2860 return bytes_copied;
2861}
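/*
 * Chunking arithmetic sketch (numbers hypothetical): each pass copies
 * at most what is left in the current page, e.g. with PAGE_SIZE 4096,
 * page_offset 3000 and 2000 bytes still to copy,
 *
 *	cur_len = min(rem_len, rem_page_len) = min(2000, 1096) = 1096
 *
 * and the remaining 904 bytes land in a freshly allocated page on the
 * next iteration.
 */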
2862
2863/**
2864 * ipr_init_dump_entry_hdr - Initialize a dump entry header.
2865 * @hdr: dump entry header struct
2866 *
2867 * Return value:
2868 * nothing
2869 **/
2870static void ipr_init_dump_entry_hdr(struct ipr_dump_entry_header *hdr)
2871{
2872 hdr->eye_catcher = IPR_DUMP_EYE_CATCHER;
2873 hdr->num_elems = 1;
2874 hdr->offset = sizeof(*hdr);
2875 hdr->status = IPR_DUMP_STATUS_SUCCESS;
2876}
2877
2878/**
2879 * ipr_dump_ioa_type_data - Fill in the adapter type in the dump.
2880 * @ioa_cfg: ioa config struct
2881 * @driver_dump: driver dump struct
2882 *
2883 * Return value:
2884 * nothing
2885 **/
2886static void ipr_dump_ioa_type_data(struct ipr_ioa_cfg *ioa_cfg,
2887 struct ipr_driver_dump *driver_dump)
2888{
2889 struct ipr_inquiry_page3 *ucode_vpd = &ioa_cfg->vpd_cbs->page3_data;
2890
2891 ipr_init_dump_entry_hdr(&driver_dump->ioa_type_entry.hdr);
2892 driver_dump->ioa_type_entry.hdr.len =
2893 sizeof(struct ipr_dump_ioa_type_entry) -
2894 sizeof(struct ipr_dump_entry_header);
2895 driver_dump->ioa_type_entry.hdr.data_type = IPR_DUMP_DATA_TYPE_BINARY;
2896 driver_dump->ioa_type_entry.hdr.id = IPR_DUMP_DRIVER_TYPE_ID;
2897 driver_dump->ioa_type_entry.type = ioa_cfg->type;
2898 driver_dump->ioa_type_entry.fw_version = (ucode_vpd->major_release << 24) |
2899 (ucode_vpd->card_type << 16) | (ucode_vpd->minor_release[0] << 8) |
2900 ucode_vpd->minor_release[1];
2901 driver_dump->hdr.num_entries++;
2902}
2903
2904/**
2905 * ipr_dump_version_data - Fill in the driver version in the dump.
2906 * @ioa_cfg: ioa config struct
2907 * @driver_dump: driver dump struct
2908 *
2909 * Return value:
2910 * nothing
2911 **/
2912static void ipr_dump_version_data(struct ipr_ioa_cfg *ioa_cfg,
2913 struct ipr_driver_dump *driver_dump)
2914{
2915 ipr_init_dump_entry_hdr(&driver_dump->version_entry.hdr);
2916 driver_dump->version_entry.hdr.len =
2917 sizeof(struct ipr_dump_version_entry) -
2918 sizeof(struct ipr_dump_entry_header);
2919 driver_dump->version_entry.hdr.data_type = IPR_DUMP_DATA_TYPE_ASCII;
2920 driver_dump->version_entry.hdr.id = IPR_DUMP_DRIVER_VERSION_ID;
2921 strcpy(driver_dump->version_entry.version, IPR_DRIVER_VERSION);
2922 driver_dump->hdr.num_entries++;
2923}
2924
2925/**
2926 * ipr_dump_trace_data - Fill in the IOA trace in the dump.
2927 * @ioa_cfg: ioa config struct
2928 * @driver_dump: driver dump struct
2929 *
2930 * Return value:
2931 * nothing
2932 **/
2933static void ipr_dump_trace_data(struct ipr_ioa_cfg *ioa_cfg,
2934 struct ipr_driver_dump *driver_dump)
2935{
2936 ipr_init_dump_entry_hdr(&driver_dump->trace_entry.hdr);
2937 driver_dump->trace_entry.hdr.len =
2938 sizeof(struct ipr_dump_trace_entry) -
2939 sizeof(struct ipr_dump_entry_header);
2940 driver_dump->trace_entry.hdr.data_type = IPR_DUMP_DATA_TYPE_BINARY;
2941 driver_dump->trace_entry.hdr.id = IPR_DUMP_TRACE_ID;
2942 memcpy(driver_dump->trace_entry.trace, ioa_cfg->trace, IPR_TRACE_SIZE);
2943 driver_dump->hdr.num_entries++;
2944}
2945
2946/**
2947 * ipr_dump_location_data - Fill in the IOA location in the dump.
2948 * @ioa_cfg: ioa config struct
2949 * @driver_dump: driver dump struct
2950 *
2951 * Return value:
2952 * nothing
2953 **/
2954static void ipr_dump_location_data(struct ipr_ioa_cfg *ioa_cfg,
2955 struct ipr_driver_dump *driver_dump)
2956{
2957 ipr_init_dump_entry_hdr(&driver_dump->location_entry.hdr);
2958 driver_dump->location_entry.hdr.len =
2959 sizeof(struct ipr_dump_location_entry) -
2960 sizeof(struct ipr_dump_entry_header);
2961 driver_dump->location_entry.hdr.data_type = IPR_DUMP_DATA_TYPE_ASCII;
2962 driver_dump->location_entry.hdr.id = IPR_DUMP_LOCATION_ID;
2963 strcpy(driver_dump->location_entry.location, dev_name(&ioa_cfg->pdev->dev));
2964 driver_dump->hdr.num_entries++;
2965}
2966
2967/**
2968 * ipr_get_ioa_dump - Perform a dump of the driver and adapter.
2969 * @ioa_cfg: ioa config struct
2970 * @dump: dump struct
2971 *
2972 * Return value:
2973 * nothing
2974 **/
2975static void ipr_get_ioa_dump(struct ipr_ioa_cfg *ioa_cfg, struct ipr_dump *dump)
2976{
2977 unsigned long start_addr, sdt_word;
2978 unsigned long lock_flags = 0;
2979 struct ipr_driver_dump *driver_dump = &dump->driver_dump;
2980 struct ipr_ioa_dump *ioa_dump = &dump->ioa_dump;
2981 u32 num_entries, max_num_entries, start_off, end_off;
2982 u32 max_dump_size, bytes_to_copy, bytes_copied, rc;
2983 struct ipr_sdt *sdt;
2984 int valid = 1;
2985 int i;
2986
2987 ENTER;
2988
2989 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2990
2991 if (ioa_cfg->sdt_state != READ_DUMP) {
2992 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2993 return;
2994 }
2995
2996 if (ioa_cfg->sis64) {
2997 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2998 ssleep(IPR_DUMP_DELAY_SECONDS);
2999 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3000 }
3001
3002 start_addr = readl(ioa_cfg->ioa_mailbox);
3003
dcbad00e 3004 if (!ioa_cfg->sis64 && !ipr_sdt_is_fmt2(start_addr)) {
1da177e4
LT
3005 dev_err(&ioa_cfg->pdev->dev,
3006 "Invalid dump table format: %lx\n", start_addr);
3007 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3008 return;
3009 }
3010
3011 dev_err(&ioa_cfg->pdev->dev, "Dump of IOA initiated\n");
3012
3013 driver_dump->hdr.eye_catcher = IPR_DUMP_EYE_CATCHER;
3014
3015 /* Initialize the overall dump header */
3016 driver_dump->hdr.len = sizeof(struct ipr_driver_dump);
3017 driver_dump->hdr.num_entries = 1;
3018 driver_dump->hdr.first_entry_offset = sizeof(struct ipr_dump_header);
3019 driver_dump->hdr.status = IPR_DUMP_STATUS_SUCCESS;
3020 driver_dump->hdr.os = IPR_DUMP_OS_LINUX;
3021 driver_dump->hdr.driver_name = IPR_DUMP_DRIVER_NAME;
3022
3023 ipr_dump_version_data(ioa_cfg, driver_dump);
3024 ipr_dump_location_data(ioa_cfg, driver_dump);
3025 ipr_dump_ioa_type_data(ioa_cfg, driver_dump);
3026 ipr_dump_trace_data(ioa_cfg, driver_dump);
3027
3028 /* Update dump_header */
3029 driver_dump->hdr.len += sizeof(struct ipr_dump_entry_header);
3030
3031 /* IOA Dump entry */
3032 ipr_init_dump_entry_hdr(&ioa_dump->hdr);
3033 ioa_dump->hdr.len = 0;
3034 ioa_dump->hdr.data_type = IPR_DUMP_DATA_TYPE_BINARY;
3035 ioa_dump->hdr.id = IPR_DUMP_IOA_DUMP_ID;
3036
3037 /* First entries in sdt are actually a list of dump addresses and
3038 lengths to gather the real dump data. sdt represents the pointer
3039 to the ioa generated dump table. Dump data will be extracted based
3040 on entries in this table */
3041 sdt = &ioa_dump->sdt;
3042
3043 if (ioa_cfg->sis64) {
3044 max_num_entries = IPR_FMT3_NUM_SDT_ENTRIES;
3045 max_dump_size = IPR_FMT3_MAX_IOA_DUMP_SIZE;
3046 } else {
3047 max_num_entries = IPR_FMT2_NUM_SDT_ENTRIES;
3048 max_dump_size = IPR_FMT2_MAX_IOA_DUMP_SIZE;
3049 }
3050
3051 bytes_to_copy = offsetof(struct ipr_sdt, entry) +
3052 (max_num_entries * sizeof(struct ipr_sdt_entry));
3053 rc = ipr_get_ldump_data_section(ioa_cfg, start_addr, (__be32 *)sdt,
3054 bytes_to_copy / sizeof(__be32));
3055
3056 /* Smart Dump table is ready to use and the first entry is valid */
3057 if (rc || ((be32_to_cpu(sdt->hdr.state) != IPR_FMT3_SDT_READY_TO_USE) &&
3058 (be32_to_cpu(sdt->hdr.state) != IPR_FMT2_SDT_READY_TO_USE))) {
3059 dev_err(&ioa_cfg->pdev->dev,
3060 "Dump of IOA failed. Dump table not valid: %d, %X.\n",
3061 rc, be32_to_cpu(sdt->hdr.state));
3062 driver_dump->hdr.status = IPR_DUMP_STATUS_FAILED;
3063 ioa_cfg->sdt_state = DUMP_OBTAINED;
3064 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3065 return;
3066 }
3067
3068 num_entries = be32_to_cpu(sdt->hdr.num_entries_used);
3069
3070 if (num_entries > max_num_entries)
3071 num_entries = max_num_entries;
3072
3073 /* Update dump length to the actual data to be copied */
3074 dump->driver_dump.hdr.len += sizeof(struct ipr_sdt_header);
3075 if (ioa_cfg->sis64)
3076 dump->driver_dump.hdr.len += num_entries * sizeof(struct ipr_sdt_entry);
3077 else
3078 dump->driver_dump.hdr.len += max_num_entries * sizeof(struct ipr_sdt_entry);
3079
3080 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3081
3082 for (i = 0; i < num_entries; i++) {
3083 if (ioa_dump->hdr.len > max_dump_size) {
3084 driver_dump->hdr.status = IPR_DUMP_STATUS_QUAL_SUCCESS;
3085 break;
3086 }
3087
3088 if (sdt->entry[i].flags & IPR_SDT_VALID_ENTRY) {
3089 sdt_word = be32_to_cpu(sdt->entry[i].start_token);
3090 if (ioa_cfg->sis64)
3091 bytes_to_copy = be32_to_cpu(sdt->entry[i].end_token);
3092 else {
3093 start_off = sdt_word & IPR_FMT2_MBX_ADDR_MASK;
3094 end_off = be32_to_cpu(sdt->entry[i].end_token);
3095
3096 if (ipr_sdt_is_fmt2(sdt_word) && sdt_word)
3097 bytes_to_copy = end_off - start_off;
3098 else
3099 valid = 0;
3100 }
3101 if (valid) {
3102 if (bytes_to_copy > max_dump_size) {
3103 sdt->entry[i].flags &= ~IPR_SDT_VALID_ENTRY;
3104 continue;
3105 }
3106
3107 /* Copy data from adapter to driver buffers */
3108 bytes_copied = ipr_sdt_copy(ioa_cfg, sdt_word,
3109 bytes_to_copy);
3110
3111 ioa_dump->hdr.len += bytes_copied;
3112
3113 if (bytes_copied != bytes_to_copy) {
3114 driver_dump->hdr.status = IPR_DUMP_STATUS_QUAL_SUCCESS;
3115 break;
3116 }
3117 }
3118 }
3119 }
3120
3121 dev_err(&ioa_cfg->pdev->dev, "Dump of IOA completed.\n");
3122
3123 /* Update dump_header */
3124 driver_dump->hdr.len += ioa_dump->hdr.len;
3125 wmb();
3126 ioa_cfg->sdt_state = DUMP_OBTAINED;
3127 LEAVE;
3128}
3129
3130#else
3131#define ipr_get_ioa_dump(ioa_cfg, dump) do { } while (0)
3132#endif
3133
3134/**
3135 * ipr_release_dump - Free adapter dump memory
3136 * @kref: kref struct
3137 *
3138 * Return value:
3139 * nothing
3140 **/
3141static void ipr_release_dump(struct kref *kref)
3142{
3143 struct ipr_dump *dump = container_of(kref, struct ipr_dump, kref);
3144 struct ipr_ioa_cfg *ioa_cfg = dump->ioa_cfg;
3145 unsigned long lock_flags = 0;
3146 int i;
3147
3148 ENTER;
3149 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3150 ioa_cfg->dump = NULL;
3151 ioa_cfg->sdt_state = INACTIVE;
3152 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3153
3154 for (i = 0; i < dump->ioa_dump.next_page_index; i++)
3155 free_page((unsigned long) dump->ioa_dump.ioa_data[i]);
3156
3157 vfree(dump->ioa_dump.ioa_data);
3158 kfree(dump);
3159 LEAVE;
3160}
3161
3162/**
3163 * ipr_worker_thread - Worker thread
3164 * @work: work struct
3165 *
3166 * Called at task level from a work thread. This function takes care
3167 * of adding and removing device from the mid-layer as configuration
3168 * changes are detected by the adapter.
3169 *
3170 * Return value:
3171 * nothing
3172 **/
3173static void ipr_worker_thread(struct work_struct *work)
3174{
3175 unsigned long lock_flags;
3176 struct ipr_resource_entry *res;
3177 struct scsi_device *sdev;
3178 struct ipr_dump *dump;
3179 struct ipr_ioa_cfg *ioa_cfg =
3180 container_of(work, struct ipr_ioa_cfg, work_q);
3181 u8 bus, target, lun;
3182 int did_work;
3183
3184 ENTER;
3185 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3186
3187 if (ioa_cfg->sdt_state == READ_DUMP) {
3188 dump = ioa_cfg->dump;
3189 if (!dump) {
3190 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3191 return;
3192 }
3193 kref_get(&dump->kref);
3194 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3195 ipr_get_ioa_dump(ioa_cfg, dump);
3196 kref_put(&dump->kref, ipr_release_dump);
3197
3198 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3199 if (ioa_cfg->sdt_state == DUMP_OBTAINED && !ioa_cfg->dump_timeout)
3200 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
3201 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3202 return;
3203 }
3204
3205restart:
3206 do {
3207 did_work = 0;
3208 if (!ioa_cfg->allow_cmds || !ioa_cfg->allow_ml_add_del) {
3209 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3210 return;
3211 }
3212
3213 list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
3214 if (res->del_from_ml && res->sdev) {
3215 did_work = 1;
3216 sdev = res->sdev;
3217 if (!scsi_device_get(sdev)) {
3218 if (!res->add_to_ml)
3219 list_move_tail(&res->queue, &ioa_cfg->free_res_q);
3220 else
3221 res->del_from_ml = 0;
3222 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3223 scsi_remove_device(sdev);
3224 scsi_device_put(sdev);
3225 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3226 }
3227 break;
3228 }
3229 }
3230 } while (did_work);
3231
3232 list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
3233 if (res->add_to_ml) {
3234 bus = res->bus;
3235 target = res->target;
3236 lun = res->lun;
3237 res->add_to_ml = 0;
3238 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3239 scsi_add_device(ioa_cfg->host, bus, target, lun);
3240 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3241 goto restart;
3242 }
3243 }
3244
3245 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
ee959b00 3246 kobject_uevent(&ioa_cfg->host->shost_dev.kobj, KOBJ_CHANGE);
1da177e4
LT
3247 LEAVE;
3248}
3249
#ifdef CONFIG_SCSI_IPR_TRACE
/**
 * ipr_read_trace - Dump the adapter trace
 * @filp: open sysfs file
 * @kobj: kobject struct
 * @bin_attr: bin_attribute struct
 * @buf: buffer
 * @off: offset
 * @count: buffer size
 *
 * Return value:
 *	number of bytes printed to buffer
 **/
static ssize_t ipr_read_trace(struct file *filp, struct kobject *kobj,
			      struct bin_attribute *bin_attr,
			      char *buf, loff_t off, size_t count)
{
	struct device *dev = container_of(kobj, struct device, kobj);
	struct Scsi_Host *shost = class_to_shost(dev);
	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
	unsigned long lock_flags = 0;
	ssize_t ret;

	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	ret = memory_read_from_buffer(buf, count, &off, ioa_cfg->trace,
				      IPR_TRACE_SIZE);
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);

	return ret;
}

static struct bin_attribute ipr_trace_attr = {
	.attr = {
		.name = "trace",
		.mode = S_IRUGO,
	},
	.size = 0,
	.read = ipr_read_trace,
};
#endif

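/*
 * Usage sketch (not part of the original source): because the trace
 * binary attribute is registered on the Scsi_Host device, the adapter
 * trace buffer can be read from user space through sysfs. The host
 * number below is an assumption for illustration.
 *
 *	# cat /sys/class/scsi_host/host0/trace > /tmp/ipr_trace.bin
 */
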
/**
 * ipr_show_fw_version - Show the firmware version
 * @dev: class device struct
 * @buf: buffer
 *
 * Return value:
 *	number of bytes printed to buffer
 **/
static ssize_t ipr_show_fw_version(struct device *dev,
				   struct device_attribute *attr, char *buf)
{
	struct Scsi_Host *shost = class_to_shost(dev);
	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
	struct ipr_inquiry_page3 *ucode_vpd = &ioa_cfg->vpd_cbs->page3_data;
	unsigned long lock_flags = 0;
	int len;

	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	len = snprintf(buf, PAGE_SIZE, "%02X%02X%02X%02X\n",
		       ucode_vpd->major_release, ucode_vpd->card_type,
		       ucode_vpd->minor_release[0],
		       ucode_vpd->minor_release[1]);
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
	return len;
}

static struct device_attribute ipr_fw_version_attr = {
	.attr = {
		.name = "fw_version",
		.mode = S_IRUGO,
	},
	.show = ipr_show_fw_version,
};

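/*
 * Usage sketch (illustrative only, host number assumed): the attribute
 * prints the microcode level from inquiry page 3 as four hex bytes.
 *
 *	# cat /sys/class/scsi_host/host0/fw_version
 */
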
/**
 * ipr_show_log_level - Show the adapter's error logging level
 * @dev: class device struct
 * @buf: buffer
 *
 * Return value:
 *	number of bytes printed to buffer
 **/
static ssize_t ipr_show_log_level(struct device *dev,
				  struct device_attribute *attr, char *buf)
{
	struct Scsi_Host *shost = class_to_shost(dev);
	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
	unsigned long lock_flags = 0;
	int len;

	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	len = snprintf(buf, PAGE_SIZE, "%d\n", ioa_cfg->log_level);
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
	return len;
}

/**
 * ipr_store_log_level - Change the adapter's error logging level
 * @dev: class device struct
 * @buf: buffer
 *
 * Return value:
 *	number of bytes written to buffer
 **/
static ssize_t ipr_store_log_level(struct device *dev,
				   struct device_attribute *attr,
				   const char *buf, size_t count)
{
	struct Scsi_Host *shost = class_to_shost(dev);
	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
	unsigned long lock_flags = 0;

	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	ioa_cfg->log_level = simple_strtoul(buf, NULL, 10);
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
	return strlen(buf);
}

static struct device_attribute ipr_log_level_attr = {
	.attr = {
		.name = "log_level",
		.mode = S_IRUGO | S_IWUSR,
	},
	.show = ipr_show_log_level,
	.store = ipr_store_log_level
};

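/*
 * Usage sketch (illustrative only): log_level is world-readable and
 * root-writable, so the error logging verbosity can be inspected and
 * changed at runtime. Host number and level value are assumptions.
 *
 *	# cat /sys/class/scsi_host/host0/log_level
 *	# echo 4 > /sys/class/scsi_host/host0/log_level
 */
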
/**
 * ipr_store_diagnostics - IOA Diagnostics interface
 * @dev: device struct
 * @buf: buffer
 * @count: buffer size
 *
 * This function will reset the adapter and wait a reasonable
 * amount of time for any errors that the adapter might log.
 *
 * Return value:
 *	count on success / other on failure
 **/
static ssize_t ipr_store_diagnostics(struct device *dev,
				     struct device_attribute *attr,
				     const char *buf, size_t count)
{
	struct Scsi_Host *shost = class_to_shost(dev);
	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
	unsigned long lock_flags = 0;
	int rc = count;

	if (!capable(CAP_SYS_ADMIN))
		return -EACCES;

	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	while (ioa_cfg->in_reset_reload) {
		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
		wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
		spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	}

	ioa_cfg->errors_logged = 0;
	ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NORMAL);

	if (ioa_cfg->in_reset_reload) {
		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
		wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);

		/* Wait for a second for any errors to be logged */
		msleep(1000);
	} else {
		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
		return -EIO;
	}

	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	if (ioa_cfg->in_reset_reload || ioa_cfg->errors_logged)
		rc = -EIO;
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);

	return rc;
}

static struct device_attribute ipr_diagnostics_attr = {
	.attr = {
		.name = "run_diagnostics",
		.mode = S_IWUSR,
	},
	.store = ipr_store_diagnostics
};

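/*
 * Usage sketch (illustrative only): any root write to run_diagnostics
 * triggers a normal-shutdown adapter reset; the write fails with EIO if
 * the adapter logged errors during the reset. Host number is an
 * assumption.
 *
 *	# echo 1 > /sys/class/scsi_host/host0/run_diagnostics
 */
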
/**
 * ipr_show_adapter_state - Show the adapter's state
 * @dev: device struct
 * @buf: buffer
 *
 * Return value:
 *	number of bytes printed to buffer
 **/
static ssize_t ipr_show_adapter_state(struct device *dev,
				      struct device_attribute *attr, char *buf)
{
	struct Scsi_Host *shost = class_to_shost(dev);
	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
	unsigned long lock_flags = 0;
	int len;

	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	if (ioa_cfg->ioa_is_dead)
		len = snprintf(buf, PAGE_SIZE, "offline\n");
	else
		len = snprintf(buf, PAGE_SIZE, "online\n");
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
	return len;
}

/**
 * ipr_store_adapter_state - Change adapter state
 * @dev: device struct
 * @buf: buffer
 * @count: buffer size
 *
 * This function will change the adapter's state.
 *
 * Return value:
 *	count on success / other on failure
 **/
static ssize_t ipr_store_adapter_state(struct device *dev,
				       struct device_attribute *attr,
				       const char *buf, size_t count)
{
	struct Scsi_Host *shost = class_to_shost(dev);
	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
	unsigned long lock_flags;
	int result = count;

	if (!capable(CAP_SYS_ADMIN))
		return -EACCES;

	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	if (ioa_cfg->ioa_is_dead && !strncmp(buf, "online", 6)) {
		ioa_cfg->ioa_is_dead = 0;
		ioa_cfg->reset_retries = 0;
		ioa_cfg->in_ioa_bringdown = 0;
		ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
	}
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
	wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);

	return result;
}

static struct device_attribute ipr_ioa_state_attr = {
	.attr = {
		.name = "online_state",
		.mode = S_IRUGO | S_IWUSR,
	},
	.show = ipr_show_adapter_state,
	.store = ipr_store_adapter_state
};

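/*
 * Usage sketch (illustrative only): a dead adapter can be brought back
 * by writing "online" to the online_state attribute; reads report
 * "online" or "offline". Host number is an assumption.
 *
 *	# cat /sys/class/scsi_host/host0/online_state
 *	# echo online > /sys/class/scsi_host/host0/online_state
 */
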
/**
 * ipr_store_reset_adapter - Reset the adapter
 * @dev: device struct
 * @buf: buffer
 * @count: buffer size
 *
 * This function will reset the adapter.
 *
 * Return value:
 *	count on success / other on failure
 **/
static ssize_t ipr_store_reset_adapter(struct device *dev,
				       struct device_attribute *attr,
				       const char *buf, size_t count)
{
	struct Scsi_Host *shost = class_to_shost(dev);
	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
	unsigned long lock_flags;
	int result = count;

	if (!capable(CAP_SYS_ADMIN))
		return -EACCES;

	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	if (!ioa_cfg->in_reset_reload)
		ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NORMAL);
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
	wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);

	return result;
}

static struct device_attribute ipr_ioa_reset_attr = {
	.attr = {
		.name = "reset_host",
		.mode = S_IWUSR,
	},
	.store = ipr_store_reset_adapter
};

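/*
 * Usage sketch (illustrative only): any root write to reset_host starts
 * a normal-shutdown adapter reset and blocks until the reset/reload has
 * completed. Host number is an assumption.
 *
 *	# echo 1 > /sys/class/scsi_host/host0/reset_host
 */
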
/**
 * ipr_alloc_ucode_buffer - Allocates a microcode download buffer
 * @buf_len: buffer length
 *
 * Allocates a DMA'able buffer in chunks and assembles a scatter/gather
 * list to use for microcode download
 *
 * Return value:
 *	pointer to sglist / NULL on failure
 **/
static struct ipr_sglist *ipr_alloc_ucode_buffer(int buf_len)
{
	int sg_size, order, bsize_elem, num_elem, i, j;
	struct ipr_sglist *sglist;
	struct scatterlist *scatterlist;
	struct page *page;

	/* Get the minimum size per scatter/gather element */
	sg_size = buf_len / (IPR_MAX_SGLIST - 1);

	/* Get the actual size per element */
	order = get_order(sg_size);

	/* Determine the actual number of bytes per element */
	bsize_elem = PAGE_SIZE * (1 << order);

	/* Determine the actual number of sg entries needed */
	if (buf_len % bsize_elem)
		num_elem = (buf_len / bsize_elem) + 1;
	else
		num_elem = buf_len / bsize_elem;

	/* Allocate a scatter/gather list for the DMA */
	sglist = kzalloc(sizeof(struct ipr_sglist) +
			 (sizeof(struct scatterlist) * (num_elem - 1)),
			 GFP_KERNEL);

	if (sglist == NULL) {
		ipr_trace;
		return NULL;
	}

	scatterlist = sglist->scatterlist;
	sg_init_table(scatterlist, num_elem);

	sglist->order = order;
	sglist->num_sg = num_elem;

	/* Allocate a bunch of sg elements */
	for (i = 0; i < num_elem; i++) {
		page = alloc_pages(GFP_KERNEL, order);
		if (!page) {
			ipr_trace;

			/* Free up what we already allocated */
			for (j = i - 1; j >= 0; j--)
				__free_pages(sg_page(&scatterlist[j]), order);
			kfree(sglist);
			return NULL;
		}

		sg_set_page(&scatterlist[i], page, 0, 0);
	}

	return sglist;
}

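/*
 * Sizing sketch (illustrative numbers only, assuming PAGE_SIZE == 4096
 * and, hypothetically, IPR_MAX_SGLIST == 64): for a 1 MiB image,
 * sg_size = 1048576 / 63 = 16644 bytes, get_order(16644) = 3, so
 * bsize_elem = 4096 * (1 << 3) = 32768 and num_elem = 1048576 / 32768
 * = 32 scatter/gather entries, comfortably under the assumed limit.
 */
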
/**
 * ipr_free_ucode_buffer - Frees a microcode download buffer
 * @sglist: scatter/gather list pointer
 *
 * Free a DMA'able ucode download buffer previously allocated with
 * ipr_alloc_ucode_buffer
 *
 * Return value:
 *	nothing
 **/
static void ipr_free_ucode_buffer(struct ipr_sglist *sglist)
{
	int i;

	for (i = 0; i < sglist->num_sg; i++)
		__free_pages(sg_page(&sglist->scatterlist[i]), sglist->order);

	kfree(sglist);
}

/**
 * ipr_copy_ucode_buffer - Copy user buffer to kernel buffer
 * @sglist: scatter/gather list pointer
 * @buffer: buffer pointer
 * @len: buffer length
 *
 * Copy a microcode image from a user buffer into a buffer allocated by
 * ipr_alloc_ucode_buffer
 *
 * Return value:
 *	0 on success / other on failure
 **/
static int ipr_copy_ucode_buffer(struct ipr_sglist *sglist,
				 u8 *buffer, u32 len)
{
	int bsize_elem, i, result = 0;
	struct scatterlist *scatterlist;
	void *kaddr;

	/* Determine the actual number of bytes per element */
	bsize_elem = PAGE_SIZE * (1 << sglist->order);

	scatterlist = sglist->scatterlist;

	for (i = 0; i < (len / bsize_elem); i++, buffer += bsize_elem) {
		struct page *page = sg_page(&scatterlist[i]);

		kaddr = kmap(page);
		memcpy(kaddr, buffer, bsize_elem);
		kunmap(page);

		scatterlist[i].length = bsize_elem;

		if (result != 0) {
			ipr_trace;
			return result;
		}
	}

	if (len % bsize_elem) {
		struct page *page = sg_page(&scatterlist[i]);

		kaddr = kmap(page);
		memcpy(kaddr, buffer, len % bsize_elem);
		kunmap(page);

		scatterlist[i].length = len % bsize_elem;
	}

	sglist->buffer_len = len;
	return result;
}

/**
 * ipr_build_ucode_ioadl64 - Build a microcode download IOADL
 * @ipr_cmd: ipr command struct
 * @sglist: scatter/gather list
 *
 * Builds a microcode download IOA data list (IOADL).
 *
 **/
static void ipr_build_ucode_ioadl64(struct ipr_cmnd *ipr_cmd,
				    struct ipr_sglist *sglist)
{
	struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
	struct ipr_ioadl64_desc *ioadl64 = ipr_cmd->i.ioadl64;
	struct scatterlist *scatterlist = sglist->scatterlist;
	int i;

	ipr_cmd->dma_use_sg = sglist->num_dma_sg;
	ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
	ioarcb->data_transfer_length = cpu_to_be32(sglist->buffer_len);

	ioarcb->ioadl_len =
		cpu_to_be32(sizeof(struct ipr_ioadl64_desc) * ipr_cmd->dma_use_sg);
	for (i = 0; i < ipr_cmd->dma_use_sg; i++) {
		ioadl64[i].flags = cpu_to_be32(IPR_IOADL_FLAGS_WRITE);
		ioadl64[i].data_len = cpu_to_be32(sg_dma_len(&scatterlist[i]));
		ioadl64[i].address = cpu_to_be64(sg_dma_address(&scatterlist[i]));
	}

	ioadl64[i-1].flags |= cpu_to_be32(IPR_IOADL_FLAGS_LAST);
}

/**
 * ipr_build_ucode_ioadl - Build a microcode download IOADL
 * @ipr_cmd: ipr command struct
 * @sglist: scatter/gather list
 *
 * Builds a microcode download IOA data list (IOADL).
 *
 **/
static void ipr_build_ucode_ioadl(struct ipr_cmnd *ipr_cmd,
				  struct ipr_sglist *sglist)
{
	struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
	struct ipr_ioadl_desc *ioadl = ipr_cmd->i.ioadl;
	struct scatterlist *scatterlist = sglist->scatterlist;
	int i;

	ipr_cmd->dma_use_sg = sglist->num_dma_sg;
	ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
	ioarcb->data_transfer_length = cpu_to_be32(sglist->buffer_len);

	ioarcb->ioadl_len =
		cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);

	for (i = 0; i < ipr_cmd->dma_use_sg; i++) {
		ioadl[i].flags_and_data_len =
			cpu_to_be32(IPR_IOADL_FLAGS_WRITE | sg_dma_len(&scatterlist[i]));
		ioadl[i].address =
			cpu_to_be32(sg_dma_address(&scatterlist[i]));
	}

	ioadl[i-1].flags_and_data_len |=
		cpu_to_be32(IPR_IOADL_FLAGS_LAST);
}

/**
 * ipr_update_ioa_ucode - Update IOA's microcode
 * @ioa_cfg: ioa config struct
 * @sglist: scatter/gather list
 *
 * Initiate an adapter reset to update the IOA's microcode
 *
 * Return value:
 *	0 on success / -EIO on failure
 **/
static int ipr_update_ioa_ucode(struct ipr_ioa_cfg *ioa_cfg,
				struct ipr_sglist *sglist)
{
	unsigned long lock_flags;

	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	while (ioa_cfg->in_reset_reload) {
		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
		wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
		spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	}

	if (ioa_cfg->ucode_sglist) {
		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
		dev_err(&ioa_cfg->pdev->dev,
			"Microcode download already in progress\n");
		return -EIO;
	}

	sglist->num_dma_sg = pci_map_sg(ioa_cfg->pdev, sglist->scatterlist,
					sglist->num_sg, DMA_TO_DEVICE);

	if (!sglist->num_dma_sg) {
		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
		dev_err(&ioa_cfg->pdev->dev,
			"Failed to map microcode download buffer!\n");
		return -EIO;
	}

	ioa_cfg->ucode_sglist = sglist;
	ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NORMAL);
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
	wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);

	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	ioa_cfg->ucode_sglist = NULL;
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
	return 0;
}

/**
 * ipr_store_update_fw - Update the firmware on the adapter
 * @dev: device struct
 * @buf: buffer
 * @count: buffer size
 *
 * This function will update the firmware on the adapter.
 *
 * Return value:
 *	count on success / other on failure
 **/
static ssize_t ipr_store_update_fw(struct device *dev,
				   struct device_attribute *attr,
				   const char *buf, size_t count)
{
	struct Scsi_Host *shost = class_to_shost(dev);
	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
	struct ipr_ucode_image_header *image_hdr;
	const struct firmware *fw_entry;
	struct ipr_sglist *sglist;
	char fname[100];
	char *src;
	int len, result, dnld_size;

	if (!capable(CAP_SYS_ADMIN))
		return -EACCES;

	len = snprintf(fname, 99, "%s", buf);
	fname[len-1] = '\0';

	if (request_firmware(&fw_entry, fname, &ioa_cfg->pdev->dev)) {
		dev_err(&ioa_cfg->pdev->dev, "Firmware file %s not found\n", fname);
		return -EIO;
	}

	image_hdr = (struct ipr_ucode_image_header *)fw_entry->data;

	src = (u8 *)image_hdr + be32_to_cpu(image_hdr->header_length);
	dnld_size = fw_entry->size - be32_to_cpu(image_hdr->header_length);
	sglist = ipr_alloc_ucode_buffer(dnld_size);

	if (!sglist) {
		dev_err(&ioa_cfg->pdev->dev, "Microcode buffer allocation failed\n");
		release_firmware(fw_entry);
		return -ENOMEM;
	}

	result = ipr_copy_ucode_buffer(sglist, src, dnld_size);

	if (result) {
		dev_err(&ioa_cfg->pdev->dev,
			"Microcode buffer copy to DMA buffer failed\n");
		goto out;
	}

	ipr_info("Updating microcode, please be patient. This may take up to 30 minutes.\n");

	result = ipr_update_ioa_ucode(ioa_cfg, sglist);

	if (!result)
		result = count;
out:
	ipr_free_ucode_buffer(sglist);
	release_firmware(fw_entry);
	return result;
}

static struct device_attribute ipr_update_fw_attr = {
	.attr = {
		.name = "update_fw",
		.mode = S_IWUSR,
	},
	.store = ipr_store_update_fw
};

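/*
 * Usage sketch (illustrative only): the attribute takes a firmware file
 * name that request_firmware() resolves, conventionally under
 * /lib/firmware. The file name and host number below are assumptions.
 *
 *	# cp ibm-ucode.bin /lib/firmware/
 *	# echo ibm-ucode.bin > /sys/class/scsi_host/host0/update_fw
 */
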
/**
 * ipr_show_fw_type - Show the adapter's firmware type.
 * @dev: class device struct
 * @buf: buffer
 *
 * Return value:
 *	number of bytes printed to buffer
 **/
static ssize_t ipr_show_fw_type(struct device *dev,
				struct device_attribute *attr, char *buf)
{
	struct Scsi_Host *shost = class_to_shost(dev);
	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
	unsigned long lock_flags = 0;
	int len;

	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	len = snprintf(buf, PAGE_SIZE, "%d\n", ioa_cfg->sis64);
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
	return len;
}

static struct device_attribute ipr_ioa_fw_type_attr = {
	.attr = {
		.name = "fw_type",
		.mode = S_IRUGO,
	},
	.show = ipr_show_fw_type
};

static struct device_attribute *ipr_ioa_attrs[] = {
	&ipr_fw_version_attr,
	&ipr_log_level_attr,
	&ipr_diagnostics_attr,
	&ipr_ioa_state_attr,
	&ipr_ioa_reset_attr,
	&ipr_update_fw_attr,
	&ipr_ioa_fw_type_attr,
	NULL,
};

#ifdef CONFIG_SCSI_IPR_DUMP
/**
 * ipr_read_dump - Dump the adapter
 * @filp: open sysfs file
 * @kobj: kobject struct
 * @bin_attr: bin_attribute struct
 * @buf: buffer
 * @off: offset
 * @count: buffer size
 *
 * Return value:
 *	number of bytes printed to buffer
 **/
static ssize_t ipr_read_dump(struct file *filp, struct kobject *kobj,
			     struct bin_attribute *bin_attr,
			     char *buf, loff_t off, size_t count)
{
	struct device *cdev = container_of(kobj, struct device, kobj);
	struct Scsi_Host *shost = class_to_shost(cdev);
	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
	struct ipr_dump *dump;
	unsigned long lock_flags = 0;
	char *src;
	int len, sdt_end;
	size_t rc = count;

	if (!capable(CAP_SYS_ADMIN))
		return -EACCES;

	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	dump = ioa_cfg->dump;

	if (ioa_cfg->sdt_state != DUMP_OBTAINED || !dump) {
		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
		return 0;
	}
	kref_get(&dump->kref);
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);

	if (off > dump->driver_dump.hdr.len) {
		kref_put(&dump->kref, ipr_release_dump);
		return 0;
	}

	if (off + count > dump->driver_dump.hdr.len) {
		count = dump->driver_dump.hdr.len - off;
		rc = count;
	}

	if (count && off < sizeof(dump->driver_dump)) {
		if (off + count > sizeof(dump->driver_dump))
			len = sizeof(dump->driver_dump) - off;
		else
			len = count;
		src = (u8 *)&dump->driver_dump + off;
		memcpy(buf, src, len);
		buf += len;
		off += len;
		count -= len;
	}

	off -= sizeof(dump->driver_dump);

	if (ioa_cfg->sis64)
		sdt_end = offsetof(struct ipr_ioa_dump, sdt.entry) +
			  (be32_to_cpu(dump->ioa_dump.sdt.hdr.num_entries_used) *
			   sizeof(struct ipr_sdt_entry));
	else
		sdt_end = offsetof(struct ipr_ioa_dump, sdt.entry) +
			  (IPR_FMT2_NUM_SDT_ENTRIES * sizeof(struct ipr_sdt_entry));

	if (count && off < sdt_end) {
		if (off + count > sdt_end)
			len = sdt_end - off;
		else
			len = count;
		src = (u8 *)&dump->ioa_dump + off;
		memcpy(buf, src, len);
		buf += len;
		off += len;
		count -= len;
	}

	off -= sdt_end;

	while (count) {
		if ((off & PAGE_MASK) != ((off + count) & PAGE_MASK))
			len = PAGE_ALIGN(off) - off;
		else
			len = count;
		src = (u8 *)dump->ioa_dump.ioa_data[(off & PAGE_MASK) >> PAGE_SHIFT];
		src += off & ~PAGE_MASK;
		memcpy(buf, src, len);
		buf += len;
		off += len;
		count -= len;
	}

	kref_put(&dump->kref, ipr_release_dump);
	return rc;
}

/**
 * ipr_alloc_dump - Prepare for adapter dump
 * @ioa_cfg: ioa config struct
 *
 * Return value:
 *	0 on success / other on failure
 **/
static int ipr_alloc_dump(struct ipr_ioa_cfg *ioa_cfg)
{
	struct ipr_dump *dump;
	__be32 **ioa_data;
	unsigned long lock_flags = 0;

	dump = kzalloc(sizeof(struct ipr_dump), GFP_KERNEL);

	if (!dump) {
		ipr_err("Dump memory allocation failed\n");
		return -ENOMEM;
	}

	if (ioa_cfg->sis64)
		ioa_data = vmalloc(IPR_FMT3_MAX_NUM_DUMP_PAGES * sizeof(__be32 *));
	else
		ioa_data = vmalloc(IPR_FMT2_MAX_NUM_DUMP_PAGES * sizeof(__be32 *));

	if (!ioa_data) {
		ipr_err("Dump memory allocation failed\n");
		kfree(dump);
		return -ENOMEM;
	}

	dump->ioa_dump.ioa_data = ioa_data;

	kref_init(&dump->kref);
	dump->ioa_cfg = ioa_cfg;

	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);

	if (INACTIVE != ioa_cfg->sdt_state) {
		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
		vfree(dump->ioa_dump.ioa_data);
		kfree(dump);
		return 0;
	}

	ioa_cfg->dump = dump;
	ioa_cfg->sdt_state = WAIT_FOR_DUMP;
	if (ioa_cfg->ioa_is_dead && !ioa_cfg->dump_taken) {
		ioa_cfg->dump_taken = 1;
		schedule_work(&ioa_cfg->work_q);
	}
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);

	return 0;
}

/**
 * ipr_free_dump - Free adapter dump memory
 * @ioa_cfg: ioa config struct
 *
 * Return value:
 *	0 on success / other on failure
 **/
static int ipr_free_dump(struct ipr_ioa_cfg *ioa_cfg)
{
	struct ipr_dump *dump;
	unsigned long lock_flags = 0;

	ENTER;

	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	dump = ioa_cfg->dump;
	if (!dump) {
		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
		return 0;
	}

	ioa_cfg->dump = NULL;
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);

	kref_put(&dump->kref, ipr_release_dump);

	LEAVE;
	return 0;
}

/**
 * ipr_write_dump - Setup dump state of adapter
 * @filp: open sysfs file
 * @kobj: kobject struct
 * @bin_attr: bin_attribute struct
 * @buf: buffer
 * @off: offset
 * @count: buffer size
 *
 * Return value:
 *	count on success / other on failure
 **/
static ssize_t ipr_write_dump(struct file *filp, struct kobject *kobj,
			      struct bin_attribute *bin_attr,
			      char *buf, loff_t off, size_t count)
{
	struct device *cdev = container_of(kobj, struct device, kobj);
	struct Scsi_Host *shost = class_to_shost(cdev);
	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
	int rc;

	if (!capable(CAP_SYS_ADMIN))
		return -EACCES;

	if (buf[0] == '1')
		rc = ipr_alloc_dump(ioa_cfg);
	else if (buf[0] == '0')
		rc = ipr_free_dump(ioa_cfg);
	else
		return -EINVAL;

	if (rc)
		return rc;
	else
		return count;
}

static struct bin_attribute ipr_dump_attr = {
	.attr = {
		.name = "dump",
		.mode = S_IRUSR | S_IWUSR,
	},
	.size = 0,
	.read = ipr_read_dump,
	.write = ipr_write_dump
};
#else
static int ipr_free_dump(struct ipr_ioa_cfg *ioa_cfg) { return 0; }
#endif

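/*
 * Usage sketch (illustrative only, CONFIG_SCSI_IPR_DUMP enabled):
 * writing '1' arms dump collection, the dump is then read back through
 * the same binary attribute, and writing '0' releases the dump memory.
 * Host number is an assumption.
 *
 *	# echo 1 > /sys/class/scsi_host/host0/dump
 *	# cat /sys/class/scsi_host/host0/dump > /tmp/ipr_dump.bin
 *	# echo 0 > /sys/class/scsi_host/host0/dump
 */
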
/**
 * ipr_change_queue_depth - Change the device's queue depth
 * @sdev: scsi device struct
 * @qdepth: depth to set
 * @reason: calling context
 *
 * Return value:
 *	actual depth set
 **/
static int ipr_change_queue_depth(struct scsi_device *sdev, int qdepth,
				  int reason)
{
	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
	struct ipr_resource_entry *res;
	unsigned long lock_flags = 0;

	if (reason != SCSI_QDEPTH_DEFAULT)
		return -EOPNOTSUPP;

	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	res = (struct ipr_resource_entry *)sdev->hostdata;

	if (res && ipr_is_gata(res) && qdepth > IPR_MAX_CMD_PER_ATA_LUN)
		qdepth = IPR_MAX_CMD_PER_ATA_LUN;
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);

	scsi_adjust_queue_depth(sdev, scsi_get_tag_type(sdev), qdepth);
	return sdev->queue_depth;
}

/**
 * ipr_change_queue_type - Change the device's queue type
 * @sdev: scsi device struct
 * @tag_type: type of tags to use
 *
 * Return value:
 *	actual queue type set
 **/
static int ipr_change_queue_type(struct scsi_device *sdev, int tag_type)
{
	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
	struct ipr_resource_entry *res;
	unsigned long lock_flags = 0;

	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	res = (struct ipr_resource_entry *)sdev->hostdata;

	if (res) {
		if (ipr_is_gscsi(res) && sdev->tagged_supported) {
			/*
			 * We don't bother quiescing the device here since the
			 * adapter firmware does it for us.
			 */
			scsi_set_tag_type(sdev, tag_type);

			if (tag_type)
				scsi_activate_tcq(sdev, sdev->queue_depth);
			else
				scsi_deactivate_tcq(sdev, sdev->queue_depth);
		} else
			tag_type = 0;
	} else
		tag_type = 0;

	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
	return tag_type;
}

/**
 * ipr_show_adapter_handle - Show the adapter's resource handle for this device
 * @dev: device struct
 * @attr: device attribute structure
 * @buf: buffer
 *
 * Return value:
 *	number of bytes printed to buffer
 **/
static ssize_t ipr_show_adapter_handle(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct scsi_device *sdev = to_scsi_device(dev);
	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
	struct ipr_resource_entry *res;
	unsigned long lock_flags = 0;
	ssize_t len = -ENXIO;

	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	res = (struct ipr_resource_entry *)sdev->hostdata;
	if (res)
		len = snprintf(buf, PAGE_SIZE, "%08X\n", res->res_handle);
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
	return len;
}

static struct device_attribute ipr_adapter_handle_attr = {
	.attr = {
		.name = "adapter_handle",
		.mode = S_IRUSR,
	},
	.show = ipr_show_adapter_handle
};

/**
 * ipr_show_resource_path - Show the resource path or the resource address for
 *			    this device.
 * @dev: device struct
 * @attr: device attribute structure
 * @buf: buffer
 *
 * Return value:
 *	number of bytes printed to buffer
 **/
static ssize_t ipr_show_resource_path(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct scsi_device *sdev = to_scsi_device(dev);
	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
	struct ipr_resource_entry *res;
	unsigned long lock_flags = 0;
	ssize_t len = -ENXIO;
	char buffer[IPR_MAX_RES_PATH_LENGTH];

	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	res = (struct ipr_resource_entry *)sdev->hostdata;
	if (res && ioa_cfg->sis64)
		len = snprintf(buf, PAGE_SIZE, "%s\n",
			       __ipr_format_res_path(res->res_path, buffer,
						     sizeof(buffer)));
	else if (res)
		len = snprintf(buf, PAGE_SIZE, "%d:%d:%d:%d\n", ioa_cfg->host->host_no,
			       res->bus, res->target, res->lun);

	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
	return len;
}

static struct device_attribute ipr_resource_path_attr = {
	.attr = {
		.name = "resource_path",
		.mode = S_IRUGO,
	},
	.show = ipr_show_resource_path
};

/**
 * ipr_show_device_id - Show the device_id for this device.
 * @dev: device struct
 * @attr: device attribute structure
 * @buf: buffer
 *
 * Return value:
 *	number of bytes printed to buffer
 **/
static ssize_t ipr_show_device_id(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct scsi_device *sdev = to_scsi_device(dev);
	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
	struct ipr_resource_entry *res;
	unsigned long lock_flags = 0;
	ssize_t len = -ENXIO;

	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	res = (struct ipr_resource_entry *)sdev->hostdata;
	if (res && ioa_cfg->sis64)
		len = snprintf(buf, PAGE_SIZE, "0x%llx\n", res->dev_id);
	else if (res)
		len = snprintf(buf, PAGE_SIZE, "0x%llx\n", res->lun_wwn);

	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
	return len;
}

static struct device_attribute ipr_device_id_attr = {
	.attr = {
		.name = "device_id",
		.mode = S_IRUGO,
	},
	.show = ipr_show_device_id
};

/**
 * ipr_show_resource_type - Show the resource type for this device.
 * @dev: device struct
 * @attr: device attribute structure
 * @buf: buffer
 *
 * Return value:
 *	number of bytes printed to buffer
 **/
static ssize_t ipr_show_resource_type(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct scsi_device *sdev = to_scsi_device(dev);
	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
	struct ipr_resource_entry *res;
	unsigned long lock_flags = 0;
	ssize_t len = -ENXIO;

	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	res = (struct ipr_resource_entry *)sdev->hostdata;

	if (res)
		len = snprintf(buf, PAGE_SIZE, "%x\n", res->type);

	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
	return len;
}

static struct device_attribute ipr_resource_type_attr = {
	.attr = {
		.name = "resource_type",
		.mode = S_IRUGO,
	},
	.show = ipr_show_resource_type
};

static struct device_attribute *ipr_dev_attrs[] = {
	&ipr_adapter_handle_attr,
	&ipr_resource_path_attr,
	&ipr_device_id_attr,
	&ipr_resource_type_attr,
	NULL,
};

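/*
 * Usage sketch (illustrative only): these per-device attributes appear
 * in the SCSI device's sysfs directory; the host:channel:target:lun
 * address below is an assumption.
 *
 *	# cd /sys/bus/scsi/devices/0:0:1:0
 *	# cat adapter_handle resource_path device_id resource_type
 */
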
/**
 * ipr_biosparam - Return the HSC mapping
 * @sdev: scsi device struct
 * @block_device: block device pointer
 * @capacity: capacity of the device
 * @parm: Array containing returned HSC values.
 *
 * This function generates the HSC parms that fdisk uses.
 * We want to make sure we return something that places partitions
 * on 4k boundaries for best performance with the IOA.
 *
 * Return value:
 *	0 on success
 **/
static int ipr_biosparam(struct scsi_device *sdev,
			 struct block_device *block_device,
			 sector_t capacity, int *parm)
{
	int heads, sectors;
	sector_t cylinders;

	heads = 128;
	sectors = 32;

	cylinders = capacity;
	sector_div(cylinders, (128 * 32));

	/* return result */
	parm[0] = heads;
	parm[1] = sectors;
	parm[2] = cylinders;

	return 0;
}

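/*
 * Worked example (hypothetical capacity, not from the original source):
 * with the fixed 128-head, 32-sector geometry each cylinder spans
 * 128 * 32 = 4096 sectors, i.e. 2 MiB of 512-byte sectors, which is why
 * partitions created on cylinder boundaries land on 4k boundaries. A
 * 71096640-sector disk would map to 71096640 / 4096 = 17357 cylinders
 * (truncated by sector_div()).
 */
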
/**
 * ipr_find_starget - Find target based on bus/target.
 * @starget: scsi target struct
 *
 * Return value:
 *	resource entry pointer if found / NULL if not found
 **/
static struct ipr_resource_entry *ipr_find_starget(struct scsi_target *starget)
{
	struct Scsi_Host *shost = dev_to_shost(&starget->dev);
	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) shost->hostdata;
	struct ipr_resource_entry *res;

	list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
		if ((res->bus == starget->channel) &&
		    (res->target == starget->id)) {
			return res;
		}
	}

	return NULL;
}

static struct ata_port_info sata_port_info;

/**
 * ipr_target_alloc - Prepare for commands to a SCSI target
 * @starget: scsi target struct
 *
 * If the device is a SATA device, this function allocates an
 * ATA port with libata, else it does nothing.
 *
 * Return value:
 *	0 on success / non-0 on failure
 **/
static int ipr_target_alloc(struct scsi_target *starget)
{
	struct Scsi_Host *shost = dev_to_shost(&starget->dev);
	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) shost->hostdata;
	struct ipr_sata_port *sata_port;
	struct ata_port *ap;
	struct ipr_resource_entry *res;
	unsigned long lock_flags;

	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	res = ipr_find_starget(starget);
	starget->hostdata = NULL;

	if (res && ipr_is_gata(res)) {
		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
		sata_port = kzalloc(sizeof(*sata_port), GFP_KERNEL);
		if (!sata_port)
			return -ENOMEM;

		ap = ata_sas_port_alloc(&ioa_cfg->ata_host, &sata_port_info, shost);
		if (ap) {
			spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
			sata_port->ioa_cfg = ioa_cfg;
			sata_port->ap = ap;
			sata_port->res = res;

			res->sata_port = sata_port;
			ap->private_data = sata_port;
			starget->hostdata = sata_port;
		} else {
			kfree(sata_port);
			return -ENOMEM;
		}
	}
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);

	return 0;
}

/**
 * ipr_target_destroy - Destroy a SCSI target
 * @starget: scsi target struct
 *
 * If the device was a SATA device, this function frees the libata
 * ATA port, else it does nothing.
 *
 **/
static void ipr_target_destroy(struct scsi_target *starget)
{
	struct ipr_sata_port *sata_port = starget->hostdata;
	struct Scsi_Host *shost = dev_to_shost(&starget->dev);
	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) shost->hostdata;

	if (ioa_cfg->sis64) {
		if (!ipr_find_starget(starget)) {
			if (starget->channel == IPR_ARRAY_VIRTUAL_BUS)
				clear_bit(starget->id, ioa_cfg->array_ids);
			else if (starget->channel == IPR_VSET_VIRTUAL_BUS)
				clear_bit(starget->id, ioa_cfg->vset_ids);
			else if (starget->channel == 0)
				clear_bit(starget->id, ioa_cfg->target_ids);
		}
	}

	if (sata_port) {
		starget->hostdata = NULL;
		ata_sas_port_destroy(sata_port->ap);
		kfree(sata_port);
	}
}

/**
 * ipr_find_sdev - Find device based on bus/target/lun.
 * @sdev: scsi device struct
 *
 * Return value:
 *	resource entry pointer if found / NULL if not found
 **/
static struct ipr_resource_entry *ipr_find_sdev(struct scsi_device *sdev)
{
	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) sdev->host->hostdata;
	struct ipr_resource_entry *res;

	list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
		if ((res->bus == sdev->channel) &&
		    (res->target == sdev->id) &&
		    (res->lun == sdev->lun))
			return res;
	}

	return NULL;
}

/**
 * ipr_slave_destroy - Unconfigure a SCSI device
 * @sdev: scsi device struct
 *
 * Return value:
 *	nothing
 **/
static void ipr_slave_destroy(struct scsi_device *sdev)
{
	struct ipr_resource_entry *res;
	struct ipr_ioa_cfg *ioa_cfg;
	unsigned long lock_flags = 0;

	ioa_cfg = (struct ipr_ioa_cfg *) sdev->host->hostdata;

	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	res = (struct ipr_resource_entry *) sdev->hostdata;
	if (res) {
		if (res->sata_port)
			res->sata_port->ap->link.device[0].class = ATA_DEV_NONE;
		sdev->hostdata = NULL;
		res->sdev = NULL;
		res->sata_port = NULL;
	}
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
}

/**
 * ipr_slave_configure - Configure a SCSI device
 * @sdev: scsi device struct
 *
 * This function configures the specified scsi device.
 *
 * Return value:
 *	0 on success
 **/
static int ipr_slave_configure(struct scsi_device *sdev)
{
	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) sdev->host->hostdata;
	struct ipr_resource_entry *res;
	struct ata_port *ap = NULL;
	unsigned long lock_flags = 0;
	char buffer[IPR_MAX_RES_PATH_LENGTH];

	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	res = sdev->hostdata;
	if (res) {
		if (ipr_is_af_dasd_device(res))
			sdev->type = TYPE_RAID;
		if (ipr_is_af_dasd_device(res) || ipr_is_ioa_resource(res)) {
			sdev->scsi_level = 4;
			sdev->no_uld_attach = 1;
		}
		if (ipr_is_vset_device(res)) {
			blk_queue_rq_timeout(sdev->request_queue,
					     IPR_VSET_RW_TIMEOUT);
			blk_queue_max_hw_sectors(sdev->request_queue, IPR_VSET_MAX_SECTORS);
		}
		if (ipr_is_gata(res) && res->sata_port)
			ap = res->sata_port->ap;
		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);

		if (ap) {
			scsi_adjust_queue_depth(sdev, 0, IPR_MAX_CMD_PER_ATA_LUN);
			ata_sas_slave_configure(sdev, ap);
		} else
			scsi_adjust_queue_depth(sdev, 0, sdev->host->cmd_per_lun);
		if (ioa_cfg->sis64)
			sdev_printk(KERN_INFO, sdev, "Resource path: %s\n",
				    ipr_format_res_path(ioa_cfg,
					res->res_path, buffer, sizeof(buffer)));
		return 0;
	}
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
	return 0;
}

/**
 * ipr_ata_slave_alloc - Prepare for commands to a SATA device
 * @sdev: scsi device struct
 *
 * This function initializes an ATA port so that future commands
 * sent through queuecommand will work.
 *
 * Return value:
 *	0 on success
 **/
static int ipr_ata_slave_alloc(struct scsi_device *sdev)
{
	struct ipr_sata_port *sata_port = NULL;
	int rc = -ENXIO;

	ENTER;
	if (sdev->sdev_target)
		sata_port = sdev->sdev_target->hostdata;
	if (sata_port) {
		rc = ata_sas_port_init(sata_port->ap);
		if (rc == 0)
			rc = ata_sas_sync_probe(sata_port->ap);
	}

	if (rc)
		ipr_slave_destroy(sdev);

	LEAVE;
	return rc;
}

/**
 * ipr_slave_alloc - Prepare for commands to a device.
 * @sdev: scsi device struct
 *
 * This function saves a pointer to the resource entry
 * in the scsi device struct if the device exists. We
 * can then use this pointer in ipr_queuecommand when
 * handling new commands.
 *
 * Return value:
 *	0 on success / -ENXIO if device does not exist
 **/
static int ipr_slave_alloc(struct scsi_device *sdev)
{
	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) sdev->host->hostdata;
	struct ipr_resource_entry *res;
	unsigned long lock_flags;
	int rc = -ENXIO;

	sdev->hostdata = NULL;

	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);

	res = ipr_find_sdev(sdev);
	if (res) {
		res->sdev = sdev;
		res->add_to_ml = 0;
		res->in_erp = 0;
		sdev->hostdata = res;
		if (!ipr_is_naca_model(res))
			res->needs_sync_complete = 1;
		rc = 0;
		if (ipr_is_gata(res)) {
			spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
			return ipr_ata_slave_alloc(sdev);
		}
	}

	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);

	return rc;
}

/**
 * ipr_eh_host_reset - Reset the host adapter
 * @scsi_cmd: scsi command struct
 *
 * Return value:
 *	SUCCESS / FAILED
 **/
static int __ipr_eh_host_reset(struct scsi_cmnd *scsi_cmd)
{
	struct ipr_ioa_cfg *ioa_cfg;
	int rc;

	ENTER;
	ioa_cfg = (struct ipr_ioa_cfg *) scsi_cmd->device->host->hostdata;

	if (!ioa_cfg->in_reset_reload) {
		dev_err(&ioa_cfg->pdev->dev,
			"Adapter being reset as a result of error recovery.\n");

		if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
			ioa_cfg->sdt_state = GET_DUMP;
	}

	rc = ipr_reset_reload(ioa_cfg, IPR_SHUTDOWN_ABBREV);

	LEAVE;
	return rc;
}

static int ipr_eh_host_reset(struct scsi_cmnd *cmd)
{
	int rc;

	spin_lock_irq(cmd->device->host->host_lock);
	rc = __ipr_eh_host_reset(cmd);
	spin_unlock_irq(cmd->device->host->host_lock);

	return rc;
}

/**
 * ipr_device_reset - Reset the device
 * @ioa_cfg: ioa config struct
 * @res: resource entry struct
 *
 * This function issues a device reset to the affected device.
 * If the device is a SCSI device, a LUN reset will be sent
 * to the device first. If that does not work, a target reset
 * will be sent. If the device is a SATA device, a PHY reset will
 * be sent.
 *
 * Return value:
 *	0 on success / non-zero on failure
 **/
static int ipr_device_reset(struct ipr_ioa_cfg *ioa_cfg,
			    struct ipr_resource_entry *res)
{
	struct ipr_cmnd *ipr_cmd;
	struct ipr_ioarcb *ioarcb;
	struct ipr_cmd_pkt *cmd_pkt;
	struct ipr_ioarcb_ata_regs *regs;
	u32 ioasc;

	ENTER;
	ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
	ioarcb = &ipr_cmd->ioarcb;
	cmd_pkt = &ioarcb->cmd_pkt;

	if (ipr_cmd->ioa_cfg->sis64) {
		regs = &ipr_cmd->i.ata_ioadl.regs;
		ioarcb->add_cmd_parms_offset = cpu_to_be16(sizeof(*ioarcb));
	} else
		regs = &ioarcb->u.add_data.u.regs;

	ioarcb->res_handle = res->res_handle;
	cmd_pkt->request_type = IPR_RQTYPE_IOACMD;
	cmd_pkt->cdb[0] = IPR_RESET_DEVICE;
	if (ipr_is_gata(res)) {
		cmd_pkt->cdb[2] = IPR_ATA_PHY_RESET;
		ioarcb->add_cmd_parms_len = cpu_to_be16(sizeof(regs->flags));
		regs->flags |= IPR_ATA_FLAG_STATUS_ON_GOOD_COMPLETION;
	}

	ipr_send_blocking_cmd(ipr_cmd, ipr_timeout, IPR_DEVICE_RESET_TIMEOUT);
	ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
	list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
	if (ipr_is_gata(res) && res->sata_port && ioasc != IPR_IOASC_IOA_WAS_RESET) {
		if (ipr_cmd->ioa_cfg->sis64)
			memcpy(&res->sata_port->ioasa, &ipr_cmd->s.ioasa64.u.gata,
			       sizeof(struct ipr_ioasa_gata));
		else
			memcpy(&res->sata_port->ioasa, &ipr_cmd->s.ioasa.u.gata,
			       sizeof(struct ipr_ioasa_gata));
	}

	LEAVE;
	return IPR_IOASC_SENSE_KEY(ioasc) ? -EIO : 0;
}

/**
 * ipr_sata_reset - Reset the SATA port
 * @link: SATA link to reset
 * @classes: class of the attached device
 *
 * This function issues a SATA phy reset to the affected ATA link.
 *
 * Return value:
 *	0 on success / non-zero on failure
 **/
static int ipr_sata_reset(struct ata_link *link, unsigned int *classes,
			  unsigned long deadline)
{
	struct ipr_sata_port *sata_port = link->ap->private_data;
	struct ipr_ioa_cfg *ioa_cfg = sata_port->ioa_cfg;
	struct ipr_resource_entry *res;
	unsigned long lock_flags = 0;
	int rc = -ENXIO;

	ENTER;
	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	while (ioa_cfg->in_reset_reload) {
		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
		wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
		spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	}

	res = sata_port->res;
	if (res) {
		rc = ipr_device_reset(ioa_cfg, res);
		*classes = res->ata_class;
	}

	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
	LEAVE;
	return rc;
}

/**
 * ipr_eh_dev_reset - Reset the device
 * @scsi_cmd: scsi command struct
 *
 * This function issues a device reset to the affected device.
 * A LUN reset will be sent to the device first. If that does
 * not work, a target reset will be sent.
 *
 * Return value:
 *	SUCCESS / FAILED
 **/
static int __ipr_eh_dev_reset(struct scsi_cmnd *scsi_cmd)
{
	struct ipr_cmnd *ipr_cmd;
	struct ipr_ioa_cfg *ioa_cfg;
	struct ipr_resource_entry *res;
	struct ata_port *ap;
	int rc = 0;
	struct ipr_hrr_queue *hrrq;

	ENTER;
	ioa_cfg = (struct ipr_ioa_cfg *) scsi_cmd->device->host->hostdata;
	res = scsi_cmd->device->hostdata;

	if (!res)
		return FAILED;

	/*
	 * If we are currently going through reset/reload, return failed. This will force the
	 * mid-layer to call ipr_eh_host_reset, which will then go to sleep and wait for the
	 * reset to complete
	 */
	if (ioa_cfg->in_reset_reload)
		return FAILED;
	if (ioa_cfg->ioa_is_dead)
		return FAILED;

	for_each_hrrq(hrrq, ioa_cfg) {
		list_for_each_entry(ipr_cmd, &hrrq->hrrq_pending_q, queue) {
			if (ipr_cmd->ioarcb.res_handle == res->res_handle) {
				if (ipr_cmd->scsi_cmd)
					ipr_cmd->done = ipr_scsi_eh_done;
				if (ipr_cmd->qc)
					ipr_cmd->done = ipr_sata_eh_done;
				if (ipr_cmd->qc &&
				    !(ipr_cmd->qc->flags & ATA_QCFLAG_FAILED)) {
					ipr_cmd->qc->err_mask |= AC_ERR_TIMEOUT;
					ipr_cmd->qc->flags |= ATA_QCFLAG_FAILED;
				}
			}
		}
	}
	res->resetting_device = 1;
	scmd_printk(KERN_ERR, scsi_cmd, "Resetting device\n");

	if (ipr_is_gata(res) && res->sata_port) {
		ap = res->sata_port->ap;
		spin_unlock_irq(scsi_cmd->device->host->host_lock);
		ata_std_error_handler(ap);
		spin_lock_irq(scsi_cmd->device->host->host_lock);

		for_each_hrrq(hrrq, ioa_cfg) {
			list_for_each_entry(ipr_cmd,
					    &hrrq->hrrq_pending_q, queue) {
				if (ipr_cmd->ioarcb.res_handle ==
				    res->res_handle) {
					rc = -EIO;
					break;
				}
			}
		}
	} else
		rc = ipr_device_reset(ioa_cfg, res);
	res->resetting_device = 0;

	LEAVE;
	return rc ? FAILED : SUCCESS;
}

static int ipr_eh_dev_reset(struct scsi_cmnd *cmd)
{
	int rc;

	spin_lock_irq(cmd->device->host->host_lock);
	rc = __ipr_eh_dev_reset(cmd);
	spin_unlock_irq(cmd->device->host->host_lock);

	return rc;
}

/**
 * ipr_bus_reset_done - Op done function for bus reset.
 * @ipr_cmd: ipr command struct
 *
 * This function is the op done function for a bus reset
 *
 * Return value:
 *	none
 **/
static void ipr_bus_reset_done(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
	struct ipr_resource_entry *res;

	ENTER;
	if (!ioa_cfg->sis64)
		list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
			if (res->res_handle == ipr_cmd->ioarcb.res_handle) {
				scsi_report_bus_reset(ioa_cfg->host, res->bus);
				break;
			}
		}

	/*
	 * If abort has not completed, indicate the reset has, else call the
	 * abort's done function to wake the sleeping eh thread
	 */
	if (ipr_cmd->sibling->sibling)
		ipr_cmd->sibling->sibling = NULL;
	else
		ipr_cmd->sibling->done(ipr_cmd->sibling);

	list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
	LEAVE;
}

/**
 * ipr_abort_timeout - An abort task has timed out
 * @ipr_cmd: ipr command struct
 *
 * This function handles when an abort task times out. If this
 * happens we issue a bus reset since we have resources tied
 * up that must be freed before returning to the midlayer.
 *
 * Return value:
 *	none
 **/
static void ipr_abort_timeout(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_cmnd *reset_cmd;
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
	struct ipr_cmd_pkt *cmd_pkt;
	unsigned long lock_flags = 0;

	ENTER;
	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	if (ipr_cmd->completion.done || ioa_cfg->in_reset_reload) {
		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
		return;
	}

	sdev_printk(KERN_ERR, ipr_cmd->u.sdev, "Abort timed out. Resetting bus.\n");
	reset_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
	ipr_cmd->sibling = reset_cmd;
	reset_cmd->sibling = ipr_cmd;
	reset_cmd->ioarcb.res_handle = ipr_cmd->ioarcb.res_handle;
	cmd_pkt = &reset_cmd->ioarcb.cmd_pkt;
	cmd_pkt->request_type = IPR_RQTYPE_IOACMD;
	cmd_pkt->cdb[0] = IPR_RESET_DEVICE;
	cmd_pkt->cdb[2] = IPR_RESET_TYPE_SELECT | IPR_BUS_RESET;

	ipr_do_req(reset_cmd, ipr_bus_reset_done, ipr_timeout, IPR_DEVICE_RESET_TIMEOUT);
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
	LEAVE;
}

4996/**
4997 * ipr_cancel_op - Cancel specified op
4998 * @scsi_cmd: scsi command struct
4999 *
5000 * This function cancels specified op.
5001 *
5002 * Return value:
5003 * SUCCESS / FAILED
5004 **/
203fa3fe 5005static int ipr_cancel_op(struct scsi_cmnd *scsi_cmd)
1da177e4
LT
5006{
5007 struct ipr_cmnd *ipr_cmd;
5008 struct ipr_ioa_cfg *ioa_cfg;
5009 struct ipr_resource_entry *res;
5010 struct ipr_cmd_pkt *cmd_pkt;
a92fa25c 5011 u32 ioasc, int_reg;
1da177e4 5012 int op_found = 0;
05a6538a 5013 struct ipr_hrr_queue *hrrq;
1da177e4
LT
5014
5015 ENTER;
5016 ioa_cfg = (struct ipr_ioa_cfg *)scsi_cmd->device->host->hostdata;
5017 res = scsi_cmd->device->hostdata;
5018
8fa728a2
JG
5019 /* If we are currently going through reset/reload, return failed.
5020 * This will force the mid-layer to call ipr_eh_host_reset,
5021 * which will then go to sleep and wait for the reset to complete
5022 */
5023 if (ioa_cfg->in_reset_reload || ioa_cfg->ioa_is_dead)
5024 return FAILED;
a92fa25c
KSS
5025 if (!res)
5026 return FAILED;
5027
5028 /*
5029 * If we are aborting a timed out op, chances are that the timeout was caused
5030 * by a still not detected EEH error. In such cases, reading a register will
5031 * trigger the EEH recovery infrastructure.
5032 */
5033 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
5034
5035 if (!ipr_is_gscsi(res))
1da177e4
LT
5036 return FAILED;
5037
05a6538a 5038 for_each_hrrq(hrrq, ioa_cfg) {
5039 list_for_each_entry(ipr_cmd, &hrrq->hrrq_pending_q, queue) {
5040 if (ipr_cmd->scsi_cmd == scsi_cmd) {
5041 ipr_cmd->done = ipr_scsi_eh_done;
5042 op_found = 1;
5043 break;
5044 }
5045 }
5046 }
5047
5048 if (!op_found)
5049 return SUCCESS;
5050
5051 ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
3e7ebdfa 5052 ipr_cmd->ioarcb.res_handle = res->res_handle;
5053 cmd_pkt = &ipr_cmd->ioarcb.cmd_pkt;
5054 cmd_pkt->request_type = IPR_RQTYPE_IOACMD;
5055 cmd_pkt->cdb[0] = IPR_CANCEL_ALL_REQUESTS;
5056 ipr_cmd->u.sdev = scsi_cmd->device;
5057
5058 scmd_printk(KERN_ERR, scsi_cmd, "Aborting command: %02X\n",
5059 scsi_cmd->cmnd[0]);
1da177e4 5060 ipr_send_blocking_cmd(ipr_cmd, ipr_abort_timeout, IPR_CANCEL_ALL_TIMEOUT);
96d21f00 5061 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
5062
5063 /*
5064 * If the abort task timed out and we sent a bus reset, we will get
5065 * one of the following responses to the abort
5066 */
5067 if (ioasc == IPR_IOASC_BUS_WAS_RESET || ioasc == IPR_IOASC_SYNC_REQUIRED) {
5068 ioasc = 0;
5069 ipr_trace;
5070 }
5071
05a6538a 5072 list_add_tail(&ipr_cmd->queue, &hrrq->hrrq_free_q);
5073 if (!ipr_is_naca_model(res))
5074 res->needs_sync_complete = 1;
5075
5076 LEAVE;
203fa3fe 5077 return IPR_IOASC_SENSE_KEY(ioasc) ? FAILED : SUCCESS;
5078}
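/*
 * Illustrative note, not driver code: the readl() of the sense interrupt
 * register in ipr_cancel_op() is a deliberate probe. On pSeries, a PCI
 * slot frozen by a still-undetected EEH error returns all-ones on MMIO
 * reads, and performing the read is what lets the EEH core notice the
 * freeze and begin recovery:
 *
 *	int_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
 *	// a frozen slot yields int_reg == 0xFFFFFFFF; the EEH
 *	// infrastructure detects this and schedules adapter recovery
 */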
5079
5080/**
5081 * ipr_eh_abort - Abort a single op
5082 * @scsi_cmd: scsi command struct
5083 *
5084 * Return value:
5085 * SUCCESS / FAILED
5086 **/
203fa3fe 5087static int ipr_eh_abort(struct scsi_cmnd *scsi_cmd)
1da177e4 5088{
5089 unsigned long flags;
5090 int rc;
5091
5092 ENTER;
1da177e4 5093
5094 spin_lock_irqsave(scsi_cmd->device->host->host_lock, flags);
5095 rc = ipr_cancel_op(scsi_cmd);
5096 spin_unlock_irqrestore(scsi_cmd->device->host->host_lock, flags);
5097
5098 LEAVE;
8fa728a2 5099 return rc;
5100}
5101
5102/**
5103 * ipr_handle_other_interrupt - Handle "other" interrupts
5104 * @ioa_cfg: ioa config struct
634651fa 5105 * @int_reg: interrupt register
5106 *
5107 * Return value:
5108 * IRQ_NONE / IRQ_HANDLED
5109 **/
634651fa 5110static irqreturn_t ipr_handle_other_interrupt(struct ipr_ioa_cfg *ioa_cfg,
630ad831 5111 u32 int_reg)
5112{
5113 irqreturn_t rc = IRQ_HANDLED;
7dacb64f 5114 u32 int_mask_reg;
5115 int_mask_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg32);
5116 int_reg &= ~int_mask_reg;
5117
5118 /* If an interrupt on the adapter did not occur, ignore it,
5119 * or, in the case of SIS 64, check for a stage change interrupt.
5120 */
5121 if ((int_reg & IPR_PCII_OPER_INTERRUPTS) == 0) {
5122 if (ioa_cfg->sis64) {
5123 int_mask_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
5124 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg) & ~int_mask_reg;
5125 if (int_reg & IPR_PCII_IPL_STAGE_CHANGE) {
5126
5127 /* clear stage change */
5128 writel(IPR_PCII_IPL_STAGE_CHANGE, ioa_cfg->regs.clr_interrupt_reg);
5129 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg) & ~int_mask_reg;
5130 list_del(&ioa_cfg->reset_cmd->queue);
5131 del_timer(&ioa_cfg->reset_cmd->timer);
5132 ipr_reset_ioa_job(ioa_cfg->reset_cmd);
5133 return IRQ_HANDLED;
5134 }
5135 }
5136
5137 return IRQ_NONE;
5138 }
5139
5140 if (int_reg & IPR_PCII_IOA_TRANS_TO_OPER) {
5141 /* Mask the interrupt */
5142 writel(IPR_PCII_IOA_TRANS_TO_OPER, ioa_cfg->regs.set_interrupt_mask_reg);
5143
5144 /* Clear the interrupt */
5145 writel(IPR_PCII_IOA_TRANS_TO_OPER, ioa_cfg->regs.clr_interrupt_reg);
5146 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
5147
5148 list_del(&ioa_cfg->reset_cmd->queue);
5149 del_timer(&ioa_cfg->reset_cmd->timer);
5150 ipr_reset_ioa_job(ioa_cfg->reset_cmd);
7dacb64f 5151 } else if ((int_reg & IPR_PCII_HRRQ_UPDATED) == int_reg) {
5152 if (ioa_cfg->clear_isr) {
5153 if (ipr_debug && printk_ratelimit())
5154 dev_err(&ioa_cfg->pdev->dev,
5155 "Spurious interrupt detected. 0x%08X\n", int_reg);
5156 writel(IPR_PCII_HRRQ_UPDATED, ioa_cfg->regs.clr_interrupt_reg32);
5157 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg32);
5158 return IRQ_NONE;
5159 }
5160 } else {
5161 if (int_reg & IPR_PCII_IOA_UNIT_CHECKED)
5162 ioa_cfg->ioa_unit_checked = 1;
05a6538a 5163 else if (int_reg & IPR_PCII_NO_HOST_RRQ)
5164 dev_err(&ioa_cfg->pdev->dev,
5165 "No Host RRQ. 0x%08X\n", int_reg);
5166 else
5167 dev_err(&ioa_cfg->pdev->dev,
5168 "Permanent IOA failure. 0x%08X\n", int_reg);
5169
5170 if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
5171 ioa_cfg->sdt_state = GET_DUMP;
5172
5173 ipr_mask_and_clear_interrupts(ioa_cfg, ~0);
5174 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
5175 }
5176 return rc;
5177}
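/*
 * Summary of the classification above (annotation, not driver code):
 *
 *	no operational bits set -> IRQ_NONE, unless SIS64 reports an IPL
 *				   stage change, which resumes the queued
 *				   reset job
 *	IOA_TRANS_TO_OPER	-> adapter became operational: mask and
 *				   clear the bit, resume the reset job
 *	HRRQ_UPDATED alone	-> spurious on clear_isr adapters: clear
 *				   it and return IRQ_NONE
 *	anything else		-> unit check, missing host RRQ, or a
 *				   permanent IOA failure: mask all
 *				   interrupts and initiate a reset
 */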
5178
5179/**
5180 * ipr_isr_eh - Interrupt service routine error handler
5181 * @ioa_cfg: ioa config struct
5182 * @msg: message to log
 * @number: value logged with the message
5183 *
5184 * Return value:
5185 * none
5186 **/
05a6538a 5187static void ipr_isr_eh(struct ipr_ioa_cfg *ioa_cfg, char *msg, u16 number)
5188{
5189 ioa_cfg->errors_logged++;
05a6538a 5190 dev_err(&ioa_cfg->pdev->dev, "%s %d\n", msg, number);
5191
5192 if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
5193 ioa_cfg->sdt_state = GET_DUMP;
5194
5195 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
5196}
5197
05a6538a 5198static int __ipr_process_hrrq(struct ipr_hrr_queue *hrr_queue,
5199 struct list_head *doneq)
5200{
5201 u32 ioasc;
5202 u16 cmd_index;
5203 struct ipr_cmnd *ipr_cmd;
5204 struct ipr_ioa_cfg *ioa_cfg = hrr_queue->ioa_cfg;
5205 int num_hrrq = 0;
5206
5207 /* If interrupts are disabled, ignore the interrupt */
5208 if (!ioa_cfg->allow_interrupts)
5209 return 0;
5210
5211 while ((be32_to_cpu(*hrr_queue->hrrq_curr) & IPR_HRRQ_TOGGLE_BIT) ==
5212 hrr_queue->toggle_bit) {
5213
5214 cmd_index = (be32_to_cpu(*hrr_queue->hrrq_curr) &
5215 IPR_HRRQ_REQ_RESP_HANDLE_MASK) >>
5216 IPR_HRRQ_REQ_RESP_HANDLE_SHIFT;
5217
5218 if (unlikely(cmd_index > hrr_queue->max_cmd_id ||
5219 cmd_index < hrr_queue->min_cmd_id)) {
5220 ipr_isr_eh(ioa_cfg,
5221 "Invalid response handle from IOA: ",
5222 cmd_index);
5223 break;
5224 }
5225
5226 ipr_cmd = ioa_cfg->ipr_cmnd_list[cmd_index];
5227 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
5228
5229 ipr_trc_hook(ipr_cmd, IPR_TRACE_FINISH, ioasc);
5230
5231 list_move_tail(&ipr_cmd->queue, doneq);
5232
5233 if (hrr_queue->hrrq_curr < hrr_queue->hrrq_end) {
5234 hrr_queue->hrrq_curr++;
5235 } else {
5236 hrr_queue->hrrq_curr = hrr_queue->hrrq_start;
5237 hrr_queue->toggle_bit ^= 1u;
5238 }
5239 num_hrrq++;
5240 }
5241 return num_hrrq;
5242}
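/*
 * Illustrative sketch, not driver code: each host RRQ entry is a
 * big-endian 32-bit word combining a toggle bit with the index of the
 * completed command. Assuming the mask/shift definitions from ipr.h,
 * consuming one entry amounts to:
 *
 *	u32 entry = be32_to_cpu(*hrr_queue->hrrq_curr);
 *
 *	if ((entry & IPR_HRRQ_TOGGLE_BIT) != hrr_queue->toggle_bit)
 *		return;		// entry not yet written by the IOA
 *
 *	cmd_index = (entry & IPR_HRRQ_REQ_RESP_HANDLE_MASK) >>
 *			IPR_HRRQ_REQ_RESP_HANDLE_SHIFT;
 *
 * The toggle bit flips each time the consumer wraps from hrrq_end back
 * to hrrq_start, which is how new entries are distinguished from stale
 * ones without a hardware-maintained tail pointer.
 */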
5243/**
5244 * ipr_isr - Interrupt service routine
5245 * @irq: irq number
5246 * @devp: pointer to the ipr_hrr_queue being serviced
5247 *
5248 * Return value:
5249 * IRQ_NONE / IRQ_HANDLED
5250 **/
7d12e780 5251static irqreturn_t ipr_isr(int irq, void *devp)
1da177e4 5252{
05a6538a 5253 struct ipr_hrr_queue *hrrq = (struct ipr_hrr_queue *)devp;
5254 struct ipr_ioa_cfg *ioa_cfg = hrrq->ioa_cfg;
1da177e4 5255 unsigned long lock_flags = 0;
7dacb64f 5256 u32 int_reg = 0;
5257 u32 ioasc;
5258 u16 cmd_index;
3feeb89d 5259 int num_hrrq = 0;
7dacb64f 5260 int irq_none = 0;
172cd6e1 5261 struct ipr_cmnd *ipr_cmd, *temp;
1da177e4 5262 irqreturn_t rc = IRQ_NONE;
172cd6e1 5263 LIST_HEAD(doneq);
5264
5265 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
5266 /* If interrupts are disabled, ignore the interrupt */
5267 if (!ioa_cfg->allow_interrupts) {
5268 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
5269 return IRQ_NONE;
5270 }
5271
5272 while (1) {
5273 ipr_cmd = NULL;
5274
05a6538a 5275 while ((be32_to_cpu(*hrrq->hrrq_curr) & IPR_HRRQ_TOGGLE_BIT) ==
5276 hrrq->toggle_bit) {
1da177e4 5277
05a6538a 5278 cmd_index = (be32_to_cpu(*hrrq->hrrq_curr) &
5279 IPR_HRRQ_REQ_RESP_HANDLE_MASK) >> IPR_HRRQ_REQ_RESP_HANDLE_SHIFT;
5280
05a6538a 5281 if (unlikely(cmd_index > hrrq->max_cmd_id ||
5282 cmd_index < hrrq->min_cmd_id)) {
5283 ipr_isr_eh(ioa_cfg,
5284 "Invalid response handle from IOA: ",
5285 cmd_index);
5286 rc = IRQ_HANDLED;
5287 goto unlock_out;
5288 }
5289
5290 ipr_cmd = ioa_cfg->ipr_cmnd_list[cmd_index];
96d21f00 5291 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
5292
5293 ipr_trc_hook(ipr_cmd, IPR_TRACE_FINISH, ioasc);
5294
172cd6e1 5295 list_move_tail(&ipr_cmd->queue, &doneq);
5296
5297 rc = IRQ_HANDLED;
5298
05a6538a 5299 if (hrrq->hrrq_curr < hrrq->hrrq_end) {
5300 hrrq->hrrq_curr++;
1da177e4 5301 } else {
05a6538a 5302 hrrq->hrrq_curr = hrrq->hrrq_start;
5303 hrrq->toggle_bit ^= 1u;
5304 }
5305 }
5306
5307 if (ipr_cmd && !ioa_cfg->clear_isr)
5308 break;
5309
5310 if (ipr_cmd != NULL) {
5311 /* Clear the PCI interrupt */
a5442ba4 5312 num_hrrq = 0;
3feeb89d 5313 do {
214777ba 5314 writel(IPR_PCII_HRRQ_UPDATED, ioa_cfg->regs.clr_interrupt_reg32);
7dacb64f 5315 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg32);
5316 } while (int_reg & IPR_PCII_HRRQ_UPDATED &&
5317 num_hrrq++ < IPR_MAX_HRRQ_RETRIES);
5318
5319 } else if (rc == IRQ_NONE && irq_none == 0) {
5320 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg32);
5321 irq_none++;
5322 } else if (num_hrrq == IPR_MAX_HRRQ_RETRIES &&
5323 int_reg & IPR_PCII_HRRQ_UPDATED) {
05a6538a 5324 ipr_isr_eh(ioa_cfg, "Error clearing HRRQ: ", num_hrrq);
5325 rc = IRQ_HANDLED;
5326 goto unlock_out;
5327 } else
5328 break;
5329 }
5330
5331 if (unlikely(rc == IRQ_NONE))
634651fa 5332 rc = ipr_handle_other_interrupt(ioa_cfg, int_reg);
1da177e4 5333
172cd6e1 5334unlock_out:
1da177e4 5335 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
5336 list_for_each_entry_safe(ipr_cmd, temp, &doneq, queue) {
5337 list_del(&ipr_cmd->queue);
5338 del_timer(&ipr_cmd->timer);
5339 ipr_cmd->fast_done(ipr_cmd);
5340 }
05a6538a 5341 return rc;
5342}
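/*
 * Design note: ipr_isr() above and ipr_isr_mhrrq() below collect
 * finished commands on a local doneq and only run fast_done() after
 * dropping host_lock, keeping the completion callbacks (which may
 * re-enter the driver or the midlayer) outside the critical section.
 */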
5343
5344/**
5345 * ipr_isr_mhrrq - Interrupt service routine for additional HRR queues
5346 * @irq: irq number
5347 * @devp: pointer to the ipr_hrr_queue being serviced
5348 *
5349 * Return value:
5350 * IRQ_NONE / IRQ_HANDLED
5351 **/
5352static irqreturn_t ipr_isr_mhrrq(int irq, void *devp)
5353{
5354 struct ipr_hrr_queue *hrrq = (struct ipr_hrr_queue *)devp;
5355 struct ipr_ioa_cfg *ioa_cfg = hrrq->ioa_cfg;
5356 unsigned long lock_flags = 0;
5357 struct ipr_cmnd *ipr_cmd, *temp;
5358 irqreturn_t rc = IRQ_NONE;
5359 LIST_HEAD(doneq);
172cd6e1 5360
05a6538a 5361 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
5362
5363 /* If interrupts are disabled, ignore the interrupt */
5364 if (!ioa_cfg->allow_interrupts) {
5365 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
5366 return IRQ_NONE;
5367 }
5368
5369 if ((be32_to_cpu(*hrrq->hrrq_curr) & IPR_HRRQ_TOGGLE_BIT) ==
5370 hrrq->toggle_bit)
5371
5372 if (__ipr_process_hrrq(hrrq, &doneq))
5373 rc = IRQ_HANDLED;
5374
5375 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
5376
5377 list_for_each_entry_safe(ipr_cmd, temp, &doneq, queue) {
5378 list_del(&ipr_cmd->queue);
5379 del_timer(&ipr_cmd->timer);
5380 ipr_cmd->fast_done(ipr_cmd);
5381 }
5382 return rc;
5383}
5384
5385/**
5386 * ipr_build_ioadl64 - Build a scatter/gather list and map the buffer
5387 * @ioa_cfg: ioa config struct
5388 * @ipr_cmd: ipr command struct
5389 *
5390 * Return value:
5391 * 0 on success / -1 on failure
5392 **/
5393static int ipr_build_ioadl64(struct ipr_ioa_cfg *ioa_cfg,
5394 struct ipr_cmnd *ipr_cmd)
5395{
5396 int i, nseg;
5397 struct scatterlist *sg;
5398 u32 length;
5399 u32 ioadl_flags = 0;
5400 struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
5401 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
5402 struct ipr_ioadl64_desc *ioadl64 = ipr_cmd->i.ioadl64;
5403
5404 length = scsi_bufflen(scsi_cmd);
5405 if (!length)
5406 return 0;
5407
5408 nseg = scsi_dma_map(scsi_cmd);
5409 if (nseg < 0) {
5410 if (printk_ratelimit())
5411 dev_err(&ioa_cfg->pdev->dev, "pci_map_sg failed!\n");
5412 return -1;
5413 }
5414
5415 ipr_cmd->dma_use_sg = nseg;
5416
438b0331 5417 ioarcb->data_transfer_length = cpu_to_be32(length);
5418 ioarcb->ioadl_len =
5419 cpu_to_be32(sizeof(struct ipr_ioadl64_desc) * ipr_cmd->dma_use_sg);
438b0331 5420
5421 if (scsi_cmd->sc_data_direction == DMA_TO_DEVICE) {
5422 ioadl_flags = IPR_IOADL_FLAGS_WRITE;
5423 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
5424 } else if (scsi_cmd->sc_data_direction == DMA_FROM_DEVICE)
5425 ioadl_flags = IPR_IOADL_FLAGS_READ;
5426
5427 scsi_for_each_sg(scsi_cmd, sg, ipr_cmd->dma_use_sg, i) {
5428 ioadl64[i].flags = cpu_to_be32(ioadl_flags);
5429 ioadl64[i].data_len = cpu_to_be32(sg_dma_len(sg));
5430 ioadl64[i].address = cpu_to_be64(sg_dma_address(sg));
5431 }
5432
5433 ioadl64[i-1].flags |= cpu_to_be32(IPR_IOADL_FLAGS_LAST);
5434 return 0;
5435}
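/*
 * Illustrative sketch, not driver code: on SIS64 adapters every
 * scatter/gather element maps to one ipr_ioadl64_desc. A single-element
 * read would be described as:
 *
 *	ioadl64[0].flags    = cpu_to_be32(IPR_IOADL_FLAGS_READ |
 *					  IPR_IOADL_FLAGS_LAST);
 *	ioadl64[0].data_len = cpu_to_be32(sg_dma_len(sg));
 *	ioadl64[0].address  = cpu_to_be64(sg_dma_address(sg));
 *
 * IPR_IOADL_FLAGS_LAST belongs on the final descriptor only, which is
 * why the function patches ioadl64[i-1] after the sg walk completes.
 */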
5436
5437/**
5438 * ipr_build_ioadl - Build a scatter/gather list and map the buffer
5439 * @ioa_cfg: ioa config struct
5440 * @ipr_cmd: ipr command struct
5441 *
5442 * Return value:
5443 * 0 on success / -1 on failure
5444 **/
5445static int ipr_build_ioadl(struct ipr_ioa_cfg *ioa_cfg,
5446 struct ipr_cmnd *ipr_cmd)
5447{
5448 int i, nseg;
5449 struct scatterlist *sg;
5450 u32 length;
5451 u32 ioadl_flags = 0;
5452 struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
5453 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
a32c055f 5454 struct ipr_ioadl_desc *ioadl = ipr_cmd->i.ioadl;
1da177e4 5455
5456 length = scsi_bufflen(scsi_cmd);
5457 if (!length)
5458 return 0;
5459
5460 nseg = scsi_dma_map(scsi_cmd);
5461 if (nseg < 0) {
5462 dev_err(&ioa_cfg->pdev->dev, "pci_map_sg failed!\n");
5463 return -1;
5464 }
51b1c7e1 5465
5466 ipr_cmd->dma_use_sg = nseg;
5467
5468 if (scsi_cmd->sc_data_direction == DMA_TO_DEVICE) {
5469 ioadl_flags = IPR_IOADL_FLAGS_WRITE;
5470 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
5471 ioarcb->data_transfer_length = cpu_to_be32(length);
5472 ioarcb->ioadl_len =
5473 cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);
5474 } else if (scsi_cmd->sc_data_direction == DMA_FROM_DEVICE) {
5475 ioadl_flags = IPR_IOADL_FLAGS_READ;
5476 ioarcb->read_data_transfer_length = cpu_to_be32(length);
5477 ioarcb->read_ioadl_len =
5478 cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);
5479 }
1da177e4 5480
5481 if (ipr_cmd->dma_use_sg <= ARRAY_SIZE(ioarcb->u.add_data.u.ioadl)) {
5482 ioadl = ioarcb->u.add_data.u.ioadl;
5483 ioarcb->write_ioadl_addr = cpu_to_be32((ipr_cmd->dma_addr) +
5484 offsetof(struct ipr_ioarcb, u.add_data));
5485 ioarcb->read_ioadl_addr = ioarcb->write_ioadl_addr;
5486 }
1da177e4 5487
5488 scsi_for_each_sg(scsi_cmd, sg, ipr_cmd->dma_use_sg, i) {
5489 ioadl[i].flags_and_data_len =
5490 cpu_to_be32(ioadl_flags | sg_dma_len(sg));
5491 ioadl[i].address = cpu_to_be32(sg_dma_address(sg));
5492 }
5493
5494 ioadl[i-1].flags_and_data_len |= cpu_to_be32(IPR_IOADL_FLAGS_LAST);
5495 return 0;
5496}
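/*
 * Design note: when the scatter/gather list fits in the IOARCB's
 * additional-data area, the 32-bit path above re-points ioadl at
 * ioarcb->u.add_data.u.ioadl and sets the IOADL address to an offset
 * within the IOARCB itself, so the adapter can pick the list up
 * together with the request instead of through a separate DMA fetch.
 */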
5497
5498/**
5499 * ipr_get_task_attributes - Translate SPI Q-Tag to task attributes
5500 * @scsi_cmd: scsi command struct
5501 *
5502 * Return value:
5503 * task attributes
5504 **/
5505static u8 ipr_get_task_attributes(struct scsi_cmnd *scsi_cmd)
5506{
5507 u8 tag[2];
5508 u8 rc = IPR_FLAGS_LO_UNTAGGED_TASK;
5509
5510 if (scsi_populate_tag_msg(scsi_cmd, tag)) {
5511 switch (tag[0]) {
5512 case MSG_SIMPLE_TAG:
5513 rc = IPR_FLAGS_LO_SIMPLE_TASK;
5514 break;
5515 case MSG_HEAD_TAG:
5516 rc = IPR_FLAGS_LO_HEAD_OF_Q_TASK;
5517 break;
5518 case MSG_ORDERED_TAG:
5519 rc = IPR_FLAGS_LO_ORDERED_TASK;
5520 break;
5521 };
5522 }
5523
5524 return rc;
5525}
5526
5527/**
5528 * ipr_erp_done - Process completion of ERP for a device
5529 * @ipr_cmd: ipr command struct
5530 *
5531 * This function copies the sense buffer into the scsi_cmd
5532 * struct and calls the scsi_done function.
5533 *
5534 * Return value:
5535 * nothing
5536 **/
5537static void ipr_erp_done(struct ipr_cmnd *ipr_cmd)
5538{
5539 struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
5540 struct ipr_resource_entry *res = scsi_cmd->device->hostdata;
96d21f00 5541 u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
5542
5543 if (IPR_IOASC_SENSE_KEY(ioasc) > 0) {
5544 scsi_cmd->result |= (DID_ERROR << 16);
5545 scmd_printk(KERN_ERR, scsi_cmd,
5546 "Request Sense failed with IOASC: 0x%08X\n", ioasc);
5547 } else {
5548 memcpy(scsi_cmd->sense_buffer, ipr_cmd->sense_buffer,
5549 SCSI_SENSE_BUFFERSIZE);
5550 }
5551
5552 if (res) {
5553 if (!ipr_is_naca_model(res))
5554 res->needs_sync_complete = 1;
5555 res->in_erp = 0;
5556 }
63015bc9 5557 scsi_dma_unmap(ipr_cmd->scsi_cmd);
05a6538a 5558 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
5559 scsi_cmd->scsi_done(scsi_cmd);
5560}
5561
5562/**
5563 * ipr_reinit_ipr_cmnd_for_erp - Re-initialize a cmnd block to be used for ERP
5564 * @ipr_cmd: ipr command struct
5565 *
5566 * Return value:
5567 * none
5568 **/
5569static void ipr_reinit_ipr_cmnd_for_erp(struct ipr_cmnd *ipr_cmd)
5570{
51b1c7e1 5571 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
96d21f00 5572 struct ipr_ioasa *ioasa = &ipr_cmd->s.ioasa;
a32c055f 5573 dma_addr_t dma_addr = ipr_cmd->dma_addr;
5574
5575 memset(&ioarcb->cmd_pkt, 0, sizeof(struct ipr_cmd_pkt));
a32c055f 5576 ioarcb->data_transfer_length = 0;
1da177e4 5577 ioarcb->read_data_transfer_length = 0;
a32c055f 5578 ioarcb->ioadl_len = 0;
1da177e4 5579 ioarcb->read_ioadl_len = 0;
5580 ioasa->hdr.ioasc = 0;
5581 ioasa->hdr.residual_data_len = 0;
5582
5583 if (ipr_cmd->ioa_cfg->sis64)
5584 ioarcb->u.sis64_addr_data.data_ioadl_addr =
5585 cpu_to_be64(dma_addr + offsetof(struct ipr_cmnd, i.ioadl64));
5586 else {
5587 ioarcb->write_ioadl_addr =
5588 cpu_to_be32(dma_addr + offsetof(struct ipr_cmnd, i.ioadl));
5589 ioarcb->read_ioadl_addr = ioarcb->write_ioadl_addr;
5590 }
5591}
5592
5593/**
5594 * ipr_erp_request_sense - Send request sense to a device
5595 * @ipr_cmd: ipr command struct
5596 *
5597 * This function sends a request sense to a device as a result
5598 * of a check condition.
5599 *
5600 * Return value:
5601 * nothing
5602 **/
5603static void ipr_erp_request_sense(struct ipr_cmnd *ipr_cmd)
5604{
5605 struct ipr_cmd_pkt *cmd_pkt = &ipr_cmd->ioarcb.cmd_pkt;
96d21f00 5606 u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
5607
5608 if (IPR_IOASC_SENSE_KEY(ioasc) > 0) {
5609 ipr_erp_done(ipr_cmd);
5610 return;
5611 }
5612
5613 ipr_reinit_ipr_cmnd_for_erp(ipr_cmd);
5614
5615 cmd_pkt->request_type = IPR_RQTYPE_SCSICDB;
5616 cmd_pkt->cdb[0] = REQUEST_SENSE;
5617 cmd_pkt->cdb[4] = SCSI_SENSE_BUFFERSIZE;
5618 cmd_pkt->flags_hi |= IPR_FLAGS_HI_SYNC_OVERRIDE;
5619 cmd_pkt->flags_hi |= IPR_FLAGS_HI_NO_ULEN_CHK;
5620 cmd_pkt->timeout = cpu_to_be16(IPR_REQUEST_SENSE_TIMEOUT / HZ);
5621
5622 ipr_init_ioadl(ipr_cmd, ipr_cmd->sense_buffer_dma,
5623 SCSI_SENSE_BUFFERSIZE, IPR_IOADL_FLAGS_READ_LAST);
5624
5625 ipr_do_req(ipr_cmd, ipr_erp_done, ipr_timeout,
5626 IPR_REQUEST_SENSE_TIMEOUT * 2);
5627}
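/*
 * Illustrative sketch, not driver code: the CDB built above is the
 * standard 6-byte SCSI REQUEST SENSE command, with the allocation
 * length in byte 4:
 *
 *	cdb[0] = 0x03;			// REQUEST_SENSE opcode
 *	cdb[1] = cdb[2] = cdb[3] = 0;
 *	cdb[4] = SCSI_SENSE_BUFFERSIZE;	// allocation length
 *	cdb[5] = 0;			// control byte
 *
 * The sense data lands in ipr_cmd->sense_buffer through the
 * single-entry IOADL initialized by ipr_init_ioadl().
 */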
5628
5629/**
5630 * ipr_erp_cancel_all - Send cancel all to a device
5631 * @ipr_cmd: ipr command struct
5632 *
5633 * This function sends a cancel all to a device to clear the
5634 * queue. If we are running TCQ on the device, QERR is set to 1,
5635 * which means all outstanding ops have been dropped on the floor.
5636 * Cancel all will return them to us.
5637 *
5638 * Return value:
5639 * nothing
5640 **/
5641static void ipr_erp_cancel_all(struct ipr_cmnd *ipr_cmd)
5642{
5643 struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
5644 struct ipr_resource_entry *res = scsi_cmd->device->hostdata;
5645 struct ipr_cmd_pkt *cmd_pkt;
5646
5647 res->in_erp = 1;
5648
5649 ipr_reinit_ipr_cmnd_for_erp(ipr_cmd);
5650
5651 if (!scsi_get_tag_type(scsi_cmd->device)) {
5652 ipr_erp_request_sense(ipr_cmd);
5653 return;
5654 }
5655
5656 cmd_pkt = &ipr_cmd->ioarcb.cmd_pkt;
5657 cmd_pkt->request_type = IPR_RQTYPE_IOACMD;
5658 cmd_pkt->cdb[0] = IPR_CANCEL_ALL_REQUESTS;
5659
5660 ipr_do_req(ipr_cmd, ipr_erp_request_sense, ipr_timeout,
5661 IPR_CANCEL_ALL_TIMEOUT);
5662}
5663
5664/**
5665 * ipr_dump_ioasa - Dump contents of IOASA
5666 * @ioa_cfg: ioa config struct
5667 * @ipr_cmd: ipr command struct
fe964d0a 5668 * @res: resource entry struct
5669 *
5670 * This function is invoked by the interrupt handler when ops
5671 * fail. It will log the IOASA if appropriate. Only called
5672 * for GPDD ops.
5673 *
5674 * Return value:
5675 * none
5676 **/
5677static void ipr_dump_ioasa(struct ipr_ioa_cfg *ioa_cfg,
fe964d0a 5678 struct ipr_cmnd *ipr_cmd, struct ipr_resource_entry *res)
5679{
5680 int i;
5681 u16 data_len;
b0692dd4 5682 u32 ioasc, fd_ioasc;
96d21f00 5683 struct ipr_ioasa *ioasa = &ipr_cmd->s.ioasa;
5684 __be32 *ioasa_data = (__be32 *)ioasa;
5685 int error_index;
5686
5687 ioasc = be32_to_cpu(ioasa->hdr.ioasc) & IPR_IOASC_IOASC_MASK;
5688 fd_ioasc = be32_to_cpu(ioasa->hdr.fd_ioasc) & IPR_IOASC_IOASC_MASK;
5689
5690 if (0 == ioasc)
5691 return;
5692
5693 if (ioa_cfg->log_level < IPR_DEFAULT_LOG_LEVEL)
5694 return;
5695
5696 if (ioasc == IPR_IOASC_BUS_WAS_RESET && fd_ioasc)
5697 error_index = ipr_get_error(fd_ioasc);
5698 else
5699 error_index = ipr_get_error(ioasc);
5700
5701 if (ioa_cfg->log_level < IPR_MAX_LOG_LEVEL) {
5702 /* Don't log an error if the IOA already logged one */
96d21f00 5703 if (ioasa->hdr.ilid != 0)
5704 return;
5705
5706 if (!ipr_is_gscsi(res))
5707 return;
5708
5709 if (ipr_error_table[error_index].log_ioasa == 0)
5710 return;
5711 }
5712
fe964d0a 5713 ipr_res_err(ioa_cfg, res, "%s\n", ipr_error_table[error_index].error);
1da177e4 5714
5715 data_len = be16_to_cpu(ioasa->hdr.ret_stat_len);
5716 if (ioa_cfg->sis64 && sizeof(struct ipr_ioasa64) < data_len)
5717 data_len = sizeof(struct ipr_ioasa64);
5718 else if (!ioa_cfg->sis64 && sizeof(struct ipr_ioasa) < data_len)
1da177e4 5719 data_len = sizeof(struct ipr_ioasa);
5720
5721 ipr_err("IOASA Dump:\n");
5722
5723 for (i = 0; i < data_len / 4; i += 4) {
5724 ipr_err("%08X: %08X %08X %08X %08X\n", i*4,
5725 be32_to_cpu(ioasa_data[i]),
5726 be32_to_cpu(ioasa_data[i+1]),
5727 be32_to_cpu(ioasa_data[i+2]),
5728 be32_to_cpu(ioasa_data[i+3]));
5729 }
5730}
5731
5732/**
5733 * ipr_gen_sense - Generate SCSI sense data from an IOASA
5734 * @ipr_cmd: ipr command struct
5736 *
5737 * Return value:
5738 * none
5739 **/
5740static void ipr_gen_sense(struct ipr_cmnd *ipr_cmd)
5741{
5742 u32 failing_lba;
5743 u8 *sense_buf = ipr_cmd->scsi_cmd->sense_buffer;
5744 struct ipr_resource_entry *res = ipr_cmd->scsi_cmd->device->hostdata;
5745 struct ipr_ioasa *ioasa = &ipr_cmd->s.ioasa;
5746 u32 ioasc = be32_to_cpu(ioasa->hdr.ioasc);
5747
5748 memset(sense_buf, 0, SCSI_SENSE_BUFFERSIZE);
5749
5750 if (ioasc >= IPR_FIRST_DRIVER_IOASC)
5751 return;
5752
5753 ipr_cmd->scsi_cmd->result = SAM_STAT_CHECK_CONDITION;
5754
5755 if (ipr_is_vset_device(res) &&
5756 ioasc == IPR_IOASC_MED_DO_NOT_REALLOC &&
5757 ioasa->u.vset.failing_lba_hi != 0) {
5758 sense_buf[0] = 0x72;
5759 sense_buf[1] = IPR_IOASC_SENSE_KEY(ioasc);
5760 sense_buf[2] = IPR_IOASC_SENSE_CODE(ioasc);
5761 sense_buf[3] = IPR_IOASC_SENSE_QUAL(ioasc);
5762
5763 sense_buf[7] = 12;
5764 sense_buf[8] = 0;
5765 sense_buf[9] = 0x0A;
5766 sense_buf[10] = 0x80;
5767
5768 failing_lba = be32_to_cpu(ioasa->u.vset.failing_lba_hi);
5769
5770 sense_buf[12] = (failing_lba & 0xff000000) >> 24;
5771 sense_buf[13] = (failing_lba & 0x00ff0000) >> 16;
5772 sense_buf[14] = (failing_lba & 0x0000ff00) >> 8;
5773 sense_buf[15] = failing_lba & 0x000000ff;
5774
5775 failing_lba = be32_to_cpu(ioasa->u.vset.failing_lba_lo);
5776
5777 sense_buf[16] = (failing_lba & 0xff000000) >> 24;
5778 sense_buf[17] = (failing_lba & 0x00ff0000) >> 16;
5779 sense_buf[18] = (failing_lba & 0x0000ff00) >> 8;
5780 sense_buf[19] = failing_lba & 0x000000ff;
5781 } else {
5782 sense_buf[0] = 0x70;
5783 sense_buf[2] = IPR_IOASC_SENSE_KEY(ioasc);
5784 sense_buf[12] = IPR_IOASC_SENSE_CODE(ioasc);
5785 sense_buf[13] = IPR_IOASC_SENSE_QUAL(ioasc);
5786
5787 /* Illegal request */
5788 if ((IPR_IOASC_SENSE_KEY(ioasc) == 0x05) &&
96d21f00 5789 (be32_to_cpu(ioasa->hdr.ioasc_specific) & IPR_FIELD_POINTER_VALID)) {
5790 sense_buf[7] = 10; /* additional length */
5791
5792 /* IOARCB was in error */
5793 if (IPR_IOASC_SENSE_CODE(ioasc) == 0x24)
5794 sense_buf[15] = 0xC0;
5795 else /* Parameter data was invalid */
5796 sense_buf[15] = 0x80;
5797
5798 sense_buf[16] =
5799 ((IPR_FIELD_POINTER_MASK &
96d21f00 5800 be32_to_cpu(ioasa->hdr.ioasc_specific)) >> 8) & 0xff;
5801 sense_buf[17] =
5802 (IPR_FIELD_POINTER_MASK &
96d21f00 5803 be32_to_cpu(ioasa->hdr.ioasc_specific)) & 0xff;
5804 } else {
5805 if (ioasc == IPR_IOASC_MED_DO_NOT_REALLOC) {
5806 if (ipr_is_vset_device(res))
5807 failing_lba = be32_to_cpu(ioasa->u.vset.failing_lba_lo);
5808 else
5809 failing_lba = be32_to_cpu(ioasa->u.dasd.failing_lba);
5810
5811 sense_buf[0] |= 0x80; /* Or in the Valid bit */
5812 sense_buf[3] = (failing_lba & 0xff000000) >> 24;
5813 sense_buf[4] = (failing_lba & 0x00ff0000) >> 16;
5814 sense_buf[5] = (failing_lba & 0x0000ff00) >> 8;
5815 sense_buf[6] = failing_lba & 0x000000ff;
5816 }
5817
5818 sense_buf[7] = 6; /* additional length */
5819 }
5820 }
5821}
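/*
 * Illustrative layout, not driver code, assuming the standard SPC sense
 * formats the function above constructs:
 *
 *	fixed (0x70):      [0]=0x70 (|0x80 when the info field holds a
 *			   valid failing LBA) [2]=sense key [3..6]=LBA
 *			   [7]=addl len [12]=ASC [13]=ASCQ
 *	descriptor (0x72): [0]=0x72 [1]=sense key [2]=ASC [3]=ASCQ
 *			   [7]=12 (addl len) [8]=0x00 (information
 *			   descriptor) [9]=0x0A (desc len) [10]=0x80
 *			   (VALID) [12..19]=64-bit failing LBA
 *
 * The 0x72 form is used only for vset devices whose failing LBA does
 * not fit in 32 bits (failing_lba_hi != 0).
 */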
5822
5823/**
5824 * ipr_get_autosense - Copy autosense data to sense buffer
5825 * @ipr_cmd: ipr command struct
5826 *
5827 * This function copies the autosense buffer to the buffer
5828 * in the scsi_cmd, if there is autosense available.
5829 *
5830 * Return value:
5831 * 1 if autosense was available / 0 if not
5832 **/
5833static int ipr_get_autosense(struct ipr_cmnd *ipr_cmd)
5834{
5835 struct ipr_ioasa *ioasa = &ipr_cmd->s.ioasa;
5836 struct ipr_ioasa64 *ioasa64 = &ipr_cmd->s.ioasa64;
ee0a90fa 5837
96d21f00 5838 if ((be32_to_cpu(ioasa->hdr.ioasc_specific) & IPR_AUTOSENSE_VALID) == 0)
5839 return 0;
5840
5841 if (ipr_cmd->ioa_cfg->sis64)
5842 memcpy(ipr_cmd->scsi_cmd->sense_buffer, ioasa64->auto_sense.data,
5843 min_t(u16, be16_to_cpu(ioasa64->auto_sense.auto_sense_len),
5844 SCSI_SENSE_BUFFERSIZE));
5845 else
5846 memcpy(ipr_cmd->scsi_cmd->sense_buffer, ioasa->auto_sense.data,
5847 min_t(u16, be16_to_cpu(ioasa->auto_sense.auto_sense_len),
5848 SCSI_SENSE_BUFFERSIZE));
5849 return 1;
5850}
5851
5852/**
5853 * ipr_erp_start - Process an error response for a SCSI op
5854 * @ioa_cfg: ioa config struct
5855 * @ipr_cmd: ipr command struct
5856 *
5857 * This function determines whether or not to initiate ERP
5858 * on the affected device.
5859 *
5860 * Return value:
5861 * nothing
5862 **/
5863static void ipr_erp_start(struct ipr_ioa_cfg *ioa_cfg,
5864 struct ipr_cmnd *ipr_cmd)
5865{
5866 struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
5867 struct ipr_resource_entry *res = scsi_cmd->device->hostdata;
96d21f00 5868 u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
8a048994 5869 u32 masked_ioasc = ioasc & IPR_IOASC_IOASC_MASK;
5870
5871 if (!res) {
5872 ipr_scsi_eh_done(ipr_cmd);
5873 return;
5874 }
5875
8a048994 5876 if (!ipr_is_gscsi(res) && masked_ioasc != IPR_IOASC_HW_DEV_BUS_STATUS)
5877 ipr_gen_sense(ipr_cmd);
5878
5879 ipr_dump_ioasa(ioa_cfg, ipr_cmd, res);
5880
8a048994 5881 switch (masked_ioasc) {
1da177e4 5882 case IPR_IOASC_ABORTED_CMD_TERM_BY_HOST:
5883 if (ipr_is_naca_model(res))
5884 scsi_cmd->result |= (DID_ABORT << 16);
5885 else
5886 scsi_cmd->result |= (DID_IMM_RETRY << 16);
5887 break;
5888 case IPR_IOASC_IR_RESOURCE_HANDLE:
b0df54bb 5889 case IPR_IOASC_IR_NO_CMDS_TO_2ND_IOA:
5890 scsi_cmd->result |= (DID_NO_CONNECT << 16);
5891 break;
5892 case IPR_IOASC_HW_SEL_TIMEOUT:
5893 scsi_cmd->result |= (DID_NO_CONNECT << 16);
5894 if (!ipr_is_naca_model(res))
5895 res->needs_sync_complete = 1;
5896 break;
5897 case IPR_IOASC_SYNC_REQUIRED:
5898 if (!res->in_erp)
5899 res->needs_sync_complete = 1;
5900 scsi_cmd->result |= (DID_IMM_RETRY << 16);
5901 break;
5902 case IPR_IOASC_MED_DO_NOT_REALLOC: /* prevent retries */
b0df54bb 5903 case IPR_IOASA_IR_DUAL_IOA_DISABLED:
5904 scsi_cmd->result |= (DID_PASSTHROUGH << 16);
5905 break;
5906 case IPR_IOASC_BUS_WAS_RESET:
5907 case IPR_IOASC_BUS_WAS_RESET_BY_OTHER:
5908 /*
5909 * Report the bus reset and ask for a retry. The device
5910 * will give CC/UA the next command.
5911 */
5912 if (!res->resetting_device)
5913 scsi_report_bus_reset(ioa_cfg->host, scsi_cmd->device->channel);
5914 scsi_cmd->result |= (DID_ERROR << 16);
5915 if (!ipr_is_naca_model(res))
5916 res->needs_sync_complete = 1;
5917 break;
5918 case IPR_IOASC_HW_DEV_BUS_STATUS:
5919 scsi_cmd->result |= IPR_IOASC_SENSE_STATUS(ioasc);
5920 if (IPR_IOASC_SENSE_STATUS(ioasc) == SAM_STAT_CHECK_CONDITION) {
5921 if (!ipr_get_autosense(ipr_cmd)) {
5922 if (!ipr_is_naca_model(res)) {
5923 ipr_erp_cancel_all(ipr_cmd);
5924 return;
5925 }
5926 }
1da177e4 5927 }
5928 if (!ipr_is_naca_model(res))
5929 res->needs_sync_complete = 1;
5930 break;
5931 case IPR_IOASC_NR_INIT_CMD_REQUIRED:
5932 break;
5933 default:
5934 if (IPR_IOASC_SENSE_KEY(ioasc) > RECOVERED_ERROR)
5935 scsi_cmd->result |= (DID_ERROR << 16);
ee0a90fa 5936 if (!ipr_is_vset_device(res) && !ipr_is_naca_model(res))
5937 res->needs_sync_complete = 1;
5938 break;
5939 }
5940
63015bc9 5941 scsi_dma_unmap(ipr_cmd->scsi_cmd);
05a6538a 5942 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
5943 scsi_cmd->scsi_done(scsi_cmd);
5944}
5945
5946/**
5947 * ipr_scsi_done - mid-layer done function
5948 * @ipr_cmd: ipr command struct
5949 *
5950 * This function is invoked by the interrupt handler for
5951 * ops generated by the SCSI mid-layer
5952 *
5953 * Return value:
5954 * none
5955 **/
5956static void ipr_scsi_done(struct ipr_cmnd *ipr_cmd)
5957{
5958 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
5959 struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
96d21f00 5960 u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
05a6538a 5961 unsigned long hrrq_flags;
1da177e4 5962
96d21f00 5963 scsi_set_resid(scsi_cmd, be32_to_cpu(ipr_cmd->s.ioasa.hdr.residual_data_len));
5964
5965 if (likely(IPR_IOASC_SENSE_KEY(ioasc) == 0)) {
5966 scsi_dma_unmap(scsi_cmd);
5967
05a6538a 5968 spin_lock_irqsave(ioa_cfg->host->host_lock, hrrq_flags);
5969 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
1da177e4 5970 scsi_cmd->scsi_done(scsi_cmd);
05a6538a 5971 spin_unlock_irqrestore(ioa_cfg->host->host_lock, hrrq_flags);
172cd6e1 5972 } else {
05a6538a 5973 spin_lock_irqsave(ioa_cfg->host->host_lock, hrrq_flags);
1da177e4 5974 ipr_erp_start(ioa_cfg, ipr_cmd);
05a6538a 5975 spin_unlock_irqrestore(ioa_cfg->host->host_lock, hrrq_flags);
172cd6e1 5976 }
5977}
5978
5979/**
5980 * ipr_queuecommand - Queue a mid-layer request
00bfef2c 5981 * @shost: scsi host struct
1da177e4 5982 * @scsi_cmd: scsi command struct
5983 *
5984 * This function queues a request generated by the mid-layer.
5985 *
5986 * Return value:
5987 * 0 on success
5988 * SCSI_MLQUEUE_DEVICE_BUSY if device is busy
5989 * SCSI_MLQUEUE_HOST_BUSY if host is busy
5990 **/
5991static int ipr_queuecommand(struct Scsi_Host *shost,
5992 struct scsi_cmnd *scsi_cmd)
5993{
5994 struct ipr_ioa_cfg *ioa_cfg;
5995 struct ipr_resource_entry *res;
5996 struct ipr_ioarcb *ioarcb;
5997 struct ipr_cmnd *ipr_cmd;
00bfef2c 5998 unsigned long lock_flags;
d12f1576 5999 int rc;
05a6538a 6000 struct ipr_hrr_queue *hrrq;
6001 int hrrq_id;
1da177e4 6002
6003 ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
6004
6005 spin_lock_irqsave(shost->host_lock, lock_flags);
1da177e4 6006 scsi_cmd->result = (DID_OK << 16);
00bfef2c 6007 res = scsi_cmd->device->hostdata;
05a6538a 6008 hrrq_id = ipr_get_hrrq_index(ioa_cfg);
6009 hrrq = &ioa_cfg->hrrq[hrrq_id];
6010
6011 /*
6012 * We are currently blocking all devices due to a host reset
6013 * We have told the host to stop giving us new requests, but
6014 * ERP ops don't count. FIXME
6015 */
6016 if (unlikely(!ioa_cfg->allow_cmds && !ioa_cfg->ioa_is_dead)) {
6017 spin_unlock_irqrestore(shost->host_lock, lock_flags);
1da177e4 6018 return SCSI_MLQUEUE_HOST_BUSY;
00bfef2c 6019 }
6020
6021 /*
6022 * FIXME - Create scsi_set_host_offline interface
6023 * and the ioa_is_dead check can be removed
6024 */
6025 if (unlikely(ioa_cfg->ioa_is_dead || !res)) {
6026 spin_unlock_irqrestore(shost->host_lock, lock_flags);
6027 goto err_nodev;
6028 }
6029
6030 if (ipr_is_gata(res) && res->sata_port) {
6031 rc = ata_sas_queuecmd(scsi_cmd, res->sata_port->ap);
6032 spin_unlock_irqrestore(shost->host_lock, lock_flags);
6033 return rc;
6034 }
6035
05a6538a 6036 ipr_cmd = __ipr_get_free_ipr_cmnd(hrrq);
6037 if (ipr_cmd == NULL) {
6038 spin_unlock_irqrestore(shost->host_lock, lock_flags);
6039 return SCSI_MLQUEUE_HOST_BUSY;
6040 }
00bfef2c 6041 spin_unlock_irqrestore(shost->host_lock, lock_flags);
35a39691 6042
172cd6e1 6043 ipr_init_ipr_cmnd(ipr_cmd, ipr_scsi_done);
1da177e4 6044 ioarcb = &ipr_cmd->ioarcb;
6045
6046 memcpy(ioarcb->cmd_pkt.cdb, scsi_cmd->cmnd, scsi_cmd->cmd_len);
6047 ipr_cmd->scsi_cmd = scsi_cmd;
172cd6e1 6048 ipr_cmd->done = ipr_scsi_eh_done;
6049
6050 if (ipr_is_gscsi(res) || ipr_is_vset_device(res)) {
6051 if (scsi_cmd->underflow == 0)
6052 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_NO_ULEN_CHK;
6053
1da177e4 6054 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_NO_LINK_DESC;
6055 if (ipr_is_gscsi(res))
6056 ioarcb->cmd_pkt.flags_lo |= IPR_FLAGS_LO_DELAY_AFTER_RST;
6057 ioarcb->cmd_pkt.flags_lo |= IPR_FLAGS_LO_ALIGNED_BFR;
6058 ioarcb->cmd_pkt.flags_lo |= ipr_get_task_attributes(scsi_cmd);
6059 }
6060
6061 if (scsi_cmd->cmnd[0] >= 0xC0 &&
05a6538a 6062 (!ipr_is_gscsi(res) || scsi_cmd->cmnd[0] == IPR_QUERY_RSRC_STATE)) {
1da177e4 6063 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
05a6538a 6064 }
1da177e4 6065
6066 if (ioa_cfg->sis64)
6067 rc = ipr_build_ioadl64(ioa_cfg, ipr_cmd);
6068 else
6069 rc = ipr_build_ioadl(ioa_cfg, ipr_cmd);
1da177e4 6070
6071 spin_lock_irqsave(shost->host_lock, lock_flags);
6072 if (unlikely(rc || (!ioa_cfg->allow_cmds && !ioa_cfg->ioa_is_dead))) {
05a6538a 6073 list_add_tail(&ipr_cmd->queue, &hrrq->hrrq_free_q);
6074 spin_unlock_irqrestore(shost->host_lock, lock_flags);
6075 if (!rc)
6076 scsi_dma_unmap(scsi_cmd);
a5fb407e 6077 return SCSI_MLQUEUE_HOST_BUSY;
6078 }
6079
00bfef2c 6080 if (unlikely(ioa_cfg->ioa_is_dead)) {
05a6538a 6081 list_add_tail(&ipr_cmd->queue, &hrrq->hrrq_free_q);
6082 spin_unlock_irqrestore(shost->host_lock, lock_flags);
6083 scsi_dma_unmap(scsi_cmd);
6084 goto err_nodev;
6085 }
6086
6087 ioarcb->res_handle = res->res_handle;
6088 if (res->needs_sync_complete) {
6089 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_SYNC_COMPLETE;
6090 res->needs_sync_complete = 0;
6091 }
05a6538a 6092 list_add_tail(&ipr_cmd->queue, &hrrq->hrrq_pending_q);
00bfef2c 6093 ipr_trc_hook(ipr_cmd, IPR_TRACE_START, IPR_GET_RES_PHYS_LOC(res));
a5fb407e 6094 ipr_send_command(ipr_cmd);
00bfef2c 6095 spin_unlock_irqrestore(shost->host_lock, lock_flags);
1da177e4 6096 return 0;
1da177e4 6097
6098err_nodev:
6099 spin_lock_irqsave(shost->host_lock, lock_flags);
6100 memset(scsi_cmd->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);
6101 scsi_cmd->result = (DID_NO_CONNECT << 16);
6102 scsi_cmd->scsi_done(scsi_cmd);
6103 spin_unlock_irqrestore(shost->host_lock, lock_flags);
6104 return 0;
6105}
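/*
 * Illustrative sketch, not driver code: with distributed completion,
 * each command is bound at queue time to one host RRQ. Assuming
 * ipr_get_hrrq_index() (defined elsewhere) spreads commands across the
 * available queues, the binding in ipr_queuecommand() reduces to:
 *
 *	hrrq_id = ipr_get_hrrq_index(ioa_cfg);
 *	hrrq = &ioa_cfg->hrrq[hrrq_id];
 *	ipr_cmd = __ipr_get_free_ipr_cmnd(hrrq);
 *	...
 *	list_add_tail(&ipr_cmd->queue, &hrrq->hrrq_pending_q);
 *
 * The response for the command is then posted to that same queue and,
 * presumably, handled by the MSI-X vector registered for it.
 */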
f281233d 6106
6107/**
6108 * ipr_ioctl - IOCTL handler
6109 * @sdev: scsi device struct
6110 * @cmd: IOCTL cmd
6111 * @arg: IOCTL arg
6112 *
6113 * Return value:
6114 * 0 on success / other on failure
6115 **/
bd705f2d 6116static int ipr_ioctl(struct scsi_device *sdev, int cmd, void __user *arg)
6117{
6118 struct ipr_resource_entry *res;
6119
6120 res = (struct ipr_resource_entry *)sdev->hostdata;
6121 if (res && ipr_is_gata(res)) {
6122 if (cmd == HDIO_GET_IDENTITY)
6123 return -ENOTTY;
94be9a58 6124 return ata_sas_scsi_ioctl(res->sata_port->ap, sdev, cmd, arg);
0ce3a7e5 6125 }
6126
6127 return -EINVAL;
6128}
6129
6130/**
6131 * ipr_ioa_info - Get information about the card/driver
6132 * @host: scsi host struct
6133 *
6134 * Return value:
6135 * pointer to buffer with description string
6136 **/
203fa3fe 6137static const char *ipr_ioa_info(struct Scsi_Host *host)
6138{
6139 static char buffer[512];
6140 struct ipr_ioa_cfg *ioa_cfg;
6141 unsigned long lock_flags = 0;
6142
6143 ioa_cfg = (struct ipr_ioa_cfg *) host->hostdata;
6144
6145 spin_lock_irqsave(host->host_lock, lock_flags);
6146 sprintf(buffer, "IBM %X Storage Adapter", ioa_cfg->type);
6147 spin_unlock_irqrestore(host->host_lock, lock_flags);
6148
6149 return buffer;
6150}
6151
6152static struct scsi_host_template driver_template = {
6153 .module = THIS_MODULE,
6154 .name = "IPR",
6155 .info = ipr_ioa_info,
35a39691 6156 .ioctl = ipr_ioctl,
6157 .queuecommand = ipr_queuecommand,
6158 .eh_abort_handler = ipr_eh_abort,
6159 .eh_device_reset_handler = ipr_eh_dev_reset,
6160 .eh_host_reset_handler = ipr_eh_host_reset,
6161 .slave_alloc = ipr_slave_alloc,
6162 .slave_configure = ipr_slave_configure,
6163 .slave_destroy = ipr_slave_destroy,
6164 .target_alloc = ipr_target_alloc,
6165 .target_destroy = ipr_target_destroy,
6166 .change_queue_depth = ipr_change_queue_depth,
6167 .change_queue_type = ipr_change_queue_type,
6168 .bios_param = ipr_biosparam,
6169 .can_queue = IPR_MAX_COMMANDS,
6170 .this_id = -1,
6171 .sg_tablesize = IPR_MAX_SGLIST,
6172 .max_sectors = IPR_IOA_MAX_SECTORS,
6173 .cmd_per_lun = IPR_MAX_CMD_PER_LUN,
6174 .use_clustering = ENABLE_CLUSTERING,
6175 .shost_attrs = ipr_ioa_attrs,
6176 .sdev_attrs = ipr_dev_attrs,
6177 .proc_name = IPR_NAME
6178};
6179
6180/**
6181 * ipr_ata_phy_reset - libata phy_reset handler
6182 * @ap: ata port to reset
6183 *
6184 **/
6185static void ipr_ata_phy_reset(struct ata_port *ap)
6186{
6187 unsigned long flags;
6188 struct ipr_sata_port *sata_port = ap->private_data;
6189 struct ipr_resource_entry *res = sata_port->res;
6190 struct ipr_ioa_cfg *ioa_cfg = sata_port->ioa_cfg;
6191 int rc;
6192
6193 ENTER;
6194 spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
203fa3fe 6195 while (ioa_cfg->in_reset_reload) {
6196 spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
6197 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
6198 spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
6199 }
6200
6201 if (!ioa_cfg->allow_cmds)
6202 goto out_unlock;
6203
6204 rc = ipr_device_reset(ioa_cfg, res);
6205
6206 if (rc) {
3e4ec344 6207 ap->link.device[0].class = ATA_DEV_NONE;
6208 goto out_unlock;
6209 }
6210
6211 ap->link.device[0].class = res->ata_class;
6212 if (ap->link.device[0].class == ATA_DEV_UNKNOWN)
3e4ec344 6213 ap->link.device[0].class = ATA_DEV_NONE;
6214
6215out_unlock:
6216 spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
6217 LEAVE;
6218}
6219
6220/**
6221 * ipr_ata_post_internal - Cleanup after an internal command
6222 * @qc: ATA queued command
6223 *
6224 * Return value:
6225 * none
6226 **/
6227static void ipr_ata_post_internal(struct ata_queued_cmd *qc)
6228{
6229 struct ipr_sata_port *sata_port = qc->ap->private_data;
6230 struct ipr_ioa_cfg *ioa_cfg = sata_port->ioa_cfg;
6231 struct ipr_cmnd *ipr_cmd;
05a6538a 6232 struct ipr_hrr_queue *hrrq;
6233 unsigned long flags;
6234
6235 spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
203fa3fe 6236 while (ioa_cfg->in_reset_reload) {
6237 spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
6238 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
6239 spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
6240 }
6241
05a6538a 6242 for_each_hrrq(hrrq, ioa_cfg) {
6243 list_for_each_entry(ipr_cmd, &hrrq->hrrq_pending_q, queue) {
6244 if (ipr_cmd->qc == qc) {
6245 ipr_device_reset(ioa_cfg, sata_port->res);
6246 break;
6247 }
6248 }
6249 }
6250 spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
6251}
6252
6253/**
6254 * ipr_copy_sata_tf - Copy a SATA taskfile to an IOA data structure
6255 * @regs: destination
6256 * @tf: source ATA taskfile
6257 *
6258 * Return value:
6259 * none
6260 **/
6261static void ipr_copy_sata_tf(struct ipr_ioarcb_ata_regs *regs,
6262 struct ata_taskfile *tf)
6263{
6264 regs->feature = tf->feature;
6265 regs->nsect = tf->nsect;
6266 regs->lbal = tf->lbal;
6267 regs->lbam = tf->lbam;
6268 regs->lbah = tf->lbah;
6269 regs->device = tf->device;
6270 regs->command = tf->command;
6271 regs->hob_feature = tf->hob_feature;
6272 regs->hob_nsect = tf->hob_nsect;
6273 regs->hob_lbal = tf->hob_lbal;
6274 regs->hob_lbam = tf->hob_lbam;
6275 regs->hob_lbah = tf->hob_lbah;
6276 regs->ctl = tf->ctl;
6277}
6278
6279/**
6280 * ipr_sata_done - done function for SATA commands
6281 * @ipr_cmd: ipr command struct
6282 *
6283 * This function is invoked by the interrupt handler for
6284 * ops generated by the SCSI mid-layer to SATA devices
6285 *
6286 * Return value:
6287 * none
6288 **/
6289static void ipr_sata_done(struct ipr_cmnd *ipr_cmd)
6290{
6291 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6292 struct ata_queued_cmd *qc = ipr_cmd->qc;
6293 struct ipr_sata_port *sata_port = qc->ap->private_data;
6294 struct ipr_resource_entry *res = sata_port->res;
96d21f00 6295 u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
35a39691 6296
6297 if (ipr_cmd->ioa_cfg->sis64)
6298 memcpy(&sata_port->ioasa, &ipr_cmd->s.ioasa64.u.gata,
6299 sizeof(struct ipr_ioasa_gata));
6300 else
6301 memcpy(&sata_port->ioasa, &ipr_cmd->s.ioasa.u.gata,
6302 sizeof(struct ipr_ioasa_gata));
6303 ipr_dump_ioasa(ioa_cfg, ipr_cmd, res);
6304
96d21f00 6305 if (be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc_specific) & IPR_ATA_DEVICE_WAS_RESET)
3e7ebdfa 6306 scsi_report_device_reset(ioa_cfg->host, res->bus, res->target);
6307
6308 if (IPR_IOASC_SENSE_KEY(ioasc) > RECOVERED_ERROR)
96d21f00 6309 qc->err_mask |= __ac_err_mask(sata_port->ioasa.status);
35a39691 6310 else
96d21f00 6311 qc->err_mask |= ac_err_mask(sata_port->ioasa.status);
05a6538a 6312 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
6313 ata_qc_complete(qc);
6314}
6315
6316/**
6317 * ipr_build_ata_ioadl64 - Build an ATA scatter/gather list
6318 * @ipr_cmd: ipr command struct
6319 * @qc: ATA queued command
6320 *
6321 **/
6322static void ipr_build_ata_ioadl64(struct ipr_cmnd *ipr_cmd,
6323 struct ata_queued_cmd *qc)
6324{
6325 u32 ioadl_flags = 0;
6326 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
6327 struct ipr_ioadl64_desc *ioadl64 = ipr_cmd->i.ioadl64;
6328 struct ipr_ioadl64_desc *last_ioadl64 = NULL;
6329 int len = qc->nbytes;
6330 struct scatterlist *sg;
6331 unsigned int si;
6332 dma_addr_t dma_addr = ipr_cmd->dma_addr;
6333
6334 if (len == 0)
6335 return;
6336
6337 if (qc->dma_dir == DMA_TO_DEVICE) {
6338 ioadl_flags = IPR_IOADL_FLAGS_WRITE;
6339 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
6340 } else if (qc->dma_dir == DMA_FROM_DEVICE)
6341 ioadl_flags = IPR_IOADL_FLAGS_READ;
6342
6343 ioarcb->data_transfer_length = cpu_to_be32(len);
6344 ioarcb->ioadl_len =
6345 cpu_to_be32(sizeof(struct ipr_ioadl64_desc) * ipr_cmd->dma_use_sg);
6346 ioarcb->u.sis64_addr_data.data_ioadl_addr =
6347 cpu_to_be64(dma_addr + offsetof(struct ipr_cmnd, i.ata_ioadl));
6348
6349 for_each_sg(qc->sg, sg, qc->n_elem, si) {
6350 ioadl64->flags = cpu_to_be32(ioadl_flags);
6351 ioadl64->data_len = cpu_to_be32(sg_dma_len(sg));
6352 ioadl64->address = cpu_to_be64(sg_dma_address(sg));
6353
6354 last_ioadl64 = ioadl64;
6355 ioadl64++;
6356 }
6357
6358 if (likely(last_ioadl64))
6359 last_ioadl64->flags |= cpu_to_be32(IPR_IOADL_FLAGS_LAST);
6360}
6361
6362/**
6363 * ipr_build_ata_ioadl - Build an ATA scatter/gather list
6364 * @ipr_cmd: ipr command struct
6365 * @qc: ATA queued command
6366 *
6367 **/
6368static void ipr_build_ata_ioadl(struct ipr_cmnd *ipr_cmd,
6369 struct ata_queued_cmd *qc)
6370{
6371 u32 ioadl_flags = 0;
6372 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
a32c055f 6373 struct ipr_ioadl_desc *ioadl = ipr_cmd->i.ioadl;
3be6cbd7 6374 struct ipr_ioadl_desc *last_ioadl = NULL;
dde20207 6375 int len = qc->nbytes;
35a39691 6376 struct scatterlist *sg;
ff2aeb1e 6377 unsigned int si;
6378
6379 if (len == 0)
6380 return;
6381
6382 if (qc->dma_dir == DMA_TO_DEVICE) {
6383 ioadl_flags = IPR_IOADL_FLAGS_WRITE;
6384 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
6385 ioarcb->data_transfer_length = cpu_to_be32(len);
6386 ioarcb->ioadl_len =
6387 cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);
6388 } else if (qc->dma_dir == DMA_FROM_DEVICE) {
6389 ioadl_flags = IPR_IOADL_FLAGS_READ;
6390 ioarcb->read_data_transfer_length = cpu_to_be32(len);
6391 ioarcb->read_ioadl_len =
6392 cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);
6393 }
6394
ff2aeb1e 6395 for_each_sg(qc->sg, sg, qc->n_elem, si) {
6396 ioadl->flags_and_data_len = cpu_to_be32(ioadl_flags | sg_dma_len(sg));
6397 ioadl->address = cpu_to_be32(sg_dma_address(sg));
6398
6399 last_ioadl = ioadl;
6400 ioadl++;
35a39691 6401 }
6402
6403 if (likely(last_ioadl))
6404 last_ioadl->flags_and_data_len |= cpu_to_be32(IPR_IOADL_FLAGS_LAST);
6405}
6406
6407/**
6408 * ipr_qc_issue - Issue a SATA qc to a device
6409 * @qc: queued command
6410 *
6411 * Return value:
6412 * 0 on success / AC_ERR_* value on failure
6413 **/
6414static unsigned int ipr_qc_issue(struct ata_queued_cmd *qc)
6415{
6416 struct ata_port *ap = qc->ap;
6417 struct ipr_sata_port *sata_port = ap->private_data;
6418 struct ipr_resource_entry *res = sata_port->res;
6419 struct ipr_ioa_cfg *ioa_cfg = sata_port->ioa_cfg;
6420 struct ipr_cmnd *ipr_cmd;
6421 struct ipr_ioarcb *ioarcb;
6422 struct ipr_ioarcb_ata_regs *regs;
05a6538a 6423 struct ipr_hrr_queue *hrrq;
6424 int hrrq_id;
6425
6426 if (unlikely(!ioa_cfg->allow_cmds || ioa_cfg->ioa_is_dead))
0feeed82 6427 return AC_ERR_SYSTEM;
35a39691 6428
05a6538a 6429 hrrq_id = ipr_get_hrrq_index(ioa_cfg);
6430 hrrq = &ioa_cfg->hrrq[hrrq_id];
6431 ipr_cmd = __ipr_get_free_ipr_cmnd(hrrq);
6432 ipr_init_ipr_cmnd(ipr_cmd, ipr_lock_and_done);
35a39691 6433 ioarcb = &ipr_cmd->ioarcb;
35a39691 6434
6435 if (ioa_cfg->sis64) {
6436 regs = &ipr_cmd->i.ata_ioadl.regs;
6437 ioarcb->add_cmd_parms_offset = cpu_to_be16(sizeof(*ioarcb));
6438 } else
6439 regs = &ioarcb->u.add_data.u.regs;
6440
6441 memset(regs, 0, sizeof(*regs));
6442 ioarcb->add_cmd_parms_len = cpu_to_be16(sizeof(*regs));
35a39691 6443
05a6538a 6444 list_add_tail(&ipr_cmd->queue, &hrrq->hrrq_pending_q);
6445 ipr_cmd->qc = qc;
6446 ipr_cmd->done = ipr_sata_done;
3e7ebdfa 6447 ipr_cmd->ioarcb.res_handle = res->res_handle;
6448 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_ATA_PASSTHRU;
6449 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_NO_LINK_DESC;
6450 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_NO_ULEN_CHK;
dde20207 6451 ipr_cmd->dma_use_sg = qc->n_elem;
35a39691 6452
6453 if (ioa_cfg->sis64)
6454 ipr_build_ata_ioadl64(ipr_cmd, qc);
6455 else
6456 ipr_build_ata_ioadl(ipr_cmd, qc);
6457
6458 regs->flags |= IPR_ATA_FLAG_STATUS_ON_GOOD_COMPLETION;
6459 ipr_copy_sata_tf(regs, &qc->tf);
6460 memcpy(ioarcb->cmd_pkt.cdb, qc->cdb, IPR_MAX_CDB_LEN);
3e7ebdfa 6461 ipr_trc_hook(ipr_cmd, IPR_TRACE_START, IPR_GET_RES_PHYS_LOC(res));
6462
6463 switch (qc->tf.protocol) {
6464 case ATA_PROT_NODATA:
6465 case ATA_PROT_PIO:
6466 break;
6467
6468 case ATA_PROT_DMA:
6469 regs->flags |= IPR_ATA_FLAG_XFER_TYPE_DMA;
6470 break;
6471
6472 case ATAPI_PROT_PIO:
6473 case ATAPI_PROT_NODATA:
6474 regs->flags |= IPR_ATA_FLAG_PACKET_CMD;
6475 break;
6476
0dc36888 6477 case ATAPI_PROT_DMA:
6478 regs->flags |= IPR_ATA_FLAG_PACKET_CMD;
6479 regs->flags |= IPR_ATA_FLAG_XFER_TYPE_DMA;
6480 break;
6481
6482 default:
6483 WARN_ON(1);
0feeed82 6484 return AC_ERR_INVALID;
6485 }
6486
6487 ipr_send_command(ipr_cmd);
6488
6489 return 0;
6490}
6491
6492/**
6493 * ipr_qc_fill_rtf - Read result TF
6494 * @qc: ATA queued command
6495 *
6496 * Return value:
6497 * true
6498 **/
6499static bool ipr_qc_fill_rtf(struct ata_queued_cmd *qc)
6500{
6501 struct ipr_sata_port *sata_port = qc->ap->private_data;
6502 struct ipr_ioasa_gata *g = &sata_port->ioasa;
6503 struct ata_taskfile *tf = &qc->result_tf;
6504
6505 tf->feature = g->error;
6506 tf->nsect = g->nsect;
6507 tf->lbal = g->lbal;
6508 tf->lbam = g->lbam;
6509 tf->lbah = g->lbah;
6510 tf->device = g->device;
6511 tf->command = g->status;
6512 tf->hob_nsect = g->hob_nsect;
6513 tf->hob_lbal = g->hob_lbal;
6514 tf->hob_lbam = g->hob_lbam;
6515 tf->hob_lbah = g->hob_lbah;
6516 tf->ctl = g->alt_status;
6517
6518 return true;
6519}
6520
35a39691 6521static struct ata_port_operations ipr_sata_ops = {
35a39691 6522 .phy_reset = ipr_ata_phy_reset,
a1efdaba 6523 .hardreset = ipr_sata_reset,
35a39691 6524 .post_internal_cmd = ipr_ata_post_internal,
6525 .qc_prep = ata_noop_qc_prep,
6526 .qc_issue = ipr_qc_issue,
4c9bf4e7 6527 .qc_fill_rtf = ipr_qc_fill_rtf,
6528 .port_start = ata_sas_port_start,
6529 .port_stop = ata_sas_port_stop
6530};
6531
6532static struct ata_port_info sata_port_info = {
9cbe056f 6533 .flags = ATA_FLAG_SATA | ATA_FLAG_PIO_DMA,
6534 .pio_mask = ATA_PIO4_ONLY,
6535 .mwdma_mask = ATA_MWDMA2,
6536 .udma_mask = ATA_UDMA6,
6537 .port_ops = &ipr_sata_ops
6538};
6539
6540#ifdef CONFIG_PPC_PSERIES
6541static const u16 ipr_blocked_processors[] = {
6542 PVR_NORTHSTAR,
6543 PVR_PULSAR,
6544 PVR_POWER4,
6545 PVR_ICESTAR,
6546 PVR_SSTAR,
6547 PVR_POWER4p,
6548 PVR_630,
6549 PVR_630p
6550};
6551
6552/**
6553 * ipr_invalid_adapter - Determine if this adapter is supported on this hardware
6554 * @ioa_cfg: ioa cfg struct
6555 *
6556 * Adapters that use Gemstone revision < 3.1 do not work reliably on
6557 * certain pSeries hardware. This function determines if the given
6558 * adapter is in one of these configurations or not.
6559 *
6560 * Return value:
6561 * 1 if adapter is not supported / 0 if adapter is supported
6562 **/
6563static int ipr_invalid_adapter(struct ipr_ioa_cfg *ioa_cfg)
6564{
6565 int i;
6566
44c10138 6567 if ((ioa_cfg->type == 0x5702) && (ioa_cfg->pdev->revision < 4)) {
203fa3fe 6568 for (i = 0; i < ARRAY_SIZE(ipr_blocked_processors); i++) {
d3dbeef6 6569 if (pvr_version_is(ipr_blocked_processors[i]))
44c10138 6570 return 1;
6571 }
6572 }
6573 return 0;
6574}
6575#else
6576#define ipr_invalid_adapter(ioa_cfg) 0
6577#endif
6578
6579/**
6580 * ipr_ioa_bringdown_done - IOA bring down completion.
6581 * @ipr_cmd: ipr command struct
6582 *
6583 * This function processes the completion of an adapter bring down.
6584 * It wakes any reset sleepers.
6585 *
6586 * Return value:
6587 * IPR_RC_JOB_RETURN
6588 **/
6589static int ipr_ioa_bringdown_done(struct ipr_cmnd *ipr_cmd)
6590{
6591 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6592
6593 ENTER;
6594 ioa_cfg->in_reset_reload = 0;
6595 ioa_cfg->reset_retries = 0;
05a6538a 6596 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
6597 wake_up_all(&ioa_cfg->reset_wait_q);
6598
6599 spin_unlock_irq(ioa_cfg->host->host_lock);
6600 scsi_unblock_requests(ioa_cfg->host);
6601 spin_lock_irq(ioa_cfg->host->host_lock);
6602 LEAVE;
6603
6604 return IPR_RC_JOB_RETURN;
6605}
6606
6607/**
6608 * ipr_ioa_reset_done - IOA reset completion.
6609 * @ipr_cmd: ipr command struct
6610 *
6611 * This function processes the completion of an adapter reset.
6612 * It schedules any necessary mid-layer add/removes and
6613 * wakes any reset sleepers.
6614 *
6615 * Return value:
6616 * IPR_RC_JOB_RETURN
6617 **/
6618static int ipr_ioa_reset_done(struct ipr_cmnd *ipr_cmd)
6619{
6620 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6621 struct ipr_resource_entry *res;
6622 struct ipr_hostrcb *hostrcb, *temp;
6623 int i = 0;
6624
6625 ENTER;
6626 ioa_cfg->in_reset_reload = 0;
6627 ioa_cfg->allow_cmds = 1;
6628 ioa_cfg->reset_cmd = NULL;
3d1d0da6 6629 ioa_cfg->doorbell |= IPR_RUNTIME_RESET;
6630
6631 list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
6632 if (ioa_cfg->allow_ml_add_del && (res->add_to_ml || res->del_from_ml)) {
6633 ipr_trace;
6634 break;
6635 }
6636 }
6637 schedule_work(&ioa_cfg->work_q);
6638
6639 list_for_each_entry_safe(hostrcb, temp, &ioa_cfg->hostrcb_free_q, queue) {
6640 list_del(&hostrcb->queue);
6641 if (i++ < IPR_NUM_LOG_HCAMS)
6642 ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_LOG_DATA, hostrcb);
6643 else
6644 ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE, hostrcb);
6645 }
6646
6bb04170 6647 scsi_report_bus_reset(ioa_cfg->host, IPR_VSET_BUS);
6648 dev_info(&ioa_cfg->pdev->dev, "IOA initialized.\n");
6649
6650 ioa_cfg->reset_retries = 0;
05a6538a 6651 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
6652 wake_up_all(&ioa_cfg->reset_wait_q);
6653
30237853 6654 spin_unlock(ioa_cfg->host->host_lock);
1da177e4 6655 scsi_unblock_requests(ioa_cfg->host);
30237853 6656 spin_lock(ioa_cfg->host->host_lock);
6657
6658 if (!ioa_cfg->allow_cmds)
6659 scsi_block_requests(ioa_cfg->host);
6660
6661 LEAVE;
6662 return IPR_RC_JOB_RETURN;
6663}
6664
6665/**
6666 * ipr_set_sup_dev_dflt - Initialize a Set Supported Device buffer
6667 * @supported_dev: supported device struct
6668 * @vpids: vendor product id struct
6669 *
6670 * Return value:
6671 * none
6672 **/
6673static void ipr_set_sup_dev_dflt(struct ipr_supported_device *supported_dev,
6674 struct ipr_std_inq_vpids *vpids)
6675{
6676 memset(supported_dev, 0, sizeof(struct ipr_supported_device));
6677 memcpy(&supported_dev->vpids, vpids, sizeof(struct ipr_std_inq_vpids));
6678 supported_dev->num_records = 1;
6679 supported_dev->data_length =
6680 cpu_to_be16(sizeof(struct ipr_supported_device));
6681 supported_dev->reserved = 0;
6682}
6683
6684/**
6685 * ipr_set_supported_devs - Send Set Supported Devices for a device
6686 * @ipr_cmd: ipr command struct
6687 *
a32c055f 6688 * This function sends a Set Supported Devices to the adapter
6689 *
6690 * Return value:
6691 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
6692 **/
6693static int ipr_set_supported_devs(struct ipr_cmnd *ipr_cmd)
6694{
6695 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6696 struct ipr_supported_device *supp_dev = &ioa_cfg->vpd_cbs->supp_dev;
6697 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
6698 struct ipr_resource_entry *res = ipr_cmd->u.res;
6699
6700 ipr_cmd->job_step = ipr_ioa_reset_done;
6701
6702 list_for_each_entry_continue(res, &ioa_cfg->used_res_q, queue) {
e4fbf44e 6703 if (!ipr_is_scsi_disk(res))
6704 continue;
6705
6706 ipr_cmd->u.res = res;
3e7ebdfa 6707 ipr_set_sup_dev_dflt(supp_dev, &res->std_inq_data.vpids);
6708
6709 ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
6710 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
6711 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
6712
6713 ioarcb->cmd_pkt.cdb[0] = IPR_SET_SUPPORTED_DEVICES;
3e7ebdfa 6714 ioarcb->cmd_pkt.cdb[1] = IPR_SET_ALL_SUPPORTED_DEVICES;
6715 ioarcb->cmd_pkt.cdb[7] = (sizeof(struct ipr_supported_device) >> 8) & 0xff;
6716 ioarcb->cmd_pkt.cdb[8] = sizeof(struct ipr_supported_device) & 0xff;
6717
6718 ipr_init_ioadl(ipr_cmd,
6719 ioa_cfg->vpd_cbs_dma +
6720 offsetof(struct ipr_misc_cbs, supp_dev),
6721 sizeof(struct ipr_supported_device),
6722 IPR_IOADL_FLAGS_WRITE_LAST);
6723
6724 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout,
6725 IPR_SET_SUP_DEVICE_TIMEOUT);
6726
6727 if (!ioa_cfg->sis64)
6728 ipr_cmd->job_step = ipr_set_supported_devs;
05a6538a 6729 LEAVE;
6730 return IPR_RC_JOB_RETURN;
6731 }
6732
05a6538a 6733 LEAVE;
6734 return IPR_RC_JOB_CONTINUE;
6735}
6736
6737/**
6738 * ipr_get_mode_page - Locate specified mode page
6739 * @mode_pages: mode page buffer
6740 * @page_code: page code to find
6741 * @len: minimum required length for mode page
6742 *
6743 * Return value:
6744 * pointer to mode page / NULL on failure
6745 **/
6746static void *ipr_get_mode_page(struct ipr_mode_pages *mode_pages,
6747 u32 page_code, u32 len)
6748{
6749 struct ipr_mode_page_hdr *mode_hdr;
6750 u32 page_length;
6751 u32 length;
6752
6753 if (!mode_pages || (mode_pages->hdr.length == 0))
6754 return NULL;
6755
6756 length = (mode_pages->hdr.length + 1) - 4 - mode_pages->hdr.block_desc_len;
6757 mode_hdr = (struct ipr_mode_page_hdr *)
6758 (mode_pages->data + mode_pages->hdr.block_desc_len);
6759
6760 while (length) {
6761 if (IPR_GET_MODE_PAGE_CODE(mode_hdr) == page_code) {
6762 if (mode_hdr->page_length >= (len - sizeof(struct ipr_mode_page_hdr)))
6763 return mode_hdr;
6764 break;
6765 } else {
6766 page_length = (sizeof(struct ipr_mode_page_hdr) +
6767 mode_hdr->page_length);
6768 length -= page_length;
6769 mode_hdr = (struct ipr_mode_page_hdr *)
6770 ((unsigned long)mode_hdr + page_length);
6771 }
6772 }
6773 return NULL;
6774}
6775
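/*
 * The walk in ipr_get_mode_page() above is standard MODE SENSE(6)
 * header arithmetic: the mode data length byte excludes itself (hence
 * the +1), the remaining three header bytes plus the block descriptors
 * are skipped, and each page is stepped over via its own length byte.
 * A minimal stand-alone sketch of the same walk over a raw sense
 * buffer (illustrative helper only, not part of the driver):
 */
#include <stddef.h>
#include <stdint.h>

static const uint8_t *find_mode_page(const uint8_t *buf, size_t buf_len,
				     uint8_t page_code)
{
	size_t total = buf[0] + 1;	/* mode data length + the length byte */
	size_t off = 4 + buf[3];	/* skip header and block descriptors */

	if (total > buf_len)
		total = buf_len;

	while (off + 2 <= total) {
		uint8_t code = buf[off] & 0x3f;	/* mask off PS/SPF bits */
		uint8_t plen = buf[off + 1];	/* bytes following this byte */

		if (code == page_code)
			return buf + off;
		off += 2 + plen;	/* advance to the next page header */
	}
	return NULL;
}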
6776/**
6777 * ipr_check_term_power - Check for term power errors
6778 * @ioa_cfg: ioa config struct
6779 * @mode_pages: IOAFP mode pages buffer
6780 *
6781 * Check the IOAFP's mode page 28 for term power errors
6782 *
6783 * Return value:
6784 * nothing
6785 **/
6786static void ipr_check_term_power(struct ipr_ioa_cfg *ioa_cfg,
6787 struct ipr_mode_pages *mode_pages)
6788{
6789 int i;
6790 int entry_length;
6791 struct ipr_dev_bus_entry *bus;
6792 struct ipr_mode_page28 *mode_page;
6793
6794 mode_page = ipr_get_mode_page(mode_pages, 0x28,
6795 sizeof(struct ipr_mode_page28));
6796
6797 entry_length = mode_page->entry_length;
6798
6799 bus = mode_page->bus;
6800
6801 for (i = 0; i < mode_page->num_entries; i++) {
6802 if (bus->flags & IPR_SCSI_ATTR_NO_TERM_PWR) {
6803 dev_err(&ioa_cfg->pdev->dev,
6804 "Term power is absent on scsi bus %d\n",
6805 bus->res_addr.bus);
6806 }
6807
6808 bus = (struct ipr_dev_bus_entry *)((char *)bus + entry_length);
6809 }
6810}
6811
6812/**
6813 * ipr_scsi_bus_speed_limit - Limit the SCSI speed based on SES table
6814 * @ioa_cfg: ioa config struct
6815 *
6816 * Looks through the config table checking for SES devices. If
6817 * the SES device is in the SES table indicating a maximum SCSI
6818 * bus speed, the speed is limited for the bus.
6819 *
6820 * Return value:
6821 * none
6822 **/
6823static void ipr_scsi_bus_speed_limit(struct ipr_ioa_cfg *ioa_cfg)
6824{
6825 u32 max_xfer_rate;
6826 int i;
6827
6828 for (i = 0; i < IPR_MAX_NUM_BUSES; i++) {
6829 max_xfer_rate = ipr_get_max_scsi_speed(ioa_cfg, i,
6830 ioa_cfg->bus_attr[i].bus_width);
6831
6832 if (max_xfer_rate < ioa_cfg->bus_attr[i].max_xfer_rate)
6833 ioa_cfg->bus_attr[i].max_xfer_rate = max_xfer_rate;
6834 }
6835}
6836
6837/**
6838 * ipr_modify_ioafp_mode_page_28 - Modify IOAFP Mode Page 28
6839 * @ioa_cfg: ioa config struct
6840 * @mode_pages: mode page 28 buffer
6841 *
6842 * Updates mode page 28 based on driver configuration
6843 *
6844 * Return value:
6845 * none
6846 **/
6847static void ipr_modify_ioafp_mode_page_28(struct ipr_ioa_cfg *ioa_cfg,
203fa3fe 6848 struct ipr_mode_pages *mode_pages)
6849{
6850 int i, entry_length;
6851 struct ipr_dev_bus_entry *bus;
6852 struct ipr_bus_attributes *bus_attr;
6853 struct ipr_mode_page28 *mode_page;
6854
6855 mode_page = ipr_get_mode_page(mode_pages, 0x28,
6856 sizeof(struct ipr_mode_page28));
6857
6858 entry_length = mode_page->entry_length;
6859
6860 /* Loop for each device bus entry */
6861 for (i = 0, bus = mode_page->bus;
6862 i < mode_page->num_entries;
6863 i++, bus = (struct ipr_dev_bus_entry *)((u8 *)bus + entry_length)) {
6864 if (bus->res_addr.bus > IPR_MAX_NUM_BUSES) {
6865 dev_err(&ioa_cfg->pdev->dev,
6866 "Invalid resource address reported: 0x%08X\n",
6867 IPR_GET_PHYS_LOC(bus->res_addr));
6868 continue;
6869 }
6870
6871 bus_attr = &ioa_cfg->bus_attr[i];
6872 bus->extended_reset_delay = IPR_EXTENDED_RESET_DELAY;
6873 bus->bus_width = bus_attr->bus_width;
6874 bus->max_xfer_rate = cpu_to_be32(bus_attr->max_xfer_rate);
6875 bus->flags &= ~IPR_SCSI_ATTR_QAS_MASK;
6876 if (bus_attr->qas_enabled)
6877 bus->flags |= IPR_SCSI_ATTR_ENABLE_QAS;
6878 else
6879 bus->flags |= IPR_SCSI_ATTR_DISABLE_QAS;
6880 }
6881}
6882
6883/**
6884 * ipr_build_mode_select - Build a mode select command
6885 * @ipr_cmd: ipr command struct
6886 * @res_handle: resource handle to send command to
6887 * @parm: Byte 1 of the Mode Select command (PF and SP flags)
6888 * @dma_addr: DMA buffer address
6889 * @xfer_len: data transfer length
6890 *
6891 * Return value:
6892 * none
6893 **/
6894static void ipr_build_mode_select(struct ipr_cmnd *ipr_cmd,
6895 __be32 res_handle, u8 parm,
6896 dma_addr_t dma_addr, u8 xfer_len)
1da177e4 6897{
6898 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
6899
6900 ioarcb->res_handle = res_handle;
6901 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_SCSICDB;
6902 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
6903 ioarcb->cmd_pkt.cdb[0] = MODE_SELECT;
6904 ioarcb->cmd_pkt.cdb[1] = parm;
6905 ioarcb->cmd_pkt.cdb[4] = xfer_len;
6906
a32c055f 6907 ipr_init_ioadl(ipr_cmd, dma_addr, xfer_len, IPR_IOADL_FLAGS_WRITE_LAST);
6908}
6909
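/*
 * The six bytes built above follow the standard MODE SELECT(6) CDB
 * layout (0x15 is the SCSI opcode behind the kernel's MODE_SELECT
 * define). The page 28 and page 24 callers below pass parm = 0x11,
 * i.e. PF | SP. A stand-alone equivalent for reference:
 */
#include <stdint.h>

static void fill_mode_select6_cdb(uint8_t cdb[6], uint8_t parm, uint8_t len)
{
	cdb[0] = 0x15;	/* MODE SELECT(6) opcode */
	cdb[1] = parm;	/* 0x10 = PF (page format), 0x01 = SP (save pages) */
	cdb[2] = 0;	/* reserved */
	cdb[3] = 0;	/* reserved */
	cdb[4] = len;	/* parameter list length */
	cdb[5] = 0;	/* control byte */
}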
6910/**
6911 * ipr_ioafp_mode_select_page28 - Issue Mode Select Page 28 to IOA
6912 * @ipr_cmd: ipr command struct
6913 *
6914 * This function sets up the SCSI bus attributes and sends
6915 * a Mode Select for Page 28 to activate them.
6916 *
6917 * Return value:
6918 * IPR_RC_JOB_RETURN
6919 **/
6920static int ipr_ioafp_mode_select_page28(struct ipr_cmnd *ipr_cmd)
6921{
6922 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6923 struct ipr_mode_pages *mode_pages = &ioa_cfg->vpd_cbs->mode_pages;
6924 int length;
6925
6926 ENTER;
6927 ipr_scsi_bus_speed_limit(ioa_cfg);
6928 ipr_check_term_power(ioa_cfg, mode_pages);
6929 ipr_modify_ioafp_mode_page_28(ioa_cfg, mode_pages);
6930 length = mode_pages->hdr.length + 1;
6931 	mode_pages->hdr.length = 0;	/* field is reserved (zero) for MODE SELECT */
6932
6933 ipr_build_mode_select(ipr_cmd, cpu_to_be32(IPR_IOA_RES_HANDLE), 0x11,
6934 ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, mode_pages),
6935 length);
6936
6937 ipr_cmd->job_step = ipr_set_supported_devs;
6938 ipr_cmd->u.res = list_entry(ioa_cfg->used_res_q.next,
6939 struct ipr_resource_entry, queue);
6940 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
6941
6942 LEAVE;
6943 return IPR_RC_JOB_RETURN;
6944}
6945
6946/**
6947 * ipr_build_mode_sense - Builds a mode sense command
6948 * @ipr_cmd: ipr command struct
6949 * @res_handle: resource handle to send command to
6950 * @parm: Byte 2 of mode sense command
6951 * @dma_addr: DMA address of mode sense buffer
6952 * @xfer_len: Size of DMA buffer
6953 *
6954 * Return value:
6955 * none
6956 **/
6957static void ipr_build_mode_sense(struct ipr_cmnd *ipr_cmd,
6958 __be32 res_handle,
a32c055f 6959 u8 parm, dma_addr_t dma_addr, u8 xfer_len)
1da177e4 6960{
6961 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
6962
6963 ioarcb->res_handle = res_handle;
6964 ioarcb->cmd_pkt.cdb[0] = MODE_SENSE;
6965 ioarcb->cmd_pkt.cdb[2] = parm;
6966 ioarcb->cmd_pkt.cdb[4] = xfer_len;
6967 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_SCSICDB;
6968
a32c055f 6969 ipr_init_ioadl(ipr_cmd, dma_addr, xfer_len, IPR_IOADL_FLAGS_READ_LAST);
6970}
6971
6972/**
6973 * ipr_reset_cmd_failed - Handle failure of IOA reset command
6974 * @ipr_cmd: ipr command struct
6975 *
6976 * This function handles the failure of an IOA bringup command.
6977 *
6978 * Return value:
6979 * IPR_RC_JOB_RETURN
6980 **/
6981static int ipr_reset_cmd_failed(struct ipr_cmnd *ipr_cmd)
6982{
6983 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
96d21f00 6984 u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
6985
6986 dev_err(&ioa_cfg->pdev->dev,
6987 "0x%02X failed with IOASC: 0x%08X\n",
6988 ipr_cmd->ioarcb.cmd_pkt.cdb[0], ioasc);
6989
6990 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
05a6538a 6991 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
6992 return IPR_RC_JOB_RETURN;
6993}
6994
6995/**
6996 * ipr_reset_mode_sense_failed - Handle failure of IOAFP mode sense
6997 * @ipr_cmd: ipr command struct
6998 *
6999 * This function handles the failure of a Mode Sense to the IOAFP.
7000 * Some adapters do not handle all mode pages.
7001 *
7002 * Return value:
7003 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
7004 **/
7005static int ipr_reset_mode_sense_failed(struct ipr_cmnd *ipr_cmd)
7006{
f72919ec 7007 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
96d21f00 7008 u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
7009
7010 if (ioasc == IPR_IOASC_IR_INVALID_REQ_TYPE_OR_PKT) {
7011 ipr_cmd->job_step = ipr_set_supported_devs;
7012 ipr_cmd->u.res = list_entry(ioa_cfg->used_res_q.next,
7013 struct ipr_resource_entry, queue);
7014 return IPR_RC_JOB_CONTINUE;
7015 }
7016
7017 return ipr_reset_cmd_failed(ipr_cmd);
7018}
7019
7020/**
7021 * ipr_ioafp_mode_sense_page28 - Issue Mode Sense Page 28 to IOA
7022 * @ipr_cmd: ipr command struct
7023 *
7024 * This function sends a Page 28 mode sense to the IOA to
7025 * retrieve SCSI bus attributes.
7026 *
7027 * Return value:
7028 * IPR_RC_JOB_RETURN
7029 **/
7030static int ipr_ioafp_mode_sense_page28(struct ipr_cmnd *ipr_cmd)
7031{
7032 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7033
7034 ENTER;
7035 ipr_build_mode_sense(ipr_cmd, cpu_to_be32(IPR_IOA_RES_HANDLE),
7036 0x28, ioa_cfg->vpd_cbs_dma +
7037 offsetof(struct ipr_misc_cbs, mode_pages),
7038 sizeof(struct ipr_mode_pages));
7039
7040 ipr_cmd->job_step = ipr_ioafp_mode_select_page28;
dfed823e 7041 ipr_cmd->job_step_failed = ipr_reset_mode_sense_failed;
7042
7043 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
7044
7045 LEAVE;
7046 return IPR_RC_JOB_RETURN;
7047}
7048
7049/**
7050 * ipr_ioafp_mode_select_page24 - Issue Mode Select to IOA
7051 * @ipr_cmd: ipr command struct
7052 *
7053 * This function enables dual IOA RAID support if possible.
7054 *
7055 * Return value:
7056 * IPR_RC_JOB_RETURN
7057 **/
7058static int ipr_ioafp_mode_select_page24(struct ipr_cmnd *ipr_cmd)
7059{
7060 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7061 struct ipr_mode_pages *mode_pages = &ioa_cfg->vpd_cbs->mode_pages;
7062 struct ipr_mode_page24 *mode_page;
7063 int length;
7064
7065 ENTER;
7066 mode_page = ipr_get_mode_page(mode_pages, 0x24,
7067 sizeof(struct ipr_mode_page24));
7068
7069 if (mode_page)
7070 mode_page->flags |= IPR_ENABLE_DUAL_IOA_AF;
7071
7072 length = mode_pages->hdr.length + 1;
7073 mode_pages->hdr.length = 0;
7074
7075 ipr_build_mode_select(ipr_cmd, cpu_to_be32(IPR_IOA_RES_HANDLE), 0x11,
7076 ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, mode_pages),
7077 length);
7078
7079 ipr_cmd->job_step = ipr_ioafp_mode_sense_page28;
7080 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
7081
7082 LEAVE;
7083 return IPR_RC_JOB_RETURN;
7084}
7085
7086/**
7087 * ipr_reset_mode_sense_page24_failed - Handle failure of IOAFP mode sense
7088 * @ipr_cmd: ipr command struct
7089 *
7090 * This function handles the failure of a Mode Sense to the IOAFP.
7091 * Some adapters do not handle all mode pages.
7092 *
7093 * Return value:
7094 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
7095 **/
7096static int ipr_reset_mode_sense_page24_failed(struct ipr_cmnd *ipr_cmd)
7097{
96d21f00 7098 u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
7099
7100 if (ioasc == IPR_IOASC_IR_INVALID_REQ_TYPE_OR_PKT) {
7101 ipr_cmd->job_step = ipr_ioafp_mode_sense_page28;
7102 return IPR_RC_JOB_CONTINUE;
7103 }
7104
7105 return ipr_reset_cmd_failed(ipr_cmd);
7106}
7107
7108/**
7109 * ipr_ioafp_mode_sense_page24 - Issue Page 24 Mode Sense to IOA
7110 * @ipr_cmd: ipr command struct
7111 *
7112 * This function sends a mode sense to the IOA to retrieve
7113 * the IOA Advanced Function Control mode page.
7114 *
7115 * Return value:
7116 * IPR_RC_JOB_RETURN
7117 **/
7118static int ipr_ioafp_mode_sense_page24(struct ipr_cmnd *ipr_cmd)
7119{
7120 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7121
7122 ENTER;
7123 ipr_build_mode_sense(ipr_cmd, cpu_to_be32(IPR_IOA_RES_HANDLE),
7124 0x24, ioa_cfg->vpd_cbs_dma +
7125 offsetof(struct ipr_misc_cbs, mode_pages),
7126 sizeof(struct ipr_mode_pages));
7127
7128 ipr_cmd->job_step = ipr_ioafp_mode_select_page24;
7129 ipr_cmd->job_step_failed = ipr_reset_mode_sense_page24_failed;
7130
7131 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
7132
7133 LEAVE;
7134 return IPR_RC_JOB_RETURN;
7135}
7136
7137/**
7138 * ipr_init_res_table - Initialize the resource table
7139 * @ipr_cmd: ipr command struct
7140 *
7141 * This function looks through the existing resource table, comparing
7142 * it with the config table. This function will take care of old/new
7143 * devices and schedule adding/removing them from the mid-layer
7144 * as appropriate.
7145 *
7146 * Return value:
7147 * IPR_RC_JOB_CONTINUE
7148 **/
7149static int ipr_init_res_table(struct ipr_cmnd *ipr_cmd)
7150{
7151 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7152 struct ipr_resource_entry *res, *temp;
7153 struct ipr_config_table_entry_wrapper cfgtew;
7154 int entries, found, flag, i;
7155 LIST_HEAD(old_res);
7156
7157 ENTER;
7158 if (ioa_cfg->sis64)
7159 flag = ioa_cfg->u.cfg_table64->hdr64.flags;
7160 else
7161 flag = ioa_cfg->u.cfg_table->hdr.flags;
7162
7163 if (flag & IPR_UCODE_DOWNLOAD_REQ)
7164 dev_err(&ioa_cfg->pdev->dev, "Microcode download required\n");
7165
7166 list_for_each_entry_safe(res, temp, &ioa_cfg->used_res_q, queue)
7167 list_move_tail(&res->queue, &old_res);
7168
3e7ebdfa 7169 if (ioa_cfg->sis64)
438b0331 7170 entries = be16_to_cpu(ioa_cfg->u.cfg_table64->hdr64.num_entries);
7171 else
7172 entries = ioa_cfg->u.cfg_table->hdr.num_entries;
7173
7174 for (i = 0; i < entries; i++) {
7175 if (ioa_cfg->sis64)
7176 cfgtew.u.cfgte64 = &ioa_cfg->u.cfg_table64->dev[i];
7177 else
7178 cfgtew.u.cfgte = &ioa_cfg->u.cfg_table->dev[i];
7179 found = 0;
7180
7181 list_for_each_entry_safe(res, temp, &old_res, queue) {
3e7ebdfa 7182 if (ipr_is_same_device(res, &cfgtew)) {
7183 list_move_tail(&res->queue, &ioa_cfg->used_res_q);
7184 found = 1;
7185 break;
7186 }
7187 }
7188
7189 if (!found) {
7190 if (list_empty(&ioa_cfg->free_res_q)) {
7191 dev_err(&ioa_cfg->pdev->dev, "Too many devices attached\n");
7192 break;
7193 }
7194
7195 found = 1;
7196 res = list_entry(ioa_cfg->free_res_q.next,
7197 struct ipr_resource_entry, queue);
7198 list_move_tail(&res->queue, &ioa_cfg->used_res_q);
3e7ebdfa 7199 ipr_init_res_entry(res, &cfgtew);
1da177e4 7200 res->add_to_ml = 1;
7201 } else if (res->sdev && (ipr_is_vset_device(res) || ipr_is_scsi_disk(res)))
7202 res->sdev->allow_restart = 1;
7203
7204 if (found)
3e7ebdfa 7205 ipr_update_res_entry(res, &cfgtew);
7206 }
7207
7208 list_for_each_entry_safe(res, temp, &old_res, queue) {
7209 if (res->sdev) {
7210 res->del_from_ml = 1;
3e7ebdfa 7211 res->res_handle = IPR_INVALID_RES_HANDLE;
1da177e4 7212 list_move_tail(&res->queue, &ioa_cfg->used_res_q);
7213 }
7214 }
7215
7216 list_for_each_entry_safe(res, temp, &old_res, queue) {
7217 ipr_clear_res_target(res);
7218 list_move_tail(&res->queue, &ioa_cfg->free_res_q);
7219 }
7220
7221 if (ioa_cfg->dual_raid && ipr_dual_ioa_raid)
7222 ipr_cmd->job_step = ipr_ioafp_mode_sense_page24;
7223 else
7224 ipr_cmd->job_step = ipr_ioafp_mode_sense_page28;
7225
7226 LEAVE;
7227 return IPR_RC_JOB_CONTINUE;
7228}
7229
7230/**
7231 * ipr_ioafp_query_ioa_cfg - Send a Query IOA Config to the adapter.
7232 * @ipr_cmd: ipr command struct
7233 *
7234 * This function sends a Query IOA Configuration command
7235 * to the adapter to retrieve the IOA configuration table.
7236 *
7237 * Return value:
7238 * IPR_RC_JOB_RETURN
7239 **/
7240static int ipr_ioafp_query_ioa_cfg(struct ipr_cmnd *ipr_cmd)
7241{
7242 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7243 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
1da177e4 7244 struct ipr_inquiry_page3 *ucode_vpd = &ioa_cfg->vpd_cbs->page3_data;
ac09c349 7245 struct ipr_inquiry_cap *cap = &ioa_cfg->vpd_cbs->cap;
7246
7247 ENTER;
7248 if (cap->cap & IPR_CAP_DUAL_IOA_RAID)
7249 ioa_cfg->dual_raid = 1;
7250 dev_info(&ioa_cfg->pdev->dev, "Adapter firmware version: %02X%02X%02X%02X\n",
7251 ucode_vpd->major_release, ucode_vpd->card_type,
7252 ucode_vpd->minor_release[0], ucode_vpd->minor_release[1]);
7253 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
7254 ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
7255
7256 ioarcb->cmd_pkt.cdb[0] = IPR_QUERY_IOA_CONFIG;
438b0331 7257 ioarcb->cmd_pkt.cdb[6] = (ioa_cfg->cfg_table_size >> 16) & 0xff;
7258 ioarcb->cmd_pkt.cdb[7] = (ioa_cfg->cfg_table_size >> 8) & 0xff;
7259 ioarcb->cmd_pkt.cdb[8] = ioa_cfg->cfg_table_size & 0xff;
1da177e4 7260
3e7ebdfa 7261 ipr_init_ioadl(ipr_cmd, ioa_cfg->cfg_table_dma, ioa_cfg->cfg_table_size,
a32c055f 7262 IPR_IOADL_FLAGS_READ_LAST);
7263
7264 ipr_cmd->job_step = ipr_init_res_table;
7265
7266 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
7267
7268 LEAVE;
7269 return IPR_RC_JOB_RETURN;
7270}
7271
7272/**
7273 * ipr_ioafp_inquiry - Send an Inquiry to the adapter.
7274 * @ipr_cmd: ipr command struct
 * @flags: inquiry flags (e.g. the EVPD bit)
 * @page: page code to request
 * @dma_addr: DMA address of the inquiry response buffer
 * @xfer_len: size of the response buffer
7275 *
7276 * This utility function sends an inquiry to the adapter.
7277 *
7278 * Return value:
7279 * none
7280 **/
7281static void ipr_ioafp_inquiry(struct ipr_cmnd *ipr_cmd, u8 flags, u8 page,
a32c055f 7282 dma_addr_t dma_addr, u8 xfer_len)
7283{
7284 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
7285
7286 ENTER;
7287 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_SCSICDB;
7288 ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
7289
7290 ioarcb->cmd_pkt.cdb[0] = INQUIRY;
7291 ioarcb->cmd_pkt.cdb[1] = flags;
7292 ioarcb->cmd_pkt.cdb[2] = page;
7293 ioarcb->cmd_pkt.cdb[4] = xfer_len;
7294
a32c055f 7295 ipr_init_ioadl(ipr_cmd, dma_addr, xfer_len, IPR_IOADL_FLAGS_READ_LAST);
7296
7297 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
7298 LEAVE;
7299}
7300
7301/**
7302 * ipr_inquiry_page_supported - Is the given inquiry page supported
7303 * @page0: inquiry page 0 buffer
7304 * @page: page code.
7305 *
7306 * This function determines if the specified inquiry page is supported.
7307 *
7308 * Return value:
7309 * 1 if page is supported / 0 if not
7310 **/
7311static int ipr_inquiry_page_supported(struct ipr_inquiry_page0 *page0, u8 page)
7312{
7313 int i;
7314
7315 for (i = 0; i < min_t(u8, page0->len, IPR_INQUIRY_PAGE0_ENTRIES); i++)
7316 if (page0->page[i] == page)
7317 return 1;
7318
7319 return 0;
7320}
7321
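/*
 * For reference, the same test against a raw INQUIRY page 0
 * ("supported VPD pages") response: byte 3 is the page list length
 * and the page codes follow from byte 4, which is the layout that
 * struct ipr_inquiry_page0 wraps (minus the driver's cap at
 * IPR_INQUIRY_PAGE0_ENTRIES). Illustrative helper, not driver code:
 */
#include <stdint.h>

static int vpd_page_supported(const uint8_t *page0, uint8_t page)
{
	uint8_t i;

	for (i = 0; i < page0[3]; i++)	/* byte 3: page list length */
		if (page0[4 + i] == page)
			return 1;
	return 0;
}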
7322/**
7323 * ipr_ioafp_cap_inquiry - Send a Page 0xD0 Inquiry to the adapter.
7324 * @ipr_cmd: ipr command struct
7325 *
7326 * This function sends a Page 0xD0 inquiry to the adapter
7327 * to retrieve adapter capabilities.
7328 *
7329 * Return value:
7330 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
7331 **/
7332static int ipr_ioafp_cap_inquiry(struct ipr_cmnd *ipr_cmd)
7333{
7334 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7335 struct ipr_inquiry_page0 *page0 = &ioa_cfg->vpd_cbs->page0_data;
7336 struct ipr_inquiry_cap *cap = &ioa_cfg->vpd_cbs->cap;
7337
7338 ENTER;
7339 ipr_cmd->job_step = ipr_ioafp_query_ioa_cfg;
7340 memset(cap, 0, sizeof(*cap));
7341
7342 if (ipr_inquiry_page_supported(page0, 0xD0)) {
7343 ipr_ioafp_inquiry(ipr_cmd, 1, 0xD0,
7344 ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, cap),
7345 sizeof(struct ipr_inquiry_cap));
7346 return IPR_RC_JOB_RETURN;
7347 }
7348
7349 LEAVE;
7350 return IPR_RC_JOB_CONTINUE;
7351}
7352
7353/**
7354 * ipr_ioafp_page3_inquiry - Send a Page 3 Inquiry to the adapter.
7355 * @ipr_cmd: ipr command struct
7356 *
7357 * This function sends a Page 3 inquiry to the adapter
7358 * to retrieve software VPD information.
7359 *
7360 * Return value:
7361 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
7362 **/
7363static int ipr_ioafp_page3_inquiry(struct ipr_cmnd *ipr_cmd)
7364{
7365 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7366
7367 ENTER;
7368
ac09c349 7369 ipr_cmd->job_step = ipr_ioafp_cap_inquiry;
7370
7371 ipr_ioafp_inquiry(ipr_cmd, 1, 3,
7372 ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, page3_data),
7373 sizeof(struct ipr_inquiry_page3));
7374
7375 LEAVE;
7376 return IPR_RC_JOB_RETURN;
7377}
7378
7379/**
7380 * ipr_ioafp_page0_inquiry - Send a Page 0 Inquiry to the adapter.
7381 * @ipr_cmd: ipr command struct
7382 *
7383 * This function sends a Page 0 inquiry to the adapter
7384 * to retrieve supported inquiry pages.
7385 *
7386 * Return value:
7387 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
7388 **/
7389static int ipr_ioafp_page0_inquiry(struct ipr_cmnd *ipr_cmd)
7390{
7391 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7392 char type[5];
7393
7394 ENTER;
7395
7396 /* Grab the type out of the VPD and store it away */
7397 memcpy(type, ioa_cfg->vpd_cbs->ioa_vpd.std_inq_data.vpids.product_id, 4);
7398 type[4] = '\0';
7399 ioa_cfg->type = simple_strtoul((char *)type, NULL, 16);
7400
62275040 7401 ipr_cmd->job_step = ipr_ioafp_page3_inquiry;
1da177e4 7402
7403 ipr_ioafp_inquiry(ipr_cmd, 1, 0,
7404 ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, page0_data),
7405 sizeof(struct ipr_inquiry_page0));
7406
7407 LEAVE;
7408 return IPR_RC_JOB_RETURN;
7409}
7410
7411/**
7412 * ipr_ioafp_std_inquiry - Send a Standard Inquiry to the adapter.
7413 * @ipr_cmd: ipr command struct
7414 *
7415 * This function sends a standard inquiry to the adapter.
7416 *
7417 * Return value:
7418 * IPR_RC_JOB_RETURN
7419 **/
7420static int ipr_ioafp_std_inquiry(struct ipr_cmnd *ipr_cmd)
7421{
7422 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7423
7424 ENTER;
62275040 7425 ipr_cmd->job_step = ipr_ioafp_page0_inquiry;
7426
7427 ipr_ioafp_inquiry(ipr_cmd, 0, 0,
7428 ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, ioa_vpd),
7429 sizeof(struct ipr_ioa_vpd));
7430
7431 LEAVE;
7432 return IPR_RC_JOB_RETURN;
7433}
7434
7435/**
214777ba 7436 * ipr_ioafp_identify_hrrq - Send Identify Host RRQ.
7437 * @ipr_cmd: ipr command struct
7438 *
7439 * This function sends an Identify Host Request Response Queue
7440 * command to establish the HRRQ with the adapter.
7441 *
7442 * Return value:
7443 * IPR_RC_JOB_RETURN
7444 **/
214777ba 7445static int ipr_ioafp_identify_hrrq(struct ipr_cmnd *ipr_cmd)
7446{
7447 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7448 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
05a6538a 7449 struct ipr_hrr_queue *hrrq;
7450
7451 ENTER;
05a6538a 7452 ipr_cmd->job_step = ipr_ioafp_std_inquiry;
7453 dev_info(&ioa_cfg->pdev->dev, "Starting IOA initialization sequence.\n");
7454
05a6538a 7455 if (ioa_cfg->hrrq_index < ioa_cfg->hrrq_num) {
7456 hrrq = &ioa_cfg->hrrq[ioa_cfg->hrrq_index];
1da177e4 7457
05a6538a 7458 ioarcb->cmd_pkt.cdb[0] = IPR_ID_HOST_RR_Q;
7459 ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
1da177e4 7460
05a6538a 7461 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
7462 if (ioa_cfg->sis64)
7463 ioarcb->cmd_pkt.cdb[1] = 0x1;
214777ba 7464
05a6538a 7465 if (ioa_cfg->nvectors == 1)
7466 ioarcb->cmd_pkt.cdb[1] &= ~IPR_ID_HRRQ_SELE_ENABLE;
7467 else
7468 ioarcb->cmd_pkt.cdb[1] |= IPR_ID_HRRQ_SELE_ENABLE;
7469
7470 ioarcb->cmd_pkt.cdb[2] =
7471 ((u64) hrrq->host_rrq_dma >> 24) & 0xff;
7472 ioarcb->cmd_pkt.cdb[3] =
7473 ((u64) hrrq->host_rrq_dma >> 16) & 0xff;
7474 ioarcb->cmd_pkt.cdb[4] =
7475 ((u64) hrrq->host_rrq_dma >> 8) & 0xff;
7476 ioarcb->cmd_pkt.cdb[5] =
7477 ((u64) hrrq->host_rrq_dma) & 0xff;
7478 ioarcb->cmd_pkt.cdb[7] =
7479 ((sizeof(u32) * hrrq->size) >> 8) & 0xff;
7480 ioarcb->cmd_pkt.cdb[8] =
7481 (sizeof(u32) * hrrq->size) & 0xff;
7482
7483 if (ioarcb->cmd_pkt.cdb[1] & IPR_ID_HRRQ_SELE_ENABLE)
7484 ioarcb->cmd_pkt.cdb[9] = ioa_cfg->hrrq_index;
1da177e4 7485
05a6538a 7486 if (ioa_cfg->sis64) {
7487 ioarcb->cmd_pkt.cdb[10] =
7488 ((u64) hrrq->host_rrq_dma >> 56) & 0xff;
7489 ioarcb->cmd_pkt.cdb[11] =
7490 ((u64) hrrq->host_rrq_dma >> 48) & 0xff;
7491 ioarcb->cmd_pkt.cdb[12] =
7492 ((u64) hrrq->host_rrq_dma >> 40) & 0xff;
7493 ioarcb->cmd_pkt.cdb[13] =
7494 ((u64) hrrq->host_rrq_dma >> 32) & 0xff;
7495 }
7496
7497 if (ioarcb->cmd_pkt.cdb[1] & IPR_ID_HRRQ_SELE_ENABLE)
7498 ioarcb->cmd_pkt.cdb[14] = ioa_cfg->hrrq_index;
7499
7500 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout,
7501 IPR_INTERNAL_TIMEOUT);
7502
7503 if (++ioa_cfg->hrrq_index < ioa_cfg->hrrq_num)
7504 ipr_cmd->job_step = ipr_ioafp_identify_hrrq;
7505
7506 LEAVE;
7507 return IPR_RC_JOB_RETURN;
7508
7509 }
7510
7511 if (ioa_cfg->hrrq_num == 1)
7512 ioa_cfg->hrrq_index = 0;
7513 else
7514 ioa_cfg->hrrq_index = 1;
7515
7516 LEAVE;
05a6538a 7517 return IPR_RC_JOB_CONTINUE;
7518}
7519
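/*
 * The cdb[] stuffing above is simply a big-endian scatter of the HRRQ
 * DMA address (bytes 2..5 carry bits 31:0 and, on SIS-64, bytes
 * 10..13 carry bits 63:32) and of the queue size in bytes. A generic
 * helper showing the same packing (hypothetical, not used by the
 * driver):
 */
static void put_be_bytes(u8 *dst, u64 val, int nbytes)
{
	int i;

	/* dst[0] receives the most significant byte of the run */
	for (i = 0; i < nbytes; i++)
		dst[i] = (val >> (8 * (nbytes - 1 - i))) & 0xff;
}

/* e.g. put_be_bytes(&cdb[2], (u64) hrrq->host_rrq_dma, 4) mirrors cdb[2..5] */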
7520/**
7521 * ipr_reset_timer_done - Adapter reset timer function
7522 * @ipr_cmd: ipr command struct
7523 *
7524 * Description: This function is used in adapter reset processing
7525 * for timing events. If the reset_cmd pointer in the IOA
7526 * config struct is not this adapter's, we are doing nested
7527 * resets and fail_all_ops will take care of freeing the
7528 * command block.
7529 *
7530 * Return value:
7531 * none
7532 **/
7533static void ipr_reset_timer_done(struct ipr_cmnd *ipr_cmd)
7534{
7535 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7536 unsigned long lock_flags = 0;
7537
7538 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
7539
7540 if (ioa_cfg->reset_cmd == ipr_cmd) {
7541 list_del(&ipr_cmd->queue);
7542 ipr_cmd->done(ipr_cmd);
7543 }
7544
7545 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
7546}
7547
7548/**
7549 * ipr_reset_start_timer - Start a timer for adapter reset job
7550 * @ipr_cmd: ipr command struct
7551 * @timeout: timeout value
7552 *
7553 * Description: This function is used in adapter reset processing
7554 * for timing events. If the reset_cmd pointer in the IOA
7555 * config struct is not this adapter's, we are doing nested
7556 * resets and fail_all_ops will take care of freeing the
7557 * command block.
7558 *
7559 * Return value:
7560 * none
7561 **/
7562static void ipr_reset_start_timer(struct ipr_cmnd *ipr_cmd,
7563 unsigned long timeout)
7564{
05a6538a 7565
7566 ENTER;
7567 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_pending_q);
7568 ipr_cmd->done = ipr_reset_ioa_job;
7569
7570 ipr_cmd->timer.data = (unsigned long) ipr_cmd;
7571 ipr_cmd->timer.expires = jiffies + timeout;
7572 ipr_cmd->timer.function = (void (*)(unsigned long))ipr_reset_timer_done;
7573 add_timer(&ipr_cmd->timer);
05a6538a 7574 LEAVE;
7575}
7576
7577/**
7578 * ipr_init_ioa_mem - Initialize ioa_cfg control block
7579 * @ioa_cfg: ioa cfg struct
7580 *
7581 * Return value:
7582 * nothing
7583 **/
7584static void ipr_init_ioa_mem(struct ipr_ioa_cfg *ioa_cfg)
7585{
05a6538a 7586 struct ipr_hrr_queue *hrrq;
1da177e4 7587
05a6538a 7588 for_each_hrrq(hrrq, ioa_cfg) {
7589 memset(hrrq->host_rrq, 0, sizeof(u32) * hrrq->size);
7590
7591 /* Initialize Host RRQ pointers */
7592 hrrq->hrrq_start = hrrq->host_rrq;
7593 hrrq->hrrq_end = &hrrq->host_rrq[hrrq->size - 1];
7594 hrrq->hrrq_curr = hrrq->hrrq_start;
7595 hrrq->toggle_bit = 1;
7596 }
7597
7598 ioa_cfg->hrrq_index = 0;
7599
7600 /* Zero out config table */
3e7ebdfa 7601 memset(ioa_cfg->u.cfg_table, 0, ioa_cfg->cfg_table_size);
7602}
7603
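/*
 * The toggle bit initialized above is how completions are detected
 * without a separate producer index: the adapter stamps every
 * response word with the current toggle value, and the expected value
 * flips each time the host wraps the circular queue. A simplified
 * consumer loop in the shape of ipr_isr (no locking or command
 * completion shown; sketch only):
 */
static void drain_hrrq_sketch(struct ipr_hrr_queue *hrrq)
{
	while ((be32_to_cpu(*hrrq->hrrq_curr) & IPR_HRRQ_TOGGLE_BIT) ==
	       hrrq->toggle_bit) {
		u32 cmd_index = (be32_to_cpu(*hrrq->hrrq_curr) &
				 IPR_HRRQ_REQ_RESP_HANDLE_MASK) >>
				IPR_HRRQ_REQ_RESP_HANDLE_SHIFT;

		/* ... look up and complete ipr_cmnd_list[cmd_index] ... */
		(void) cmd_index;

		if (hrrq->hrrq_curr < hrrq->hrrq_end) {
			hrrq->hrrq_curr++;
		} else {
			hrrq->hrrq_curr = hrrq->hrrq_start;
			hrrq->toggle_bit ^= 1u;	/* wrapped: flip expectation */
		}
	}
}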
7604/**
7605 * ipr_reset_next_stage - Process IPL stage change based on feedback register.
7606 * @ipr_cmd: ipr command struct
7607 *
7608 * Return value:
7609 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
7610 **/
7611static int ipr_reset_next_stage(struct ipr_cmnd *ipr_cmd)
7612{
7613 unsigned long stage, stage_time;
7614 u32 feedback;
7615 volatile u32 int_reg;
7616 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7617 u64 maskval = 0;
7618
7619 feedback = readl(ioa_cfg->regs.init_feedback_reg);
7620 stage = feedback & IPR_IPL_INIT_STAGE_MASK;
7621 stage_time = feedback & IPR_IPL_INIT_STAGE_TIME_MASK;
7622
7623 ipr_dbg("IPL stage = 0x%lx, IPL stage time = %ld\n", stage, stage_time);
7624
7625 /* sanity check the stage_time value */
7626 if (stage_time == 0)
7627 stage_time = IPR_IPL_INIT_DEFAULT_STAGE_TIME;
7628 else if (stage_time < IPR_IPL_INIT_MIN_STAGE_TIME)
7629 stage_time = IPR_IPL_INIT_MIN_STAGE_TIME;
7630 else if (stage_time > IPR_LONG_OPERATIONAL_TIMEOUT)
7631 stage_time = IPR_LONG_OPERATIONAL_TIMEOUT;
7632
7633 if (stage == IPR_IPL_INIT_STAGE_UNKNOWN) {
7634 writel(IPR_PCII_IPL_STAGE_CHANGE, ioa_cfg->regs.set_interrupt_mask_reg);
7635 int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
7636 stage_time = ioa_cfg->transop_timeout;
7637 ipr_cmd->job_step = ipr_ioafp_identify_hrrq;
7638 } else if (stage == IPR_IPL_INIT_STAGE_TRANSOP) {
7639 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg32);
7640 if (int_reg & IPR_PCII_IOA_TRANS_TO_OPER) {
7641 ipr_cmd->job_step = ipr_ioafp_identify_hrrq;
7642 maskval = IPR_PCII_IPL_STAGE_CHANGE;
7643 maskval = (maskval << 32) | IPR_PCII_IOA_TRANS_TO_OPER;
7644 writeq(maskval, ioa_cfg->regs.set_interrupt_mask_reg);
7645 int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
7646 return IPR_RC_JOB_CONTINUE;
7647 }
7648 }
7649
7650 ipr_cmd->timer.data = (unsigned long) ipr_cmd;
7651 ipr_cmd->timer.expires = jiffies + stage_time * HZ;
7652 ipr_cmd->timer.function = (void (*)(unsigned long))ipr_oper_timeout;
7653 ipr_cmd->done = ipr_reset_ioa_job;
7654 add_timer(&ipr_cmd->timer);
05a6538a 7655
7656 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_pending_q);
7657
7658 return IPR_RC_JOB_RETURN;
7659}
7660
7661/**
7662 * ipr_reset_enable_ioa - Enable the IOA following a reset.
7663 * @ipr_cmd: ipr command struct
7664 *
7665 * This function reinitializes some control blocks and
7666 * enables destructive diagnostics on the adapter.
7667 *
7668 * Return value:
7669 * IPR_RC_JOB_RETURN
7670 **/
7671static int ipr_reset_enable_ioa(struct ipr_cmnd *ipr_cmd)
7672{
7673 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7674 volatile u32 int_reg;
7be96900 7675 volatile u64 maskval;
7676
7677 ENTER;
214777ba 7678 ipr_cmd->job_step = ipr_ioafp_identify_hrrq;
7679 ipr_init_ioa_mem(ioa_cfg);
7680
7681 ioa_cfg->allow_interrupts = 1;
7682 if (ioa_cfg->sis64) {
7683 /* Set the adapter to the correct endian mode. */
7684 writel(IPR_ENDIAN_SWAP_KEY, ioa_cfg->regs.endian_swap_reg);
7685 int_reg = readl(ioa_cfg->regs.endian_swap_reg);
7686 }
7687
7be96900 7688 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg32);
7689
7690 if (int_reg & IPR_PCII_IOA_TRANS_TO_OPER) {
7691 writel((IPR_PCII_ERROR_INTERRUPTS | IPR_PCII_HRRQ_UPDATED),
214777ba 7692 ioa_cfg->regs.clr_interrupt_mask_reg32);
7693 int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
7694 return IPR_RC_JOB_CONTINUE;
7695 }
7696
7697 /* Enable destructive diagnostics on IOA */
7698 writel(ioa_cfg->doorbell, ioa_cfg->regs.set_uproc_interrupt_reg32);
7699
7700 if (ioa_cfg->sis64) {
7701 maskval = IPR_PCII_IPL_STAGE_CHANGE;
7702 maskval = (maskval << 32) | IPR_PCII_OPER_INTERRUPTS;
7703 writeq(maskval, ioa_cfg->regs.clr_interrupt_mask_reg);
7704 } else
7705 writel(IPR_PCII_OPER_INTERRUPTS, ioa_cfg->regs.clr_interrupt_mask_reg32);
1da177e4 7706
7707 int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
7708
7709 dev_info(&ioa_cfg->pdev->dev, "Initializing IOA.\n");
7710
7711 if (ioa_cfg->sis64) {
7712 ipr_cmd->job_step = ipr_reset_next_stage;
7713 return IPR_RC_JOB_CONTINUE;
7714 }
7715
1da177e4 7716 ipr_cmd->timer.data = (unsigned long) ipr_cmd;
5469cb5b 7717 ipr_cmd->timer.expires = jiffies + (ioa_cfg->transop_timeout * HZ);
7718 ipr_cmd->timer.function = (void (*)(unsigned long))ipr_oper_timeout;
7719 ipr_cmd->done = ipr_reset_ioa_job;
7720 add_timer(&ipr_cmd->timer);
05a6538a 7721 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_pending_q);
7722
7723 LEAVE;
7724 return IPR_RC_JOB_RETURN;
7725}
7726
7727/**
7728 * ipr_reset_wait_for_dump - Wait for a dump to timeout.
7729 * @ipr_cmd: ipr command struct
7730 *
7731 * This function is invoked when an adapter dump has run out
7732 * of processing time.
7733 *
7734 * Return value:
7735 * IPR_RC_JOB_CONTINUE
7736 **/
7737static int ipr_reset_wait_for_dump(struct ipr_cmnd *ipr_cmd)
7738{
7739 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7740
7741 if (ioa_cfg->sdt_state == GET_DUMP)
7742 ioa_cfg->sdt_state = WAIT_FOR_DUMP;
7743 else if (ioa_cfg->sdt_state == READ_DUMP)
7744 ioa_cfg->sdt_state = ABORT_DUMP;
7745
4c647e90 7746 ioa_cfg->dump_timeout = 1;
7747 ipr_cmd->job_step = ipr_reset_alert;
7748
7749 return IPR_RC_JOB_CONTINUE;
7750}
7751
7752/**
7753 * ipr_unit_check_no_data - Log a unit check/no data error log
7754 * @ioa_cfg: ioa config struct
7755 *
7756 * Logs an error indicating the adapter unit checked, but for some
7757 * reason, we were unable to fetch the unit check buffer.
7758 *
7759 * Return value:
7760 * nothing
7761 **/
7762static void ipr_unit_check_no_data(struct ipr_ioa_cfg *ioa_cfg)
7763{
7764 ioa_cfg->errors_logged++;
7765 dev_err(&ioa_cfg->pdev->dev, "IOA unit check with no data\n");
7766}
7767
7768/**
7769 * ipr_get_unit_check_buffer - Get the unit check buffer from the IOA
7770 * @ioa_cfg: ioa config struct
7771 *
7772 * Fetches the unit check buffer from the adapter by clocking the data
7773 * through the mailbox register.
7774 *
7775 * Return value:
7776 * nothing
7777 **/
7778static void ipr_get_unit_check_buffer(struct ipr_ioa_cfg *ioa_cfg)
7779{
7780 unsigned long mailbox;
7781 struct ipr_hostrcb *hostrcb;
7782 struct ipr_uc_sdt sdt;
7783 int rc, length;
65f56475 7784 u32 ioasc;
7785
7786 mailbox = readl(ioa_cfg->ioa_mailbox);
7787
dcbad00e 7788 if (!ioa_cfg->sis64 && !ipr_sdt_is_fmt2(mailbox)) {
7789 ipr_unit_check_no_data(ioa_cfg);
7790 return;
7791 }
7792
7793 memset(&sdt, 0, sizeof(struct ipr_uc_sdt));
7794 rc = ipr_get_ldump_data_section(ioa_cfg, mailbox, (__be32 *) &sdt,
7795 (sizeof(struct ipr_uc_sdt)) / sizeof(__be32));
7796
7797 if (rc || !(sdt.entry[0].flags & IPR_SDT_VALID_ENTRY) ||
7798 ((be32_to_cpu(sdt.hdr.state) != IPR_FMT3_SDT_READY_TO_USE) &&
7799 (be32_to_cpu(sdt.hdr.state) != IPR_FMT2_SDT_READY_TO_USE))) {
7800 ipr_unit_check_no_data(ioa_cfg);
7801 return;
7802 }
7803
7804 /* Find length of the first sdt entry (UC buffer) */
7805 if (be32_to_cpu(sdt.hdr.state) == IPR_FMT3_SDT_READY_TO_USE)
7806 length = be32_to_cpu(sdt.entry[0].end_token);
7807 else
7808 length = (be32_to_cpu(sdt.entry[0].end_token) -
7809 be32_to_cpu(sdt.entry[0].start_token)) &
7810 IPR_FMT2_MBX_ADDR_MASK;
7811
7812 hostrcb = list_entry(ioa_cfg->hostrcb_free_q.next,
7813 struct ipr_hostrcb, queue);
7814 list_del(&hostrcb->queue);
7815 memset(&hostrcb->hcam, 0, sizeof(hostrcb->hcam));
7816
7817 rc = ipr_get_ldump_data_section(ioa_cfg,
dcbad00e 7818 be32_to_cpu(sdt.entry[0].start_token),
7819 (__be32 *)&hostrcb->hcam,
7820 min(length, (int)sizeof(hostrcb->hcam)) / sizeof(__be32));
7821
65f56475 7822 if (!rc) {
1da177e4 7823 ipr_handle_log_data(ioa_cfg, hostrcb);
4565e370 7824 ioasc = be32_to_cpu(hostrcb->hcam.u.error.fd_ioasc);
7825 if (ioasc == IPR_IOASC_NR_IOA_RESET_REQUIRED &&
7826 ioa_cfg->sdt_state == GET_DUMP)
7827 ioa_cfg->sdt_state = WAIT_FOR_DUMP;
7828 } else
7829 ipr_unit_check_no_data(ioa_cfg);
7830
7831 list_add_tail(&hostrcb->queue, &ioa_cfg->hostrcb_free_q);
7832}
7833
7834/**
7835 * ipr_reset_get_unit_check_job - Call to get the unit check buffer.
7836 * @ipr_cmd: ipr command struct
7837 *
7838 * Description: This function will call to get the unit check buffer.
7839 *
7840 * Return value:
7841 * IPR_RC_JOB_RETURN
7842 **/
7843static int ipr_reset_get_unit_check_job(struct ipr_cmnd *ipr_cmd)
7844{
7845 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7846
7847 ENTER;
7848 ioa_cfg->ioa_unit_checked = 0;
7849 ipr_get_unit_check_buffer(ioa_cfg);
7850 ipr_cmd->job_step = ipr_reset_alert;
7851 ipr_reset_start_timer(ipr_cmd, 0);
7852
7853 LEAVE;
7854 return IPR_RC_JOB_RETURN;
7855}
7856
7857/**
7858 * ipr_reset_restore_cfg_space - Restore PCI config space.
7859 * @ipr_cmd: ipr command struct
7860 *
7861 * Description: This function restores the saved PCI config space of
7862 * the adapter, fails all outstanding ops back to the callers, and
7863 * fetches the dump/unit check if applicable to this reset.
7864 *
7865 * Return value:
7866 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
7867 **/
7868static int ipr_reset_restore_cfg_space(struct ipr_cmnd *ipr_cmd)
7869{
7870 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
630ad831 7871 u32 int_reg;
7872
7873 ENTER;
99c965dd 7874 ioa_cfg->pdev->state_saved = true;
1d3c16a8 7875 pci_restore_state(ioa_cfg->pdev);
7876
7877 if (ipr_set_pcix_cmd_reg(ioa_cfg)) {
96d21f00 7878 ipr_cmd->s.ioasa.hdr.ioasc = cpu_to_be32(IPR_IOASC_PCI_ACCESS_ERROR);
7879 return IPR_RC_JOB_CONTINUE;
7880 }
7881
7882 ipr_fail_all_ops(ioa_cfg);
7883
7884 if (ioa_cfg->sis64) {
7885 /* Set the adapter to the correct endian mode. */
7886 writel(IPR_ENDIAN_SWAP_KEY, ioa_cfg->regs.endian_swap_reg);
7887 int_reg = readl(ioa_cfg->regs.endian_swap_reg);
7888 }
7889
1da177e4 7890 if (ioa_cfg->ioa_unit_checked) {
7891 if (ioa_cfg->sis64) {
7892 ipr_cmd->job_step = ipr_reset_get_unit_check_job;
7893 ipr_reset_start_timer(ipr_cmd, IPR_DUMP_DELAY_TIMEOUT);
7894 return IPR_RC_JOB_RETURN;
7895 } else {
7896 ioa_cfg->ioa_unit_checked = 0;
7897 ipr_get_unit_check_buffer(ioa_cfg);
7898 ipr_cmd->job_step = ipr_reset_alert;
7899 ipr_reset_start_timer(ipr_cmd, 0);
7900 return IPR_RC_JOB_RETURN;
7901 }
7902 }
7903
7904 if (ioa_cfg->in_ioa_bringdown) {
7905 ipr_cmd->job_step = ipr_ioa_bringdown_done;
7906 } else {
7907 ipr_cmd->job_step = ipr_reset_enable_ioa;
7908
7909 if (GET_DUMP == ioa_cfg->sdt_state) {
41e9a696 7910 ioa_cfg->sdt_state = READ_DUMP;
4c647e90 7911 ioa_cfg->dump_timeout = 0;
7912 if (ioa_cfg->sis64)
7913 ipr_reset_start_timer(ipr_cmd, IPR_SIS64_DUMP_TIMEOUT);
7914 else
7915 ipr_reset_start_timer(ipr_cmd, IPR_SIS32_DUMP_TIMEOUT);
7916 ipr_cmd->job_step = ipr_reset_wait_for_dump;
7917 schedule_work(&ioa_cfg->work_q);
7918 return IPR_RC_JOB_RETURN;
7919 }
7920 }
7921
438b0331 7922 LEAVE;
7923 return IPR_RC_JOB_CONTINUE;
7924}
7925
7926/**
7927 * ipr_reset_bist_done - BIST has completed on the adapter.
7928 * @ipr_cmd: ipr command struct
7929 *
7930 * Description: Unblock config space and resume the reset process.
7931 *
7932 * Return value:
7933 * IPR_RC_JOB_CONTINUE
7934 **/
7935static int ipr_reset_bist_done(struct ipr_cmnd *ipr_cmd)
7936{
7937 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7938
e619e1a7 7939 ENTER;
7940 if (ioa_cfg->cfg_locked)
7941 pci_cfg_access_unlock(ioa_cfg->pdev);
7942 ioa_cfg->cfg_locked = 0;
7943 ipr_cmd->job_step = ipr_reset_restore_cfg_space;
7944 LEAVE;
7945 return IPR_RC_JOB_CONTINUE;
7946}
7947
7948/**
7949 * ipr_reset_start_bist - Run BIST on the adapter.
7950 * @ipr_cmd: ipr command struct
7951 *
7952 * Description: This function runs BIST on the adapter, then delays 2 seconds.
7953 *
7954 * Return value:
7955 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
7956 **/
7957static int ipr_reset_start_bist(struct ipr_cmnd *ipr_cmd)
7958{
7959 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
cb237ef7 7960 int rc = PCIBIOS_SUCCESSFUL;
7961
7962 ENTER;
7963 if (ioa_cfg->ipr_chip->bist_method == IPR_MMIO)
7964 writel(IPR_UPROCI_SIS64_START_BIST,
7965 ioa_cfg->regs.set_uproc_interrupt_reg32);
7966 else
7967 rc = pci_write_config_byte(ioa_cfg->pdev, PCI_BIST, PCI_BIST_START);
7968
7969 if (rc == PCIBIOS_SUCCESSFUL) {
e619e1a7 7970 ipr_cmd->job_step = ipr_reset_bist_done;
7971 ipr_reset_start_timer(ipr_cmd, IPR_WAIT_FOR_BIST_TIMEOUT);
7972 rc = IPR_RC_JOB_RETURN;
cb237ef7 7973 } else {
7974 if (ioa_cfg->cfg_locked)
7975 pci_cfg_access_unlock(ipr_cmd->ioa_cfg->pdev);
7976 ioa_cfg->cfg_locked = 0;
7977 ipr_cmd->s.ioasa.hdr.ioasc = cpu_to_be32(IPR_IOASC_PCI_ACCESS_ERROR);
7978 rc = IPR_RC_JOB_CONTINUE;
7979 }
7980
7981 LEAVE;
7982 return rc;
7983}
7984
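/*
 * Note that ipr does not poll for BIST completion; it simply waits a
 * fixed IPR_WAIT_FOR_BIST_TIMEOUT before proceeding. For reference,
 * the generic PCI alternative would be to poll the BIST register
 * until the start bit self-clears (sketch only, assuming config
 * space stays accessible during BIST):
 */
static int poll_bist_done(struct pci_dev *pdev, unsigned int tries)
{
	u8 bist;

	while (tries--) {
		if (pci_read_config_byte(pdev, PCI_BIST, &bist))
			return -EIO;
		if (!(bist & PCI_BIST_START))
			return bist & PCI_BIST_CODE_MASK;	/* 0 = pass */
		msleep(100);
	}
	return -ETIMEDOUT;
}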
7985/**
7986 * ipr_reset_slot_reset_done - Clear PCI reset to the adapter
7987 * @ipr_cmd: ipr command struct
7988 *
7989 * Description: This clears PCI reset to the adapter and delays two seconds.
7990 *
7991 * Return value:
7992 * IPR_RC_JOB_RETURN
7993 **/
7994static int ipr_reset_slot_reset_done(struct ipr_cmnd *ipr_cmd)
7995{
7996 ENTER;
7997 pci_set_pcie_reset_state(ipr_cmd->ioa_cfg->pdev, pcie_deassert_reset);
7998 ipr_cmd->job_step = ipr_reset_bist_done;
7999 ipr_reset_start_timer(ipr_cmd, IPR_WAIT_FOR_BIST_TIMEOUT);
8000 LEAVE;
8001 return IPR_RC_JOB_RETURN;
8002}
8003
8004/**
8005 * ipr_reset_slot_reset - Reset the PCI slot of the adapter.
8006 * @ipr_cmd: ipr command struct
8007 *
8008 * Description: This asserts PCI reset to the adapter.
8009 *
8010 * Return value:
8011 * IPR_RC_JOB_RETURN
8012 **/
8013static int ipr_reset_slot_reset(struct ipr_cmnd *ipr_cmd)
8014{
8015 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8016 struct pci_dev *pdev = ioa_cfg->pdev;
8017
8018 ENTER;
8019 pci_set_pcie_reset_state(pdev, pcie_warm_reset);
8020 ipr_cmd->job_step = ipr_reset_slot_reset_done;
8021 ipr_reset_start_timer(ipr_cmd, IPR_PCI_RESET_TIMEOUT);
8022 LEAVE;
8023 return IPR_RC_JOB_RETURN;
8024}
8025
8026/**
8027 * ipr_reset_block_config_access_wait - Wait for permission to block config access
8028 * @ipr_cmd: ipr command struct
8029 *
8030 * Description: This attempts to block config access to the IOA.
8031 *
8032 * Return value:
8033 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
8034 **/
8035static int ipr_reset_block_config_access_wait(struct ipr_cmnd *ipr_cmd)
8036{
8037 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8038 int rc = IPR_RC_JOB_CONTINUE;
8039
8040 if (pci_cfg_access_trylock(ioa_cfg->pdev)) {
8041 ioa_cfg->cfg_locked = 1;
8042 ipr_cmd->job_step = ioa_cfg->reset;
8043 } else {
8044 if (ipr_cmd->u.time_left) {
8045 rc = IPR_RC_JOB_RETURN;
8046 ipr_cmd->u.time_left -= IPR_CHECK_FOR_RESET_TIMEOUT;
8047 ipr_reset_start_timer(ipr_cmd,
8048 IPR_CHECK_FOR_RESET_TIMEOUT);
8049 } else {
8050 ipr_cmd->job_step = ioa_cfg->reset;
8051 dev_err(&ioa_cfg->pdev->dev,
8052 "Timed out waiting to lock config access. Resetting anyway.\n");
8053 }
8054 }
8055
8056 return rc;
8057}
8058
8059/**
8060 * ipr_reset_block_config_access - Block config access to the IOA
8061 * @ipr_cmd: ipr command struct
8062 *
8063 * Description: This attempts to block config access to the IOA
8064 *
8065 * Return value:
8066 * IPR_RC_JOB_CONTINUE
8067 **/
8068static int ipr_reset_block_config_access(struct ipr_cmnd *ipr_cmd)
8069{
8070 ipr_cmd->ioa_cfg->cfg_locked = 0;
8071 ipr_cmd->job_step = ipr_reset_block_config_access_wait;
8072 ipr_cmd->u.time_left = IPR_WAIT_FOR_RESET_TIMEOUT;
8073 return IPR_RC_JOB_CONTINUE;
8074}
8075
8076/**
8077 * ipr_reset_allowed - Query whether or not IOA can be reset
8078 * @ioa_cfg: ioa config struct
8079 *
8080 * Return value:
8081 * 0 if reset not allowed / non-zero if reset is allowed
8082 **/
8083static int ipr_reset_allowed(struct ipr_ioa_cfg *ioa_cfg)
8084{
8085 volatile u32 temp_reg;
8086
8087 temp_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
8088 return ((temp_reg & IPR_PCII_CRITICAL_OPERATION) == 0);
8089}
8090
8091/**
8092 * ipr_reset_wait_to_start_bist - Wait for permission to reset IOA.
8093 * @ipr_cmd: ipr command struct
8094 *
8095 * Description: This function waits for adapter permission to run BIST,
8096 * then runs BIST. If the adapter does not give permission after a
8097 * reasonable time, we will reset the adapter anyway. The impact of
8098 * resetting the adapter without warning the adapter is the risk of
8099 * losing the persistent error log on the adapter. If the adapter is
8100 * reset while it is writing to the flash on the adapter, the flash
8101 * segment will have bad ECC and be zeroed.
8102 *
8103 * Return value:
8104 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
8105 **/
8106static int ipr_reset_wait_to_start_bist(struct ipr_cmnd *ipr_cmd)
8107{
8108 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8109 int rc = IPR_RC_JOB_RETURN;
8110
8111 if (!ipr_reset_allowed(ioa_cfg) && ipr_cmd->u.time_left) {
8112 ipr_cmd->u.time_left -= IPR_CHECK_FOR_RESET_TIMEOUT;
8113 ipr_reset_start_timer(ipr_cmd, IPR_CHECK_FOR_RESET_TIMEOUT);
8114 } else {
fb51ccbf 8115 ipr_cmd->job_step = ipr_reset_block_config_access;
8116 rc = IPR_RC_JOB_CONTINUE;
8117 }
8118
8119 return rc;
8120}
8121
8122/**
8701f185 8123 * ipr_reset_alert - Alert the adapter of a pending reset
8124 * @ipr_cmd: ipr command struct
8125 *
8126 * Description: This function alerts the adapter that it will be reset.
8127 * If memory space is not currently enabled, proceed directly
8128 * to running BIST on the adapter. The timer must always be started
8129 * so we guarantee we do not run BIST from ipr_isr.
8130 *
8131 * Return value:
8132 * IPR_RC_JOB_RETURN
8133 **/
8134static int ipr_reset_alert(struct ipr_cmnd *ipr_cmd)
8135{
8136 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8137 u16 cmd_reg;
8138 int rc;
8139
8140 ENTER;
8141 rc = pci_read_config_word(ioa_cfg->pdev, PCI_COMMAND, &cmd_reg);
8142
8143 if ((rc == PCIBIOS_SUCCESSFUL) && (cmd_reg & PCI_COMMAND_MEMORY)) {
8144 ipr_mask_and_clear_interrupts(ioa_cfg, ~0);
214777ba 8145 writel(IPR_UPROCI_RESET_ALERT, ioa_cfg->regs.set_uproc_interrupt_reg32);
8146 ipr_cmd->job_step = ipr_reset_wait_to_start_bist;
8147 } else {
fb51ccbf 8148 ipr_cmd->job_step = ipr_reset_block_config_access;
8149 }
8150
8151 ipr_cmd->u.time_left = IPR_WAIT_FOR_RESET_TIMEOUT;
8152 ipr_reset_start_timer(ipr_cmd, IPR_CHECK_FOR_RESET_TIMEOUT);
8153
8154 LEAVE;
8155 return IPR_RC_JOB_RETURN;
8156}
8157
8158/**
8159 * ipr_reset_ucode_download_done - Microcode download completion
8160 * @ipr_cmd: ipr command struct
8161 *
8162 * Description: This function unmaps the microcode download buffer.
8163 *
8164 * Return value:
8165 * IPR_RC_JOB_CONTINUE
8166 **/
8167static int ipr_reset_ucode_download_done(struct ipr_cmnd *ipr_cmd)
8168{
8169 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8170 struct ipr_sglist *sglist = ioa_cfg->ucode_sglist;
8171
8172 pci_unmap_sg(ioa_cfg->pdev, sglist->scatterlist,
8173 sglist->num_sg, DMA_TO_DEVICE);
8174
8175 ipr_cmd->job_step = ipr_reset_alert;
8176 return IPR_RC_JOB_CONTINUE;
8177}
8178
8179/**
8180 * ipr_reset_ucode_download - Download microcode to the adapter
8181 * @ipr_cmd: ipr command struct
8182 *
8183 * Description: This function checks to see if there is microcode
8184 * to download to the adapter. If there is, a download is performed.
8185 *
8186 * Return value:
8187 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
8188 **/
8189static int ipr_reset_ucode_download(struct ipr_cmnd *ipr_cmd)
8190{
8191 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8192 struct ipr_sglist *sglist = ioa_cfg->ucode_sglist;
8193
8194 ENTER;
8195 ipr_cmd->job_step = ipr_reset_alert;
8196
8197 if (!sglist)
8198 return IPR_RC_JOB_CONTINUE;
8199
8200 ipr_cmd->ioarcb.res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
8201 ipr_cmd->ioarcb.cmd_pkt.request_type = IPR_RQTYPE_SCSICDB;
8202 ipr_cmd->ioarcb.cmd_pkt.cdb[0] = WRITE_BUFFER;
8203 ipr_cmd->ioarcb.cmd_pkt.cdb[1] = IPR_WR_BUF_DOWNLOAD_AND_SAVE;
8204 ipr_cmd->ioarcb.cmd_pkt.cdb[6] = (sglist->buffer_len & 0xff0000) >> 16;
8205 ipr_cmd->ioarcb.cmd_pkt.cdb[7] = (sglist->buffer_len & 0x00ff00) >> 8;
8206 ipr_cmd->ioarcb.cmd_pkt.cdb[8] = sglist->buffer_len & 0x0000ff;
8207
8208 if (ioa_cfg->sis64)
8209 ipr_build_ucode_ioadl64(ipr_cmd, sglist);
8210 else
8211 ipr_build_ucode_ioadl(ipr_cmd, sglist);
8212 ipr_cmd->job_step = ipr_reset_ucode_download_done;
8213
8214 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout,
8215 IPR_WRITE_BUFFER_TIMEOUT);
8216
8217 LEAVE;
8218 return IPR_RC_JOB_RETURN;
8219}
8220
8221/**
8222 * ipr_reset_shutdown_ioa - Shutdown the adapter
8223 * @ipr_cmd: ipr command struct
8224 *
8225 * Description: This function issues an adapter shutdown of the
8226 * specified type to the specified adapter as part of the
8227 * adapter reset job.
8228 *
8229 * Return value:
8230 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
8231 **/
8232static int ipr_reset_shutdown_ioa(struct ipr_cmnd *ipr_cmd)
8233{
8234 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8235 enum ipr_shutdown_type shutdown_type = ipr_cmd->u.shutdown_type;
8236 unsigned long timeout;
8237 int rc = IPR_RC_JOB_CONTINUE;
8238
8239 ENTER;
8240 if (shutdown_type != IPR_SHUTDOWN_NONE && !ioa_cfg->ioa_is_dead) {
8241 ipr_cmd->ioarcb.res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
8242 ipr_cmd->ioarcb.cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
8243 ipr_cmd->ioarcb.cmd_pkt.cdb[0] = IPR_IOA_SHUTDOWN;
8244 ipr_cmd->ioarcb.cmd_pkt.cdb[1] = shutdown_type;
8245
8246 if (shutdown_type == IPR_SHUTDOWN_NORMAL)
8247 timeout = IPR_SHUTDOWN_TIMEOUT;
8248 else if (shutdown_type == IPR_SHUTDOWN_PREPARE_FOR_NORMAL)
8249 timeout = IPR_INTERNAL_TIMEOUT;
8250 else if (ioa_cfg->dual_raid && ipr_dual_ioa_raid)
8251 timeout = IPR_DUAL_IOA_ABBR_SHUTDOWN_TO;
1da177e4 8252 else
ac09c349 8253 timeout = IPR_ABBREV_SHUTDOWN_TIMEOUT;
8254
8255 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, timeout);
8256
8257 rc = IPR_RC_JOB_RETURN;
8258 ipr_cmd->job_step = ipr_reset_ucode_download;
8259 } else
8260 ipr_cmd->job_step = ipr_reset_alert;
8261
8262 LEAVE;
8263 return rc;
8264}
8265
8266/**
8267 * ipr_reset_ioa_job - Adapter reset job
8268 * @ipr_cmd: ipr command struct
8269 *
8270 * Description: This function is the job router for the adapter reset job.
8271 *
8272 * Return value:
8273 * none
8274 **/
8275static void ipr_reset_ioa_job(struct ipr_cmnd *ipr_cmd)
8276{
8277 u32 rc, ioasc;
8278 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8279
8280 do {
96d21f00 8281 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
8282
8283 if (ioa_cfg->reset_cmd != ipr_cmd) {
8284 /*
8285 * We are doing nested adapter resets and this is
8286 * not the current reset job.
8287 */
05a6538a 8288 list_add_tail(&ipr_cmd->queue,
8289 &ipr_cmd->hrrq->hrrq_free_q);
8290 return;
8291 }
8292
8293 if (IPR_IOASC_SENSE_KEY(ioasc)) {
8294 rc = ipr_cmd->job_step_failed(ipr_cmd);
8295 if (rc == IPR_RC_JOB_RETURN)
8296 return;
8297 }
8298
8299 ipr_reinit_ipr_cmnd(ipr_cmd);
dfed823e 8300 ipr_cmd->job_step_failed = ipr_reset_cmd_failed;
1da177e4 8301 rc = ipr_cmd->job_step(ipr_cmd);
203fa3fe 8302 } while (rc == IPR_RC_JOB_CONTINUE);
8303}
8304
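/*
 * job_step/job_step_failed form a small continuation-passing state
 * machine: a step returns IPR_RC_JOB_CONTINUE to have the loop above
 * run the next step immediately, or IPR_RC_JOB_RETURN after arranging
 * (via a sent command or a timer whose done handler is
 * ipr_reset_ioa_job) for the loop to be re-entered later. Stripped of
 * the driver specifics, the pattern looks like this (illustrative
 * names, not driver code):
 */
enum sketch_rc { SKETCH_JOB_CONTINUE, SKETCH_JOB_RETURN };

struct sketch_job {
	enum sketch_rc (*step)(struct sketch_job *job);	/* next state */
};

static void sketch_run_job(struct sketch_job *job)
{
	/* synchronous steps chain here; async steps call back later */
	while (job->step(job) == SKETCH_JOB_CONTINUE)
		;
}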
8305/**
8306 * _ipr_initiate_ioa_reset - Initiate an adapter reset
8307 * @ioa_cfg: ioa config struct
8308 * @job_step: first job step of reset job
8309 * @shutdown_type: shutdown type
8310 *
8311 * Description: This function will initiate the reset of the given adapter
8312 * starting at the selected job step.
8313 * If the caller needs to wait on the completion of the reset,
8314 * the caller must sleep on the reset_wait_q.
8315 *
8316 * Return value:
8317 * none
8318 **/
8319static void _ipr_initiate_ioa_reset(struct ipr_ioa_cfg *ioa_cfg,
8320 int (*job_step) (struct ipr_cmnd *),
8321 enum ipr_shutdown_type shutdown_type)
8322{
8323 struct ipr_cmnd *ipr_cmd;
8324
8325 ioa_cfg->in_reset_reload = 1;
8326 ioa_cfg->allow_cmds = 0;
8327 scsi_block_requests(ioa_cfg->host);
8328
8329 ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
8330 ioa_cfg->reset_cmd = ipr_cmd;
8331 ipr_cmd->job_step = job_step;
8332 ipr_cmd->u.shutdown_type = shutdown_type;
8333
8334 ipr_reset_ioa_job(ipr_cmd);
8335}
8336
8337/**
8338 * ipr_initiate_ioa_reset - Initiate an adapter reset
8339 * @ioa_cfg: ioa config struct
8340 * @shutdown_type: shutdown type
8341 *
8342 * Description: This function will initiate the reset of the given adapter.
8343 * If the caller needs to wait on the completion of the reset,
8344 * the caller must sleep on the reset_wait_q.
8345 *
8346 * Return value:
8347 * none
8348 **/
8349static void ipr_initiate_ioa_reset(struct ipr_ioa_cfg *ioa_cfg,
8350 enum ipr_shutdown_type shutdown_type)
8351{
8352 if (ioa_cfg->ioa_is_dead)
8353 return;
8354
8355 if (ioa_cfg->in_reset_reload) {
8356 if (ioa_cfg->sdt_state == GET_DUMP)
8357 ioa_cfg->sdt_state = WAIT_FOR_DUMP;
8358 else if (ioa_cfg->sdt_state == READ_DUMP)
8359 ioa_cfg->sdt_state = ABORT_DUMP;
8360 }
8361
8362 if (ioa_cfg->reset_retries++ >= IPR_NUM_RESET_RELOAD_RETRIES) {
8363 dev_err(&ioa_cfg->pdev->dev,
8364 "IOA taken offline - error recovery failed\n");
8365
8366 ioa_cfg->reset_retries = 0;
8367 ioa_cfg->ioa_is_dead = 1;
8368
8369 if (ioa_cfg->in_ioa_bringdown) {
8370 ioa_cfg->reset_cmd = NULL;
8371 ioa_cfg->in_reset_reload = 0;
8372 ipr_fail_all_ops(ioa_cfg);
8373 wake_up_all(&ioa_cfg->reset_wait_q);
8374
8375 spin_unlock_irq(ioa_cfg->host->host_lock);
8376 scsi_unblock_requests(ioa_cfg->host);
8377 spin_lock_irq(ioa_cfg->host->host_lock);
8378 return;
8379 } else {
8380 ioa_cfg->in_ioa_bringdown = 1;
8381 shutdown_type = IPR_SHUTDOWN_NONE;
8382 }
8383 }
8384
8385 _ipr_initiate_ioa_reset(ioa_cfg, ipr_reset_shutdown_ioa,
8386 shutdown_type);
8387}
8388
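/*
 * Callers that need synchronous semantics follow the pattern the
 * comment above describes: initiate the reset under the host lock,
 * then sleep on reset_wait_q until in_reset_reload clears, roughly
 * (condensed from the error-handler paths elsewhere in this file):
 *
 *	spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
 *	ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_ABBREV);
 *	spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
 *	wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
 */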
8389/**
8390 * ipr_reset_freeze - Hold off all I/O activity
8391 * @ipr_cmd: ipr command struct
8392 *
8393 * Description: If the PCI slot is frozen, hold off all I/O
8394 * activity; then, as soon as the slot is available again,
8395 * initiate an adapter reset.
 *
 * Return value:
 * 	IPR_RC_JOB_RETURN
8396 */
8397static int ipr_reset_freeze(struct ipr_cmnd *ipr_cmd)
8398{
8399 /* Disallow new interrupts, avoid loop */
8400 ipr_cmd->ioa_cfg->allow_interrupts = 0;
05a6538a 8401 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_pending_q);
8402 ipr_cmd->done = ipr_reset_ioa_job;
8403 return IPR_RC_JOB_RETURN;
8404}
8405
8406/**
8407 * ipr_pci_frozen - Called when slot has experienced a PCI bus error.
8408 * @pdev: PCI device struct
8409 *
8410 * Description: This routine is called to tell us that the PCI bus
8411 * is down. Can't do anything here, except put the device driver
8412 * into a holding pattern, waiting for the PCI bus to come back.
8413 */
8414static void ipr_pci_frozen(struct pci_dev *pdev)
8415{
8416 unsigned long flags = 0;
8417 struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
8418
8419 spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
8420 _ipr_initiate_ioa_reset(ioa_cfg, ipr_reset_freeze, IPR_SHUTDOWN_NONE);
8421 spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
8422}
8423
8424/**
8425 * ipr_pci_slot_reset - Called when PCI slot has been reset.
8426 * @pdev: PCI device struct
8427 *
8428 * Description: This routine is called by the pci error recovery
8429 * code after the PCI slot has been reset, just before we
8430 * should resume normal operations.
8431 */
8432static pci_ers_result_t ipr_pci_slot_reset(struct pci_dev *pdev)
8433{
8434 unsigned long flags = 0;
8435 struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
8436
8437 spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
8438 if (ioa_cfg->needs_warm_reset)
8439 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
8440 else
8441 _ipr_initiate_ioa_reset(ioa_cfg, ipr_reset_restore_cfg_space,
8442 IPR_SHUTDOWN_NONE);
8443 spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
8444 return PCI_ERS_RESULT_RECOVERED;
8445}
8446
8447/**
8448 * ipr_pci_perm_failure - Called when PCI slot is dead for good.
8449 * @pdev: PCI device struct
8450 *
8451 * Description: This routine is called when the PCI bus has
8452 * permanently failed.
8453 */
8454static void ipr_pci_perm_failure(struct pci_dev *pdev)
8455{
8456 unsigned long flags = 0;
8457 struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
8458
8459 spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
8460 if (ioa_cfg->sdt_state == WAIT_FOR_DUMP)
8461 ioa_cfg->sdt_state = ABORT_DUMP;
8462 ioa_cfg->reset_retries = IPR_NUM_RESET_RELOAD_RETRIES;
8463 ioa_cfg->in_ioa_bringdown = 1;
6ff63896 8464 ioa_cfg->allow_cmds = 0;
8465 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
8466 spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
8467}
8468
/**
 * ipr_pci_error_detected - Called when a PCI error is detected.
 * @pdev: PCI device struct
 * @state: PCI channel state
 *
 * Description: Called when a PCI error is detected.
 *
 * Return value:
 * 	PCI_ERS_RESULT_NEED_RESET or PCI_ERS_RESULT_DISCONNECT
 */
static pci_ers_result_t ipr_pci_error_detected(struct pci_dev *pdev,
					       pci_channel_state_t state)
{
	switch (state) {
	case pci_channel_io_frozen:
		ipr_pci_frozen(pdev);
		return PCI_ERS_RESULT_NEED_RESET;
	case pci_channel_io_perm_failure:
		ipr_pci_perm_failure(pdev);
		return PCI_ERS_RESULT_DISCONNECT;
	default:
		break;
	}
	return PCI_ERS_RESULT_NEED_RESET;
}
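
/*
 * Editorial sketch, not part of ipr: the three handlers above are invoked
 * by the PCI error recovery core through a struct pci_error_handlers (see
 * ipr_err_handler near the bottom of this file).  A minimal driver wiring
 * of the same hooks looks roughly like this; all "demo_" names are
 * hypothetical.
 */
#if 0	/* illustration only */
static pci_ers_result_t demo_error_detected(struct pci_dev *pdev,
					    pci_channel_state_t state)
{
	/* Freeze I/O on any transient error; give up on permanent failure. */
	if (state == pci_channel_io_perm_failure)
		return PCI_ERS_RESULT_DISCONNECT;
	return PCI_ERS_RESULT_NEED_RESET;
}

static const struct pci_error_handlers demo_err_handler = {
	.error_detected	= demo_error_detected,
	.slot_reset	= NULL,	/* would re-init the device after reset */
};
#endif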

/**
 * ipr_probe_ioa_part2 - Initializes IOAs found in ipr_probe_ioa(..)
 * @ioa_cfg: ioa cfg struct
 *
 * Description: This is the second phase of adapter initialization.
 * This function takes care of initializing the adapter to the point
 * where it can accept new commands.
 *
 * Return value:
 * 	0 on success / -EIO on failure
 **/
static int ipr_probe_ioa_part2(struct ipr_ioa_cfg *ioa_cfg)
{
	int rc = 0;
	unsigned long host_lock_flags = 0;

	ENTER;
	spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);
	dev_dbg(&ioa_cfg->pdev->dev, "ioa_cfg adx: 0x%p\n", ioa_cfg);
	if (ioa_cfg->needs_hard_reset) {
		ioa_cfg->needs_hard_reset = 0;
		ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
	} else
		_ipr_initiate_ioa_reset(ioa_cfg, ipr_reset_enable_ioa,
					IPR_SHUTDOWN_NONE);
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);
	wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
	spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);

	if (ioa_cfg->ioa_is_dead) {
		rc = -EIO;
	} else if (ipr_invalid_adapter(ioa_cfg)) {
		if (!ipr_testmode)
			rc = -EIO;

		dev_err(&ioa_cfg->pdev->dev,
			"Adapter not supported in this hardware configuration.\n");
	}

	spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);

	LEAVE;
	return rc;
}
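
/*
 * Editorial sketch, not driver code: part2 above uses the classic
 * "kick off an async state machine, then sleep until it signals" idiom.
 * Stripped to its bones (demo_ names hypothetical):
 */
#if 0	/* illustration only */
static DECLARE_WAIT_QUEUE_HEAD(demo_wait_q);
static int demo_in_progress;

static void demo_start(void)
{
	demo_in_progress = 1;
	/* ... start interrupt-driven work; its completion path does:
	 * demo_in_progress = 0; wake_up_all(&demo_wait_q);
	 */
}

static void demo_wait(void)
{
	wait_event(demo_wait_q, !demo_in_progress);
}
#endif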

/**
 * ipr_free_cmd_blks - Frees command blocks allocated for an adapter
 * @ioa_cfg: ioa config struct
 *
 * Return value:
 * 	none
 **/
static void ipr_free_cmd_blks(struct ipr_ioa_cfg *ioa_cfg)
{
	int i;

	for (i = 0; i < IPR_NUM_CMD_BLKS; i++) {
		if (ioa_cfg->ipr_cmnd_list[i])
			pci_pool_free(ioa_cfg->ipr_cmd_pool,
				      ioa_cfg->ipr_cmnd_list[i],
				      ioa_cfg->ipr_cmnd_list_dma[i]);

		ioa_cfg->ipr_cmnd_list[i] = NULL;
	}

	if (ioa_cfg->ipr_cmd_pool)
		pci_pool_destroy(ioa_cfg->ipr_cmd_pool);

	kfree(ioa_cfg->ipr_cmnd_list);
	kfree(ioa_cfg->ipr_cmnd_list_dma);
	ioa_cfg->ipr_cmnd_list = NULL;
	ioa_cfg->ipr_cmnd_list_dma = NULL;
	ioa_cfg->ipr_cmd_pool = NULL;
}

/**
 * ipr_free_mem - Frees memory allocated for an adapter
 * @ioa_cfg: ioa cfg struct
 *
 * Return value:
 * 	nothing
 **/
static void ipr_free_mem(struct ipr_ioa_cfg *ioa_cfg)
{
	int i;

	kfree(ioa_cfg->res_entries);
	pci_free_consistent(ioa_cfg->pdev, sizeof(struct ipr_misc_cbs),
			    ioa_cfg->vpd_cbs, ioa_cfg->vpd_cbs_dma);
	ipr_free_cmd_blks(ioa_cfg);

	for (i = 0; i < ioa_cfg->hrrq_num; i++)
		pci_free_consistent(ioa_cfg->pdev,
				    sizeof(u32) * ioa_cfg->hrrq[i].size,
				    ioa_cfg->hrrq[i].host_rrq,
				    ioa_cfg->hrrq[i].host_rrq_dma);

	pci_free_consistent(ioa_cfg->pdev, ioa_cfg->cfg_table_size,
			    ioa_cfg->u.cfg_table,
			    ioa_cfg->cfg_table_dma);

	for (i = 0; i < IPR_NUM_HCAMS; i++) {
		pci_free_consistent(ioa_cfg->pdev,
				    sizeof(struct ipr_hostrcb),
				    ioa_cfg->hostrcb[i],
				    ioa_cfg->hostrcb_dma[i]);
	}

	ipr_free_dump(ioa_cfg);
	kfree(ioa_cfg->trace);
}

/**
 * ipr_free_all_resources - Free all allocated resources for an adapter.
 * @ioa_cfg: ioa config struct
 *
 * This function frees all allocated resources for the
 * specified adapter.
 *
 * Return value:
 * 	none
 **/
static void ipr_free_all_resources(struct ipr_ioa_cfg *ioa_cfg)
{
	struct pci_dev *pdev = ioa_cfg->pdev;

	ENTER;
	if (ioa_cfg->intr_flag == IPR_USE_MSI ||
	    ioa_cfg->intr_flag == IPR_USE_MSIX) {
		int i;
		for (i = 0; i < ioa_cfg->nvectors; i++)
			free_irq(ioa_cfg->vectors_info[i].vec,
				 &ioa_cfg->hrrq[i]);
	} else
		free_irq(pdev->irq, &ioa_cfg->hrrq[0]);

	if (ioa_cfg->intr_flag == IPR_USE_MSI)
		pci_disable_msi(pdev);
	else if (ioa_cfg->intr_flag == IPR_USE_MSIX)
		pci_disable_msix(pdev);

	iounmap(ioa_cfg->hdw_dma_regs);
	pci_release_regions(pdev);
	ipr_free_mem(ioa_cfg);
	scsi_host_put(ioa_cfg->host);
	pci_disable_device(pdev);
	LEAVE;
}

/**
 * ipr_alloc_cmd_blks - Allocate command blocks for an adapter
 * @ioa_cfg: ioa config struct
 *
 * Return value:
 * 	0 on success / -ENOMEM on allocation failure
 **/
static int ipr_alloc_cmd_blks(struct ipr_ioa_cfg *ioa_cfg)
{
	struct ipr_cmnd *ipr_cmd;
	struct ipr_ioarcb *ioarcb;
	dma_addr_t dma_addr;
	int i, entries_each_hrrq, hrrq_id = 0;

	ioa_cfg->ipr_cmd_pool = pci_pool_create(IPR_NAME, ioa_cfg->pdev,
						sizeof(struct ipr_cmnd), 512, 0);

	if (!ioa_cfg->ipr_cmd_pool)
		return -ENOMEM;

	ioa_cfg->ipr_cmnd_list = kcalloc(IPR_NUM_CMD_BLKS, sizeof(struct ipr_cmnd *), GFP_KERNEL);
	ioa_cfg->ipr_cmnd_list_dma = kcalloc(IPR_NUM_CMD_BLKS, sizeof(dma_addr_t), GFP_KERNEL);

	if (!ioa_cfg->ipr_cmnd_list || !ioa_cfg->ipr_cmnd_list_dma) {
		ipr_free_cmd_blks(ioa_cfg);
		return -ENOMEM;
	}

	for (i = 0; i < ioa_cfg->hrrq_num; i++) {
		if (ioa_cfg->hrrq_num > 1) {
			if (i == 0) {
				entries_each_hrrq = IPR_NUM_INTERNAL_CMD_BLKS;
				ioa_cfg->hrrq[i].min_cmd_id = 0;
				ioa_cfg->hrrq[i].max_cmd_id =
					(entries_each_hrrq - 1);
			} else {
				entries_each_hrrq =
					IPR_NUM_BASE_CMD_BLKS /
					(ioa_cfg->hrrq_num - 1);
				ioa_cfg->hrrq[i].min_cmd_id =
					IPR_NUM_INTERNAL_CMD_BLKS +
					(i - 1) * entries_each_hrrq;
				ioa_cfg->hrrq[i].max_cmd_id =
					(IPR_NUM_INTERNAL_CMD_BLKS +
					i * entries_each_hrrq - 1);
			}
		} else {
			entries_each_hrrq = IPR_NUM_CMD_BLKS;
			ioa_cfg->hrrq[i].min_cmd_id = 0;
			ioa_cfg->hrrq[i].max_cmd_id = (entries_each_hrrq - 1);
		}
		ioa_cfg->hrrq[i].size = entries_each_hrrq;
	}

	BUG_ON(ioa_cfg->hrrq_num == 0);

	i = IPR_NUM_CMD_BLKS -
		ioa_cfg->hrrq[ioa_cfg->hrrq_num - 1].max_cmd_id - 1;
	if (i > 0) {
		ioa_cfg->hrrq[ioa_cfg->hrrq_num - 1].size += i;
		ioa_cfg->hrrq[ioa_cfg->hrrq_num - 1].max_cmd_id += i;
	}

	for (i = 0; i < IPR_NUM_CMD_BLKS; i++) {
		ipr_cmd = pci_pool_alloc(ioa_cfg->ipr_cmd_pool, GFP_KERNEL, &dma_addr);

		if (!ipr_cmd) {
			ipr_free_cmd_blks(ioa_cfg);
			return -ENOMEM;
		}

		memset(ipr_cmd, 0, sizeof(*ipr_cmd));
		ioa_cfg->ipr_cmnd_list[i] = ipr_cmd;
		ioa_cfg->ipr_cmnd_list_dma[i] = dma_addr;

		ioarcb = &ipr_cmd->ioarcb;
		ipr_cmd->dma_addr = dma_addr;
		if (ioa_cfg->sis64)
			ioarcb->a.ioarcb_host_pci_addr64 = cpu_to_be64(dma_addr);
		else
			ioarcb->a.ioarcb_host_pci_addr = cpu_to_be32(dma_addr);

		ioarcb->host_response_handle = cpu_to_be32(i << 2);
		if (ioa_cfg->sis64) {
			ioarcb->u.sis64_addr_data.data_ioadl_addr =
				cpu_to_be64(dma_addr + offsetof(struct ipr_cmnd, i.ioadl64));
			ioarcb->u.sis64_addr_data.ioasa_host_pci_addr =
				cpu_to_be64(dma_addr + offsetof(struct ipr_cmnd, s.ioasa64));
		} else {
			ioarcb->write_ioadl_addr =
				cpu_to_be32(dma_addr + offsetof(struct ipr_cmnd, i.ioadl));
			ioarcb->read_ioadl_addr = ioarcb->write_ioadl_addr;
			ioarcb->ioasa_host_pci_addr =
				cpu_to_be32(dma_addr + offsetof(struct ipr_cmnd, s.ioasa));
		}
		ioarcb->ioasa_len = cpu_to_be16(sizeof(struct ipr_ioasa));
		ipr_cmd->cmd_index = i;
		ipr_cmd->ioa_cfg = ioa_cfg;
		ipr_cmd->sense_buffer_dma = dma_addr +
			offsetof(struct ipr_cmnd, sense_buffer);

		ipr_cmd->ioarcb.cmd_pkt.hrrq_id = hrrq_id;
		ipr_cmd->hrrq = &ioa_cfg->hrrq[hrrq_id];
		list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
		if (i >= ioa_cfg->hrrq[hrrq_id].max_cmd_id)
			hrrq_id++;
	}

	return 0;
}
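
/*
 * Editorial worked example (constants are hypothetical, for illustration
 * only): suppose IPR_NUM_CMD_BLKS == 100, IPR_NUM_INTERNAL_CMD_BLKS == 5,
 * IPR_NUM_BASE_CMD_BLKS == 95 and hrrq_num == 4.  Then hrrq[0] (internal
 * commands) gets ids 0..4, and each remaining queue gets 95/3 == 31 ids:
 * hrrq[1] -> 5..35, hrrq[2] -> 36..66, hrrq[3] -> 67..97.  The integer
 * division leaves 100 - 97 - 1 == 2 ids unassigned, so the fix-up above
 * grows the last queue to 67..99 (size 33).  Every command block is then
 * permanently bound to the queue whose [min_cmd_id, max_cmd_id] range
 * contains its index.
 */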

/**
 * ipr_alloc_mem - Allocate memory for an adapter
 * @ioa_cfg: ioa config struct
 *
 * Return value:
 * 	0 on success / non-zero for error
 **/
static int ipr_alloc_mem(struct ipr_ioa_cfg *ioa_cfg)
{
	struct pci_dev *pdev = ioa_cfg->pdev;
	int i, rc = -ENOMEM;

	ENTER;
	ioa_cfg->res_entries = kzalloc(sizeof(struct ipr_resource_entry) *
				       ioa_cfg->max_devs_supported, GFP_KERNEL);

	if (!ioa_cfg->res_entries)
		goto out;

	if (ioa_cfg->sis64) {
		ioa_cfg->target_ids = kzalloc(sizeof(unsigned long) *
					      BITS_TO_LONGS(ioa_cfg->max_devs_supported), GFP_KERNEL);
		ioa_cfg->array_ids = kzalloc(sizeof(unsigned long) *
					     BITS_TO_LONGS(ioa_cfg->max_devs_supported), GFP_KERNEL);
		ioa_cfg->vset_ids = kzalloc(sizeof(unsigned long) *
					    BITS_TO_LONGS(ioa_cfg->max_devs_supported), GFP_KERNEL);

		if (!ioa_cfg->target_ids || !ioa_cfg->array_ids
		    || !ioa_cfg->vset_ids)
			goto out_free_res_entries;
	}

	for (i = 0; i < ioa_cfg->max_devs_supported; i++) {
		list_add_tail(&ioa_cfg->res_entries[i].queue, &ioa_cfg->free_res_q);
		ioa_cfg->res_entries[i].ioa_cfg = ioa_cfg;
	}

	ioa_cfg->vpd_cbs = pci_alloc_consistent(ioa_cfg->pdev,
						sizeof(struct ipr_misc_cbs),
						&ioa_cfg->vpd_cbs_dma);

	if (!ioa_cfg->vpd_cbs)
		goto out_free_res_entries;

	for (i = 0; i < ioa_cfg->hrrq_num; i++) {
		INIT_LIST_HEAD(&ioa_cfg->hrrq[i].hrrq_free_q);
		INIT_LIST_HEAD(&ioa_cfg->hrrq[i].hrrq_pending_q);
	}

	if (ipr_alloc_cmd_blks(ioa_cfg))
		goto out_free_vpd_cbs;

	for (i = 0; i < ioa_cfg->hrrq_num; i++) {
		ioa_cfg->hrrq[i].host_rrq = pci_alloc_consistent(ioa_cfg->pdev,
					sizeof(u32) * ioa_cfg->hrrq[i].size,
					&ioa_cfg->hrrq[i].host_rrq_dma);

		if (!ioa_cfg->hrrq[i].host_rrq) {
			while (--i >= 0)
				pci_free_consistent(pdev,
					sizeof(u32) * ioa_cfg->hrrq[i].size,
					ioa_cfg->hrrq[i].host_rrq,
					ioa_cfg->hrrq[i].host_rrq_dma);
			goto out_ipr_free_cmd_blocks;
		}
		ioa_cfg->hrrq[i].ioa_cfg = ioa_cfg;
	}

	ioa_cfg->u.cfg_table = pci_alloc_consistent(ioa_cfg->pdev,
						    ioa_cfg->cfg_table_size,
						    &ioa_cfg->cfg_table_dma);

	if (!ioa_cfg->u.cfg_table)
		goto out_free_host_rrq;

	for (i = 0; i < IPR_NUM_HCAMS; i++) {
		ioa_cfg->hostrcb[i] = pci_alloc_consistent(ioa_cfg->pdev,
							   sizeof(struct ipr_hostrcb),
							   &ioa_cfg->hostrcb_dma[i]);

		if (!ioa_cfg->hostrcb[i])
			goto out_free_hostrcb_dma;

		ioa_cfg->hostrcb[i]->hostrcb_dma =
			ioa_cfg->hostrcb_dma[i] + offsetof(struct ipr_hostrcb, hcam);
		ioa_cfg->hostrcb[i]->ioa_cfg = ioa_cfg;
		list_add_tail(&ioa_cfg->hostrcb[i]->queue, &ioa_cfg->hostrcb_free_q);
	}

	ioa_cfg->trace = kzalloc(sizeof(struct ipr_trace_entry) *
				 IPR_NUM_TRACE_ENTRIES, GFP_KERNEL);

	if (!ioa_cfg->trace)
		goto out_free_hostrcb_dma;

	rc = 0;
out:
	LEAVE;
	return rc;

out_free_hostrcb_dma:
	while (i-- > 0) {
		pci_free_consistent(pdev, sizeof(struct ipr_hostrcb),
				    ioa_cfg->hostrcb[i],
				    ioa_cfg->hostrcb_dma[i]);
	}
	pci_free_consistent(pdev, ioa_cfg->cfg_table_size,
			    ioa_cfg->u.cfg_table,
			    ioa_cfg->cfg_table_dma);
out_free_host_rrq:
	for (i = 0; i < ioa_cfg->hrrq_num; i++) {
		pci_free_consistent(pdev,
				    sizeof(u32) * ioa_cfg->hrrq[i].size,
				    ioa_cfg->hrrq[i].host_rrq,
				    ioa_cfg->hrrq[i].host_rrq_dma);
	}
out_ipr_free_cmd_blocks:
	ipr_free_cmd_blks(ioa_cfg);
out_free_vpd_cbs:
	pci_free_consistent(pdev, sizeof(struct ipr_misc_cbs),
			    ioa_cfg->vpd_cbs, ioa_cfg->vpd_cbs_dma);
out_free_res_entries:
	kfree(ioa_cfg->res_entries);
	kfree(ioa_cfg->target_ids);
	kfree(ioa_cfg->array_ids);
	kfree(ioa_cfg->vset_ids);
	goto out;
}

/**
 * ipr_initialize_bus_attr - Initialize SCSI bus attributes to default values
 * @ioa_cfg: ioa config struct
 *
 * Return value:
 * 	none
 **/
static void ipr_initialize_bus_attr(struct ipr_ioa_cfg *ioa_cfg)
{
	int i;

	for (i = 0; i < IPR_MAX_NUM_BUSES; i++) {
		ioa_cfg->bus_attr[i].bus = i;
		ioa_cfg->bus_attr[i].qas_enabled = 0;
		ioa_cfg->bus_attr[i].bus_width = IPR_DEFAULT_BUS_WIDTH;
		if (ipr_max_speed < ARRAY_SIZE(ipr_max_bus_speeds))
			ioa_cfg->bus_attr[i].max_xfer_rate = ipr_max_bus_speeds[ipr_max_speed];
		else
			ioa_cfg->bus_attr[i].max_xfer_rate = IPR_U160_SCSI_RATE;
	}
}

/**
 * ipr_init_ioa_cfg - Initialize IOA config struct
 * @ioa_cfg: ioa config struct
 * @host: scsi host struct
 * @pdev: PCI dev struct
 *
 * Return value:
 * 	none
 **/
static void ipr_init_ioa_cfg(struct ipr_ioa_cfg *ioa_cfg,
			     struct Scsi_Host *host, struct pci_dev *pdev)
{
	const struct ipr_interrupt_offsets *p;
	struct ipr_interrupts *t;
	void __iomem *base;

	ioa_cfg->host = host;
	ioa_cfg->pdev = pdev;
	ioa_cfg->log_level = ipr_log_level;
	ioa_cfg->doorbell = IPR_DOORBELL;
	sprintf(ioa_cfg->eye_catcher, IPR_EYECATCHER);
	sprintf(ioa_cfg->trace_start, IPR_TRACE_START_LABEL);
	sprintf(ioa_cfg->cfg_table_start, IPR_CFG_TBL_START);
	sprintf(ioa_cfg->resource_table_label, IPR_RES_TABLE_LABEL);
	sprintf(ioa_cfg->ipr_hcam_label, IPR_HCAM_LABEL);
	sprintf(ioa_cfg->ipr_cmd_label, IPR_CMD_LABEL);

	INIT_LIST_HEAD(&ioa_cfg->hostrcb_free_q);
	INIT_LIST_HEAD(&ioa_cfg->hostrcb_pending_q);
	INIT_LIST_HEAD(&ioa_cfg->free_res_q);
	INIT_LIST_HEAD(&ioa_cfg->used_res_q);
	INIT_WORK(&ioa_cfg->work_q, ipr_worker_thread);
	init_waitqueue_head(&ioa_cfg->reset_wait_q);
	init_waitqueue_head(&ioa_cfg->msi_wait_q);
	ioa_cfg->sdt_state = INACTIVE;

	ipr_initialize_bus_attr(ioa_cfg);
	ioa_cfg->max_devs_supported = ipr_max_devs;

	if (ioa_cfg->sis64) {
		host->max_id = IPR_MAX_SIS64_TARGETS_PER_BUS;
		host->max_lun = IPR_MAX_SIS64_LUNS_PER_TARGET;
		if (ipr_max_devs > IPR_MAX_SIS64_DEVS)
			ioa_cfg->max_devs_supported = IPR_MAX_SIS64_DEVS;
	} else {
		host->max_id = IPR_MAX_NUM_TARGETS_PER_BUS;
		host->max_lun = IPR_MAX_NUM_LUNS_PER_TARGET;
		if (ipr_max_devs > IPR_MAX_PHYSICAL_DEVS)
			ioa_cfg->max_devs_supported = IPR_MAX_PHYSICAL_DEVS;
	}
	host->max_channel = IPR_MAX_BUS_TO_SCAN;
	host->unique_id = host->host_no;
	host->max_cmd_len = IPR_MAX_CDB_LEN;
	host->can_queue = ioa_cfg->max_cmds;
	pci_set_drvdata(pdev, ioa_cfg);

	p = &ioa_cfg->chip_cfg->regs;
	t = &ioa_cfg->regs;
	base = ioa_cfg->hdw_dma_regs;

	t->set_interrupt_mask_reg = base + p->set_interrupt_mask_reg;
	t->clr_interrupt_mask_reg = base + p->clr_interrupt_mask_reg;
	t->clr_interrupt_mask_reg32 = base + p->clr_interrupt_mask_reg32;
	t->sense_interrupt_mask_reg = base + p->sense_interrupt_mask_reg;
	t->sense_interrupt_mask_reg32 = base + p->sense_interrupt_mask_reg32;
	t->clr_interrupt_reg = base + p->clr_interrupt_reg;
	t->clr_interrupt_reg32 = base + p->clr_interrupt_reg32;
	t->sense_interrupt_reg = base + p->sense_interrupt_reg;
	t->sense_interrupt_reg32 = base + p->sense_interrupt_reg32;
	t->ioarrin_reg = base + p->ioarrin_reg;
	t->sense_uproc_interrupt_reg = base + p->sense_uproc_interrupt_reg;
	t->sense_uproc_interrupt_reg32 = base + p->sense_uproc_interrupt_reg32;
	t->set_uproc_interrupt_reg = base + p->set_uproc_interrupt_reg;
	t->set_uproc_interrupt_reg32 = base + p->set_uproc_interrupt_reg32;
	t->clr_uproc_interrupt_reg = base + p->clr_uproc_interrupt_reg;
	t->clr_uproc_interrupt_reg32 = base + p->clr_uproc_interrupt_reg32;

	if (ioa_cfg->sis64) {
		t->init_feedback_reg = base + p->init_feedback_reg;
		t->dump_addr_reg = base + p->dump_addr_reg;
		t->dump_data_reg = base + p->dump_data_reg;
		t->endian_swap_reg = base + p->endian_swap_reg;
	}
}
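
/*
 * Editorial sketch, not driver code: the register block above is the usual
 * "per-chip offset table + ioremap'd base" idiom, which lets one driver
 * binary serve several register layouts.  In miniature (demo_ names
 * hypothetical):
 */
#if 0	/* illustration only */
struct demo_reg_offsets { unsigned long doorbell; };
struct demo_regs { void __iomem *doorbell; };

static void demo_map_regs(struct demo_regs *t,
			  const struct demo_reg_offsets *p,
			  void __iomem *base)
{
	t->doorbell = base + p->doorbell;	/* one line per register */
}
#endif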

/**
 * ipr_get_chip_info - Find adapter chip information
 * @dev_id: PCI device id struct
 *
 * Return value:
 * 	ptr to chip information on success / NULL on failure
 **/
static const struct ipr_chip_t *
ipr_get_chip_info(const struct pci_device_id *dev_id)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(ipr_chip); i++)
		if (ipr_chip[i].vendor == dev_id->vendor &&
		    ipr_chip[i].device == dev_id->device)
			return &ipr_chip[i];
	return NULL;
}

static int ipr_enable_msix(struct ipr_ioa_cfg *ioa_cfg)
{
	struct msix_entry entries[IPR_MAX_MSIX_VECTORS];
	int i, err, vectors;

	for (i = 0; i < ARRAY_SIZE(entries); ++i)
		entries[i].entry = i;

	vectors = ipr_number_of_msix;

	while ((err = pci_enable_msix(ioa_cfg->pdev, entries, vectors)) > 0)
		vectors = err;

	if (err < 0) {
		pci_disable_msix(ioa_cfg->pdev);
		return err;
	}

	if (!err) {
		for (i = 0; i < vectors; i++)
			ioa_cfg->vectors_info[i].vec = entries[i].vector;
		ioa_cfg->nvectors = vectors;
	}

	return err;
}
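
/*
 * Editorial note, not driver code: both ipr_enable_msix() above and
 * ipr_enable_msi() below rely on the tri-state contract of the old
 * pci_enable_msix()/pci_enable_msi_block() interfaces: 0 means success,
 * a negative value means hard failure, and a positive value means "only
 * this many vectors are available".  A worked trace with hypothetical
 * numbers: request 16 -> returns 8 (retry with 8) -> returns 0 (done,
 * nvectors == 8).  The while loops implement exactly that retry.
 */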

static int ipr_enable_msi(struct ipr_ioa_cfg *ioa_cfg)
{
	int i, err, vectors;

	vectors = ipr_number_of_msix;

	while ((err = pci_enable_msi_block(ioa_cfg->pdev, vectors)) > 0)
		vectors = err;

	if (err < 0) {
		pci_disable_msi(ioa_cfg->pdev);
		return err;
	}

	if (!err) {
		for (i = 0; i < vectors; i++)
			ioa_cfg->vectors_info[i].vec = ioa_cfg->pdev->irq + i;
		ioa_cfg->nvectors = vectors;
	}

	return err;
}

static void name_msi_vectors(struct ipr_ioa_cfg *ioa_cfg)
{
	int vec_idx, n = sizeof(ioa_cfg->vectors_info[0].desc) - 1;

	for (vec_idx = 0; vec_idx < ioa_cfg->nvectors; vec_idx++) {
		snprintf(ioa_cfg->vectors_info[vec_idx].desc, n,
			 "host%d-%d", ioa_cfg->host->host_no, vec_idx);
		ioa_cfg->vectors_info[vec_idx].
			desc[strlen(ioa_cfg->vectors_info[vec_idx].desc)] = 0;
	}
}
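
/*
 * Editorial note: the names built above are what request_irq() registers,
 * so with, say, host number 0 and four vectors (hypothetical values) the
 * interrupts show up in /proc/interrupts as "host0-0" .. "host0-3".
 */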

static int ipr_request_other_msi_irqs(struct ipr_ioa_cfg *ioa_cfg)
{
	int i, rc;

	for (i = 1; i < ioa_cfg->nvectors; i++) {
		rc = request_irq(ioa_cfg->vectors_info[i].vec,
				 ipr_isr_mhrrq,
				 0,
				 ioa_cfg->vectors_info[i].desc,
				 &ioa_cfg->hrrq[i]);
		if (rc) {
			while (--i >= 0)
				free_irq(ioa_cfg->vectors_info[i].vec,
					 &ioa_cfg->hrrq[i]);
			return rc;
		}
	}
	return 0;
}

/**
 * ipr_test_intr - Handle the interrupt generated in ipr_test_msi().
 * @irq: interrupt number
 * @devp: pointer to ioa config struct
 *
 * Description: Simply set the msi_received flag to 1 indicating that
 * Message Signaled Interrupts are supported.
 *
 * Return value:
 * 	IRQ_HANDLED
 **/
static irqreturn_t ipr_test_intr(int irq, void *devp)
{
	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)devp;
	unsigned long lock_flags = 0;
	irqreturn_t rc = IRQ_HANDLED;

	dev_info(&ioa_cfg->pdev->dev, "Received IRQ : %d\n", irq);
	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);

	ioa_cfg->msi_received = 1;
	wake_up(&ioa_cfg->msi_wait_q);

	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
	return rc;
}

/**
 * ipr_test_msi - Test for Message Signaled Interrupt (MSI) support.
 * @ioa_cfg: ioa config struct
 * @pdev: PCI device struct
 *
 * Description: The return value from pci_enable_msi() cannot always be
 * trusted. This routine sets up and initiates a test interrupt to determine
 * if the interrupt is received via the ipr_test_intr() service routine.
 * If the test fails, the driver will fall back to LSI.
 *
 * Return value:
 * 	0 on success / non-zero on failure
 **/
static int ipr_test_msi(struct ipr_ioa_cfg *ioa_cfg, struct pci_dev *pdev)
{
	int rc;
	volatile u32 int_reg;
	unsigned long lock_flags = 0;

	ENTER;

	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	init_waitqueue_head(&ioa_cfg->msi_wait_q);
	ioa_cfg->msi_received = 0;
	ipr_mask_and_clear_interrupts(ioa_cfg, ~IPR_PCII_IOA_TRANS_TO_OPER);
	writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE, ioa_cfg->regs.clr_interrupt_mask_reg32);
	int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);

	rc = request_irq(pdev->irq, ipr_test_intr, 0, IPR_NAME, ioa_cfg);
	if (rc) {
		dev_err(&pdev->dev, "Can not assign irq %d\n", pdev->irq);
		return rc;
	} else if (ipr_debug)
		dev_info(&pdev->dev, "IRQ assigned: %d\n", pdev->irq);

	writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE, ioa_cfg->regs.sense_interrupt_reg32);
	int_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
	wait_event_timeout(ioa_cfg->msi_wait_q, ioa_cfg->msi_received, HZ);
	ipr_mask_and_clear_interrupts(ioa_cfg, ~IPR_PCII_IOA_TRANS_TO_OPER);

	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	if (!ioa_cfg->msi_received) {
		/* MSI test failed */
		dev_info(&pdev->dev, "MSI test failed. Falling back to LSI.\n");
		rc = -EOPNOTSUPP;
	} else if (ipr_debug)
		dev_info(&pdev->dev, "MSI test succeeded.\n");

	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);

	free_irq(pdev->irq, ioa_cfg);

	LEAVE;

	return rc;
}
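
/*
 * Editorial note: the test above is self-contained: unmask only the debug
 * interrupt, write IPR_PCII_IO_DEBUG_ACKNOWLEDGE to make the adapter fire
 * it, then wait_event_timeout() for up to HZ jiffies (one second).  If
 * ipr_test_intr() never sets msi_received in that window, MSI delivery is
 * assumed broken and the caller drops back to legacy INTx (LSI).
 */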

/**
 * ipr_probe_ioa - Allocates memory and does first stage of initialization
 * @pdev: PCI device struct
 * @dev_id: PCI device id struct
 *
 * Return value:
 * 	0 on success / non-zero on failure
 **/
static int ipr_probe_ioa(struct pci_dev *pdev,
			 const struct pci_device_id *dev_id)
{
	struct ipr_ioa_cfg *ioa_cfg;
	struct Scsi_Host *host;
	unsigned long ipr_regs_pci;
	void __iomem *ipr_regs;
	int rc = PCIBIOS_SUCCESSFUL;
	volatile u32 mask, uproc, interrupts;

	ENTER;

	if ((rc = pci_enable_device(pdev))) {
		dev_err(&pdev->dev, "Cannot enable adapter\n");
		goto out;
	}

	dev_info(&pdev->dev, "Found IOA with IRQ: %d\n", pdev->irq);

	host = scsi_host_alloc(&driver_template, sizeof(*ioa_cfg));

	if (!host) {
		dev_err(&pdev->dev, "call to scsi_host_alloc failed!\n");
		rc = -ENOMEM;
		goto out_disable;
	}

	ioa_cfg = (struct ipr_ioa_cfg *)host->hostdata;
	memset(ioa_cfg, 0, sizeof(struct ipr_ioa_cfg));
	ata_host_init(&ioa_cfg->ata_host, &pdev->dev, &ipr_sata_ops);

	ioa_cfg->ipr_chip = ipr_get_chip_info(dev_id);

	if (!ioa_cfg->ipr_chip) {
		dev_err(&pdev->dev, "Unknown adapter chipset 0x%04X 0x%04X\n",
			dev_id->vendor, dev_id->device);
		rc = -EINVAL;
		goto out_scsi_host_put;
	}

	/* set SIS 32 or SIS 64 */
	ioa_cfg->sis64 = ioa_cfg->ipr_chip->sis_type == IPR_SIS64 ? 1 : 0;
	ioa_cfg->chip_cfg = ioa_cfg->ipr_chip->cfg;
	ioa_cfg->clear_isr = ioa_cfg->chip_cfg->clear_isr;
	ioa_cfg->max_cmds = ioa_cfg->chip_cfg->max_cmds;

	if (ipr_transop_timeout)
		ioa_cfg->transop_timeout = ipr_transop_timeout;
	else if (dev_id->driver_data & IPR_USE_LONG_TRANSOP_TIMEOUT)
		ioa_cfg->transop_timeout = IPR_LONG_OPERATIONAL_TIMEOUT;
	else
		ioa_cfg->transop_timeout = IPR_OPERATIONAL_TIMEOUT;

	ioa_cfg->revid = pdev->revision;

	ipr_regs_pci = pci_resource_start(pdev, 0);

	rc = pci_request_regions(pdev, IPR_NAME);
	if (rc < 0) {
		dev_err(&pdev->dev,
			"Couldn't register memory range of registers\n");
		goto out_scsi_host_put;
	}

	ipr_regs = pci_ioremap_bar(pdev, 0);

	if (!ipr_regs) {
		dev_err(&pdev->dev,
			"Couldn't map memory range of registers\n");
		rc = -ENOMEM;
		goto out_release_regions;
	}

	ioa_cfg->hdw_dma_regs = ipr_regs;
	ioa_cfg->hdw_dma_regs_pci = ipr_regs_pci;
	ioa_cfg->ioa_mailbox = ioa_cfg->chip_cfg->mailbox + ipr_regs;

	ipr_init_ioa_cfg(ioa_cfg, host, pdev);

	pci_set_master(pdev);

	if (ioa_cfg->sis64) {
		rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
		if (rc < 0) {
			dev_dbg(&pdev->dev, "Failed to set 64 bit PCI DMA mask\n");
			rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
		}
	} else
		rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));

	if (rc < 0) {
		dev_err(&pdev->dev, "Failed to set PCI DMA mask\n");
		goto cleanup_nomem;
	}

	rc = pci_write_config_byte(pdev, PCI_CACHE_LINE_SIZE,
				   ioa_cfg->chip_cfg->cache_line_size);

	if (rc != PCIBIOS_SUCCESSFUL) {
		dev_err(&pdev->dev, "Write of cache line size failed\n");
		rc = -EIO;
		goto cleanup_nomem;
	}

	if (ipr_number_of_msix > IPR_MAX_MSIX_VECTORS) {
		dev_err(&pdev->dev, "The max number of MSIX is %d\n",
			IPR_MAX_MSIX_VECTORS);
		ipr_number_of_msix = IPR_MAX_MSIX_VECTORS;
	}

	if (ioa_cfg->ipr_chip->intr_type == IPR_USE_MSI &&
	    ipr_enable_msix(ioa_cfg) == 0)
		ioa_cfg->intr_flag = IPR_USE_MSIX;
	else if (ioa_cfg->ipr_chip->intr_type == IPR_USE_MSI &&
		 ipr_enable_msi(ioa_cfg) == 0)
		ioa_cfg->intr_flag = IPR_USE_MSI;
	else {
		ioa_cfg->intr_flag = IPR_USE_LSI;
		ioa_cfg->nvectors = 1;
		dev_info(&pdev->dev, "Cannot enable MSI.\n");
	}

	if (ioa_cfg->intr_flag == IPR_USE_MSI ||
	    ioa_cfg->intr_flag == IPR_USE_MSIX) {
		rc = ipr_test_msi(ioa_cfg, pdev);
		if (rc == -EOPNOTSUPP) {
			if (ioa_cfg->intr_flag == IPR_USE_MSI) {
				ioa_cfg->intr_flag &= ~IPR_USE_MSI;
				pci_disable_msi(pdev);
			} else if (ioa_cfg->intr_flag == IPR_USE_MSIX) {
				ioa_cfg->intr_flag &= ~IPR_USE_MSIX;
				pci_disable_msix(pdev);
			}

			ioa_cfg->intr_flag = IPR_USE_LSI;
			ioa_cfg->nvectors = 1;
		} else if (rc)
			goto out_msi_disable;
		else {
			if (ioa_cfg->intr_flag == IPR_USE_MSI)
				dev_info(&pdev->dev,
					 "Request for %d MSIs succeeded with starting IRQ: %d\n",
					 ioa_cfg->nvectors, pdev->irq);
			else if (ioa_cfg->intr_flag == IPR_USE_MSIX)
				dev_info(&pdev->dev,
					 "Request for %d MSIXs succeeded.\n",
					 ioa_cfg->nvectors);
		}
	}

	ioa_cfg->hrrq_num = min3(ioa_cfg->nvectors,
				 (unsigned int)num_online_cpus(),
				 (unsigned int)IPR_MAX_HRRQ_NUM);

	/* Save away PCI config space for use following IOA reset */
	rc = pci_save_state(pdev);

	if (rc != PCIBIOS_SUCCESSFUL) {
		dev_err(&pdev->dev, "Failed to save PCI config space\n");
		rc = -EIO;
		goto out_msi_disable;
	}

	if ((rc = ipr_save_pcix_cmd_reg(ioa_cfg)))
		goto out_msi_disable;

	if ((rc = ipr_set_pcix_cmd_reg(ioa_cfg)))
		goto out_msi_disable;

	if (ioa_cfg->sis64)
		ioa_cfg->cfg_table_size = (sizeof(struct ipr_config_table_hdr64)
					   + ((sizeof(struct ipr_config_table_entry64)
					       * ioa_cfg->max_devs_supported)));
	else
		ioa_cfg->cfg_table_size = (sizeof(struct ipr_config_table_hdr)
					   + ((sizeof(struct ipr_config_table_entry)
					       * ioa_cfg->max_devs_supported)));

	rc = ipr_alloc_mem(ioa_cfg);
	if (rc < 0) {
		dev_err(&pdev->dev,
			"Couldn't allocate enough memory for device driver!\n");
		goto out_msi_disable;
	}

	/*
	 * If HRRQ updated interrupt is not masked, or reset alert is set,
	 * the card is in an unknown state and needs a hard reset
	 */
	mask = readl(ioa_cfg->regs.sense_interrupt_mask_reg32);
	interrupts = readl(ioa_cfg->regs.sense_interrupt_reg32);
	uproc = readl(ioa_cfg->regs.sense_uproc_interrupt_reg32);
	if ((mask & IPR_PCII_HRRQ_UPDATED) == 0 || (uproc & IPR_UPROCI_RESET_ALERT))
		ioa_cfg->needs_hard_reset = 1;
	if ((interrupts & IPR_PCII_ERROR_INTERRUPTS) || reset_devices)
		ioa_cfg->needs_hard_reset = 1;
	if (interrupts & IPR_PCII_IOA_UNIT_CHECKED)
		ioa_cfg->ioa_unit_checked = 1;

	ipr_mask_and_clear_interrupts(ioa_cfg, ~IPR_PCII_IOA_TRANS_TO_OPER);

	if (ioa_cfg->intr_flag == IPR_USE_MSI
	    || ioa_cfg->intr_flag == IPR_USE_MSIX) {
		name_msi_vectors(ioa_cfg);
		rc = request_irq(ioa_cfg->vectors_info[0].vec, ipr_isr,
				 0,
				 ioa_cfg->vectors_info[0].desc,
				 &ioa_cfg->hrrq[0]);
		if (!rc)
			rc = ipr_request_other_msi_irqs(ioa_cfg);
	} else {
		rc = request_irq(pdev->irq, ipr_isr,
				 IRQF_SHARED,
				 IPR_NAME, &ioa_cfg->hrrq[0]);
	}
	if (rc) {
		dev_err(&pdev->dev, "Couldn't register IRQ %d! rc=%d\n",
			pdev->irq, rc);
		goto cleanup_nolog;
	}

	if ((dev_id->driver_data & IPR_USE_PCI_WARM_RESET) ||
	    (dev_id->device == PCI_DEVICE_ID_IBM_OBSIDIAN_E && !ioa_cfg->revid)) {
		ioa_cfg->needs_warm_reset = 1;
		ioa_cfg->reset = ipr_reset_slot_reset;
	} else
		ioa_cfg->reset = ipr_reset_start_bist;

	spin_lock(&ipr_driver_lock);
	list_add_tail(&ioa_cfg->queue, &ipr_ioa_head);
	spin_unlock(&ipr_driver_lock);

	LEAVE;
out:
	return rc;

cleanup_nolog:
	ipr_free_mem(ioa_cfg);
out_msi_disable:
	if (ioa_cfg->intr_flag == IPR_USE_MSI)
		pci_disable_msi(pdev);
	else if (ioa_cfg->intr_flag == IPR_USE_MSIX)
		pci_disable_msix(pdev);
cleanup_nomem:
	iounmap(ipr_regs);
out_release_regions:
	pci_release_regions(pdev);
out_scsi_host_put:
	scsi_host_put(host);
out_disable:
	pci_disable_device(pdev);
	goto out;
}

/**
 * ipr_scan_vsets - Scans for VSET devices
 * @ioa_cfg: ioa config struct
 *
 * Description: Since the VSET resources do not follow SAM in that we can have
 * sparse LUNs with no LUN 0, we have to scan for these ourselves.
 *
 * Return value:
 * 	none
 **/
static void ipr_scan_vsets(struct ipr_ioa_cfg *ioa_cfg)
{
	int target, lun;

	for (target = 0; target < IPR_MAX_NUM_TARGETS_PER_BUS; target++)
		for (lun = 0; lun < IPR_MAX_NUM_VSET_LUNS_PER_TARGET; lun++)
			scsi_add_device(ioa_cfg->host, IPR_VSET_BUS, target, lun);
}
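
/*
 * Editorial note: a normal scsi_scan_host() probes LUN 0 first and uses it
 * to discover the rest of a target, so a VSET that exists only at, say,
 * LUN 3 (hypothetical) would never be found.  Calling scsi_add_device()
 * for every (target, lun) pair on the VSET bus sidesteps that; probes of
 * nonexistent devices simply fail and are dropped.
 */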

/**
 * ipr_initiate_ioa_bringdown - Bring down an adapter
 * @ioa_cfg: ioa config struct
 * @shutdown_type: shutdown type
 *
 * Description: This function will initiate bringing down the adapter.
 * This consists of issuing an IOA shutdown to the adapter
 * to flush the cache, and running BIST.
 * If the caller needs to wait on the completion of the reset,
 * the caller must sleep on the reset_wait_q.
 *
 * Return value:
 * 	none
 **/
static void ipr_initiate_ioa_bringdown(struct ipr_ioa_cfg *ioa_cfg,
				       enum ipr_shutdown_type shutdown_type)
{
	ENTER;
	if (ioa_cfg->sdt_state == WAIT_FOR_DUMP)
		ioa_cfg->sdt_state = ABORT_DUMP;
	ioa_cfg->reset_retries = 0;
	ioa_cfg->in_ioa_bringdown = 1;
	ipr_initiate_ioa_reset(ioa_cfg, shutdown_type);
	LEAVE;
}

/**
 * __ipr_remove - Remove a single adapter
 * @pdev: pci device struct
 *
 * Adapter hot plug remove entry point.
 *
 * Return value:
 * 	none
 **/
static void __ipr_remove(struct pci_dev *pdev)
{
	unsigned long host_lock_flags = 0;
	struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
	ENTER;

	spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);
	while (ioa_cfg->in_reset_reload) {
		spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);
		wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
		spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);
	}

	ipr_initiate_ioa_bringdown(ioa_cfg, IPR_SHUTDOWN_NORMAL);

	spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);
	wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
	flush_work(&ioa_cfg->work_q);
	spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);

	spin_lock(&ipr_driver_lock);
	list_del(&ioa_cfg->queue);
	spin_unlock(&ipr_driver_lock);

	if (ioa_cfg->sdt_state == ABORT_DUMP)
		ioa_cfg->sdt_state = WAIT_FOR_DUMP;
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);

	ipr_free_all_resources(ioa_cfg);

	LEAVE;
}

/**
 * ipr_remove - IOA hot plug remove entry point
 * @pdev: pci device struct
 *
 * Adapter hot plug remove entry point.
 *
 * Return value:
 * 	none
 **/
static void ipr_remove(struct pci_dev *pdev)
{
	struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);

	ENTER;

	ipr_remove_trace_file(&ioa_cfg->host->shost_dev.kobj,
			      &ipr_trace_attr);
	ipr_remove_dump_file(&ioa_cfg->host->shost_dev.kobj,
			     &ipr_dump_attr);
	scsi_remove_host(ioa_cfg->host);

	__ipr_remove(pdev);

	LEAVE;
}

/**
 * ipr_probe - Adapter hot plug add entry point
 * @pdev: pci device struct
 * @dev_id: pci device id struct
 *
 * Return value:
 * 	0 on success / non-zero on failure
 **/
static int ipr_probe(struct pci_dev *pdev, const struct pci_device_id *dev_id)
{
	struct ipr_ioa_cfg *ioa_cfg;
	int rc;

	rc = ipr_probe_ioa(pdev, dev_id);

	if (rc)
		return rc;

	ioa_cfg = pci_get_drvdata(pdev);
	rc = ipr_probe_ioa_part2(ioa_cfg);

	if (rc) {
		__ipr_remove(pdev);
		return rc;
	}

	rc = scsi_add_host(ioa_cfg->host, &pdev->dev);

	if (rc) {
		__ipr_remove(pdev);
		return rc;
	}

	rc = ipr_create_trace_file(&ioa_cfg->host->shost_dev.kobj,
				   &ipr_trace_attr);

	if (rc) {
		scsi_remove_host(ioa_cfg->host);
		__ipr_remove(pdev);
		return rc;
	}

	rc = ipr_create_dump_file(&ioa_cfg->host->shost_dev.kobj,
				  &ipr_dump_attr);

	if (rc) {
		ipr_remove_trace_file(&ioa_cfg->host->shost_dev.kobj,
				      &ipr_trace_attr);
		scsi_remove_host(ioa_cfg->host);
		__ipr_remove(pdev);
		return rc;
	}

	scsi_scan_host(ioa_cfg->host);
	ipr_scan_vsets(ioa_cfg);
	scsi_add_device(ioa_cfg->host, IPR_IOA_BUS, IPR_IOA_TARGET, IPR_IOA_LUN);
	ioa_cfg->allow_ml_add_del = 1;
	ioa_cfg->host->max_channel = IPR_VSET_BUS;
	schedule_work(&ioa_cfg->work_q);
	return 0;
}

/**
 * ipr_shutdown - Shutdown handler.
 * @pdev: pci device struct
 *
 * This function is invoked upon system shutdown/reboot. It will issue
 * an adapter shutdown to the adapter to flush the write cache.
 *
 * Return value:
 * 	none
 **/
static void ipr_shutdown(struct pci_dev *pdev)
{
	struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
	unsigned long lock_flags = 0;

	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	while (ioa_cfg->in_reset_reload) {
		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
		wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
		spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	}

	ipr_initiate_ioa_bringdown(ioa_cfg, IPR_SHUTDOWN_NORMAL);
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
	wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
}

static struct pci_device_id ipr_pci_table[] = {
	{ PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_5702, 0, 0, 0 },
	{ PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_5703, 0, 0, 0 },
	{ PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_573D, 0, 0, 0 },
	{ PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_573E, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_571B, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572E, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_571A, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_575B, 0, 0,
		IPR_USE_LONG_TRANSOP_TIMEOUT },
	{ PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_OBSIDIAN,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572A, 0, 0, 0 },
	{ PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_OBSIDIAN,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572B, 0, 0,
		IPR_USE_LONG_TRANSOP_TIMEOUT },
	{ PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_OBSIDIAN,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_575C, 0, 0,
		IPR_USE_LONG_TRANSOP_TIMEOUT },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572A, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572B, 0, 0,
		IPR_USE_LONG_TRANSOP_TIMEOUT },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_575C, 0, 0,
		IPR_USE_LONG_TRANSOP_TIMEOUT },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_574E, 0, 0,
		IPR_USE_LONG_TRANSOP_TIMEOUT },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B3, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57CC, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B7, 0, 0,
		IPR_USE_LONG_TRANSOP_TIMEOUT | IPR_USE_PCI_WARM_RESET },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_SNIPE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_2780, 0, 0, 0 },
	{ PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_SCAMP,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_571E, 0, 0, 0 },
	{ PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_SCAMP,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_571F, 0, 0,
		IPR_USE_LONG_TRANSOP_TIMEOUT },
	{ PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_SCAMP,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572F, 0, 0,
		IPR_USE_LONG_TRANSOP_TIMEOUT },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B5, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_574D, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B2, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57C0, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57C3, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57C4, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B4, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B1, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57C6, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57C8, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57CE, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57D5, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57D6, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57D7, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57D8, 0, 0, 0 },
	{ }
};
MODULE_DEVICE_TABLE(pci, ipr_pci_table);

static const struct pci_error_handlers ipr_err_handler = {
	.error_detected = ipr_pci_error_detected,
	.slot_reset = ipr_pci_slot_reset,
};

static struct pci_driver ipr_driver = {
	.name = IPR_NAME,
	.id_table = ipr_pci_table,
	.probe = ipr_probe,
	.remove = ipr_remove,
	.shutdown = ipr_shutdown,
	.err_handler = &ipr_err_handler,
};

/**
 * ipr_halt_done - Shutdown prepare completion
 * @ipr_cmd: ipr command struct
 *
 * Return value:
 * 	none
 **/
static void ipr_halt_done(struct ipr_cmnd *ipr_cmd)
{
	list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
}

/**
 * ipr_halt - Issue shutdown prepare to all adapters
 * @nb: notifier block
 * @event: system event (SYS_RESTART, SYS_HALT or SYS_POWER_OFF)
 * @buf: unused
 *
 * Return value:
 * 	NOTIFY_OK on success / NOTIFY_DONE on failure
 **/
static int ipr_halt(struct notifier_block *nb, ulong event, void *buf)
{
	struct ipr_cmnd *ipr_cmd;
	struct ipr_ioa_cfg *ioa_cfg;
	unsigned long flags = 0;

	if (event != SYS_RESTART && event != SYS_HALT && event != SYS_POWER_OFF)
		return NOTIFY_DONE;

	spin_lock(&ipr_driver_lock);

	list_for_each_entry(ioa_cfg, &ipr_ioa_head, queue) {
		spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
		if (!ioa_cfg->allow_cmds) {
			spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
			continue;
		}

		ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
		ipr_cmd->ioarcb.res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
		ipr_cmd->ioarcb.cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
		ipr_cmd->ioarcb.cmd_pkt.cdb[0] = IPR_IOA_SHUTDOWN;
		ipr_cmd->ioarcb.cmd_pkt.cdb[1] = IPR_SHUTDOWN_PREPARE_FOR_NORMAL;

		ipr_do_req(ipr_cmd, ipr_halt_done, ipr_timeout, IPR_DEVICE_RESET_TIMEOUT);
		spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
	}
	spin_unlock(&ipr_driver_lock);

	return NOTIFY_OK;
}

static struct notifier_block ipr_notifier = {
	.notifier_call = ipr_halt,
};
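
/*
 * Editorial sketch, not driver code: ipr_notifier above is the standard
 * reboot-notifier pattern.  Any kernel code can hook shutdown the same
 * way (demo_ names hypothetical):
 */
#if 0	/* illustration only */
static int demo_reboot(struct notifier_block *nb, unsigned long event,
		       void *unused)
{
	/* quiesce hardware here */
	return NOTIFY_OK;
}

static struct notifier_block demo_nb = { .notifier_call = demo_reboot };
/* register_reboot_notifier(&demo_nb) in module init,
 * unregister_reboot_notifier(&demo_nb) in module exit.
 */
#endif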

/**
 * ipr_init - Module entry point
 *
 * Return value:
 * 	0 on success / negative value on failure
 **/
static int __init ipr_init(void)
{
	ipr_info("IBM Power RAID SCSI Device Driver version: %s %s\n",
		 IPR_DRIVER_VERSION, IPR_DRIVER_DATE);

	register_reboot_notifier(&ipr_notifier);
	return pci_register_driver(&ipr_driver);
}

/**
 * ipr_exit - Module unload
 *
 * Module unload entry point.
 *
 * Return value:
 * 	none
 **/
static void __exit ipr_exit(void)
{
	unregister_reboot_notifier(&ipr_notifier);
	pci_unregister_driver(&ipr_driver);
}

module_init(ipr_init);
module_exit(ipr_exit);